@comment{dissertation/references.bib — reference database. Web-viewer page chrome and commit timestamps removed; BibTeX ignores text outside entries, but this header is kept as an explicit @comment.}
@inproceedings{livescan3d,
author = {{Kowalski}, M. and {Naruniec}, J. and {Daniluk}, M.},
booktitle = {2015 International Conference on 3D Vision},
doi = {10.1109/3DV.2015.43},
keywords = {data acquisition; image reconstruction; image sensors; natural scenes; public domain software; LiveScan3D; 3D data acquisition system; multiple Kinect v2 sensors; free-open source system; live-3D data acquisition; physical configuration; data gathering; object capture; 3D panorama creation; head shape reconstruction; 3D dynamic scene reconstruction; Three-dimensional displays; Sensors; Cameras; Calibration; Servers; Transforms; Computers; Kinect; 3D reconstruction; LiveScan3D; open source},
month = oct,
pages = {318--325},
title = {Livescan3D: A Fast and Inexpensive 3D Data Acquisition System for Multiple Kinect v2 Sensors},
url = {https://ieeexplore.ieee.org/document/7335499},
urldate = {2020-03-27},
year = {2015}
}
@inproceedings{holoportation,
author = {Orts, Sergio and Rhemann, Christoph and Fanello, Sean and Kim, David and Kowdle, Adarsh and Chang, Wayne and Degtyarev, Yury and Davidson, Philip and Khamis, Sameh and Dou, Minsong and Tankovich, Vladimir and Loop, Charles and Cai, Qin and Chou, Philip and Mennicken, Sarah and Valentin, Julien and Kohli, Pushmeet and Pradeep, Vivek and Wang, Shenlong and Izadi, Shahram},
booktitle = {Proceedings of the 29th Annual Symposium on User Interface Software and Technology},
doi = {10.1145/2984511.2984517},
month = oct,
organization = {Microsoft Research},
title = {Holoportation: Virtual 3D Teleportation in Real-time},
url = {https://www.researchgate.net/publication/306544236_Holoportation_Virtual_3D_Teleportation_in_Real-time},
urldate = {2020-03-27},
year = {2016}
}
@article{Immersive-telepresence,
author = {{Fuchs}, H. and {State}, A. and {Bazin}, J.},
doi = {10.1109/MC.2014.185},
issn = {1558-0814},
journal = {Computer},
keywords = {image reconstruction; three-dimensional displays; virtual reality; immersive 3D telepresence; 3D acquisition; 3D reconstruction; 3D display; Three-dimensional displays; Cameras; Image reconstruction; Real-time systems; Stereo image processing; Glass; Solid modeling; 3D telepresence; 3D acquisition; 3D reconstruction; 3D display; computer vision; graphics; visualization; augmented reality; BeingThere Centre},
month = jul,
number = {7},
pages = {46--52},
publisher = {IEEE},
title = {Immersive 3D Telepresence},
url = {https://ieeexplore.ieee.org/document/6861875/},
urldate = {2020-03-27},
volume = {47},
year = {2014}
}
@article{group-to-group-telepresence,
author = {{Beck}, S. and {Kunert}, A. and {Kulik}, A. and {Froehlich}, B.},
doi = {10.1109/TVCG.2013.33},
issn = {2160-9306},
journal = {IEEE Transactions on Visualization and Computer Graphics},
keywords = {image colour analysis; image sensors; solid modelling; stereo image processing; virtual reality; immersive group-to-group telepresence; shared virtual 3D world; coupled projection-based multiuser setups; stereoscopic images; local interaction space; color cameras; registered depth cameras; captured 3D information; virtual user representations; virtual city; world-in-miniature metaphor; Calibration; Cameras; Servers; Streaming media; Image reconstruction; Image color analysis; Virtual reality; Multi-user virtual reality; telepresence; 3D capture.; Computer Graphics; Computer Simulation; Group Processes; Humans; Imaging; Three-Dimensional; Models; Biological; Social Behavior; Telecommunications; User-Computer Interface},
month = apr,
number = {4},
pages = {616--625},
publisher = {IEEE},
title = {Immersive Group-to-Group Telepresence},
url = {https://ieeexplore.ieee.org/document/6479190},
urldate = {2020-03-27},
volume = {19},
year = {2013}
}
@online{marvin-minksy,
author = {Ackerman, Evan and Guizzo, Erico},
month = feb,
organization = {International Society for Presence Research},
title = {Marvin Minsky (1927-2016) and telepresence},
url = {https://ispr.info/2016/02/01/marvin-minsky-1927-2016-and-telepresence},
urldate = {2020-03-27},
year = {2016}
}
@inproceedings{buxton-telepresence,
address = {Toronto, Ontario, Canada},
author = {Buxton, William},
booktitle = {Proceedings of Graphics Interface '92},
doi = {10.20380/GI1992.15},
isbn = {0-9695338-1-0},
issn = {0713-5424},
location = {Vancouver, British Columbia, Canada},
numpages = {7},
pages = {123--129},
publisher = {Canadian Information Processing Society},
series = {GI 1992},
title = {Telepresence: Integrating shared task and person spaces},
url = {https://www.billbuxton.com/TelepShrdSpce.pdf},
urldate = {2020-03-27},
year = {1992}
}
@article{blue-c,
address = {New York, NY, USA},
author = {Gross, Markus and W{\"u}rmlin, Stephan and Naef, Martin and Lamboray, Edouard and Spagno, Christian and Kunz, Andreas and Koller-Meier, Esther and Svoboda, Tomas and {Van Gool}, Luc and Lang, Silke and et al.},
doi = {10.1145/882262.882350},
issn = {0730-0301},
issue_date = {July 2003},
journal = {ACM Trans. Graph.},
keywords = {3D Video; virtual environments; real-time graphics; graphics hardware; spatially immersive displays},
month = jul,
number = {3},
numpages = {9},
pages = {819--827},
publisher = {Association for Computing Machinery},
title = {Blue-c: A Spatially Immersive Display and 3D Video Portal for Telepresence},
url = {https://doi.org/10.1145/882262.882350},
urldate = {2020-03-27},
volume = {22},
year = {2003}
}
@inproceedings{wim,
author = {Stoakley, Richard and Conway, Matthew J. and Pausch, Randy},
booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
doi = {10.1145/223904.223938},
editor = {Katz, Irvin R. and Mack, Robert L. and Marks, Linn and Rosson, Mary Beth and Nielsen, Jakob},
isbn = {0-201-84705-1},
month = may,
pages = {265--272},
publisher = {ACM/Addison-Wesley},
series = {CHI '95},
title = {Virtual Reality on a WIM: Interactive Worlds in Miniature},
url = {https://doi.org/10.1145/223904.223938},
urldate = {2020-03-27},
year = {1995}
}
@article{original-kinect-microsoft,
author = {Zhang, Zhengyou},
doi = {10.1109/MMUL.2012.24},
issn = {1941-0166},
journal = {IEEE MultiMedia},
keywords = {Cameras; Three Dimensional Displays; Sensors; Games; Video Recording; Multimedia; Microsoft Kinect; Human-Computer Interaction; Motion Capture; Computer Vision; Engineering; Computer Science},
language = {eng},
month = feb,
number = {2},
pages = {4--10},
publisher = {IEEE},
title = {Microsoft Kinect Sensor and Its Effect},
url = {https://ieeexplore.ieee.org/document/6190806},
urldate = {2020-03-27},
volume = {19},
year = {2012}
}
@inproceedings{new-kinect,
address = {Gottingen},
author = {Lachat, E and Macher, H and Landes, T and Grussenmeyer, P},
booktitle = {The International Archives of Photogrammetry, Remote Sensing and Spatial Information Sciences},
issn = {16821750},
keywords = {Visual Arts},
language = {eng},
number = {5},
pages = {93--100},
publisher = {Copernicus GmbH},
title = {First Experiences with {Kinect} v2 Sensor for Close Range {3D} Modelling},
url = {https://www.researchgate.net/publication/274352936_First_experiences_with_kinect_V2_sensor_for_close_range_3D_modelling},
urldate = {2020-03-27},
volume = {XL-5/W4},
year = {2015}
}
@article{greenhouse-kinect,
author = {Nissimov, Sharon and Goldberger, Jacob and Alchanatis, Victor},
doi = {10.1016/j.compag.2015.02.001},
issn = {0168-1699},
journal = {Computers and Electronics in Agriculture},
keywords = {Obstacle Detection; Navigation; Kinect Sensor; Rgb-D; Agriculture},
language = {eng},
month = apr,
pages = {104--115},
publisher = {Elsevier B.V},
title = {Obstacle detection in a greenhouse environment using the Kinect sensor},
url = {https://www.sciencedirect.com/science/article/pii/S0168169915000435},
urldate = {2020-03-27},
volume = {113},
year = {2015}
}
@article{ar/vr-construction,
abstract = {Construction is a high hazard industry which involves many factors that are potentially dangerous to workers. Safety has always been advocated by many construction companies, and they have been working hard to make sure their employees are protected from fatalities and injuries. With the advent of Virtual and Augmented Reality (VR/AR), there has been a witnessed trend of capitalizing on sophisticated immersive VR/AR applications to create forgiving environments for visualizing complex workplace situations, building up risk-preventive knowledge and undergoing training. To better understand the state-of-the-art of VR/AR applications in construction safety (VR/AR-CS) and from which to uncover the related issues and propose possible improvements, this paper starts with a review and synthesis of research evidence for several VR/AR prototypes, products and the related training and evaluation paradigms. Predicated upon a wide range of well-acknowledged scholarly journals, this paper comes up with...},
address = {Amsterdam},
author = {Li, Xiao and Yi, Wen and Chi, Hung-Lin and Wang, Xiangyu and Chan, Albert},
doi = {10.1016/j.autcon.2017.11.003},
issn = {0926-5805},
journal = {Automation in Construction},
keywords = {Studies; Augmented Reality; Occupational Safety; Safety Training; Construction Industry; Augmented Reality; Journals; Hard Surfacing; Inspection; Virtual Reality; Occupational Safety; Taxonomy; Hazard Identification; Training},
language = {eng},
month = feb,
pages = {150--162},
publisher = {Elsevier BV},
title = {A critical review of virtual and augmented reality (VR/AR) applications in construction safety},
url = {https://www.sciencedirect.com/science/article/abs/pii/S0926580517309962},
urldate = {2020-03-27},
volume = {86},
year = {2018}
}
@inproceedings{kinectv1/v2-accuracy-precision,
author = {Wasenm{\"u}ller, Oliver and Stricker, Didier},
booktitle = {ACCV Workshops (2)},
doi = {10.1007/978-3-319-54427-4_3},
editor = {Chen, Chu-Song and Lu, Jiwen and Ma, Kai-Kuang},
month = nov,
pages = {34--45},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {Comparison of Kinect V1 and V2 Depth Images in Terms of Accuracy and Precision},
url = {https://link.springer.com/chapter/10.1007/978-3-319-54427-4\_3},
urldate = {2020-03-27},
volume = {10117},
year = {2016}
}
@inproceedings{remixed-reality,
address = {New York, NY, USA},
articleno = {Paper 129},
author = {Lindlbauer, David and Wilson, Andy D.},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
doi = {10.1145/3173574.3173703},
isbn = {9781450356206},
keywords = {augmented reality; remixed reality; virtual reality},
location = {Montreal QC, Canada},
numpages = {13},
publisher = {Association for Computing Machinery},
series = {CHI '18},
title = {Remixed Reality: Manipulating Space and Time in Augmented Reality},
url = {https://doi.org/10.1145/3173574.3173703},
urldate = {2020-03-27},
year = {2018}
}
@inproceedings{velt,
author = {Fender, Andreas and M{\"u}ller, J{\"o}rg},
booktitle = {Proceedings of the 2018 ACM International Conference on Interactive Surfaces and Spaces},
doi = {10.1145/3279778.3279794},
editor = {Koike, Hideki and Ratti, Carlo and Takeuchi, Yuichiro and Fukuchi, Kentaro and Scott, Stacey and Plasencia, Diego Mart{\'\i}nez},
isbn = {978-1-4503-5694-7},
month = nov,
pages = {73--83},
publisher = {ACM},
series = {ISS '18},
title = {Velt: A Framework for Multi RGB-D Camera Systems},
url = {https://doi.org/10.1145/3279778.3279794},
urldate = {2020-03-27},
year = {2018}
}
@inproceedings{roomalive,
abstract = {RoomAlive is a proof-of-concept prototype that transforms any room into an immersive, augmented entertainment experience. Our system enables new interactive projection mapping experiences that dynamically adapts content to any room. Users can touch, shoot, stomp, dodge and steer projected content that seamlessly co-exists with their existing physical environment. The basic building blocks of RoomAlive are projector-depth camera units, which can be combined through a scalable, distributed framework. The projector-depth camera units are individually autocalibrating, self-localizing, and create a unified model of the room with no user intervention. We investigate the design space of gaming experiences that are possible with RoomAlive and explore methods for dynamically mapping content based on room layout and user position. Finally we showcase four experience prototypes that demonstrate the novel interactive experiences that are possible with RoomAlive and discuss the design challenges of adapting any game to any room.},
author = {Jones, Brett and Sodhi, Rajinder and Murdock, Michael and Mehra, Ravish and Benko, Hrvoje and Wilson, Andy and Ofek, Eyal and MacIntyre, Blair and Raghuvanshi, Nikunj and Shapira, Lior},
booktitle = {Proceedings of the 27th Annual ACM Symposium on User Interface Software and Technology},
doi = {10.1145/2642918.2647383},
isbn = {978-1-4503-3069-5},
month = oct,
pages = {637--644},
publisher = {ACM},
series = {UIST '14},
title = {RoomAlive: Magical Experiences Enabled by Scalable, Adaptive Projector-Camera Units},
url = {https://doi.org/10.1145/2642918.2647383},
urldate = {2020-03-27},
year = {2014}
}
@article{kinect-specs,
author = {Jiao, Jichao and Yuan, Libin and Tang, Weihua and Deng, Zhongliang and Wu, Qi},
doi = {10.3390/ijgi6110349},
journal = {ISPRS International Journal of Geo-Information},
month = nov,
pages = {349},
title = {A Post-Rectification Approach of Depth Images of Kinect v2 for 3D Reconstruction of Indoor Scenes},
url = {https://www.researchgate.net/publication/321048476_A_Post-Rectification_Approach_of_Depth_Images_of_Kinect_v2_for_3D_Reconstruction_of_Indoor_Scenes},
urldate = {2020-03-28},
volume = {6},
year = {2017}
}
@article{ICP,
author = {{Besl}, P. J. and {McKay}, N. D.},
doi = {10.1109/34.121791},
issn = {1939-3539},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {computational geometry; convergence of numerical methods; iterative methods; optimisation; pattern recognition; picture processing; 3D shape registration; pattern recognition; point set registration; iterative closest point; geometric entity; mean-square distance metric; convergence; geometric model; Solid modeling; Motion estimation; Iterative closest point algorithm; Iterative algorithms; Testing; Inspection; Shape measurement; Iterative methods; Convergence; Quaternions},
month = feb,
number = {2},
pages = {239--256},
title = {A method for registration of 3-D shapes},
url = {https://ieeexplore.ieee.org/document/121791},
urldate = {2020-03-27},
volume = {14},
year = {1992}
}
@online{ARCore,
author = {Google},
month = mar,
organization = {Google},
title = {Google ARCore},
url = {https://developers.google.com/ar},
urldate = {2020-03-27},
year = {2018}
}
@online{arcore-unity,
author = {Google},
date = {2018-02-23},
organization = {Google},
title = {ARCore Unity SDK},
url = {https://github.com/google-ar/arcore-unity-sdk},
urldate = {2020-03-27}
}
@online{arkit,
author = {Apple},
date = {2017-06-05},
organization = {Apple},
title = {ARKit},
url = {https://developer.apple.com/augmented-reality/arkit},
urldate = {2020-03-27}
}
@online{arfoundation,
author = {Unity},
month = may,
organization = {Unity Technologies},
title = {AR Foundation},
url = {https://unity.com/unity/features/arfoundation},
urldate = {2020-03-27},
year = {2018}
}
@article{reality-virtuality-continuum,
author = {Milgram, Paul and Takemura, Haruo and Utsumi, Akira and Kishino, Fumio},
doi = {10.1117/12.197321},
editor = {Das, Hari},
journal = {Telemanipulator and Telepresence Technologies},
month = jan,
organization = {International Society for Optics and Photonics},
pages = {282--292},
publisher = {SPIE},
title = {Augmented reality: A class of displays on the reality-virtuality continuum},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/2351/0000/Augmented-reality--a-class-of-displays-on-the-reality/10.1117/12.197321.short},
urldate = {2020-03-27},
volume = {2351},
year = {1994}
}
@article{mr-taxonomy,
author = {Milgram, Paul and Kishino, Fumio},
journal = {IEICE Trans. Information Systems},
month = dec,
number = {12},
pages = {1321--1329},
title = {A Taxonomy of Mixed Reality Visual Displays},
url = {https://www.researchgate.net/publication/231514051_A_Taxonomy_of_Mixed_Reality_Visual_Displays},
urldate = {2020-03-28},
volume = {E77-D},
year = {1994}
}
@online{livescan3d-hololens,
author = {{Kowalski}, M. and {Naruniec}, J. and {Daniluk}, M.},
month = jan,
title = {Livescan3D Hololens},
url = {https://github.com/MarekKowalski/LiveScan3D-Hololens},
urldate = {2020-03-30},
year = {2017}
}
@online{pokemonGO,
author = {Niantic},
date = {2016-07-06},
organization = {The Pok{\'e}mon Company},
title = {Pok{\'e}mon GO},
url = {https://pokemongolive.com/en},
urldate = {2020-03-30}
}
@online{all-reality,
author = {Mann, Steve and Furness, Tom and Yuan, Yu and Iorio, Jay and Wang, Zixin},
date = {2018-04-20},
eprint = {1804.08386},
eprinttype = {arXiv},
title = {All Reality: Virtual, Augmented, Mixed (X), Mediated (X, Y), and Multimediated Reality},
url = {https://arxiv.org/abs/1804.08386},
urldate = {2020-03-31}
}
@thesis{spatial-computing,
author = {Greenwold, Simon},
institution = {Massachusetts Institute of Technology},
type = {Master's thesis},
title = {Spatial Computing},
url = {https://acg.media.mit.edu/people/simong/thesis/SpatialComputing.pdf},
urldate = {2020-03-31},
year = {2003}
}
@online{livescan3d-android,
author = {Selinis, Ioannis},
organization = {University of Surrey 5GIC},
title = {LiveScan3D Android},
url = {https://github.com/Sarsoo/LiveScan3D-Unity/tree/13b8a2d92da48eaf294f7e22eb4be2b5897cd186},
urldate = {2020-03-31},
year = {2019}
}
@article{point-cloud-surface,
author = {Berger, Matthew and Tagliasacchi, Andrea and Seversky, Lee and Alliez, Pierre and Guennebaud, Gael and Levine, Joshua and Sharf, Andrei and Silva, Claudio},
doi = {10.1111/cgf.12802},
journal = {Computer Graphics Forum},
number = {1},
pages = {301--329},
pdf = {https://hal.inria.fr/hal-01348404/file/survey-author.pdf},
publisher = {{Wiley}},
title = {A Survey of Surface Reconstruction from Point Clouds},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/cgf.12802},
urldate = {2020-03-31},
volume = {36},
year = {2017}
}
@online{lean-touch,
author = {Wilkes, Carlos},
date = {2019-08-01},
title = {Lean Touch},
url = {https://carloswilkes.com/#LeanTouch},
urldate = {2020-04-11}
}
@online{livescan3d-buffers,
author = {Selinis, Ioannis},
organization = {University of Surrey 5GIC},
title = {LiveScan3D: Buffers \& a non-blocking network},
url = {https://github.com/Sarsoo/LiveScan3D/commit/8ce756cfb29406d5f1202632a638514f0de848aa},
urldate = {2020-04-15},
year = {2020}
}
@online{ema,
author = {Tham, Ming},
month = apr,
organization = {Newcastle University},
title = {Exponentially Weighted Moving Average Filter},
url = {https://web.archive.org/web/20100329135531/http://lorien.ncl.ac.uk/ming/filter/filewma.htm},
urldate = {2020-05-04},
year = {2000}
}
@online{unity,
author = {Unity},
month = jun,
organization = {Unity Technologies},
title = {Unity},
url = {https://unity3d.com},
urldate = {2020-05-05},
year = {2005}
}
@article{visual-hull-laurentini,
author = {Laurentini, Aldo},
doi = {10.1109/34.273735},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
month = feb,
number = {2},
pages = {150--162},
title = {The visual hull concept for silhouette-based image understanding},
url = {https://ieeexplore.ieee.org/abstract/document/273735},
urldate = {2020-05-06},
volume = {16},
year = {1994}
}
@inproceedings{cuda-visual-hull,
abstract = {In this paper we present two efficient GPU-based visual hull computation algorithms. We compare them in terms of performance using image sets of varying size and different voxel resolutions. In addition, we present a real-time 3D reconstruction system which uses the proposed GPU-based reconstruction method to achieve real-time performance (30 fps) using 16 cameras and 4 PCs.},
author = {{Ladikos}, A. and {Benhimane}, S. and {Navab}, N.},
booktitle = {2008 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops},
doi = {10.1109/CVPRW.2008.4563098},
isbn = {978-1-4244-2339-2},
issn = {2160-7508},
keywords = {computational geometry; image reconstruction; image resolution; visual hull computation; real-time 3D reconstruction; CUDA; GPU; image sets; voxel resolutions; real-time performance; Image reconstruction; Cameras; Real time systems; Reconstruction algorithms; Personal communication networks; Shape; Hardware; Image segmentation; Distributed computing; Computer science},
month = jun,
pages = {1--8},
publisher = {IEEE},
title = {Efficient visual hull computation for real-time 3D reconstruction using CUDA},
url = {https://ieeexplore.ieee.org/abstract/document/4563098},
urldate = {2020-05-06},
year = {2008}
}
@article{microsoft-mixed-reality-studios,
address = {New York, NY, USA},
articleno = {69},
author = {Collet, Alvaro and Chuang, Ming and Sweeney, Pat and Gillett, Don and Evseev, Dennis and Calabrese, David and Hoppe, Hugues and Kirk, Adam and Sullivan, Steve},
doi = {10.1145/2766945},
issn = {0730-0301},
issue_date = {August 2015},
journal = {ACM Trans. Graph.},
keywords = {MPEG; multi-view stereo; mesh tracking; 3D video; geometry compression; surface reconstruction},
month = jul,
number = {4},
numpages = {13},
publisher = {Association for Computing Machinery},
title = {High-Quality Streamable Free-Viewpoint Video},
url = {https://doi.org/10.1145/2766945},
urldate = {2020-05-06},
volume = {34},
year = {2015}
}
@inproceedings{hull-correspondences,
author = {{Ahmed}, N. and {Theobalt}, C. and {Rossl}, C. and {Thrun}, S. and {Seidel}, H.},
booktitle = {2008 IEEE Conference on Computer Vision and Pattern Recognition},
doi = {10.1109/CVPR.2008.4587758},
isbn = {978-1-4244-2242-5},
issn = {1063-6919},
month = jun,
pages = {1--8},
title = {Dense correspondence finding for parametrization-free animation reconstruction from video},
url = {https://ieeexplore.ieee.org/abstract/document/4587758},
urldate = {2020-05-06},
year = {2008}
}
@article{surrey-surface-capture,
author = {{Starck}, J. and {Hilton}, A.},
doi = {10.1109/MCG.2007.68},
journal = {IEEE Computer Graphics and Applications},
number = {3},
pages = {21--31},
title = {Surface Capture for Performance-Based Animation},
url = {https://ieeexplore.ieee.org/abstract/document/4178157},
urldate = {2020-05-06},
volume = {27},
year = {2007}
}
@online{structure-sensor,
author = {Occipital},
month = sep,
organization = {Occipital},
title = {Structure Sensor},
url = {https://structure.io/structure-sensor},
urldate = {2020-05-06},
year = {2013}
}
@article{sfs-over-time,
author = {Cheung, Kong Man and Baker, Simon and Kanade, Takeo},
doi = {10.1007/s11263-005-4881-5},
journal = {International Journal of Computer Vision},
keywords = {3D Reconstruction; Shape-From-Silhouette; Visual Hull; Across Time; Stereo; Temporal Alignment; Alignment Ambiguity; Visibility},
month = may,
number = {3},
pages = {221--247},
title = {Shape-From-Silhouette Across Time Part I: Theory and Algorithms},
url = {https://www.ri.cmu.edu/publications/shape-from-silhouette-across-time-part-i-theory-and-algorithms},
urldate = {2020-05-07},
volume = {62},
year = {2005}
}
@inproceedings{laurentini-solids-revolution,
author = {{Laurentini}, A.},
booktitle = {Proceedings of the 11th {IAPR} International Conference on Pattern Recognition},
doi = {10.1109/ICPR.1992.201662},
pages = {720--724},
title = {The visual hull of solids of revolution},
url = {https://ieeexplore.ieee.org/document/201662},
urldate = {2020-05-07},
year = {1992}
}
@phdthesis{sfs-video-cm,
address = {Pittsburgh, PA},
author = {Cheung, Kong Man},
keywords = {Temporal Shape-From-Silhouette; Visual Hull Alignment; Human Kinematic Modeling; Markerless Motion Tracking; Motion Rendering and Transfer},
month = oct,
number = {CMU-RI-TR-03-44},
school = {Carnegie Mellon University},
title = {Visual Hull Construction, Alignment and Refinement for Human Kinematic Modeling, Motion Tracking and Rendering},
url = {https://www.ri.cmu.edu/publications/visual-hull-construction-alignment-and-refinement-for-human-kinematic-modeling-motion-tracking-and-rendering/},
urldate = {2020-05-07},
year = {2003}
}
@article{ar-adrenalectomy,
author = {Lin, Mao-Sheng and Wu, Jungle Chi-Hsiang and Wu, Hurng-Sheng and Liu, Jack Kai-Che},
doi = {10.4103/UROS.UROS_3_18},
journal = {Urological Science},
month = may,
title = {Augmented reality-Assisted single-incision laparoscopic adrenalectomy: Comparison with pure single incision laparoscopic technique},
url = {https://www.researchgate.net/publication/324480263_Augmented_reality-Assisted_single-incision_laparoscopic_adrenalectomy_Comparison_with_pure_single_incision_laparoscopic_technique},
urldate = {2020-05-07},
volume = {29},
year = {2018}
}
@article{ar-anatomy,
abstract = {Although cadavers constitute the gold standard for teaching anatomy to medical and health science students, there are substantial financial, ethical, and supervisory constraints on their use. In addition, although anatomy remains one of the fundamental areas of medical education, universities have decreased the hours allocated to teaching gross anatomy in favor of applied clinical work. The release of virtual (VR) and augmented reality (AR) devices allows learning to occur through hands-on immersive experiences. The aim of this research was to assess whether learning structural anatomy utilizing VR or AR is as effective as tablet-based (TB) applications, and whether these modes allowed enhanced student learning, engagement and performance. Participants (n = 59) were randomly allocated to one of the three learning modes: VR, AR, or TB and completed a lesson on skull anatomy, after which they completed an anatomical knowledge assessment. Student perceptions of each learning mode and any adverse effects experienced were recorded. No significant differences were found between mean assessment scores in VR, AR, or TB. During the lessons however, VR participants were more likely to exhibit adverse effects such as headaches (25\% in VR P < 0.05), dizziness (40\% in VR, P < 0.001), or blurred vision (35\% in VR, P < 0.01). Both VR and AR are as valuable for teaching anatomy as tablet devices, but also promote intrinsic benefits such as increased learner immersion and engagement. These outcomes show great promise for the effective use of virtual and augmented reality as means to supplement lesson content in anatomical education. Anat Sci Educ 10: 549--559. {\copyright} 2017 American Association of Anatomists.},
author = {Moro, Christian and {\v S}tromberga, Zane and Raikos, Athanasios and Stirling, Allan},
doi = {10.1002/ase.1696},
eprint = {https://anatomypubs.onlinelibrary.wiley.com/doi/pdf/10.1002/ase.1696},
journal = {Anatomical Sciences Education},
keywords = {gross anatomy education; health sciences education; undergraduate education; medical education; virtual reality; augmented reality; mixed reality; computer-aided instruction; oculus rift; tablet applications},
number = {6},
pages = {549--559},
title = {The effectiveness of virtual and augmented reality in health sciences and medical anatomy},
url = {https://anatomypubs.onlinelibrary.wiley.com/doi/abs/10.1002/ase.1696},
urldate = {2020-05-07},
volume = {10},
year = {2017}
}
@article{ar-commerce,
abstract = {This study evaluates the effectiveness of augmented reality (AR) as an e-commerce tool using two products --- sunglasses and watches. Study 1 explores the effectiveness of AR by comparing it to a conventional website. The results show that AR provides effective communication benefits by generating greater novelty, immersion, enjoyment, and usefulness, resulting in positive attitudes toward medium and purchase intention, compared to the web-based product presentations. Study 2 compares the paths by which consumers evaluate products through AR versus web with a focus on interactivity and vividness. It is revealed that immersion mediates the relationship between interactivity/vividness and two outcome variables --- usefulness and enjoyment in the AR condition compared to the web condition where no significant paths between interactivity and immersion and between previous media experience and media novelty are found. Participants' subjective opinions about AR are examined through opinion mining to better understand consumer responses to AR.},
author = {Yim, Mark Yi-Cheon and Chu, Shu-Chuan and Sauer, Paul L.},
doi = {10.1016/j.intmar.2017.04.001},
issn = {1094-9968},
journal = {Journal of Interactive Marketing},
keywords = {Augmented reality; Interactivity; Vividness; Immersion; Novelty; Previous media experience},
pages = {89--103},
title = {Is Augmented Reality Technology an Effective Tool for E-commerce? An Interactivity and Vividness Perspective},
url = {http://www.sciencedirect.com/science/article/pii/S1094996817300336},
urldate = {2020-05-07},
volume = {39},
year = {2017}
}
@online{ikea-place,
author = {IKEA},
month = sep,
organization = {Inter IKEA Systems B.V.},
title = {IKEA Place},
url = {https://apps.apple.com/ie/app/ikea-place/id1279244498},
urldate = {2020-05-07},
year = {2017}
}
@article{ar-education,
abstract = {Augmented reality (AR) is an educational medium increasingly accessible to young users such as elementary school and high school students. Although previous research has shown that AR systems have the potential to improve student learning, the educational community remains unclear regarding the educational usefulness of AR and regarding contexts in which this technology is more effective than other educational mediums. This paper addresses these topics by analyzing 26 publications that have previously compared student learning in AR versus non-AR applications. It identifies a list of positive and negative impacts of AR experiences on student learning and highlights factors that are potentially underlying these effects. This set of factors is argued to cause differences in educational effectiveness between AR and other media. Furthermore, based on the analysis, the paper presents a heuristic questionnaire generated for judging the educational potential of AR experiences.},
author = {Radu, Iulian},
doi = {10.1007/s00779-013-0747-y},
issn = {1617-4917},
journal = {Personal and Ubiquitous Computing},
number = {6},
pages = {1533--1543},
title = {Augmented reality in education: a meta-review and cross-media analysis},
url = {https://link.springer.com/article/10.1007/s00779-013-0747-y},
urldate = {2020-05-07},
volume = {18},
year = {2014}
}