% Encoding: UTF-8
@InProceedings{Qiu2016,
author = {Qiu, Weichao and Yuille, Alan},
title = {{UnrealCV}: Connecting Computer Vision to {Unreal Engine}},
booktitle = {Computer Vision -- ECCV 2016 Workshops},
year = {2016},
editor = {Hua, Gang and J{\'e}gou, Herv{\'e}},
pages = {909--916},
address = {Cham},
publisher = {Springer International Publishing},
abstract = {Computer graphics can not only generate synthetic images and ground truth but it also offers the possibility of constructing virtual worlds in which: (i) an agent can perceive, navigate, and take actions guided by AI algorithms, (ii) properties of the worlds can be modified (e.g., material and reflectance), (iii) physical simulations can be performed, and (iv) algorithms can be learnt and evaluated. But creating realistic virtual worlds is not easy. The game industry, however, has spent a lot of effort creating 3D worlds, which a player can interact with. So researchers can build on these resources to create virtual worlds, provided we can access and modify the internal data structures of the games. To enable this we created an open-source plugin UnrealCV (Project website: http://unrealcv.github.io) for a popular game engine Unreal Engine 4 (UE4). We show two applications: (i) a proof of concept image dataset, and (ii) linking Caffe with the virtual world to test deep network algorithms.},
groups = {Engine is open source, Produces photo-realistic images, Interoperability with external software, Engine is well-maintained, Availability of pre-made assets},
isbn = {978-3-319-49409-8},
url = {https://link.springer.com/chapter/10.1007/978-3-319-49409-8_75},
}
@InProceedings{Boyd2017,
author = {Reece A. Boyd and Salvador E. Barbosa},
title = {Reinforcement Learning for All: An Implementation Using {Unreal Engine} Blueprint},
booktitle = {2017 International Conference on Computational Science and Computational Intelligence ({CSCI})},
year = {2017},
pages = {787--792},
month = {Dec},
publisher = {{IEEE}},
abstract = {Game engines, like Unreal, Unity, and Cryengine, provide cutting edge graphics, sophisticated physics modeling, and integrated audio, greatly simplifying game development. These advanced features are often accessible through visual scripting interfaces used in rapid prototyping and by non-programmers. The goal of this research was to demonstrate that these tools can support implementation of artificial intelligence techniques, such as reinforcement learning, that have the potential to yield dynamic characters that are not pre-programmed, but rather learn their behavior via algorithms. Its novelties are the implementation of a Q-Learning bot, created in Unreal Engine's visual scripting tool, known as Blueprint.},
doi = {10.1109/CSCI.2017.136},
groups = {Blueprints visual scripting language},
keywords = {Artificial intelligence;bot learning;visual scripting;reinforcement learning;game technologies},
}
@InProceedings{Shah2018,
author = {Shah, Shital and Dey, Debadeepta and Lovett, Chris and Kapoor, Ashish},
title = {{AirSim}: High-Fidelity Visual and Physical Simulation for Autonomous Vehicles},
booktitle = {Field and Service Robotics},
year = {2018},
editor = {Hutter, Marco and Siegwart, Roland},
pages = {621--635},
address = {Cham},
publisher = {Springer International Publishing},
abstract = {Developing and testing algorithms for autonomous vehicles in real world is an expensive and time consuming process. Also, in order to utilize recent advances in machine intelligence and deep learning we need to collect a large amount of annotated training data in a variety of conditions and environments. We present a new simulator built on Unreal Engine that offers physically and visually realistic simulations for both of these goals. Our simulator includes a physics engine that can operate at a high frequency for real-time hardware-in-the-loop (HITL) simulations with support for popular protocols (e.g. MavLink). The simulator is designed from the ground up to be extensible to accommodate new types of vehicles, hardware platforms and software protocols. In addition, the modular design enables various components to be easily usable independently in other projects. We demonstrate the simulator by first implementing a quadrotor as an autonomous vehicle and then experimentally comparing the software components with real-world flights.},
groups = {Engine is open source, Produces photo-realistic images, Availability of pre-made assets, High-quality physics/collision simulation, Engine is cross-platform},
isbn = {978-3-319-67361-5},
url = {https://link.springer.com/chapter/10.1007/978-3-319-67361-5_40},
}
@InProceedings{Bock2018,
author = {Bock, Marcel and Schreiber, Andreas},
title = {Visualization of Neural Networks in Virtual Reality Using {Unreal Engine}},
booktitle = {Proceedings of the 24th ACM Symposium on Virtual Reality Software and Technology},
year = {2018},
series = {VRST '18},
pages = {132:1--132:2},
address = {New York, NY, USA},
publisher = {ACM},
acmid = {3281605},
articleno = {132},
doi = {10.1145/3281505.3281605},
isbn = {978-1-4503-6086-9},
keywords = {deep learning, explainable ai, neural networks, visualization},
location = {Tokyo, Japan},
numpages = {2},
url = {http://doi.acm.org/10.1145/3281505.3281605},
}
@Article{Bak2018,
author = {Slawomir Bak and Peter Carr and Jean{-}Fran{\c{c}}ois Lalonde},
title = {Domain Adaptation through Synthesis for Unsupervised Person Re-identification},
journal = {CoRR},
year = {2018},
volume = {abs/1804.10094},
archiveprefix = {arXiv},
bibsource = {dblp computer science bibliography, https://dblp.org},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1804-10094},
eprint = {1804.10094},
timestamp = {Mon, 13 Aug 2018 16:47:43 +0200},
url = {http://arxiv.org/abs/1804.10094},
}
@InProceedings{Ktena2015,
author = {S. I. Ktena and W. Abbott and A. A. Faisal},
title = {A virtual reality platform for safe evaluation and training of natural gaze-based wheelchair driving},
booktitle = {2015 7th International IEEE/EMBS Conference on Neural Engineering (NER)},
year = {2015},
pages = {236--239},
month = {April},
abstract = {The importance of ensuring user safety throughout the training and evaluation process of brain-machine interfaces is not to be neglected. In this study, a virtual reality software system was built with the intention to create a safe environment, where the performance of wheelchair control interfaces could be tested and compared. We use this to evaluate our eye tracking input methodology, a promising solution for hands-free wheelchair navigation, because of the abundance of control commands that it offers and its intuitive nature. Natural eye movements have long been considered to reflect cognitive processes and are highly correlated with user intentions. Therefore, the sequence of gaze locations during navigation is recorded and analyzed, in order to search and unveil patterns in saccadic movements. Moreover, this study compares different eye-based solutions that have previously been implemented, and proposes a new, more natural approach. The preliminary results on N = 6 healthy subjects indicate that the proposed free-view solution leads to 18.4% faster completion of the task (440 sec) benchmarked against a naive free-view approach.},
doi = {10.1109/NER.2015.7146603},
groups = {High-quality physics/collision simulation},
issn = {1948-3546},
keywords = {benchmark testing;biomechanics;brain;computer software;electro-oculography;gaze tracking;medical signal processing;neurophysiology;training;user interfaces;virtual reality;vision;wheelchairs;safe evaluation-training;natural gaze-based wheelchair driving;brain-machine interfaces;virtual reality software system;safe environment;wheelchair control interfaces;eye tracking input methodology;hand-free wheelchair navigation;natural eye movements;cognitive processes;user intentions;gaze locations;eye-based solutions;saccadic movements;naive free-view approach;Wheelchairs;Training;Navigation;Virtual reality;Three-dimensional displays;Robots;Engines},
}
@Article{Carrio2018,
author = {Adrian Carrio and Sai Vemprala and Andres Ripoll and Srikanth Saripalli and Pascual Campoy},
title = {Drone Detection Using Depth Maps},
journal = {CoRR},
year = {2018},
volume = {abs/1808.00259},
archiveprefix = {arXiv},
bibsource = {dblp computer science bibliography, https://dblp.org},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1808-00259},
eprint = {1808.00259},
groups = {Produces photo-realistic images},
timestamp = {Sun, 02 Sep 2018 15:01:55 +0200},
url = {http://arxiv.org/abs/1808.00259},
}
@InProceedings{Tremblay2018,
author = {Tremblay, Jonathan and To, Thang and Birchfield, Stan},
title = {{Falling Things}: A Synthetic Dataset for {3D} Object Detection and Pose Estimation},
booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
year = {2018},
month = {June},
url = {https://research.nvidia.com/publication/2018-06_Falling-Things},
}
@InProceedings{Jha2018,
author = {S. Jha and S. S. Banerjee and J. Cyriac and Z. T. Kalbarczyk and R. K. Iyer},
title = {{AVFI}: Fault Injection for Autonomous Vehicles},
booktitle = {2018 48th Annual IEEE/IFIP International Conference on Dependable Systems and Networks Workshops (DSN-W)},
year = {2018},
pages = {55--56},
month = {June},
abstract = {Autonomous vehicle (AV) technology is rapidly becoming a reality on U.S. roads, offering the promise of improvements in traffic management, safety, and the comfort and efficiency of vehicular travel. With this increasing popularity and ubiquitous deployment, resilience has become a critical requirement for public acceptance and adoption. Recent studies into the resilience of AVs have shown that though the AV systems are improving over time, they have not reached human levels of automation. Prior work in this area has studied the safety and resilience of individual components of the AV system (e.g., testing of neural networks powering the perception function). However, methods for holistic end-to-end resilience assessment of AV systems are still non-existent.},
doi = {10.1109/DSN-W.2018.00027},
issn = {2325-6664},
keywords = {neural nets;road safety;road traffic;ubiquitous computing;AVFI;fault injection;autonomous vehicle technology;traffic management;ubiquitous deployment;public acceptance;AV system;holistic end-to-end resilience assessment;vehicular travel efficiency;public adoption;human levels;Resilience;Autonomous vehicles;Neural networks;Cameras;Servers;Engines;Measurement;Reliability;Autonomous Vehicles;Fault Injection},
}
@InProceedings{Sun2017,
author = {Q. Sun and E. Gonzalez and B. Abadines},
title = {A wearable sensor based hand movement rehabilitation and evaluation system},
booktitle = {2017 Eleventh International Conference on Sensing Technology (ICST)},
year = {2017},
pages = {1--4},
month = {Dec},
abstract = {This paper presents a wearable hand movement rehabilitation system for stroke patients. The system is developed based on data glove and keyboard games. Rehabilitation practice is achieved via hand gesture recognition. In this work, the data glove with bending sensors is good for motion data collection during hand movement rehabilitation. The hand animation model, combined with keyboard games, enables the stroke patient under test to see its fingers movements and exercise process. In feedback stage, the rehabilitation evaluation and recommendation are provided based on the recognition of hand gestures. The experimental results have demonstrated a high accuracy on overt gesture recognition and a reasonable accuracy on complex key press gesture recognition.},
doi = {10.1109/ICSensT.2017.8304471},
groups = {Interoperability with external software},
issn = {2156-8073},
keywords = {data gloves;gesture recognition;human computer interaction;medical computing;neurophysiology;patient rehabilitation;hand animation model;motion data collection;bending sensors;hand gesture recognition;rehabilitation practice;keyboard games;data glove;wearable hand movement rehabilitation system;evaluation system;wearable sensor;complex key press gesture recognition;overt gesture recognition;hand gestures;rehabilitation evaluation;fingers movements;stroke patient;Presses;Sensors;Data gloves;Gesture recognition;Animation;Games;Medical treatment},
}
@InProceedings{Bondi2018,
author = {Elizabeth Bondi and Ashish Kapoor and Debadeepta Dey and James Piavis and Shital Shah and Robert Hannaford and Arvind Iyer and Lucas Joppa and Milind Tambe},
title = {Near Real-Time Detection of Poachers from Drones in {AirSim}},
booktitle = {Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, {IJCAI-18}},
year = {2018},
pages = {5814--5816},
month = {July},
publisher = {International Joint Conferences on Artificial Intelligence Organization},
doi = {10.24963/ijcai.2018/847},
url = {https://doi.org/10.24963/ijcai.2018/847},
}
@InProceedings{Griffith2017,
author = {Griffith, Tami and Ablanedo, Jennie and Dwyer, Tabitha},
title = {Leveraging a Virtual Environment to Prepare for School Shootings},
booktitle = {Virtual, Augmented and Mixed Reality},
year = {2017},
editor = {Lackey, Stephanie and Chen, Jessie},
pages = {325--338},
address = {Cham},
publisher = {Springer International Publishing},
abstract = {Active-shooter incidents within a school setting involve a unique subset of active-shooter events. These events tend to have significant differences in duration and outcome to events that occur in other locations, often being resolved before, or when, first-responders arrive on the scene. The frequency and seriousness of these events inspired the US Department of Homeland Security, Science and Technology Directorate's First Responder's Group (DHS S{\&}T FRG) to leverage ongoing work with the US Army Research Laboratory, Human Research and Engineering Directorate, Advanced Training {\&} Simulation Division (ARL HRED ATSD) to establish a prototype virtual school environment to prepare teachers, administrators and staff on how to respond and work with Law Enforcement (LE) in the event of a school shooting. This virtual platform allows school staff and LE to practice various strategies and even supports analysis into how different security measures within the school environment might change the dynamic of an attack and response. The goal is to train affected groups together in advance of an attack to improve coordination and reduce response time and casualties. This paper illustrates design choices for training school teachers, administrators and other staff in a virtual environment in the event of a school shooting. These choices demonstrate unique development strategies related to controlling Artificial Intelligence (AI) through simple user interfaces, managing crowd behaviors and ultimately will include the ability to apply game engine level rules to different buildings or maps.},
doi = {10.1007/978-3-319-57987-0_26},
isbn = {978-3-319-57987-0},
}
@InProceedings{Lerer2016,
author = {Lerer, Adam and Gross, Sam and Fergus, Rob},
title = {Learning Physical Intuition of Block Towers by Example},
booktitle = {Proceedings of the 33rd International Conference on International Conference on Machine Learning - Volume 48},
year = {2016},
series = {ICML'16},
pages = {430--438},
publisher = {JMLR.org},
acmid = {3045437},
groups = {Engine is open source, Produces photo-realistic images, High-quality physics/collision simulation},
location = {New York, NY, USA},
numpages = {9},
url = {http://dl.acm.org/citation.cfm?id=3045390.3045437},
}
@InProceedings{Wu2018,
author = {T. Wu and S. Tseng and C. Lai and C. Ho and Y. Lai},
title = {Navigating Assistance System for Quadcopter with Deep Reinforcement Learning},
booktitle = {2018 1st International Cognitive Cities Conference (IC3)},
year = {2018},
pages = {16--19},
month = {Aug},
abstract = {In this paper, we present a deep reinforcement learning method for quadcopter bypassing the obstacle on the flying path. In the past study, algorithm only control the forward direction about quadcopter. In this letter, we use two function to control quadcopter. One is quadcopter navigating function. It is based on calculating coordination point and find the straight path to goal. The other function is collision avoidance function. It is implemented by deep Q-network model. Both two function will output rotating degree, agent will combine both output and turn direct. Besides, deep Q-network can also make quadcopter fly up and down to bypass the obstacle and arrive at goal. Our experimental result shows that collision rate is 14% after 500 flights. Based on this work, we will train more complex sense and transfer model to real quadcopter.},
doi = {10.1109/IC3.2018.00013},
keywords = {Training;Collision avoidance;Navigation;Atmospheric modeling;Engines;Convolution;deep reinforcement learning;obstacle avoid;quadcopter control},
}
@InProceedings{Lin2016,
author = {Lin, Jenny and Guo, Xingwen and Shao, Jingyu and Jiang, Chenfanfu and Zhu, Yixin and Zhu, Song-Chun},
title = {A Virtual Reality Platform for Dynamic Human-scene Interaction},
booktitle = {SIGGRAPH ASIA 2016 Virtual Reality Meets Physical Reality: Modelling and Simulating Virtual Humans and Environments},
year = {2016},
series = {SA '16},
pages = {11:1--11:4},
address = {New York, NY, USA},
publisher = {ACM},
acmid = {2992144},
articleno = {11},
doi = {10.1145/2992138.2992144},
groups = {High-quality physics/collision simulation},
isbn = {978-1-4503-4548-4},
keywords = {3D scene dataset, benchmark suite, virtual reality},
location = {Macau},
numpages = {4},
url = {http://doi.acm.org/10.1145/2992138.2992144},
}
@InProceedings{Bergmann2017,
author = {Bergmann, Till and Balzer, Matthias and Hopp, Torsten and van {de Kamp}, Thomas and Kopmann, Andreas and Jerome, Nicholas Tan and Zapf, Michael},
title = {Inspiration from {VR} Gaming Technology: Deep Immersion and Realistic Interaction for Scientific Visualization},
booktitle = {IVAPP 2017: 8th International Conference on Information Visualization Theory and Applications, Porto, Portugal, 27 February - 1 March 2017. Vol.: 3. Ed.: L. Linsen (IVAPP is part of VISIGRAPP, the 12th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications)},
year = {2017},
pages = {330--334},
publisher = {{SciTePress, Setúbal}},
note = {54.02.02; LK 01},
isbn = {978-989-758-228-8},
language = {english},
url = {https://publikationen.bibliothek.kit.edu/1000078131},
}
@Article{Zhang2016,
author = {Yi Zhang and Weichao Qiu and Qi Chen and Xiaolin Hu and Alan L. Yuille},
title = {{UnrealStereo}: A Synthetic Dataset for Analyzing Stereo Vision},
journal = {CoRR},
year = {2016},
volume = {abs/1612.04647},
archiveprefix = {arXiv},
bibsource = {dblp computer science bibliography, https://dblp.org},
biburl = {https://dblp.org/rec/bib/journals/corr/ZhangQCHY16},
eprint = {1612.04647},
groups = {Engine is open source, Availability of pre-made assets, High-quality physics/collision simulation},
timestamp = {Mon, 13 Aug 2018 16:47:06 +0200},
url = {http://arxiv.org/abs/1612.04647},
}
@InProceedings{Capece2017,
author = {Capece, Nicola and Erra, Ugo and Romano, Simone and Scanniello, Giuseppe},
title = {Visualising a Software System as a City Through Virtual Reality},
booktitle = {Augmented Reality, Virtual Reality, and Computer Graphics},
year = {2017},
editor = {De Paolis, Lucio Tommaso and Bourdot, Patrick and Mongelli, Antonio},
pages = {319--327},
address = {Cham},
publisher = {Springer International Publishing},
abstract = {We describe a technique developed using C++ language and Unreal Engine 4 that allows users to visualise software systems written in object-oriented Java through virtual reality and using the city metaphor. Our aim is to use virtual reality to visualise the metrics of classes and packages of a software system. In this paper, we present a prototype. The ultimate goal will be to demonstrate that it is possible to use virtual reality to better understand software.},
doi = {10.1007/978-3-319-60928-7_28},
groups = {Blueprints visual scripting language},
isbn = {978-3-319-60928-7},
}
@InProceedings{Smyth2018,
author = {D. L. Smyth and F. G. Glavin and M. G. Madden},
title = {Using a Game Engine to Simulate Critical Incidents and Data Collection by Autonomous Drones},
booktitle = {2018 IEEE Games, Entertainment, Media Conference (GEM)},
year = {2018},
pages = {1--9},
month = {Aug},
abstract = {Using a game engine, we have developed a virtual environment which models important aspects of critical incident scenarios. We focused on modelling phenomena relating to the identification and gathering of key forensic evidence, in order to develop and test a system which can handle chemical, biological, radiological/nuclear or explosive (CBRNe) events autonomously. This allows us to build and validate AI-based technologies, which can be trained and tested in our custom virtual environment before being deployed in real-world scenarios. We have used our virtual scenario to rapidly prototype a system which can use simulated Remote Aerial Vehicles (RAV s) to gather images from the environment for the purpose of mapping. Our environment provides us with an effective medium through which we can develop and test various AI methodologies for critical incident scene assessment, in a safe and controlled manner.},
doi = {10.1109/GEM.2018.8516527},
groups = {Produces photo-realistic images, Blueprints visual scripting language},
keywords = {autonomous aerial vehicles;computer games;control engineering computing;image recognition;virtual reality;game engine;data collection;autonomous drones;critical incident scenarios;modelling phenomena;key forensic evidence;virtual scenario;AI technologies;virtual environment;scene assessment;remote aerial vehicles;Global Positioning System;Games;Engines;Virtual environments;Task analysis;Ionizing radiation;Entertainment industry;Unreal Engine;Autonomous;Virtual World},
}
@Article{Khirodkar2018,
author = {Rawal Khirodkar and Donghyun Yoo and Kris M. Kitani},
title = {Domain Randomization for Scene-Specific Car Detection and Pose Estimation},
journal = {CoRR},
year = {2018},
volume = {abs/1811.05939},
archiveprefix = {arXiv},
bibsource = {dblp computer science bibliography, https://dblp.org},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1811-05939},
eprint = {1811.05939},
groups = {Produces photo-realistic images},
timestamp = {Sat, 24 Nov 2018 17:52:00 +0100},
url = {http://arxiv.org/abs/1811.05939},
}
@InProceedings{Kolagunda2018,
author = {Kolagunda, Abhishek and Sorensen, Scott and Mehralivand, Sherif and Saponaro, Philip and Treible, Wayne and Turkbey, Baris and Pinto, Peter and Choyke, Peter and Kambhamettu, Chandra},
title = {A Mixed Reality Guidance System for Robot Assisted Laparoscopic Radical Prostatectomy},
booktitle = {OR 2.0 Context-Aware Operating Theaters, Computer Assisted Robotic Endoscopy, Clinical Image-Based Procedures, and Skin Image Analysis},
year = {2018},
editor = {Stoyanov, Danail and Taylor, Zeike and Sarikaya, Duygu and McLeod, Jonathan and Gonz{\'a}lez Ballester, Miguel Angel and Codella, Noel C.F. and Martel, Anne and Maier-Hein, Lena and Malpani, Anand and Zenati, Marco A. and De Ribaupierre, Sandrine and Xiongbiao, Luo and Collins, Toby and Reichl, Tobias and Drechsler, Klaus and Erdt, Marius and Linguraru, Marius George and Oyarzun Laura, Cristina and Shekhar, Raj and Wesarg, Stefan and Celebi, M. Emre and Dana, Kristin and Halpern, Allan},
pages = {164--174},
address = {Cham},
publisher = {Springer International Publishing},
abstract = {Robotic surgery with preoperative imaging data for planning have become increasingly common for surgical treatment of patients. For surgeons using robotic surgical platforms, maintaining spatial awareness of the anatomical structures in the surgical area is key for good outcomes. We propose a Mixed Reality system which allows surgeons to visualize and interact with aligned anatomical models extracted from preoperative imagery as well as the in vivo imagery from the stereo laparoscope. To develop this system, we have employed techniques to 3D reconstruct stereo laparoscope images, model 3D shape of the anatomical structures from preoperative MRI stack and align the two 3D surfaces. The application we have developed allows surgeons to visualize occluded and obscured organ boundaries as well as other important anatomy that is not visible through the laparoscope alone, facilitating better spatial awareness during surgery. The system was deployed in 9 robot assisted laparoscopic prostatectomy procedures as part of a feasibility study.},
isbn = {978-3-030-01201-4},
}
@Article{Du2016,
author = {J. Du and C. Mouser and W. Sheng},
title = {Design and Evaluation of a Teleoperated Robotic {3-D} Mapping System using an {RGB-D} Sensor},
journal = {IEEE Transactions on Systems, Man, and Cybernetics: Systems},
year = {2016},
volume = {46},
number = {5},
pages = {718--724},
month = {May},
issn = {2168-2216},
abstract = {In this correspondence paper, we develop a teleoperated robotic 3-D mapping (TeRoM) system which enables efficient human-guided mapping of remote environments for realistic rendering and visualization. First, the hardware design of the TeRoM system is proposed which is based on a Pioneer mobile robot platform and a rotating RGB-D camera. A client/server architecture is developed to allow the data to be processed in a remote server, which makes it possible to implement 3-D mapping on robots with limited resources. Second, a 3-D map is created in real-time while an operator controls the robot and the pan-tilt unit remotely using a joystick. Then the map is converted into a mesh using the marching cubes algorithm and optimized to reduce the data volume. Finally the mesh is imported and rendered in a 3-D rendering engine for interactive and intuitive display. We evaluate the performance of the TeRoM system in terms of accuracy, processing speed, reliability, and manipulability.},
doi = {10.1109/TSMC.2015.2461186},
keywords = {cameras;client-server systems;image sensors;interactive devices;mobile robots;path planning;reliability;rendering (computer graphics);robot vision;telerobotics;teleoperated robotic 3D mapping system;RGB-D sensor;human-guided mapping;remote environments;TeRoM system;Pioneer mobile robot platform;rotating RGB-D camera;client-server architecture;pan-tilt unit;joystick;marching cube algorithm;3D rendering engine;processing speed;Cameras;Servers;Three-dimensional displays;Optimization;Robot vision systems;Client/server model;RGB-D camera;robotic mapping;Client/server model;RGB-D camera;robotic mapping},
}
@InProceedings{Paraiso2017,
author = {Paraiso, Karla and Interrante, Victoria},
title = {Can Virtual Human Entourage Elements Facilitate Accurate Distance Judgments in {VR}?},
booktitle = {Virtual Reality and Augmented Reality},
year = {2017},
editor = {Barbic, Jernej and D'Cruz, Mirabelle and Latoschik, Marc Erich and Slater, Mel and Bourdot, Patrick},
pages = {119--133},
address = {Cham},
publisher = {Springer International Publishing},
abstract = {Entourage elements are widely used in architectural renderings to provide a sense of scale and bring the drawings to life. We explore the potential of using a photorealistic, three-dimensional, exact-scale model of a known person as an entourage element to ameliorate the classical problem of distance underestimation in immersive virtual environments, for the purposes of enhancing spatial perception accuracy during architectural design reviews.},
doi = {10.1007/978-3-319-72323-5_8},
isbn = {978-3-319-72323-5},
}
@Article{Peng2018,
author = {Cheng Peng and Volkan Isler},
title = {Adaptive View Planning for Aerial {3D} Reconstruction of Complex Scenes},
journal = {CoRR},
year = {2018},
volume = {abs/1805.00506},
archiveprefix = {arXiv},
bibsource = {dblp computer science bibliography, https://dblp.org},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1805-00506},
eprint = {1805.00506},
groups = {Produces photo-realistic images},
timestamp = {Mon, 13 Aug 2018 16:47:17 +0200},
url = {http://arxiv.org/abs/1805.00506},
}
@Article{Zhang2018,
author = {Zhang, Xiaoting and Liu, Jihong and Chen, Qing and Song, Hao and Zhan, Qianyi and Lu, Jing},
title = {A {3D} virtual Weft-knitting Engineering learning system based on {Unreal Engine 4}},
journal = {Computer Applications in Engineering Education},
year = {2018},
volume = {26},
number = {6},
pages = {2223--2236},
abstract = {The traditional practice teaching mode of Weft-knitting Engineering leads to unsatisfactory teaching outcomes due to the lack of hardware in laboratory and security issue in knitting mill. This paper presents a 3D virtual Weft-knitting Engineering learning system based on Unreal Engine 4. The technological design of weft-knitted fabrics including automatic design of jacquard pattern and the transformation of jacquard pattern, knitting notation, needle set-out, and cams arrangement is realized through the establishment of mathematical model and algorithm in Unreal Engine 4. Corresponding cam configuration operation from the first person perspective is implemented in virtual practice operation part. The system provides learners a holistic cognition of weft-knitting practice from machine structure, design of the weft-knitted fabrics, virtual practice operation on the machine to 3D interactive Virtual Weft-knitted fabrics. The experiment result shows that the virtual learning system developed in this study can improve the teaching effect of Weft-knitting Engineering especially in operational skills, and the combination of traditional plus virtual learning system method is the better way to optimize the teaching effect. The learners’ feedback indicates that 3D virtual Weft-knitting Engineering learning system is more convenient and immersive in which they feel like using real machine in real knitting mill. With the software combination of Unreal Engine 4 and Houdini, the bottom frame of the program offers great expansibility for more complex machine model and operation to the system in future. The design framework and discussions in this paper may be useful for developing virtual learning system in other areas.},
doi = {10.1002/cae.22030},
eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1002/cae.22030},
groups = {Produces photo-realistic images, Blueprints visual scripting language},
keywords = {3D virtual learning system, blue print, Unreal Engine 4, visualization scripting, Weft-knitting Engineering},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/cae.22030},
}
@InProceedings{Cover2018,
author = {A. Cover and R. D. Posser and J. P. Campos and R. Rieder},
title = {Methodology of Communication between a Criminal Database and a Virtual Reality Environment for Forensic Study},
booktitle = {2017 19th Symposium on Virtual and Augmented Reality (SVR)},
year = {2018},
volume = {00},
pages = {215--222},
month = {Nov},
doi = {10.1109/SVR.2017.35},
keywords = {Three-dimensional displays;Forensics;Software;Solid modeling;Visualization;Virtual environments},
url = {https://doi.ieeecomputersociety.org/10.1109/SVR.2017.35},
}
@InProceedings{Skinner2016,
author = {J. Skinner and S. Garg and N. Sünderhauf and P. Corke and B. Upcroft and M. Milford},
title = {High-fidelity simulation for evaluating robotic vision performance},
booktitle = {2016 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
year = {2016},
pages = {2737--2744},
month = {Oct},
abstract = {Robotic vision, unlike computer vision, typically involves processing a stream of images from a camera with time varying pose operating in an environment with time varying lighting conditions and moving objects. Repeating robotic vision experiments under identical conditions is often impossible, making it difficult to compare different algorithms. For machine learning applications a critical bottleneck is the limited amount of real world image data that can be captured and labelled for both training and testing purposes. In this paper we investigate the use of a photo-realistic simulation tool to address these challenges, in three specific domains: robust place recognition, visual SLAM and object recognition. For the first two problems we generate images from a complex 3D environment with systematically varying camera paths, camera viewpoints and lighting conditions. For the first time we are able to systematically characterise the performance of these algorithms as paths and lighting conditions change. In particular, we are able to systematically generate varying camera viewpoint datasets that would be difficult or impossible to generate in the real world. We also compare algorithm results for a camera in a real environment and a simulated camera in a simulation model of that real environment. Finally, for the object recognition domain, we generate labelled image data and characterise the viewpoint dependency of a current convolution neural network in performing object recognition. Together these results provide a multi-domain demonstration of the beneficial properties of using simulation to characterise and analyse a wide range of robotic vision algorithms.},
doi = {10.1109/IROS.2016.7759425},
groups = {Produces photo-realistic images},
issn = {2153-0866},
keywords = {cameras;learning (artificial intelligence);neural nets;object recognition;robot vision;SLAM (robots);multidomain demonstration;convolution neural network;lighting conditions;robust place recognition;object recognition;visual SLAM;photo-realistic simulation tool;machine learning;camera;robotic vision performance evaluation;high-fidelity simulation;Cameras;Lighting;Engines;Visualization;Simultaneous localization and mapping;Robot vision systems},
}
@InProceedings{McMahon2018,
author = {M. McMahon and M. Schukat},
title = {A Low-Cost, Open-Source, {BCI-VR} Game Control Development Environment Prototype for Game Based Neurorehabilitation},
booktitle = {2018 IEEE Games, Entertainment, Media Conference (GEM)},
year = {2018},
pages = {1--9},
month = {Aug},
abstract = {In this paper we present a low-cost and open-source brain-computer interface (BCI) virtual-reality (VR) Game Control Development Environment prototype, which we demonstrate using real-time signal processing of Electroencephalography (EEG) event-related desynchronization and synchronization changes (ERD/ERS) within the Precentral Gyrus (Motor Cortex), allowing a user to control a 3D object within a Virtual Reality Environment. This BCI-VR system prototype was functionally tested on multiple participants and demonstrated live before an audience during the 2017 `Hack the Brain' at the Dublin Science Gallery. The availability of such an open-source, effective, BCI-VR Game Control Development Environment, at a level acceptable for industry experimentation, has the potential to open up this field to a much wider range of researchers and games developers and to assist the investigation of gaming experiences which both incorporate the specific control features available through BCI as a core element of the game play and the potential for its use in neurorehabilitation.},
doi = {10.1109/GEM.2018.8516468},
keywords = {brain-computer interfaces;computer games;electroencephalography;medical signal processing;neurophysiology;patient rehabilitation;virtual reality;precentral gyrus;Electroencephalography event-related desynchronization changes;BCI-VR game control development environment prototype;open-source brain-computer interface;game play;gaming experiences;BCI-VR system prototype;Virtual Reality Environment;real-time signal processing;virtual-reality;game based neurorehabilitation;Games;Electroencephalography;Prototypes;Electrodes;Open source software;Three-dimensional displays;Virtual reality;BCI;VR;Brain Computer Interface;Virtual Reality;Event-Related Potentials;open-source;Game Development;Neurorehabilitation},
}
@Article{Nash2018,
author = {B. Nash and A. Walker and T. Chambers},
title = {A simulator based on virtual reality to dismantle a research reactor assembly using master-slave manipulators},
journal = {Annals of Nuclear Energy},
year = {2018},
volume = {120},
pages = {1--7},
issn = {0306-4549},
abstract = {The use of simulation within the nuclear industry has been limited by the perceived cost and benefits that such tools offer. In this paper, we present the development of a simulator based on the use of master-slave-manipulators to dismantle a small research reactor core assembly. We discuss the design of the simulator hardware and the different software elements that make up the system. We show how this type of tool can be used to explore various options during the development of a nuclear decommissioning process. Finally, we discuss the benefits that have been observed from using the system to support the decommissioning of the CONSORT research reactor.},
doi = {10.1016/j.anucene.2018.05.018},
groups = {High-quality physics/collision simulation, Blueprints visual scripting language, Engine is open source},
keywords = {Decommissioning, Virtual reality, Simulation, Training, Process development},
url = {http://www.sciencedirect.com/science/article/pii/S0306454918302482},
}
@InProceedings{Rauhoeft2015,
author = {Rauhoeft, Greg and Leyrer, Markus and Thompson, William B. and Stefanucci, Jeanine K. and Klatzky, Roberta L. and Mohler, Betty J.},
title = {Evoking and Assessing Vastness in Virtual Environments},
booktitle = {Proceedings of the ACM SIGGRAPH Symposium on Applied Perception},
year = {2015},
series = {SAP '15},
pages = {51--54},
address = {New York, NY, USA},
publisher = {ACM},
acmid = {2804425},
doi = {10.1145/2804408.2804425},
isbn = {978-1-4503-3812-7},
keywords = {measures, vastness, virtual reality},
location = {Tübingen, Germany},
numpages = {4},
url = {http://doi.acm.org/10.1145/2804408.2804425},
}
@InProceedings{Bondi2018a,
author = {Bondi, Elizabeth and Dey, Debadeepta and Kapoor, Ashish and Piavis, Jim and Shah, Shital and Fang, Fei and Dilkina, Bistra and Hannaford, Robert and Iyer, Arvind and Joppa, Lucas and Tambe, Milind},
title = {{AirSim-W}: A Simulation Environment for Wildlife Conservation with {UAVs}},
booktitle = {Proceedings of the 1st ACM SIGCAS Conference on Computing and Sustainable Societies},
year = {2018},
series = {COMPASS '18},
pages = {40:1--40:12},
address = {New York, NY, USA},
publisher = {ACM},
acmid = {3209880},
articleno = {40},
doi = {10.1145/3209811.3209880},
groups = {Availability of pre-made assets},
isbn = {978-1-4503-5816-3},
keywords = {drones, object detection, simulation, unmanned aerial vehicles, wildlife conservation},
location = {Menlo Park and San Jose, CA, USA},
numpages = {12},
url = {http://doi.acm.org/10.1145/3209811.3209880},
}
@Comment{jabref-meta: databaseType:bibtex;}
@Comment{jabref-meta: grouping:
0 AllEntriesGroup:;
1 StaticGroup:rationale\;2\;1\;\;\;\;;
2 StaticGroup:Engine is open source\;0\;1\;\;\;\;;
2 StaticGroup:Produces photo-realistic images\;0\;1\;\;\;\;;
2 StaticGroup:Interoperability with external software\;0\;1\;\;\;\;;
2 StaticGroup:Engine is well-maintained\;0\;1\;\;\;\;;
2 StaticGroup:Availability of pre-made assets\;0\;1\;\;\;\;;
2 StaticGroup:High-quality physics/collision simulation\;0\;1\;\;\;\;;
2 StaticGroup:Engine is cross-platform\;0\;1\;\;\;\;;
2 StaticGroup:Blueprints visual scripting language\;0\;1\;\;\;\;;
}