2024
- A. Belmonte-Baeza, M. Cazorla, and J. Pomares, «Path planning and reinforcement learning-driven control of on-orbit free-flying multi-arm robots,» in Proceedings of the 2025 ieee aerospace conference, 2024.
[Bibtex]@inproceedings{aerospace2024,
  author        = {Belmonte-Baeza, A. and Cazorla, M. and Pomares, J.},
  title         = {Path Planning and Reinforcement Learning-driven Control of On-orbit Free-flying Multi-arm Robots},
  booktitle     = {Proceedings of the 2025 {IEEE} Aerospace Conference},
  year          = {2024},
  internal-note = {NOTE(review): booktitle names the 2025 conference while year is 2024 -- confirm the intended year},
}
- H. Penades, F. Escalona, and M. Cazorla, «A comparative analysis of metric combinations in face verification with machine learning techniques,» in Proceedings of the 5th international electronic conference on applied sciences, 2024.
[Bibtex]@inproceedings{Penades2024,
  author    = {Penades, H. and Escalona, F. and Cazorla, M.},
  title     = {A comparative analysis of metric combinations in face verification with machine learning techniques},
  booktitle = {Proceedings of the 5th International Electronic Conference on Applied Sciences},
  year      = {2024},
}
- F. Gomez-Donoso, F. Escalona, F. Dargère, and M. Cazorla, «Vfld: voxelized fractal local descriptor,» Applied sciences, vol. 14, iss. 20, 2024.
[Bibtex]@article{app14209414,
  author         = {Gomez-Donoso, Francisco and Escalona, Felix and Darg{\`e}re, Florian and Cazorla, Miguel},
  title          = {{VFLD}: Voxelized Fractal Local Descriptor},
  journal        = {Applied Sciences},
  volume         = {14},
  number         = {20},
  article-number = {9414},
  year           = {2024},
  issn           = {2076-3417},
  doi            = {10.3390/app14209414},
  url            = {https://www.mdpi.com/2076-3417/14/20/9414},
  abstract       = {A variety of methods for 3D object recognition and registration based on a deep learning pipeline have recently emerged. Nonetheless, these methods require large amounts of data that are not easy to obtain, sometimes rendering them virtually useless in real-life scenarios due to a lack of generalization capabilities. To counter this, we propose a novel local descriptor that takes advantage of the fractal dimension. For each 3D point, we create a descriptor by computing the fractal dimension of the neighbors at different radii. Our method has many benefits, such as being agnostic to the sensor of choice and noise, up to a level, and having few parameters to tinker with. Furthermore, it requires no training and does not rely on semantic information. We test our descriptor using well-known datasets and it largely outperforms Fast Point Feature Histogram, which is the state-of-the-art descriptor for 3D data. We also apply our descriptor to a registration pipeline and achieve accurate three-dimensional representations of the scenes, which are captured with a commercial sensor.},
}
- A. Carmona-Rodríguez, F. Gomez-Donoso, M. Cazorla, A. Cobo-Vivero, R. Aguilar, A. A. Ramos-Esplá, and E. Guijarro-García, «Artificial intelligence as a tool for bionomic transects: the case of isidella elongata forests in western mediterranean,» in Proceedings of the ix international symposium on marine science, 2024.
[Bibtex]@inproceedings{isms,
  author    = {Carmona-Rodr{\'\i}guez, A. and Gomez-Donoso, F. and Cazorla, M. and Cobo-Vivero, A. and Aguilar, R. and Ramos-Espl{\'a}, A. A. and Guijarro-Garc{\'\i}a, E.},
  title     = {Artificial Intelligence as a Tool for Bionomic Transects: The Case of {Isidella elongata} Forests in Western Mediterranean},
  booktitle = {Proceedings of the IX International Symposium on Marine Science},
  year      = {2024},
}
- C. Losantos-Pulido, F. Gomez-Donoso, F. Escalona-Moncholí, and M. Cazorla, «Paws: personal assistance walking system for the visually impaired,» in Proceedings of the xxiv international workshop of physical agents, 2024.
[Bibtex]@inproceedings{Losantos2024,
  author    = {Losantos-Pulido, Carmen and Gomez-Donoso, Francisco and Escalona-Monchol{\'\i}, F{\'e}lix and Cazorla, Miguel},
  title     = {{PAWS}: Personal Assistance Walking System for the Visually Impaired},
  booktitle = {Proceedings of the XXIV International Workshop of Physical Agents},
  year      = {2024},
}
- S. Garcia-Muñoz, S. Suescun-Fernandez, F. Gomez-Donoso, and M. Cazorla, «Multimodal emotional recognition for human-robot interaction in social robotics,» in Proceedings of the xxiv international workshop of physical agents, 2024.
[Bibtex]@inproceedings{GarciaMunoz2024,
  author        = {Garcia-Mu{\~n}oz, Sergio and Suescun-Fernandez, Sergio and Gomez-Donoso, Francisco and Cazorla, Miguel},
  title         = {Multimodal Emotional Recognition for Human-Robot Interaction in Social Robotics},
  booktitle     = {Proceedings of the XXIV International Workshop of Physical Agents},
  year          = {2024},
  internal-note = {NOTE(review): key renamed; the original key contained a non-ASCII character, which classic BibTeX does not accept -- update any citations},
}
- L. J. Marhuenda, F. Gomez-Donoso, and M. Cazorla, «Harto: human activity recognition through optical flow architecture,» in The ieee world congress on computational intelligence, 2024.
[Bibtex]@inproceedings{Marhuenda2024,
  author    = {Marhuenda, Luis Jesus and Gomez-Donoso, Francisco and Cazorla, Miguel},
  title     = {{HARTO}: Human Activity Recognition Through Optical Flow Architecture},
  booktitle = {The IEEE World Congress on Computational Intelligence},
  year      = {2024},
}
- M. González-Ruíz, V. H. Diaz-Ramirez, M. Cazorla, and R. Juárez-Salazar, «A comparative study of facial feature classification methods,» in Spie optics + photonics 2024, 2024.
[Bibtex]@inproceedings{Gonzalez2024,
  author        = {Gonz{\'a}lez-Ru{\'\i}z, Mart{\'\i}n and Diaz-Ramirez, V{\'\i}ctor H. and Cazorla, Miguel and Ju{\'a}rez-Salazar, Rigoberto},
  title         = {A comparative study of facial feature classification methods},
  booktitle     = {SPIE Optics + Photonics 2024},
  year          = {2024},
  internal-note = {NOTE(review): last author's surname was garbled as "uarez-Salazar"; reconstructed as Juarez-Salazar -- confirm against the published paper},
}
- B. Dominguez-Dager, F. Gomez-Donoso, R. Roig-Vila, F. Escalona, and M. Cazorla, «Holograms for seamless integration of remote students in the classroom,» Virtual reality, vol. 28, iss. 1, p. 24, 2024.
[Bibtex]@article{dominguez2024holograms,
  author    = {Dominguez-Dager, Bessie and Gomez-Donoso, Francisco and Roig-Vila, Rosabel and Escalona, Felix and Cazorla, Miguel},
  title     = {Holograms for seamless integration of remote students in the classroom},
  journal   = {Virtual Reality},
  volume    = {28},
  number    = {1},
  pages     = {24},
  year      = {2024},
  publisher = {Springer},
}
2023
- E. G. Caldwell-Marin, M. Cazorla, and J. M. Cañas-Plaza, «Experimental analysis of the effectiveness of a cyber-physical robotic system to assist speech and language pathologists in high school,» Journal of new approaches in educational research, vol. 12, iss. 1, p. 40–61, 2023.
[Bibtex]@article{caldwell2023experimental,
  author    = {Caldwell-Marin, Eldon Glen and Cazorla, Miguel and Ca{\~n}as-Plaza, Jos{\'e} Mar{\'\i}a},
  title     = {Experimental analysis of the effectiveness of a cyber-physical robotic system to assist speech and language pathologists in high school},
  journal   = {Journal of New Approaches in Educational Research},
  volume    = {12},
  number    = {1},
  pages     = {40--61},
  year      = {2023},
  publisher = {Springer},
}
- L. Marquez-Carpintero, M. Pina-Navarro, S. Suescun-Ferrandiz, F. Escalona, F. Gomez-Donoso, R. Roig-Vila, and M. Cazorla, «Artificial intelligence-based system for detecting attention levels in students,» Journal of visualized experiments: jove, iss. 202, 2023.
[Bibtex]@article{marquez2023artificial,
  author  = {Marquez-Carpintero, Luis and Pina-Navarro, Monica and Suescun-Ferrandiz, Sergio and Escalona, Felix and Gomez-Donoso, Francisco and Roig-Vila, Rosabel and Cazorla, Miguel},
  title   = {Artificial Intelligence-based System for Detecting Attention Levels in Students},
  journal = {Journal of visualized experiments: JoVE},
  number  = {202},
  year    = {2023},
}
- F. Gomez-Donoso, B. Dominguez-Dager, F. Escalona, J. Montoyo-Bojo, and M. Cazorla, «Hypergaze: gaze tracking with a color camera,» in The 23rd international workshop de agentes físicos (waf), 2023.
[Bibtex]@inproceedings{Gomez-DonosoWAF2023,
  author    = {Gomez-Donoso, Francisco and Dominguez-Dager, Bessie and Escalona, F{\'e}lix and Montoyo-Bojo, Javier and Cazorla, Miguel},
  title     = {{HyperGaze}: Gaze Tracking with a Color Camera},
  booktitle = {The 23rd International Workshop de Agentes F{\'\i}sicos (WAF)},
  year      = {2023},
}
- S. Suescun, F. Gomez-Donoso, L. Marquez-Carpintero, and M. Cazorla, «Genetic algorithms for self-driving drones,» in The 23rd international workshop de agentes físicos (waf), 2023.
[Bibtex]@inproceedings{SuescunWAF2023,
  author    = {Suescun, Sergio and Gomez-Donoso, Francisco and Marquez-Carpintero, Luis and Cazorla, Miguel},
  title     = {Genetic Algorithms for Self-Driving Drones},
  booktitle = {The 23rd International Workshop de Agentes F{\'\i}sicos (WAF)},
  year      = {2023},
}
- E. Cruz, E. Gomez, A. M. Acosta-Reyes, F. Gomez-Donoso, M. Cazorla, and J. C. Rangel, «Enhancing poultry management practices: automated chicken counting and flock monitoring using yolo,» in The 23rd international workshop de agentes físicos (waf), 2023.
[Bibtex]@inproceedings{CruzWAF2023,
  author    = {Cruz, Edmanuel and Gomez, Edgar and Acosta-Reyes, Adiz Mariel and Gomez-Donoso, Francisco and Cazorla, Miguel and Rangel, Jose Carlos},
  title     = {Enhancing Poultry Management Practices: Automated Chicken Counting and Flock Monitoring using {YOLO}},
  booktitle = {The 23rd International Workshop de Agentes F{\'\i}sicos (WAF)},
  year      = {2023},
}
- E. Alvarez, R. Alvarez Sanchez, and M. Cazorla, «Exploring transferability on adversarial attacks,» Ieee access, vol. Accepted, 2023.
[Bibtex]@article{alvarez2023,
  author  = {Alvarez, Enrique and Alvarez Sanchez, Rafael and Cazorla, Miguel},
  title   = {Exploring Transferability on Adversarial Attacks},
  journal = {IEEE Access},
  year    = {2023},
  note    = {Accepted for publication; volume and pages pending},
}
- M. Torres Mendoza, R. Alvarez Sanchez, and M. Cazorla, «A malware detection approach based on feature engineering and behavior analysis,» Ieee access, vol. Accepted, 2023.
[Bibtex]@article{torres2023,
  author  = {Torres Mendoza, Manuel and Alvarez Sanchez, Rafael and Cazorla, Miguel},
  title   = {A Malware Detection Approach Based on Feature Engineering and Behavior Analysis},
  journal = {IEEE Access},
  year    = {2023},
  note    = {Accepted for publication; volume and pages pending},
}
- C. Mejia-Escobar, M. Cazorla, and E. Martinez-Martin, «Improving facial expression recognition through data preparation & merging,» Ieee access, vol. Accepted, 2023.
[Bibtex]@article{mejia2023,
  author  = {Mejia-Escobar, Christian and Cazorla, Miguel and Martinez-Martin, Ester},
  title   = {Improving Facial Expression Recognition through Data Preparation \& Merging},
  journal = {IEEE Access},
  year    = {2023},
  note    = {Accepted for publication; volume and pages pending},
}
- F. Escalona, F. Gomez-Donoso, F. Morillas-Espejo, M. Pina-Navarro, L. M. Carpintero, and M. Cazorla, «Aatiende: automatic attention evaluation on a non-invasive device,» in Iwann 2023, 2023.
[Bibtex]@inproceedings{EscalonaIWANN2023,
  author    = {Escalona, F. and Gomez-Donoso, F. and Morillas-Espejo, Francisco and Pina-Navarro, M{\'o}nica and M{\'a}rquez Carpintero, Luis and Cazorla, Miguel},
  title     = {{AATiENDe}: Automatic {ATtention} Evaluation on a Non-invasive Device},
  booktitle = {IWANN 2023},
  year      = {2023},
}
- R. Martinez-Roig, M. Cazorla, and J. M. Esteve Faubel, «Social robotics in music education: a systematic review,» Frontiers in education, vol. Accepted, 2023.
[Bibtex]@article{martinez2023,
  author  = {Martinez-Roig, Rosabel and Cazorla, Miguel and Esteve Faubel, Jose Maria},
  title   = {Social robotics in music education: a systematic review},
  journal = {Frontiers in Education},
  year    = {2023},
  note    = {Accepted for publication; volume and pages pending},
}
- F. Romero-Ramirez, R. Muñoz-Salinas, M. Marín, M. Cazorla, and R. Medina-Carnicer, «Sslam: speeded up visual slam mixing artificial markers and temporary keypoints,» Sensors, vol. Accepted, 2023.
[Bibtex]@article{romero2023,
  author  = {Romero-Ramirez, Francisco and Mu{\~n}oz-Salinas, Rafael and Mar{\'\i}n, Manuel and Cazorla, Miguel and Medina-Carnicer, Rafael},
  title   = {{sSLAM}: Speeded Up visual {SLAM} mixing artificial markers and temporary keypoints},
  journal = {Sensors},
  year    = {2023},
  note    = {Accepted for publication; volume and pages pending},
}
- F. Gomez-Donoso, J. Castano, F. Escalona-Moncholi, and M. Cazorla, «Three-dimensional reconstruction using sfm for actual pedestrian classification,» Expert systems with applications, vol. 213, 2023.
[Bibtex]@article{gomezDonoso2022c,
  author  = {Gomez-Donoso, Francisco and Castano, Julio and Escalona-Moncholi, Felix and Cazorla, Miguel},
  title   = {Three-Dimensional Reconstruction Using {SFM} for Actual Pedestrian Classification},
  journal = {Expert Systems with Applications},
  year    = {2023},
  volume  = {213},
}
- C. Mejia, M. Cazorla, and E. Martinez-Martin, «Towards a better performance in facial expression recognition: a data-centric approach,» Computational intelligence and neuroscience, vol. 2023, 2023.
[Bibtex]@article{christian2022a,
  author  = {Mejia, Christian and Cazorla, Miguel and Martinez-Martin, Ester},
  title   = {Towards a better performance in facial expression recognition: a data-centric approach},
  journal = {Computational Intelligence and Neuroscience},
  year    = {2023},
  volume  = {2023},
}
2022
- F. Gomez-Donoso, F. Escalona-Moncholi, and M. Cazorla, «Vfkd: voxelized fractal keypoint detector,» in Proc. of international joint conference on neural networks (ijcnn), 2022.
[Bibtex]@inproceedings{GomezDonoso2022IJCNN,
  author    = {Gomez-Donoso, Francisco and Escalona-Moncholi, Felix and Cazorla, Miguel},
  title     = {{VFKD}: Voxelized Fractal Keypoint Detector},
  booktitle = {Proc. of International Joint Conference on Neural Networks (IJCNN)},
  year      = {2022},
}
- F. Gomez-Donoso, F. Escalona, S. Orts-Escolano, A. Garcia-Garcia, J. Garcia-Rodriguez, and M. Cazorla, «3dslicelenet: recognizing 3d objects using a slice-representation,» Ieee access, vol. 10, pp. 15378-15392, 2022.
[Bibtex]@article{9701312,
  author  = {Gomez-Donoso, Francisco and Escalona, Felix and Orts-Escolano, Sergio and Garcia-Garcia, Alberto and Garcia-Rodriguez, Jose and Cazorla, Miguel},
  title   = {{3DSliceLeNet}: Recognizing {3D} Objects Using a Slice-Representation},
  journal = {IEEE Access},
  year    = {2022},
  volume  = {10},
  pages   = {15378--15392},
  doi     = {10.1109/ACCESS.2022.3148387},
}
- F. Gomez-Donoso, M. Moreno-Martineza, and M. Cazorla, «Uroac: urban objects in any-light conditions,» Data in brief, vol. Aceptado, 2022.
[Bibtex]@article{donoso2022b,
  author        = {Gomez-Donoso, F. and Moreno-Martineza, Marcos and Cazorla, Miguel},
  title         = {{UrOAC}: Urban Objects in Any-light Conditions},
  journal       = {Data in Brief},
  year          = {2022},
  note          = {Accepted for publication; volume pending},
  internal-note = {NOTE(review): "Moreno-Martineza" may be a typo for "Moreno-Martinez" -- confirm},
}
- J. C. Rangel, E. Cruz, and M. Cazorla, «Automatic understanding and mapping of regions in cities using google,» Applied sciences, vol. 12, iss. 6, 2022.
[Bibtex]@article{Rangel2022,
  author  = {Rangel, Jose Carlos and Cruz, Edmanuel and Cazorla, Miguel},
  title   = {Automatic understanding and mapping of regions in cities using {Google}},
  journal = {Applied Sciences},
  year    = {2022},
  volume  = {12},
  number  = {6},
}
- K. Lakatos, G. Gonzalez-Serrano, J. Hoballah, J. Brooker, S. Jeong, C. Evans, P. Krauledat, P. W. Hansen, K. M. Elias, M. Patankar, V. Fülöp, P. A. Konstantinopoulos, and D. W. Cramer, «Application of a novel microscopic technique for quantifying ca125 binding to circulating mononuclear cells in longitudinal specimens during treatment for ovarian cancer,» Journal of ovarian research, vol. 15, 2022.
[Bibtex]@article{Lakatos2022,
  author    = {Lakatos, Korn{\'e}l and Gonzalez-Serrano, Germ{\'a}n and Hoballah, Jawad and Brooker, Jeff and Jeong, Sinyoung and Evans, Conor and Krauledat, Petra and Hansen, W. Peter and Elias, Kevin M. and Patankar, Manish and F{\"u}l{\"o}p, Vilmos and Konstantinopoulos, Panagiotis A. and Cramer, Daniel W.},
  title     = {Application of a novel microscopic technique for quantifying {CA125} binding to circulating mononuclear cells in longitudinal specimens during treatment for ovarian cancer},
  journal   = {Journal of Ovarian Research},
  volume    = {15},
  number    = {1},
  year      = {2022},
  month     = dec,
  doi       = {10.1186/s13048-022-00957-7},
  issn      = {1757-2215},
  pmid      = {35219339},
  publisher = {BioMed Central Ltd},
  keywords  = {CA125,Microscopy,Mononuclear cells,Nanoparticles,Ovarian Cancer},
  abstract  = {Background: Measurement of serum CA125, an antigenic fragment of human mucin 16 (MUC16), is used to monitor the clinical progression of epithelial ovarian cancer (EOC). However, rather than simply a passive marker reflecting tumor burden, MUC16 may have a more active role by binding to immune cells and altering their tumor response. We developed a research tool to measure MUC16-binding to the surfaces of peripheral blood mononuclear cell (PBMC) subtypes and tested its research value using specimens collected serially from a woman being treated for high grade serous EOC. Methods: Cryopreserved PBMCs were mixed with anti-CA125 antibody-labeled plasmonic gold nanoparticles (PNPs) to detect cell surface MUC16-binding along with fluorescent stains to identify B cells, NK cells, NK-T cells, T cells, and monocytes. From 3D darkfield images, a computer algorithm was applied to enumerate PNP-binding and fluorescence microscopy to identify cell lineage. Average MUC16-binding was determined by fitting a Poisson distribution to PNP-counts across similar cell types. MUC16-binding to cell types was correlated with treatment details, CA125 levels, and complete blood count (CBC) data. Results: Over a 21-month period, monocytes had the highest level of MUC16-binding which was positively correlated with serum CA125 and inversely correlated with circulating monocyte and lymphocyte counts. Fluctuations of PNP-binding to NK cells were associated temporally with types of chemotherapy and surgical events. Levels of MUC16 bound to NK cells were positively correlated with levels of MUC16 bound to T and NK-T cells and inversely correlated with circulating platelets. Conclusions: Assessment of MUC16-binding among cryopreserved PBMC cell types can be accomplished using darkfield and fluorescence microscopy. Correlations observed between level of binding by cell type with serum CA125, CBC data, and treatment details suggest that the new techniques may offer novel insights into EOC's clinical course.},
}
2021
- C. Mejia, E. Martinez-Martin, and M. Cazorla, «Webpage categorization using deep learning,» in Proc. of the 16th international conference on soft computing models in industrial and environmental applications (soco), 2021.
[Bibtex]@inproceedings{SOCOChristian2021,
  author    = {Mejia, Christian and Martinez-Martin, Ester and Cazorla, Miguel},
  title     = {Webpage Categorization using Deep Learning},
  booktitle = {Proc. of the 16th International Conference on Soft Computing Models in Industrial and Environmental Applications (SOCO)},
  year      = {2021},
}
- Z. Bauer, Z. Li, S. Orts, M. Cazorla, M. Pollefeys, and M. Oswald, «Nvs-monodepth: improving monocular depth prediction with novel view synthesis,» in Proceedings of the international conference on 3d vision (3dv), 2021.
[Bibtex]@inproceedings{3DVZuria2021,
  author        = {Bauer, Zuria and Li, Zuoyue and Orts, Sergio and Cazorla, Miguel and Pollefeys, Marc and Oswald, Martin},
  title         = {{NVS-MonoDepth}: Improving Monocular Depth Prediction with Novel View Synthesis},
  booktitle     = {Proceedings of the International Conference on 3D Vision (3DV)},
  year          = {2021},
  internal-note = {NOTE(review): "OSwald" assumed to be a typo for "Oswald" -- confirm the author's name},
}
- E. Alvarez, R. Alvarez, and M. Cazorla, «Studying the transferability of non-targeted adversarial attacks,» in Proc. of international joint conference on neural networks (ijcnn), 2021.
[Bibtex]@inproceedings{IJCNNEnrique2021,
  author    = {Alvarez, Enrique and Alvarez, Rafael and Cazorla, Miguel},
  title     = {Studying the Transferability of Non-Targeted Adversarial Attacks},
  booktitle = {Proc. of International Joint Conference on Neural Networks (IJCNN)},
  year      = {2021},
}
- E. Martinez-Martin and F. Morillas-Espejo, «Deep learning techniques for spanish sign language interpretation,» Computational intelligence and neuroscience, vol. 2021, 2021.
[Bibtex]@article{martinez-martin2021a,
  author  = {Martinez-Martin, Ester and Morillas-Espejo, Francisco},
  title   = {Deep Learning Techniques for Spanish Sign Language Interpretation},
  journal = {Computational Intelligence and Neuroscience},
  year    = {2021},
  volume  = {2021},
  doi     = {10.1155/2021/5532580},
}
- E. Martinez-Martin and A. Costa, «Assistive technology for elderly care: an overview,» Ieee access, vol. 9, pp. 92420-92430, 2021.
[Bibtex]@article{martinez-martin2021b,
  author  = {Martinez-Martin, Ester and Costa, Angelo},
  title   = {Assistive Technology for Elderly Care: An Overview},
  journal = {IEEE Access},
  year    = {2021},
  volume  = {9},
  pages   = {92420--92430},
  doi     = {10.1109/ACCESS.2021.3092407},
}
- E. Martinez-Martin and A. del~Pobil, «Robot vision for manipulation: a trip to real-world applications,» Ieee access, vol. 9, pp. 3471-3481, 2021.
[Bibtex]@article{martinez-martin2021c,
  author  = {Martinez-Martin, Ester and del Pobil, Angel P.},
  title   = {Robot Vision for Manipulation: A Trip to Real-World Applications},
  journal = {IEEE Access},
  year    = {2021},
  volume  = {9},
  pages   = {3471--3481},
  doi     = {10.1109/ACCESS.2020.3048053},
}
- E. Martinez-Martin, E. Ferrer, I. Vasilev, and A. P. del Pobil, «The uji aerial librarian robot: a quadcopter for visual library inventory and book localisation,» Sensors, vol. 21, iss. 4, 2021.
[Bibtex]@article{martinez-martin2021d,
  author  = {Martinez-Martin, Ester and Ferrer, Eric and Vasilev, Ilia and del Pobil, Angel P.},
  title   = {The UJI Aerial Librarian Robot: A Quadcopter for Visual Library Inventory and Book Localisation},
  journal = {Sensors},
  volume  = {21},
  number  = {4},
  year    = {2021},
  issn    = {1424-8220},
  doi     = {10.3390/s21041079},
}
- E. Martinez-Martin, E. Ferrer, I. Vasilev, and A. P. del Pobil, «An autonomous drone for image-based inspection of bookshelves,» in 29th european signal processing conference – eusipco 2021, 2021.
[Bibtex]@inproceedings{martinez-martin2021e,
  author    = {Martinez-Martin, Ester and Ferrer, Eric and Vasilev, Ilia and del Pobil, Angel P.},
  title     = {An Autonomous Drone for Image-Based Inspection of Bookshelves},
  booktitle = {29th European Signal Processing Conference - EUSIPCO 2021},
  year      = {2021},
}
- E. Caldwell, M. Cazorla, and J. M. C. Plaza, «Designing a cyber-physical robotic platform to assist speech-language pathologists,» Assistive technologies, vol. Aceptado, 2021.
[Bibtex]@article{Eldon2021,
  author  = {Caldwell, Eldon and Cazorla, Miguel and Ca{\~n}as Plaza, Jose Maria},
  title   = {Designing a Cyber-physical Robotic Platform to assist Speech-Language Pathologists},
  journal = {Assistive Technologies},
  year    = {2021},
  note    = {Accepted for publication; volume pending},
}
- F. Gomez-Donoso, F. Escalona, N. Nasri, and M. Cazorla, «A hand motor skills rehabilitation for the injured implemented on a social robot,» Applied sciences, vol. 11, iss. 7, 2021.
[Bibtex]@article{Donoso2021b,
  author  = {F. Gomez-Donoso and F. Escalona and N. Nasri and M. Cazorla},
  title   = {A Hand Motor Skills Rehabilitation for the Injured Implemented on a Social Robot},
  journal = {Applied Sciences},
  year    = {2021},
  volume  = {11},
  number  = {7},
}
- F. Gomez-Donoso, F. Escalona, F. Pérez-Esteve, and M. Cazorla, «Accurate multilevel classification for wildlife images,» Computational intelligence and neuroscience, vol. 2021, 2021.
[Bibtex]@article{Donoso2021a,
  author  = {Gomez-Donoso, F. and Escalona, F. and P{\'e}rez-Esteve, F. and Cazorla, M.},
  title   = {Accurate Multilevel Classification for Wildlife Images},
  journal = {Computational Intelligence and Neuroscience},
  year    = {2021},
  volume  = {2021},
}
- F. Peng, S. Jeong, G. Gonzalez-Serrano, H. Marks, A. Ho, E. Roussakis, P. B. Krauledat, P. Hansen, and C. L. Evans, «Assessment of glial fibrillary acidic protein binding to the surface of leukocytes with dark-field imaging and computational analysis,» Advanced functional materials, vol. 2009229, pp. 1-9, 2021.
[Bibtex]@article{Peng2021,
  author        = {Peng, Fei and Jeong, Sinyoung and Gonzalez-Serrano, German and Marks, Haley and Ho, Alexander and Roussakis, Emmanuel and Krauledat, Petra B. and Hansen, Peter and Evans, Conor L.},
  title         = {Assessment of Glial Fibrillary Acidic Protein Binding to the Surface of Leukocytes with Dark-Field Imaging and Computational Analysis},
  journal       = {Advanced Functional Materials},
  volume        = {2009229},
  pages         = {1--9},
  year          = {2021},
  doi           = {10.1002/adfm.202009229},
  issn          = {1616-3028},
  keywords      = {dark-field imaging,glial fibrillary acidic proteins,gold nanoparticles,machine learning,traumatic brain injury},
  internal-note = {NOTE(review): volume=2009229 looks like the article number, not a volume -- confirm against the journal},
  abstract      = {The glial fibrillary acidic protein (GFAP) is widely established as a traumatic brain injury (TBI) biomarker and can be used in early diagnosis. As essential primary human immune cells, leukocytes are recruited to injured cerebral sites during TBI response, where they can interact with and potentially bind to TBI biomarkers. To date, no studies have demonstrated ultra-low GFAP binding enumeration on leukocytes. Herein, a dark-field imaging technique coupled with computational analysis is introduced to quantify GFAP bound to peripheral blood mononuclear cells (PBMCs). Dark-field microscopy (DFM) with a custom-written image acquisition software is developed for rapid 3D PBMC imaging by utilizing specific antiGFAP monoclonal antibody functionalized gold nanoparticles (antiGFAP-AuNPs) as contrast-generating probes. Subsequently, the developed algorithm is utilized in processing thousands of acquired images for rapid visualization and enumeration of bound antiGFAP-AuNP on each leukocyte. The proposed method demonstrates the specific binding of GFAP to the surface of PBMCs on a healthy donor blood. Thereafter, subpopulations of PBMCs with antiGFAP-AuNP binding are identified with the assistance of fluorescence imaging and DFM imaging, paving a new way to understanding the relationship between TBI and leukocyte classes. Hence, this study offers a rapid and ultra-sensitive strategy for biomarker assessment following TBI.},
}
- G. Gonzalez-Serrano, K. Lakatos, J. Hoballah, R. Fritz-klaus, L. Al-johani, J. Brooker, S. Jeong, C. L. Evans, P. Krauledat, D. W. Cramer, R. A. Hoffman, P. W. Hansen, and M. S. Patankar, «Characterization of cell-bound ca125 on immune cell subtypes of ovarian cancer patients using a novel imaging platform,» Cancers, vol. 13, p. 2072, 2021.
[Bibtex]@article{Gonzalez2021,
  author  = {Gonzalez-Serrano, German and Lakatos, Kornel and Hoballah, Jawad and Fritz-klaus, Roberta and Al-johani, Lojain and Brooker, Jeff and Jeong, Sinyoung and Evans, Conor L. and Krauledat, Petra and Cramer, Daniel W. and Hoffman, Robert A. and Hansen, W. Peter and Patankar, Manish S.},
  title   = {Characterization of Cell-Bound {CA125} on Immune Cell Subtypes of Ovarian Cancer Patients Using a Novel Imaging Platform},
  journal = {Cancers},
  year    = {2021},
  volume  = {13},
  number  = {9},
  pages   = {2072},
  doi     = {10.3390/cancers13092072},
}
2020
- F. Gomez-Donoso, F. Escalona, A. Bañuls, D. Abellan, and M. Cazorla, «Monocular 3d hand pose estimation for teleoperating low-cost actuators,» in The 21st international workshop de agentes físicos (waf), 2020.
[Bibtex]@inproceedings{Cruz2018c,
  author        = {Gomez-Donoso, Francisco and Escalona, F{\'e}lix and Ba{\~n}uls, Alejandro and Abellan, Daniel and Cazorla, Miguel},
  title         = {Monocular 3D Hand Pose Estimation for Teleoperating Low-cost Actuators},
  booktitle     = {The 21st International Workshop de Agentes F{\'\i}sicos (WAF)},
  year          = {2020},
  internal-note = {NOTE(review): key "Cruz2018c" does not match authors/year of this entry -- consider renaming (and updating citations)},
}
- J. F. Domenech, F. Escalona, F. Gomez-Donoso, and M. Cazorla, «A voxelized fractal descriptor for 3d object recognition,» Ieee access, vol. 8, p. 161958–161968, 2020.
[Bibtex]@article{Domenech2020,
  author  = {Domenech, J. F. and Escalona, F. and Gomez-Donoso, F. and Cazorla, M.},
  title   = {A Voxelized Fractal Descriptor for {3D} Object Recognition},
  journal = {IEEE Access},
  year    = {2020},
  volume  = {8},
  pages   = {161958--161968},
}
- N. Nasri, S. Orts-Escolano, and M. Cazorla, «A semg-controlled 3d game for rehabilitation therapies: real-time hand gesture recognition using deep learning techniques,» Sensors, vol. 20, iss. 22, 2020.
[Bibtex]@article{Nadia2020,
  author  = {Nasri, Nadia and Orts-Escolano, Sergio and Cazorla, Miguel},
  title   = {A {sEMG}-controlled {3D} game for rehabilitation therapies: real-time hand gesture recognition using deep learning techniques},
  journal = {Sensors},
  year    = {2020},
  volume  = {20},
  number  = {22},
}
- E. Martinez-Martin, M. Cazorla, and S. Orts-Escolano, «Machine learning techniques for assistive robotics,» Electronics, vol. 9, iss. 5, 2020.
[Bibtex]@article{Gomez-Donoso20,
  author  = {Martinez-Martin, Ester and Cazorla, Miguel and Orts-Escolano, Sergio},
  title   = {Machine Learning Techniques for Assistive Robotics},
  journal = {Electronics},
  year    = {2020},
  volume  = {9},
  number  = {5},
}
- F. Gomez-Donoso, F. Escalona, and M. Cazorla, «Par3DNet: using 3DCNNs for object recognition on tridimensional partial views,» Applied sciences, vol. 10, iss. 10, 2020.
[Bibtex]@article{Gomez-Donoso20b,
  author        = {Gomez-Donoso, Francisco and Escalona, Felix and Cazorla, Miguel},
  title         = {{Par3DNet}: Using {3DCNNs} for Object Recognition on Tridimensional Partial Views},
  journal       = {Applied Sciences},
  year          = {2020},
  volume        = {10},
  number        = {10},
  internal-note = {NOTE(review): key renamed from "Gomez-Donoso20", which duplicated another entry in this file -- update citations},
}
- W. Zhou, E. Cruz, S. Worrall, F. Gomez-Donoso, M. Cazorla, and E. Nebot, «Weakly-supervised road condition classification using automatically generated labels,» in Proc. of the 23rd ieee international conference on intelligent transportation systems (itsc), 2020.
[Bibtex]@inproceedings{ITSC2020,
  author    = {Zhou, Wei and Cruz, Edmanuel and Worrall, Stewart and Gomez-Donoso, Francisco and Cazorla, Miguel and Nebot, Eduardo},
  title     = {Weakly-supervised Road Condition Classification Using Automatically Generated Labels},
  booktitle = {Proc. of The 23rd IEEE International Conference on Intelligent Transportation Systems (ITSC)},
  year      = {2020},
}
- C. Cano-Espinosa, M. Cazorla, and G. Gonzalez-Serrano, «Computer aided detection of pulmonary embolism using multi-slice multi-axial segmentation,» Applied sciences, vol. 10, iss. 8, p. 2945, 2020.
[Bibtex]@article{Cano-Espinosa20,
  author  = {Cano-Espinosa, Carlos and Cazorla, Miguel and Gonzalez-Serrano, German},
  title   = {Computer Aided Detection of Pulmonary Embolism Using Multi-slice Multi-axial Segmentation},
  journal = {Applied Sciences},
  year    = {2020},
  volume  = {10},
  number  = {8},
  pages   = {2945},
  issn    = {2076-3417},
  doi     = {10.3390/app10082945},
}
- F. Martin-Rico, F. G. Donoso, F. Escalona, J. G. Rodriguez, and M. Cazorla, «Semantic visual recognition in a cognitive architecture for social robots,» Integrated computer-aided engineering, vol. 50, iss. 1, pp. 14-28, 2020.
[Bibtex]@article{Martin-Rico2020,
  author        = {Martin-Rico, Francisco and Gomez Donoso, Francisco and Escalona, Felix and Garcia Rodriguez, Jose and Cazorla, Miguel},
  title         = {Semantic Visual Recognition in a Cognitive Architecture for Social Robots},
  journal       = {Integrated Computer-Aided Engineering},
  year          = {2020},
  volume        = {50},
  number        = {1},
  pages         = {14--28},
  internal-note = {NOTE(review): volume/number/pages are identical to the Applied Intelligence entry (Cruz2020AI) -- looks like a copy-paste; verify against the published article},
}
- F. Gomez-Donoso, E. Cruz, M. Cazorla, S. Worrall, and E. Nebot, «Using a 3d cnn for rejecting false positives on pedestrian detection,» in Proc. of international joint conference on neural networks (ijcnn), 2020.
[Bibtex]@inproceedings{IJCNNFran2020,
  author    = {Gomez-Donoso, Francisco and Cruz, Edmanuel and Cazorla, Miguel and Worrall, Stewart and Nebot, Eduardo},
  title     = {Using a {3D} {CNN} for Rejecting False Positives on Pedestrian Detection},
  booktitle = {Proc. of International Joint Conference on Neural Networks (IJCNN)},
  year      = {2020},
}
- F. Escalona, D. Viejo, R. Fisher, and M. Cazorla, «Nurbsnet: a nurbs approach for 3d object recognition,» in Proc. of international joint conference on neural networks (ijcnn), 2020.
[Bibtex]@inproceedings{IJCNNFelix2020,
  author    = {Escalona, Felix and Viejo, Diego and Fisher, Robert and Cazorla, Miguel},
  title     = {{NurbsNet}: A {NURBS} approach for {3D} object recognition},
  booktitle = {Proc. of International Joint Conference on Neural Networks (IJCNN)},
  year      = {2020},
}
- E. Martínez-Martín, F. Escalona, and M. Cazorla, «Socially assistive robots for disabled people: a survey,» Electronics, vol. 9, iss. 2, 2020.
[Bibtex]@article{Rangel2020Electronics,
  author  = {Mart{\'\i}nez-Mart{\'\i}n, Ester and Escalona, F{\'e}lix and Cazorla, Miguel},
  title   = {Socially Assistive Robots for Disabled People: A survey},
  journal = {Electronics},
  year    = {2020},
  volume  = {9},
  number  = {2},
}
- C. Cano-Espinosa, G. Gonzalez-Serrano, G. R. Washko, M. Cazorla, and R. San Jose Estepar, «Biomarker localization from deep learning regression networks,» Ieee transactions on medical imaging, vol. 39, iss. 6, p. 2121–2132, 2020.
[Bibtex]@article{Espinosa20,
  author  = {Cano-Espinosa, Carlos and Gonzalez-Serrano, German and Washko, George R. and Cazorla, Miguel and San Jose Estepar, Raul},
  title   = {Biomarker Localization from Deep Learning Regression Networks},
  journal = {IEEE Transactions on Medical Imaging},
  year    = {2020},
  volume  = {39},
  number  = {6},
  pages   = {2121--2132},
}
- F. Escalona, E. Martinez-Martin, E. Cruz, M. Cazorla, and F. Gomez-Donoso, «Eva: evaluating at-home rehabilitation exercises using augmented reality and low-cost sensors,» Virtual reality, vol. 24, p. 567–581, 2020.
[Bibtex]@Article{Escalona2019, author="Escalona, Felix and Martinez-Martin, Ester and Cruz, Edmanuel and Cazorla, Miguel and Gomez-Donoso, Francisco", title="EVA: EVAluating at-home rehabilitation exercises using augmented reality and low-cost sensors", journal="Virtual Reality", volume = "24", year="2020", pages="567--581", abstract="Over one billion people in the world live with some form of disability. This is incessantly increasing due to aging population and chronic diseases. Among the emerging social needs, rehabilitation services are the most required. However, they are scarce and expensive what considerably limits access to them. In this paper, we propose EVA, an augmented reality platform to engage and supervise rehabilitation sessions at home using low-cost sensors. It also stores the user's statistics and allows therapists to tailor the exercise programs according to their performance. This system has been evaluated in both qualitative and quantitative ways obtaining very promising results.", issn="1434-9957", doi="10.1007/s10055-019-00419-4" }
- E. Cruz, J. C. Rangel, F. Gomez-Donoso, and M. Cazorla, «How to add new knowledge to already trained deep learning models applied to semantic localization,» Applied intelligence, vol. 50, iss. 1, pp. 14-28, 2020.
[Bibtex]@article{Cruz2020AI, title = "How to add new knowledge to already trained deep learning models applied to semantic localization", journal = "Applied Intelligence", volume = "50", number = "1", pages = "14-28", year = "2020", author = "Edmanuel Cruz and Jos\'{e} Carlos Rangel and Francisco Gomez-Donoso and Miguel Cazorla" }
- Z. Bauer, A. Dominguez, E. Cruz, F. Gomez-Donoso, S. Orts-Escolano, and M. Cazorla, «Enhancing perception for the visually impaired with deep learning techniques and low-cost wearable sensors,» Pattern recognition letters, vol. 137, p. 27–36, 2020.
[Bibtex]@article{rompetechos2019, title = "Enhancing Perception for the Visually Impaired with Deep Learning Techniques and Low-cost Wearable Sensors", journal = "Pattern Recognition Letters", volume = "137", year = "2020", pages = "27--36", author = "Zuria Bauer and Alejandro Dominguez and Edmanuel Cruz and Francisco Gomez-Donoso and Sergio Orts-Escolano and Miguel Cazorla" }
- J. Garcia-Rodriguez, F. Gomez-Donoso, A. Garcia-Garcia, M. Cazorla, S. Orts-Escolano, S. Oprea, Z. Bauer, J. Castro-Vargas, P. Martinez-Gonzalez, D. Ivorra-Piqueres, F. Escalona-Moncholí, E. Aguirre, M. Garcia-Silviente, M. Garcia-Perez, J. M. Cañas, F. Martin-Rico, J. Gines, and F. Rivas-Montero, «Combaho: a deep learning system for integrating brain injury patients in society,» Pattern recognition letters, vol. 137, p. 80–90, 2020.
[Bibtex]@article{combaho2019, title = "COMBAHO: A Deep Learning System for Integrating Brain Injury Patients in Society", journal = "Pattern Recognition Letters", volume = "137", pages = "80--90", year = "2020", author = "Jose Garcia-Rodriguez and Francisco Gomez-Donoso and Alberto Garcia-Garcia and Miguel Cazorla and Sergio Orts-Escolano and Sergiu Oprea and Zuria Bauer and John Castro-Vargas and Pablo Martinez-Gonzalez and David Ivorra-Piqueres and F\'{e}lix Escalona-Monchol\'{i} and Eugenio Aguirre and Miguel Garcia-Silviente and Marcelo Garcia-Perez and Jose M Ca{\~n}as and Francisco Martin-Rico and Jonathan Gines and Francisco Rivas-Montero" }
- S. Jeong, G. Gonzalez-Serrano, A. Ho, N. Nowell, L. A. Austin, J. Hoballah, F. Mubarak, A. Kapur, M. S. Patankar, D. W. Cramer, P. Krauledat, P. W. Hansen, and C. L. Evans, «Plasmonic nanoparticle-based digital cytometry to quantify muc16 binding on the surface of leukocytes in ovarian cancer,» Acs sensors, vol. 5, pp. 2772-2782, 2020.
[Bibtex]@article{Jeong2020, abstract = {Although levels of the circulating ovarian cancer marker (CA125) can distinguish ovarian masses that are likely to be malignant and correlate with severity of disease, serum CA125 has not proved useful in general population screening. Recently, cell culture studies have indicated that MUC16 may bind to the Siglec-9 receptor on natural killer (NK) cells where it downregulates the cytotoxicity of NK cells, allowing ovarian cancer cells to evade immune surveillance. We present evidence that the presence of MUC16 can be locally visualized and imaged on the surface of peripheral blood mononuclear cells (PBMCs) in ovarian cancer via a novel "digital"cytometry technique that incorporates: (i) OC125 monoclonal antibody-conjugated gold nanoparticles as optical nanoprobes, (ii) a high contrast dark-field microscopy system to detect PBMC-bound gold nanoparticles, and (iii) a computational algorithm for automatic counting of these nanoparticles to estimate the quantity of surface-bound MUC16. The quantitative detection of our technique was successfully demonstrated by discriminating clones of the ovarian cancer cell line, OVCAR3, based on low, intermediate, and high expression levels of MUC16. Additionally, PBMC surface-bound MUC16 was tracked in an ovarian cancer patient over a 17 month period; the results suggest that the binding of MUC16 on the surface of immune cells may play an early indicator for recurrent metastasis 6 months before computational tomography-based clinical diagnosis. We also demonstrate that the levels of surface-bound MUC16 on PBMCs from five ovarian cancer patients were greater than those from five healthy controls.}, author = {Sinyoung Jeong and Germ\'{a}n Gonzalez-Serrano and Alexander Ho and Nicholas Nowell and Lauren A. Austin and Jawad Hoballah and Fatima Mubarak and Arvinder Kapur and Manish S. Patankar and Daniel W. Cramer and Petra Krauledat and W. Peter Hansen and Conor L. 
Evans}, doi = {10.1021/acssensors.0c00567}, issn = {23793694}, issue = {9}, journal = {ACS Sensors}, keywords = {MUC16/CA125,computational analysis,dark-field microscopy,digital cytometry,leukocytes,longitudinal study,ovarian cancer,plasmonic gold nanoparticle}, pages = {2772-2782}, pmid = {32847358}, title = {Plasmonic Nanoparticle-Based Digital Cytometry to Quantify MUC16 Binding on the Surface of Leukocytes in Ovarian Cancer}, volume = {5}, year = {2020}, }
2019
- F. Gomez-Donoso, F. Escalona, F. M. Rivas, J. M. Cañas, and M. Cazorla, «Enhancing the ambient assisted living capabilities with a mobile robot,» Computational intelligence and neuroscience, vol. 2019, 2019.
[Bibtex]@article{gomez2019enhancing, title={Enhancing the ambient assisted living capabilities with a mobile robot}, author={Gomez-Donoso, Francisco and Escalona, F{\'e}lix and Rivas, Francisco Miguel and Ca{\~n}as, Jose Maria and Cazorla, Miguel}, journal={Computational intelligence and neuroscience}, volume={2019}, year={2019}, publisher={Hindawi} }
- Z. Bauer, F. Escalona, E. Cruz, M. Cazorla, and F. Gomez-Donoso, «Refining the fusion of pepper robot and estimated depth maps method for improved 3d perception,» Ieee access, vol. 7, pp. 185076-185085, 2019.
[Bibtex]@ARTICLE{8936685, author={Z. {Bauer} and F. {Escalona} and E. {Cruz} and M. {Cazorla} and F. {Gomez-Donoso}}, journal={IEEE Access}, title={Refining the Fusion of Pepper Robot and Estimated Depth Maps Method for Improved 3D Perception}, year={2019}, volume={7}, number={}, pages={185076-185085},}
- J. M. Torres-Camara, F. Escalona, F. Gomez-Donoso, and M. Cazorla, «Map slammer: densifying scattered kslam 3d maps with estimated depth,» in Proc. of the robot’2019: fourth iberian robotics conference, 2019.
[Bibtex]@INPROCEEDINGs{robot2019b, Author = {Jose Miguel Torres-Camara and Felix Escalona and Francisco Gomez-Donoso and Miguel Cazorla}, Title = {Map slammer: Densifying scattered KSLAM 3D Maps with Estimated Depth}, Booktitle = {Proc. of the ROBOT'2019: Fourth Iberian Robotics Conference}, month={November}, Year = {2019} }
- D. Azuar, G. Gallud, F. Escalona, F. Gomez-Donoso, and M. Cazorla, «A story-telling social robot with emotion recognition capabilities for the intellectually challenged,» in Proc. of the robot’2019: fourth iberian robotics conference, 2019.
[Bibtex]@INPROCEEDINGs{robot2019, Author = {David Azuar and Guillermo Gallud and Felix Escalona and Francisco Gomez-Donoso and Miguel Cazorla}, Title = {A story-telling social Robot with Emotion Recognition Capabilities for the Intellectually Challenged}, Booktitle = {Proc. of the ROBOT'2019: Fourth Iberian Robotics Conference}, month={November}, Year = {2019} }
- E. Martinez-Martin, A. Costa, and M. Cazorla, «Pharos 2.0 – a physical assistant robot system improved,» Sensors, vol. 2019, 2019.
[Bibtex]@article{Martinez2019Sensors, author = {Ester Martinez-Martin and Angelo Costa and Miguel Cazorla}, year = {2019}, title = {PHAROS 2.0 – A PHysical Assistant RObot System improved}, journal = {Sensors}, volume = {2019} }
- Z. Bauer, F. Gomez-Donoso, E. Cruz, sergio Orts-Escolano, and M. Cazorla, «Uasol, a large-scale high-resolution outdoor stereo dataset,» Scientific data, vol. 6, iss. 1, p. 1–14, 2019.
[Bibtex]@article{bauer2019uasol, author = {Bauer, Zuria and Gomez-Donoso, Francisco and Cruz, Edmanuel and Orts-Escolano, sergio and Cazorla, Miguel}, year = {2019}, title = {UASOL, a large-scale high-resolution outdoor stereo dataset}, journal = {Scientific Data}, publisher = {Nature Publishing Group}, issn = {2052-4463}, doi = {10.1038/s41597-019-0168-5}, volume = {6}, month = {8}, pages = {1--14}, number = {1}, url = {https://doi.org/10.1038/s41597-019-0168-5}, abstract = {In this paper, we propose a new dataset for outdoor depth estimation from single and stereo RGB images. The dataset was acquired from the point of view of a pedestrian. Currently, the most novel approaches take advantage of deep learning-based techniques, which have proven to outperform traditional state-of-the-art computer vision methods. Nonetheless, these methods require large amounts of reliable ground-truth data. Despite there already existing several datasets that could be used for depth estimation, almost none of them are outdoor-oriented from an egocentric point of view. Our dataset introduces a large number of high-definition pairs of color frames and corresponding depth maps from a human perspective. In addition, the proposed dataset also features human interaction and great variability of data, as shown in this work. Machine-accessible metadata file describing the reported data (ISA-Tab format)} }
- F. Gomez-Donoso, S. Orts-Escolano, and M. Cazorla, «Accurate and efficient 3d hand pose regression for robot hand teleoperation using a monocular rgb camera,» Expert systems with applications, vol. 136, p. 327–337, 2019.
[Bibtex]@article{Donoso2019ESWA, title = "Accurate and Efficient 3D Hand Pose Regression for Robot Hand Teleoperation using a Monocular RGB Camera", journal = "Expert Systems with Applications", volume = "136", pages = "327--337", year = "2019", author = "Francisco Gomez-Donoso and Sergio Orts-Escolano and Miguel Cazorla" }
- E. Martinez-Martin and M. Cazorla, «A socially assistive robot for elderly exercise promotion,» Ieee access, vol. 7, pp. 75515-75529, 2019.
[Bibtex]@article{Martinez-Martin2019Access, title = "A Socially Assistive Robot for Elderly Exercise Promotion", journal = "IEEE Access", volume = "7", pages = "75515-75529", year = "2019", author = "Ester Martinez-Martin and Miguel Cazorla" }
- E. Martinez-Martin and M. Cazorla, «Rehabilitation technology: assistance from hospital to home,» Computational intelligence and neuroscience, vol. 2019, 2019.
[Bibtex]@article{Martin2019Review, title = "Rehabilitation Technology: Assistance From Hospital to Home", journal = "Computational Intelligence and Neuroscience", volume = "2019", year = "2019", author = "Ester Martinez-Martin and Miguel Cazorla" }
- N. Nasri, S. Orts-Escolano, F. Gomez-Donoso, and M. Cazorla, «Using inferred gestures from semg signal to teleoperate a domestic robot for the disabled,» in Proc. of the 15th international work-conference on artificial neural networks, 2019.
[Bibtex]@inproceedings{Iwwan2019, Author = {Nadia Nasri and Sergio Orts-Escolano and Francisco Gomez-Donoso and Miguel Cazorla}, Title = {Using Inferred Gestures from {sEMG} Signal to Teleoperate a Domestic Robot for the Disabled}, Booktitle = {Proc. of the 15th International Work-Conference on Artificial Neural Networks}, Year = {2019} }
- F. Martin-Rico, F. Gomez-Donoso, F. Escalona, M. Cazorla, and J. Garcia-Rodriguez, «Artificial semantic memory with autonomous learning applied to social robots,» in Proc. of international work-conference on the interplay between natural and artificial computation, 2019.
[Bibtex]@INPROCEEDINGs{Iwinac2019, Author = {Francisco Martin-Rico and Francisco Gomez-Donoso and Felix Escalona and Miguel Cazorla and Jose Garcia-Rodriguez}, Title = {Artificial semantic Memory with Autonomous Learning applied to social Robots}, Booktitle = {Proc. of International Work-Conference on the Interplay Between Natural and Artificial Computation}, Year = {2019} }
- F. Gomez-Donoso, F. Escalona, F. Rivas-Montero, J. M. Cañas, and M. Cazorla, «Enhancing the ambient assisted living capabilities with a mobile robot,» Computational intelligence and neuroscience, vol. 2019, 2019.
[Bibtex]@article{cinMadrid2019, title = "Enhancing the Ambient Assisted Living Capabilities with a Mobile Robot", journal = "Computational Intelligence and Neuroscience", volume = "2019", year = "2019", author = "Francisco Gomez-Donoso and Felix Escalona and Francisco Rivas-Montero and Jos\'{e} M. Cañas and Miguel Cazorla" }
- N. Nasri, S. Orts-Escolano, F. Gomez-Donoso, and M. Cazorla, «Inferring static hand poses from a low-cost non-intrusive semg sensor,» Sensors, vol. 2019, 2019.
[Bibtex]@article{Nadia2019sensors, title = "Inferring Static Hand Poses from a Low-Cost Non-Intrusive {sEMG} Sensor", journal = "Sensors", volume = "2019", year = "2019", author = "Nadia Nasri and Sergio Orts-Escolano and Francisco Gomez-Donoso and Miguel Cazorla" }
- F. Gomez-Donoso, S. Orts-Escolano, and M. Cazorla, «Large-scale multiview 3d hand pose dataset,» Image and vision computing, vol. 81, pp. 25-33, 2019.
[Bibtex]@article{DonosoIMAVIs2018, title = "Large-scale Multiview 3D Hand Pose Dataset", journal = "Image and Vision Computing", volume = "81", pages = "25-33", year = "2019", author = "Francisco Gomez-Donoso and Sergio Orts-Escolano and Miguel Cazorla" }
- J. Navarrete, F. Gomez-Donoso, D. Viejo, and M. Cazorla, «Multilevel classification using a taxonomy applied to recognizing diptera images,» in Proc. of international joint conference on neural networks (ijcnn), 2019.
[Bibtex]@INPROCEEDINGs{IJCNNNavarrete2019, Author = {Javier Navarrete and Francisco Gomez-Donoso and Diego Viejo and Miguel Cazorla}, Title = {Multilevel Classification using a Taxonomy Applied to Recognizing Diptera Images}, Booktitle = {Proc. of International Joint Conference on Neural Networks (IJCNN)}, Year = {2019} }
- C. C. Espinosa, G. Gonzalez-Serrano, G. R. Washko, M. Cazorla, and R. S. J. Estepar, «Localizing image-based biomarker regression without training masks: a new approach to biomarker discovery,» in Proc. of ieee 16th international symposium on biomedical imaging (isbi), 2019.
[Bibtex]@inproceedings{isbiCano2019, Author = {Carlos Cano Espinosa and German Gonzalez-Serrano and George R. Washko and Miguel Cazorla and Raul San Jose Estepar}, Title = {Localizing Image-Based Biomarker Regression without Training Masks: A New Approach to Biomarker Discovery}, Booktitle = {Proc. of IEEE 16th International Symposium on Biomedical Imaging (ISBI)}, Year = {2019}, doi = {10.1109/ISBI.2019.8759474}, isbn = {9781538636411}, issn = {19458452} }
- J. C. Rangel, M. Cazorla, I. García-Varea, C. Romero-González, and J. Martínez-Gómez, «Automatic semantic maps generation from lexical annotations,» Autonomous robots, vol. 43, iss. 3, p. 697–712, 2019.
[Bibtex]@Article{Rangel2018b, author="Jose Carlos Rangel and Miguel Cazorla and Ismael Garc\'{i}a-Varea and Cristina Romero-Gonz\'{a}lez and Jes\'{u}s Mart\'{i}nez-G\'{o}mez", title="Automatic Semantic Maps Generation from Lexical Annotations", journal="Autonomous Robots", year="2019", volume="43", number="3", pages = "697--712" }
- E. Cruz, S. Orts-Escolano, F. Gómez-Donoso, C. Rizo, J. C. Rangel, H. Mora, and M. Cazorla, «An augmented reality application for improving shopping experience in large retail stores,» Virtual reality, vol. 23, iss. 3, p. 281–291, 2019.
[Bibtex]@Article{Cruz2018, author="Edmanuel Cruz and Sergio Orts-Escolano and Francisco G\'{o}mez-Donoso and Carlos Rizo and Jose Carlos Rangel and Higinio Mora and Miguel Cazorla", title="An augmented reality application for improving shopping experience in large retail stores", journal="Virtual Reality", year="2019", volume="23", number="3", pages="281--291" }
- G. Gonzalez-Serrano and C. L. Evans, «Biomedical image processing with containers and deep learning: an automated analysis pipeline,» Bioessays, vol. 41, 2019.
[Bibtex]@article{Gonzalez2019, abstract = {© 2019 The Authors. BioEssays Published by WILEY Periodicals, Inc. Here, a streamlined, scalable, laboratory approach is discussed that enables medium-to-large dataset analysis. The presented approach combines data management, artificial intelligence, containerization, cluster orchestration, and quality control in a unified analytic pipeline. The unique combination of these individual building blocks creates a new and powerful analysis approach that can readily be applied to medium-to-large datasets by researchers to accelerate the pace of research. The proposed framework is applied to a project that counts the number of plasmonic nanoparticles bound to peripheral blood mononuclear cells in dark-field microscopy images. By using the techniques presented in this article, the images are automatically processed overnight, without user interaction, streamlining the path from experiment to conclusions.}, author = {German Gonzalez-Serrano and Conor L. Evans}, doi = {10.1002/bies.201900004}, issn = {15211878}, issue = {6}, journal = {BioEssays}, keywords = {automation,data processing,image analysis,optics}, title = {Biomedical Image Processing with Containers and Deep Learning: An Automated Analysis Pipeline}, volume = {41}, year = {2019}, }
- E. Pardo, G. Gonzalez-Serrano, J. M. Tucker-Schwartz, S. R. Dave, and N. Malpica, «H-em: an algorithm for simultaneous cell diameter and intensity quantification in lowresolution imaging cytometry,» Plos one, vol. 14, 2019.
[Bibtex]@article{Pardo2019, abstract = {© 2019 Pardo et al. This is an open access article distributed under the terms of the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original author and source are credited. Fluorescent cytometry refers to the quantification of cell physical properties and surface biomarkers using fluorescently-tagged antibodies. The generally preferred techniques to perform such measurements are flow cytometry, which performs rapid single cell analysis by flowing cells one-by-one through a channel, and microscopy, which eliminates the complexity of the flow channel, offering multi-cell analysis at a lesser throughput. Low-magnification image-based cytometers, also called "cell astronomy" systems, hold promise of simultaneously achieving both instrumental simplicity and high throughput. In this magnification regime, a single cell is mapped to a handful of pixels in the image. While very attractive, this idea has, so far, not been proven to yield quantitative results of cell-labeling, mainly due to the poor signal-to-noise ratio present in those images and to partial volume effects. In this work we present a cell astronomy system that, when coupled with custom-developed algorithms, is able to quantify cell intensities and diameters reliably. We showcase the system using calibrated MESF beads and fluorescently stained leukocytes, achieving good population identification in both cases. The main contribution of the proposed system is in the development of a novel algorithm, H-EM, that enables inter-cluster separation at a very low magnification regime (2x). Such algorithm provides more accurate brightness estimates than DAOSTORM when compared to manual analysis, while fitting cell location, brightness, diameter, and background level concurrently. The algorithm first performs Fisher discriminant analysis to detect bright spots. 
From each spot an expectation-maximization algorithm is initialized over a heterogeneous mixture model (H-EM), this algorithm recovers both the cell fluorescence and diameter with sub-pixel accuracy while discriminating the background noise. Finally, a recursive splitting procedure is applied to discern individual cells in cell clusters.}, author = {E. Pardo and German Gonzalez-Serrano and J.M. Tucker-Schwartz and S.R. Dave and N. Malpica}, doi = {10.1371/journal.pone.0222265}, issn = {19326203}, issue = {9}, journal = {PLoS ONE}, title = {H-EM: An algorithm for simultaneous cell diameter and intensity quantification in lowresolution imaging cytometry}, volume = {14}, year = {2019}, }
2018
- C. Cano-Espinosa, G. Gonzalez-Serrano, G. R. Washko, M. Cazorla, and R. S. J. Estépar, «On the relevance of the loss function in the agatston score regression from non-ecg gated ct scans,» in Image analysis for moving organ, breast, and thoracic images, Springer, 2018, p. 326–334.
[Bibtex]@incollection{cano2018relevance, title={On the Relevance of the Loss Function in the Agatston Score Regression from Non-ECG Gated CT Scans}, author={Cano-Espinosa, Carlos and Gonzalez-Serrano, Germ{\'a}n and Washko, George R and Cazorla, Miguel and Est{\'e}par, Ra{\'u}l San Jos{\'e}}, booktitle={Image Analysis for Moving Organ, Breast, and Thoracic Images}, pages={326--334}, year={2018}, publisher={Springer} }
- A. Dominguez-Sanchez, M. Cazorla, and S. Orts-Escolano, «A new dataset and performance evaluation of a region-based cnn for urban object detection,» Electronics, vol. 7, iss. 11, 2018.
[Bibtex]@article{Dominguez2018b, title = "A New Dataset and Performance Evaluation of a Region-based CNN for Urban Object Detection", journal = "Electronics", volume = "7", number="11", year = "2018", author = "Alejandro Dominguez-Sanchez and Miguel Cazorla and Sergio Orts-Escolano" }
- E. Cruz, F. Escalona, Z. Bauer, M. Cazorla, J. Garcia-Rodriguez, E. Martinez-Martin, J. C. Rangel, and F. Gomez-Donoso, «Geoffrey: an automated schedule system on a social robot for the intellectually challenged,» Computational intelligence and neuroscience, vol. 2018, 2018.
[Bibtex]@article{Cruz2018Geoffrey, title = "Geoffrey: An Automated Schedule System on a Social Robot for the Intellectually Challenged", journal = "Computational Intelligence and Neuroscience", volume = "2018", year = "2018", author = "Edmanuel Cruz and Felix Escalona and Zuria Bauer and Miguel Cazorla and Jose Garcia-Rodriguez and Ester Martinez-Martin and Jose Carlos Rangel and Francisco Gomez-Donoso" }
- A. Garcia, P. Martinez-Gonzalez, s. Oprea, J. A. Castro-Vargas, s. Orts-Escolano, J. Garcia-Rodriguez, and A. Jover-Alvarez, «The robotrix: an extremely photorealistic and very-large-scale indoor dataset of sequences with robot trajectories and interactions.,» in Proc. of the international conference on intelligent robots and systems (iros), 2018.
[Bibtex]@inproceedings{agarcia-robotrix18, Author = {A. Garcia and P. Martinez-Gonzalez and S. Oprea and J.A. Castro-Vargas and S. Orts-Escolano and J. Garcia-Rodriguez and A. Jover-Alvarez}, Title = {The RobotriX: An eXtremely Photorealistic and Very-Large-Scale Indoor Dataset of Sequences with Robot Trajectories and Interactions}, Booktitle = {Proc. of The International Conference on Intelligent Robots and Systems (IROS)}, Year = {2018} }
- A. Garcia-Garcia, sergio Orts-Escolano, sergiu Oprea, V. Villena-Martinez, P. Martinez-Gonzalez, and J. G. Rodriguez, «A survey on deep learning techniques for image and video semantic segmentation,» Appl. soft comput., vol. 70, p. 41–65, 2018.
[Bibtex]@article{Garcia-Garcia18, author = {Alberto Garcia-Garcia and Sergio Orts-Escolano and Sergiu Oprea and Victor Villena-Martinez and Pablo Martinez-Gonzalez and Jose Garcia Rodriguez}, title = {A survey on deep learning techniques for image and video semantic segmentation}, journal = {Appl. Soft Comput.}, volume = {70}, pages = {41--65}, year = {2018}, doi = {10.1016/j.asoc.2018.05.018} }
- E. Cruz, Z. Bauer, J. C. Rangel, M. Cazorla, and F. Gomez-Donoso, «Semantic localization of a robot in a real home,» in Workshop de agentes físicos (waf), 2018.
[Bibtex]@inproceedings{Cruz2018c, author = {Edmanuel Cruz and Zuria Bauer and Jos{\'{e}} Carlos Rangel and Miguel Cazorla and Francisco Gomez-Donoso}, title = {semantic localization of a robot in a real home}, booktitle = {Workshop de Agentes F\'{i}sicos (WAF)}, year = {2018} }
- Z. Bauer, F. Escalona, E. Cruz, M. Cazorla, and F. Gomez-Donoso, «Improving 3d estimation for the pepper robot using monocular depth prediction,» in Workshop de agentes físicos (waf), 2018.
[Bibtex]@inproceedings{Bauer2018, author = {Zuria Bauer and Felix Escalona and Edmanuel Cruz and Miguel Cazorla and Francisco Gomez-Donoso}, title = {Improving 3D estimation for the Pepper robot using monocular depth prediction}, booktitle = {Workshop de Agentes F\'{i}sicos (WAF)}, year = {2018} }
- A. Costa, E. Martinez-Martin, M. Cazorla, and V. Julian, «Pharos—physical assistant robot system,» Sensors, vol. 18, iss. 8, pp. 95-107, 2018.
[Bibtex]@article{Costa2018, title = {PHAROS—PHysical Assistant RObot System}, journal = {Sensors}, volume = {18}, number = {8}, pages = {95--107}, year = {2018}, author = {Angelo Costa and Ester Martinez-Martin and Miguel Cazorla and Vicente Julian}, ISSN = {1424-8220}, DOI = {10.3390/s18082633} }
- J. Navarrete-sanchez, D. Viejo, and M. Cazorla, «Compression and registration of 3d point clouds using gmms,» Pattern recognition letters, vol. 110, pp. 8-15, 2018.
[Bibtex]@Article{Navarrete2018, author="Javier Navarrete-sanchez and Diego Viejo and Miguel Cazorla", title="Compression and Registration of 3D Point Clouds Using GMMs", journal="Pattern Recognition Letters", year="2018", volume="110", pages="8-15" }
- E. Cruz, J. C. Rangel, F. Gomez-Donoso, and M. Cazorla, «Finding the place how to train and use convolutional neural networks for a dynamically learning robot,» in International joint conference on neural networks (ijcnn), 2018.
[Bibtex]@INPROCEEDINGs{Cruz2018b, author={Edmanuel Cruz and Jose Carlos Rangel and Francisco Gomez-Donoso and Miguel Cazorla}, title={Finding the place how to train and use convolutional neural networks for a dynamically learning robot}, booktitle={International Joint Conference on Neural Networks (IJCNN)}, year={2018} }
- A. Dominguez, sergio Orts-Escolano, and M. Cazorla, «A new dataset and performance evaluation of a region-based cnn for urban object detection,» in International joint conference on neural networks (ijcnn), 2018.
[Bibtex]@inproceedings{Alex2018, author={Alejandro Dominguez and Sergio Orts-Escolano and Miguel Cazorla}, title={A New Dataset and Performance Evaluation of a Region-based CNN for Urban Object Detection}, booktitle={International Joint Conference on Neural Networks (IJCNN)}, year={2018} }
- J. C. Rangel, M. J. Gomez, R. C. Gonzalez, G. I. Varea, and M. Cazorla, «Semi-supervised 3d object recognition through cnn labeling,» Applied soft computing, vol. 65, pp. 603-613, 2018.
[Bibtex]@Article{Rangel2018, author="J.C. Rangel and J. Martinez Gomez and C. Romero Gonzalez and I. Garcia Varea and M. Cazorla", title="Semi-supervised 3D Object Recognition through CNN Labeling", journal="Applied Soft Computing", year="2018", volume="65", pages="603-613", doi="10.1016/j.asoc.2018.02.005" }
- C. C. Espinosa, G. Gonzalez-Serrano, G. R. Washko, M. Cazorla, and R. J. san Estépar, «Automated agatston score computation in non-ecg gated ct scans using deep learning,» in Proceedings of the spie: medical imaging 2018, 2018.
[Bibtex]@inproceedings{Cano2018sPIE, author={Carlos Cano Espinosa and Germ\'{a}n Gonzalez-Serrano and George R. Washko and Miguel Cazorla and Ra\'{u}l San Jos\'{e} Est\'{e}par}, booktitle={Proceedings of the SPIE: Medical Imaging 2018}, title={Automated Agatston score computation in non-ECG gated CT scans using deep learning}, year={2018}, doi = {10.1117/12.2293681}, month = feb}
- sergio Orts-Escolano, J. Garcia-Rodriguez, M. Cazorla, Vicente Morell, J. Azorin, M. saval, A. Garcia-Garcia, and V. Villena, «Bioinspired point cloud representation: 3d object tracking,» Neural computing and applications, vol. 29, iss. 9, p. 663–672, 2018.
[Bibtex]@article{Orts-Escolano2016NCAA, title = "Bioinspired Point Cloud Representation: 3D object tracking", journal = "Neural Computing and Applications", volume = "29", number = "9", pages = "663--672", year = "2018", doi = "10.1007/s00521-016-2585-0", author = "Sergio Orts-Escolano and Jose Garcia-Rodriguez and Miguel Cazorla and Vicente Morell and Jorge Azorin and Marcelo Saval and Alberto Garcia-Garcia and Victor Villena" }
- A. Angelopoulou, J. G. Rodriguez, sergio Orts-Escolano, G. Gupta, and A. Psarrou, «Fast 2d/3d object representation with growing neural gas,» Neural computing and applications, vol. 29, iss. 10, p. 903–919, 2018.
[Bibtex]@article{Angelopoulou2016NCAA, title = "Fast 2D/3D Object Representation with Growing Neural Gas", journal = "Neural Computing and Applications", volume = "29", number = "10", pages = "903--919", year = "2018", doi = "10.1007/s00521-016-2579-y", author = "Anastassia Angelopoulou and Jose Garcia Rodriguez and Sergio Orts-Escolano and Gaurav Gupta and Alexandra Psarrou" }
- A. Garcia-Garcia, sergio Orts-Escolano, J. Garcia-Rodriguez, and M. Cazorla, «Interactive 3d object recognition pipeline on mobile gpgpu computing platforms using low-cost rgb-d sensors,» Journal of real-time image processing, vol. 14, iss. 3, p. 585–604, 2018.
[Bibtex]@article{Garcia2016RTIP, title = "Interactive 3D object recognition pipeline on mobile GPGPU computing platforms using low-cost RGB-D sensors", journal = "Journal of Real-Time Image Processing", volume = "14", number = "3", pages = "585--604", year = "2018", note = "", doi = "10.1007/s11554-016-0607-x", author = "Albert Garcia-Garcia and sergio Orts-Escolano and Jose Garcia-Rodriguez and Miguel Cazorla", }
- E. Martinez-Martin, E. Martinez-Martin, and A. P. del Pobil, «A biologically inspired approach for robot depth estimation,» Computational intelligence and neuroscience, 2018.
[Bibtex]@article{Ester_Martinez-Martin47670761, title={A Biologically Inspired Approach for Robot Depth Estimation}, journal={Computational Intelligence and Neuroscience}, author={Ester Martinez-Martin and Ester Martinez-Martin and Angel P. del Pobil}, doi={10.1155/2018/9179462}, url={http://doi.org/10.1155/2018/9179462}, year={2018} }
- E. Martinez-Martin and A. P. del Pobil, «Personal robot assistants for elderly care: an overview,» Intelligent systems reference library, vol. 132, pp. 77-91, 2018.
[Bibtex]@article{Martinez-Martin2018,title = {Personal robot assistants for elderly care: An overview},journal = {Intelligent systems Reference Library},year = {2018},volume = {132},pages = {77-91},author = {Martinez-Martin, E. and del Pobil, A.P.}}
- J. O. Onieva, G. Gonzalez-Serrano, T. P. Young, G. R. Washko, M. J. Ledesma-Carbayo, and R. S. J. Estépar, «Multiorgan structures detection using deep convolutional neural networks,» in Medical imaging – image processing- proceedings of spie, 2018.
[Bibtex]@inproceedings{Onieva2018, author = {Jorge Onieva Onieva and Germ\'{a}n Gonzalez-Serrano and Thomas P Young and George R Washko and Mar\'{i}a Jes\'{u}s Ledesma-Carbayo and Ra\'{u}l San Jos\'{e} Est\'{e}par}, doi = {10.1117/12.2293761}, issue = {March}, booktitle = {Medical Imaging - Image Processing- Proceedings of SPIE}, keywords = {computed tomography,convolutional neural network,deep learning,organ detector}, title = {Multiorgan structures detection using deep convolutional neural networks}, volume = {1057428}, year = {2018}, }
- G. Gonzalez-Serrano, G. R. Washko, and R. S. José, «Deep learning for biomarker regression : application to osteoporosis and emphysema on chest ct scans,» in Medical imaging – image processing- proceedings of spie, 2018.
[Bibtex]@inproceedings{Gonzalez2018BiomarkerRegression, author = {Germ\'{a}n Gonzalez-Serrano and George R Washko and Ra\'{u}l San Jos\'{e}}, doi = {10.1117/12.2293455}, issue = {March}, booktitle = {Medical Imaging - Image Processing- Proceedings of SPIE}, title = {Deep learning for biomarker regression : application to osteoporosis and emphysema on chest CT scans}, year = {2018}, }
- G. Gonzalez-Serrano, S. Y. Ash, G. Vegas-Sánchez-Ferrero, J. O. Onieva, F. N. Rahaghi, J. C. Ross, A. Dáz, R. S. J. Estépar, and G. R. Washko, «Disease staging and prognosis in smokers using deep learning in chest computed tomography,» American journal of respiratory and critical care medicine, vol. 197, pp. 193-203, 2018.
[Bibtex]@article{Gonzalez2018, abstract = {Rationale: Deep learning is a powerful tool that may allow for improved outcome prediction. Objectives: To determine if deep learning, specifically convolutional neural network (CNN) analysis, could detect and stage chronic obstructive pulmonary disease (COPD) and predict acute respiratory disease events (ARD) and mortality in smokers. Methods: A CNN was trained using CT scans from 7,983 COPDGene participants and evaluated using 1000 non-overlapping COPDGene participants and 1,672 ECLIPSE participants. Logistic regression (c-statistic and the Hosmer-Lemeshow test) was used to assess COPD diagnosis and ARD prediction. Cox regression (c-index and the Greenwood-Nam-D’Agnostino test) was used to assess mortality. Measurements and Main Results: In COPDGene, the c-statistic for the detection of COPD was 0.856. 51.1% of participants in COPDGene were accurately staged and 74.95% were within one stage. In ECLIPSE, 29.4% were accurately staged and 74.6% were within one stage. In COPDGene and ECLIPSE the c-statistic...}, author = {German Gonzalez-Serrano and Samuel Y. Ash and Gonzalo Vegas-S\'{a}nchez-Ferrero and Jorge Onieva Onieva and Farbod N. Rahaghi and James C. Ross and Alejandro D\'{a}z and Raul San Jos\'{e} Est\'{e}par and George R. Washko}, doi = {10.1164/rccm.201705-0860OC}, isbn = {0000000302}, issn = {15354970}, issue = {2}, journal = {American Journal of Respiratory and Critical Care Medicine}, keywords = {Artificial intelligence (computer vision systems),Chronic obstructive pulmonary disease,Neural networks,X-ray computed tomography}, pages = {193-203}, pmid = {28892454}, title = {Disease staging and prognosis in smokers using deep learning in chest computed tomography}, volume = {197}, year = {2018}, }
- G. Gonzalez-Serrano, G. R. Washko, and R. S. J. Estépar, «Multi-structure segmentation from partially labeled datasets . application to body composition measurements on ct scans .,» in Image anal mov organ breast thorac images (2018) ., 2018, pp. 215-224.
[Bibtex]@inproceedings{Gonzalez2018MultiStructure, author = {Germ\'{a}n Gonzalez-Serrano and George R Washko and Ra\'{u}l San Jos\'{e} Est\'{e}par}, doi = {10.1007/978-3-030-00946-5_22}, booktitle = {Image Anal Mov Organ Breast Thorac Images (2018) .}, pages = {215-224}, title = {Multi-Structure Segmentation from Partially Labeled Datasets . Application to Body Composition Measurements on CT scans .}, year = {2018}, }
- G. Gonzalez-Serrano, S. Y. Ash, R. S. J. Estépar, and G. Washko, «Reply to mummadi et al.: overfitting and use of mismatched cohorts in deep learning models: preventable design limitations,» American journal of respiratory and critical care medicine, vol. 198, p. 545, 2018.
[Bibtex]@article{Gonzalez2018Reply, author = {Germ\'{a}n Gonzalez-Serrano and Samuel Y. Ash and Ra\'{u}l San Jos\'{e} Est\'{e}par and George Washko}, doi = {10.1164/RCCM.201803-0540LE}, issn = {1535-4970}, issue = {4}, journal = {American journal of respiratory and critical care medicine}, keywords = {Comment,Deep Learning*,George Washko,Germ\'{a}n Gonz\'{a}lez,Humans,Letter,MEDLINE,NCBI,NIH,NLM,National Center for Biotechnology Information,National Institutes of Health,National Library of Medicine,PMC6835086,Prognosis,PubMed Abstract,Samuel Y Ash,Smokers*,Thorax,Tomography,X-Ray Computed,doi:10.1164/rccm.201803-0540LE,pmid:29641211}, month = {8}, pages = {545}, pmid = {29641211}, publisher = {Am J Respir Crit Care Med}, title = {Reply to Mummadi et al.: Overfitting and Use of Mismatched Cohorts in Deep Learning Models: Preventable Design Limitations}, volume = {198}, url = {https://pubmed.ncbi.nlm.nih.gov/29641211/}, year = {2018}, }
2017
- E. Cruz, J. C. Rangel, and M. Cazorla, «Robot semantic localization through CNN descriptors,» in ROBOT 2017: third iberian robotics conference – volume 1, seville, spain, november 22-24, 2017, 2017, p. 567–578.
[Bibtex]@inproceedings{DBLP:conf/robot/CruzRC17, author = {Edmanuel Cruz and Jos{\'{e}} Carlos Rangel and Miguel Cazorla}, title = {Robot Semantic Localization Through {CNN} Descriptors}, booktitle = {{ROBOT} 2017: Third Iberian Robotics Conference - Volume 1, Seville, Spain, November 22-24, 2017}, pages = {567--578}, year = {2017} }
- F. Escalona, F. Gomez-Donoso, and M. Cazorla, «3d object mapping using a labelling system,» in ROBOT 2017: third iberian robotics conference – volume 1, seville, spain, november 22-24, 2017, 2017, p. 579–590.
[Bibtex]@inproceedings{DBLP:conf/robot/EscalonaGC17, author = {F{\'{e}}lix Escalona and Francisco Gomez{-}Donoso and Miguel Cazorla}, title = {3D Object Mapping Using a Labelling System}, booktitle = {{ROBOT} 2017: Third Iberian Robotics Conference - Volume 1, Seville, Spain, November 22-24, 2017}, pages = {579--590}, year = {2017} }
- F. Gomez-Donoso, S. Orts-Escolano, and M. Cazorla, «Robust hand pose regression using convolutional neural networks,» in ROBOT 2017: third iberian robotics conference – volume 1, seville, spain, november 22-24, 2017, 2017, p. 591–602.
[Bibtex]@inproceedings{DBLP:conf/robot/Gomez-DonosoOC17, author = {Francisco Gomez{-}Donoso and Sergio Orts{-}Escolano and Miguel Cazorla}, title = {Robust Hand Pose Regression Using Convolutional Neural Networks}, booktitle = {{ROBOT} 2017: Third Iberian Robotics Conference - Volume 1, Seville, Spain, November 22-24, 2017}, pages = {591--602}, year = {2017} }
- A. Dominguez-Sanchez, M. Cazorla, and S. Orts-Escolano, «Pedestrian movement direction recognition using convolutional neural networks,» Ieee transactions on intelligent transportation systems, vol. 18, iss. 12, pp. 3540-3548, 2017.
[Bibtex]@Article{Dominguez2017, author="Alejandro Dominguez-Sanchez and Miguel Cazorla and Sergio Orts-Escolano", title="Pedestrian movement direction recognition using convolutional neural networks", journal="IEEE Transactions on Intelligent Transportation Systems", year="2017", volume="18", number="12", pages="3540--3548" }
- A. G. García, J. G. Rodríguez, sergio Orts-Escolano, sergiu Oprea, F. Gomez-Donoso, and M. Cazorla, «A study of the effect of noise and occlusion on the accuracy of convolutional neural networks applied to 3d object recognition,» Computer vision and image understanding, vol. 164, p. 124–134, 2017.
[Bibtex]@Article{GomezGarcia2017CVIU, author="Alberto Garc\'{i}a Garc\'{i}a and Jose Garc\'{i}a Rodr\'{i}guez and Sergio Orts-Escolano and Sergiu Oprea and Francisco Gomez-Donoso and Miguel Cazorla", title="A Study of the Effect of Noise and Occlusion on the Accuracy of Convolutional Neural Networks applied to 3D Object Recognition", journal="Computer Vision and Image Understanding", year="2017", volume="164", pages="124--134" }
- F. Gomez-Donoso, sergio Orts Escolano, M. Cazorla, A. Garcia-Garcia, J. Garcia-Rodriguez, J. Castro-Vargas, and sergiu Ovidiu-Oprea, «A robotic platform for customized and interactive rehabilitation of persons with disabilities,» Pattern recognition letters, vol. 99, p. 105–113, 2017.
[Bibtex]@Article{GomezDonoso2017PRL, author={Gomez-Donoso, Francisco and Orts Escolano, sergio and Cazorla, Miguel and Garcia-Garcia, Alberto and Garcia-Rodriguez, Jose and Castro-Vargas, John and Ovidiu-Oprea, sergiu}, title={A robotic platform for customized and interactive rehabilitation of persons with disabilities}, journal={Pattern Recognition Letters}, year={2017}, volume={99}, pages={105--113} }
- J. Garcia-Rodriguez, I. Guyon, sergio Escalera, A. Psarrou, A. Lewis, and M. Cazorla, «Editorial special issue on computational intelligence for vision and robotics,» Neural computing and applications, vol. 28, iss. 5, p. 853–854, 2017.
[Bibtex]@Article{Garcia-Rodriguez2017, author={Garcia-Rodriguez, Jose and Guyon, Isabelle and Escalera, sergio and Psarrou, Alexandra and Lewis, Andrew and Cazorla, Miguel}, title={Editorial special issue on computational intelligence for vision and robotics}, journal={Neural Computing and Applications}, year={2017}, volume={28}, number={5}, pages={853--854}, issn={1433-3058}, doi={10.1007/s00521-016-2330-8}, }
- A. Dominguez-sanchez, sergio Orts-Escolano, and M. Cazorla, «Pedestrian direction recognition using convolutional neural networks,» in 14th international work-conference on artificial neural networks, 2017.
[Bibtex]@INPROCEEDINGs{Dominguez2017IWANN, author={Alex Dominguez-sanchez and sergio Orts-Escolano and Miguel Cazorla}, booktitle={14th International Work-Conference on Artificial Neural Networks}, title={Pedestrian Direction Recognition using Convolutional Neural Networks}, year={2017}, month={June},}
- M. Zamora, E. Caldwell, J. Garcia-Rodriguez, J. Azorin-Lopez, and M. Cazorla, «Machine learning improves human-robot interaction in productive environments: a review,» in 14th international work-conference on artificial neural networks, 2017.
[Bibtex]@INPROCEEDINGs{Zamora2017IWANN, author={Mauricio Zamora and Eldon Caldwell and Jose Garcia-Rodriguez and Jorge Azorin-Lopez and Miguel Cazorla}, booktitle={14th International Work-Conference on Artificial Neural Networks}, title={Machine learning improves human-robot interaction in productive environments: A review}, year={2017}, month={June},}
- F. Escalona, Á. Rodríguez, F. Gómez-Donoso, J. Martínez-Gómez, and M. Cazorla, «3d object detection with deep learning,» Journal of physical agents, vol. 8, iss. 1, 2017.
[Bibtex]@article{Donoso2017, abstract = {Finding an appropriate environment representation is a crucial problem in robotics. 3D data has been recently used thanks to the advent of low cost RGB-D cameras. We propose a new way to represent a 3D map based on the information provided by an expert. Namely, the expert is the output of a Convolutional Neural Network trained with deep learning techniques. Relying on such information, we propose the generation of 3D maps using individual semantic labels, which are associated with environment objects or semantic labels. so, for each label we are provided with a partial 3D map whose data belong to the 3D perceptions, namely point clouds, which have an associated probability above a given threshold. The final map is obtained by registering and merging all these partial maps. The use of semantic labels provide us a with way to build the map while recognizing objects.}, author = {F\'elix Escalona and \'Angel Rodr\'iguez and Francisco G\'omez-Donoso and Jesus Mart\'inez-G\'omez and Miguel Cazorla}, issn = {1888-0258}, journal = {Journal of Physical Agents}, keywords = {semantic mapping, 3D point cloud, deep learning}, number = {1}, title = {3D object detection with deep learning}, volume = {8}, year = {2017} }
- F. Gomez-Donoso, A. Garcia-Garcia, s. Orts-Escolano, J. Garcia-Rodriguez, and M. Cazorla, «Lonchanet: a sliced-based cnn architecture for real-time 3d object recognition,» in 2017 international joint conference on neural networks (ijcnn), 2017.
[Bibtex]@INPROCEEDINGs{Garcia2017, author={ F. Gomez-Donoso and A. Garcia-Garcia and s. Orts-Escolano and J. Garcia-Rodriguez and M. Cazorla}, booktitle={2017 International Joint Conference on Neural Networks (IJCNN)}, title={LonchaNet: A sliced-based CNN Architecture for Real-time 3D Object Recognition}, year={2017}, month={May},}
- sergiu Oprea, A. Garcia-Garcia, s. Orts-Escolano, J. Garcia-Rodriguez, and M. Cazorla, «A recurrent neural network based schaeffer gesture recognition system,» in 2017 international joint conference on neural networks (ijcnn), 2017.
[Bibtex]@INPROCEEDINGs{Oprea2017, author={sergiu Oprea and A. Garcia-Garcia and s. Orts-Escolano and J. Garcia-Rodriguez and M. Cazorla}, booktitle={2017 International Joint Conference on Neural Networks (IJCNN)}, title={A Recurrent Neural Network based schaeffer Gesture Recognition system}, year={2017}, month={May},}
- A. Garcia-Garcia, S. Orts-Escolano, S. Oprea, J. Garcia-Rodriguez, J. Azorin-Lopez, M. Saval-Calvo, and M. Cazorla, «Multi-sensor 3D Object Dataset for Object Recognition with Full Pose Estimation,» Neural computing and applications, vol. 28, iss. 5, p. 941–952, 2017.
[Bibtex]@Article{Garcia-Garcia2017, author = {Garcia-Garcia, Alberto and Orts-Escolano, Sergio and Oprea, Sergiu and Garcia-Rodriguez, Jose and Azorin-Lopez, Jorge and Saval-Calvo, Marcelo and Cazorla, Miguel}, title = {{Multi-sensor 3D Object Dataset for Object Recognition with Full Pose Estimation}}, journal = {Neural Computing and Applications}, year = {2017}, volume = {28}, number = {5}, pages = {941--952}, issn = {1433-3058}, abstract = {In this work, we propose a new dataset for 3D object recognition using the new high-resolution Kinect V2 sensor and some other popular low-cost devices like Primesense Carmine. Since most already existing datasets for 3D object recognition lack some features such as 3D pose information about objects in the scene, per pixel segmentation or level of occlusion, we propose a new one combining all this information in a single dataset that can be used to validate existing and new 3D object recognition algorithms. Moreover, with the advent of the new Kinect V2 sensor we are able to provide high-resolution data for RGB and depth information using a single sensor, whereas other datasets had to combine multiple sensors. In addition, we will also provide semiautomatic segmentation and semantic labels about the different parts of the objects so that the dataset could be used for testing robot grasping and scene labeling systems as well as for object recognition.}, doi = {10.1007/s00521-016-2224-9}, }
- J. C. Rangel, J. Martínez-Gomez, I. García-Varea, and M. Cazorla, «Lextomap: lexical-based topological mapping,» Advanced robotics, vol. 31, iss. 5, pp. 268-281, 2017.
[Bibtex]@article{Rangel2016b, author = {Jos\'{e} Carlos Rangel and Jesus Mart\'{i}nez-Gomez and Ismael Garc\'{i}a-Varea and Miguel Cazorla}, title = {LexToMap: lexical-based topological mapping}, journal = {Advanced Robotics}, volume = {31}, number = {5}, pages = {268-281}, year = {2017}, doi = {10.1080/01691864.2016.1261045}, URL = { http://dx.doi.org/10.1080/01691864.2016.1261045 }, eprint = { http://dx.doi.org/10.1080/01691864.2016.1261045 } , abstract = { Any robot should be provided with a proper representation of its environment in order to perform navigation and other tasks. In addition to metrical approaches, topological mapping generates graph representations in which nodes and edges correspond to locations and transitions. In this article, we present LexToMap, a topological mapping procedure that relies on image annotations. These annotations, represented in this work by lexical labels, are obtained from pre-trained deep learning models, namely CNNs, and are used to estimate image similarities. Moreover, the lexical labels contribute to the descriptive capabilities of the topological maps. The proposal has been evaluated using the KTH-IDOL 2 data-set, which consists of image sequences acquired within an indoor environment under three different lighting conditions. The generality of the procedure as well as the descriptive capabilities of the generated maps validate the proposal. } }
- J. C. Rangel, V. Morell, M. Cazorla, sergio Orts-Escolano, and J. Garcia-Rodriguez, «Object recognition in noisy rgb-d data using gng,» Pattern analysis and applications, vol. 20, iss. 4, p. 1061–1076, 2017.
[Bibtex]@Article{Rangel2016PAAA, author={Rangel, Jose Carlos and Morell, Vicente and Cazorla, Miguel and Orts-Escolano, Sergio and Garcia-Rodriguez, Jose}, title={Object recognition in noisy RGB-D data using GNG}, journal={Pattern Analysis and Applications}, year={2017}, pages={1061--1076}, volume={20}, number={4}, doi={10.1007/s10044-016-0546-y} }
- E. Martinez-Martin, D. Fischinger, M. Vincze, and A. P. del Pobil, «An rgb-d visual application for error detection in robot grasping tasks,» Advances in intelligent systems and computing, vol. 531, pp. 243-254, 2017.
[Bibtex]@article{Martinez-Martin2017,title = {An RGB-D visual application for error detection in robot grasping tasks},journal = {Advances in Intelligent systems and Computing},year = {2017},volume = {531},pages = {243-254},author = {Martinez-Martin, E. and Fischinger, D. and Vincze, M. and del Pobil, A.P.}}
- E. Martinez-Martin and A. P. Del Pobil, «Object detection and recognition for assistive robots: experimentation and implementation,» Ieee robotics and automation magazine, vol. 24, iss. 3, pp. 123-138, 2017.
[Bibtex]@Article{Martinez-Martin2017a, author = {Martinez-Martin, E. and Del Pobil, A.P.}, title = {Object detection and recognition for assistive robots: Experimentation and implementation}, journal = {IEEE Robotics and Automation Magazine}, year = {2017}, volume = {24}, number = {3}, pages = {123-138}, }
- E. Martinez-Martin and A. P. Del Pobil, «Object recognition for robot tasks: an overview,» Robotics: new research, pp. 1-24, 2017.
[Bibtex]@Article{Martinez-Martin2017b, author = {Martinez-Martin, E. and Del Pobil, A.P.}, title = {Object recognition for robot tasks: An overview}, journal = {Robotics: New Research}, year = {2017}, pages = {1-24}, }
- E. Martinez-Martin and A. P. Del Pobil, «Robust motion detection and tracking for human-robot interaction,» Acm/ieee international conference on human-robot interaction, pp. 401-402, 2017.
[Bibtex]@Article{Martinez-Martin2017c, author = {Martinez-Martin, E. and Del Pobil, A.P.}, title = {Robust motion detection and tracking for human-robot interaction}, journal = {ACM/IEEE International Conference on Human-Robot Interaction}, year = {2017}, pages = {401-402}, }
- A. P. Del Pobil, M. Kassawat, A. J. Duran, M. A. Arias, N. Nechyporenko, A. Mallick, E. Cervera, D. subedi, I. Vasilev, D. Cardin, E. sansebastiano, E. Martinez-Martin, A. Morales, G. A. Casan, A. Arenal, B. Goriatcheff, C. Rubert, and G. Recatala, «Uji robinlab’s approach to the amazon robotics challenge 2017,» Ieee international conference on multisensor fusion and integration for intelligent systems, vol. 2017-November, pp. 318-323, 2017.
[Bibtex]@Article{DelPobil2017, author = {Del Pobil, A.P. and Kassawat, M. and Duran, A.J. and Arias, M.A. and Nechyporenko, N. and Mallick, A. and Cervera, E. and subedi, D. and Vasilev, I. and Cardin, D. and sansebastiano, E. and Martinez-Martin, E. and Morales, A. and Casan, G.A. and Arenal, A. and Goriatcheff, B. and Rubert, C. and Recatala, G.}, title = {UJI RobInLab{'}s approach to the Amazon Robotics Challenge 2017}, journal = {IEEE International Conference on Multisensor Fusion and Integration for Intelligent systems}, year = {2017}, volume = {2017-November}, pages = {318-323}, }
- F. N. Rahaghi, G. Vegas-Sanchez-Ferrero, J. K. Minhas, C. E. Come, I. D. L. Bruere, J. M. Wells, G. Gonzalez-Serrano, S. P. Bhatt, B. E. Fenster, A. A. Diaz, P. Kohli, J. C. Ross, D. A. Lynch, M. T. Dransfield, R. P. Bowler, M. J. Ledesma-Carbayo, R. S. J. Estépar, and G. R. Washko, «Ventricular geometry from non-contrast non-ecg-gated ct scans: an imaging marker of cardiopulmonary disease in smokers,» Academic radiology, vol. 24, pp. 594-602, 2017.
[Bibtex]@article{Rahaghi2017, abstract = {Rationale and Objectives Imaging-based assessment of cardiovascular structure and function provides clinically relevant information in smokers. Non-cardiac-gated thoracic computed tomographic (CT) scanning is increasingly leveraged for clinical care and lung cancer screening. We sought to determine if more comprehensive measures of ventricular geometry could be obtained from CT using an atlas-based surface model of the heart. Materials and Methods Subcohorts of 24 subjects with cardiac magnetic resonance imaging (MRI) and 262 subjects with echocardiography were identified from COPDGene, a longitudinal observational study of smokers. A surface model of the heart was manually initialized, and then automatically optimized to fit the epicardium for each CT. Estimates of right and left ventricular (RV and LV) volume and free-wall curvature were then calculated and compared to structural and functional metrics obtained from MRI and echocardiograms. Results CT measures of RV dimension and curvature correlated with similar measures obtained using MRI. RV and LV volume obtained from CT inversely correlated with echocardiogram-based estimates of RV systolic pressure using tricuspid regurgitation jet velocity and LV ejection fraction respectively. Patients with evidence of RV or LV dysfunction on echocardiogram had larger RV and LV dimensions on CT. Logistic regression models based on demographics and ventricular measures from CT had an area under the curve of >0.7 for the prediction of elevated right ventricular systolic pressure and ventricular failure. Conclusions These data suggest that non-cardiac-gated, non-contrast-enhanced thoracic CT scanning may provide insight into cardiac structure and function in smokers.}, author = {Farbod N. Rahaghi and Gonzalo Vegas-Sanchez-Ferrero and Jasleen K. Minhas and Carolyn E. Come and Isaac De La Bruere and James M. Wells and Germ\'{a}n Gonzalez-Serrano and Surya P. Bhatt and Brett E. 
Fenster and Alejandro A. Diaz and Puja Kohli and James C. Ross and David A. Lynch and Mark T. Dransfield and Russel P. Bowler and Maria J. Ledesma-Carbayo and Ra\'{u}l San Jos\'{e} Est\'{e}par and George R. Washko}, doi = {10.1016/j.acra.2016.12.007}, isbn = {1878-4046 (Electronic)\r1076-6332 (Linking)}, issn = {18784046}, issue = {5}, journal = {Academic Radiology}, keywords = {COPD,COPDGene,Non-ECG-gated,cardiac,computed tomography,congestive heart failure,echocardiography,heart,left heart failure,non-contrast,pulmonary hypertension,right heart failure,smokers,ventricular geometry,ventricular volume}, pages = {594-602}, pmid = {28215632}, publisher = {Elsevier Inc.}, title = {Ventricular Geometry From Non-contrast Non-ECG-gated CT Scans: An Imaging Marker of Cardiopulmonary Disease in Smokers}, volume = {24}, url = {http://dx.doi.org/10.1016/j.acra.2016.12.007}, year = {2017}, }
- S. P. Bhatt, G. Vegas-Sánchez-Ferrero, F. N. Rahaghi, E. S. MacLean, G. Gonzalez-Serrano, C. E. Come, G. L. Kinney, J. E. Hokanson, M. J. Budoff, M. J. Cuttica, M. J. Wells, R. S. J. Estépar, and G. R. Washko, «Cardiac morphometry on computed tomography and exacerbation reduction with β- blocker therapy in copd,» American journal of respiratory and critical care medicine, vol. 196, pp. 1484-1488, 2017.
[Bibtex]@article{Bhatt2017, author = {Surya P. Bhatt and Gonzalo Vegas-S\'{a}nchez-Ferrero and Farbod N. Rahaghi and Erick S. MacLean and German Gonzalez-Serrano and Carolyn E Come and Gregory L. Kinney and John E. Hokanson and Matthew J. Budoff and Michael J. Cuttica and J. Michael Wells and Ra\'{u}l San Jos\'{e} Est\'{e}par and George R. Washko}, issue = {11}, journal = {American Journal of Respiratory and Critical Care Medicine}, pages = {1484-1488}, title = {Cardiac Morphometry on Computed Tomography and Exacerbation Reduction with β- blocker therapy in COPD}, volume = {196}, year = {2017}, }
2016
- A. Garcia-Garcia, F. Gomez-Donoso, J. Garcia-Rodriguez, s. Orts-Escolano, M. Cazorla, and J. Azorin-Lopez, «Pointnet: a 3d convolutional neural network for real-time object class recognition,» in 2016 international joint conference on neural networks (ijcnn), 2016, pp. 1578-1584.
[Bibtex]@INPROCEEDINGs{Garcia2016, author={A. Garcia-Garcia and F. Gomez-Donoso and J. Garcia-Rodriguez and s. Orts-Escolano and M. Cazorla and J. Azorin-Lopez}, booktitle={2016 International Joint Conference on Neural Networks (IJCNN)}, title={PointNet: A 3D Convolutional Neural Network for real-time object class recognition}, year={2016}, pages={1578-1584}, keywords={CAD;computer vision;data structures;learning (artificial intelligence);neural net architecture;object recognition;3D shapeNets;3D convolutional neural network;ModelNet;PointNet;VoxNet;computer vision;deep learning techniques;density occupancy grids representations;large-scale 3D CAD model dataset;real-time object class recognition;supervised convolutional neural network architecture;Computer architecture;Machine learning;Neural networks;Object recognition;solid modeling;Three-dimensional displays;Two dimensional displays}, month={July},}
- C. Loop, Q. Cai, P. Chou, and sergio Orts-Escolano, «A closed-form bayesian fusion equation using occupancy probabilities,» in 2016 international conference on 3d vision, 3dv 2016, stanford, usa, october 25-28, 2016, 2016.
[Bibtex]@InProceedings{DBLP:conf/3dim/Loop2016, author = {Charles Loop and Qin Cai and Philip Chou and Sergio Orts-Escolano}, title = {A Closed-form Bayesian Fusion Equation using Occupancy Probabilities}, booktitle = {2016 International Conference on 3D Vision, 3DV 2016, {Stanford}, USA, October 25-28, 2016}, year = {2016} }
- M. Dou, sameh Khamis, Y. Degtyarev, P. Davidson, R. sean Fanello, A. Kowdle, sergio Orts-Escolano, C. Rhemann, D. Kim, J. Taylor, P. Kohli, V. Tankovich, and shahram Izadi, «Fusion4d: real-time performance capture of challenging scenes,» Acm trans. graph., vol. 35, iss. 4, p. 114:1–114:13, 2016.
[Bibtex]@Article{Dou2016, author = {Dou, Mingsong and Khamis, sameh and Degtyarev, Yury and Davidson, Philip and Fanello, sean Ryan and Kowdle, Adarsh and Orts-Escolano, sergio and Rhemann, Christoph and Kim, David and Taylor, Jonathan and Kohli, Pushmeet and Tankovich, Vladimir and Izadi, shahram}, title = {Fusion4D: Real-time Performance Capture of Challenging scenes}, journal = {ACM Trans. Graph.}, year = {2016}, volume = {35}, number = {4}, pages = {114:1--114:13}, month = jul, acmid = {2925969}, address = {New York, NY, UsA}, articleno = {114}, doi = {10.1145/2897824.2925969}, issn = {0730-0301}, issue_date = {July 2016}, keywords = {4D reconstruction, multi-view, nonrigid, real-time}, numpages = {13}, publisher = {ACM}, url = {http://doi.acm.org/10.1145/2897824.2925969} }
- sean Ryan Fanello, C. Rhemann, V. Tankovich, A. Kowdle, sergio Orts-Escolano, D. Kim, and shahram Izadi, «HyperDepth: Learning Depth From structured Light Without Matching,» in The ieee conference on computer vision and pattern recognition (cvpr), 2016.
[Bibtex]@InProceedings{Fanello_2016_CVPR, author = {Ryan Fanello, sean and Rhemann, Christoph and Tankovich, Vladimir and Kowdle, Adarsh and Orts-Escolano, sergio and Kim, David and Izadi, shahram}, title = {{HyperDepth: Learning Depth From structured Light Without Matching}}, booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2016}, month = {June} }
- A. Garcia-Garcia, F. Gomez-Donoso, J. Garcia-Rodriguez, sergio Orts-Escolano, M. Cazorla, and J. Azorin-Lopez, «PointNet: A 3D Convolutional Neural Network for Real-Time Object Class Recognition,» in The ieee world congress on computational intelligence, 2016.
[Bibtex]@inproceedings{Garcia2016a, author = {Garcia-Garcia, Albert and Gomez-Donoso, Francisco and Garcia-Rodriguez, Jose and Orts-Escolano, sergio and Cazorla, Miguel and Azorin-Lopez, Jorge}, booktitle = {The IEEE World Congress on Computational Intelligence}, title = {{PointNet: A 3D Convolutional Neural Network for Real-Time Object Class Recognition}}, year = {2016} }
- F. Gomez-Donoso, M. Cazorla, A. Garcia-Garcia, and J. Garcia Rodriguez, «Automatic schaeffer’s gestures recognition system,» Expert systems, vol. 33, iss. 5, p. 480–488, 2016.
[Bibtex]@article{Gomez-Donoso2016, abstract = {schaeffer's sign language consists of a reduced set of gestures designed to help children with autism or cognitive learning disabilities to develop adequate communication skills. Our automatic recognition system for schaeffer's gesture language uses the information provided by an RGB-D camera to capture body motion and recognize gestures using Dynamic Time Warping combined with k-Nearest Neighbors methods. The learning process is reinforced by the interaction with the proposed system that accelerates learning itself thus helping both children and educators. To demonstrate the validity of the system, a set of qualitative experiments with children were carried out. As a result, a system which is able to recognize a subset of 11 gestures of schaeffer's sign language online was achieved.}, author = {Gomez-Donoso, Francisco and Cazorla, Miguel and Garcia-Garcia, Alberto and Garcia Rodriguez, Jose}, journal = {Expert systems}, title = {Automatic schaeffer's Gestures Recognition system}, volume={33}, number={5}, pages={480--488}, year = {2016} }
- sergio Orts-Escolano, C. Rhemann, sean Fanello, D. Kim, A. Kowdle, W. Chang, Y. Degtyarev, P. L. Davidson, sameh Khamis, M. Dou, V. Tankovich, C. Loop, Q. Cai, P. A. Chou, sarah Mennicken, J. Valentin, V. Pradeep, shenlong Wang, B. sing Kang, P. Kohli, Y. Lutchyn, C. Keskin, and shahram Izadi, «Holoportation: virtual 3d teleportation in real-time,» in 29th acm user interface software and technology symposium (uist), 2016.
[Bibtex]@InProceedings{holoportation2016, author = {Sergio Orts-Escolano and Christoph Rhemann and Sean Fanello and David Kim and Adarsh Kowdle and Wayne Chang and Yury Degtyarev and Philip L Davidson and Sameh Khamis and Mingsong Dou and Vladimir Tankovich and Charles Loop and Qin Cai and Philip A Chou and Sarah Mennicken and Julien Valentin and Vivek Pradeep and Shenlong Wang and Sing Bing Kang and Pushmeet Kohli and Yuliya Lutchyn and Cem Keskin and Shahram Izadi}, title = {Holoportation: Virtual 3D Teleportation in Real-time}, booktitle = {29th ACM User Interface Software and Technology Symposium (UIST)}, year = {2016}, doi = {10.1145/2984511.2984517}, url = {http://dl.acm.org/citation.cfm?id=2984517} }
- A. Jimeno-Morenilla, J. Garcia-Rodriguez, sergio Orts-Escolano, and M. Davia-Aracil, «Gng based foot reconstruction for custom footwear manufacturing,» Computers in industry, vol. 75, pp. 116-126, 2016.
[Bibtex]@Article{JimenoMorenilla2016116, author = {Antonio Jimeno-Morenilla and Jose Garcia-Rodriguez and sergio Orts-Escolano and Miguel Davia-Aracil}, title = {GNG based foot reconstruction for custom footwear manufacturing }, journal = {Computers in Industry }, year = {2016}, volume = {75}, pages = {116 - 126}, doi = {http://dx.doi.org/10.1016/j.compind.2015.06.002}, issn = {0166-3615}, keywords = {Custom footwear manufacturing}, url = {http://www.sciencedirect.com/science/article/pii/s0166361515300075} }
- J. Martinez-Gomez, V. Morell Gimenez, M. Cazorla, and I. Garcia-Varea, «semantic Localization in the PCL library,» Robotics and autonomous systems, vol. 75, Part B, p. 641–648, 2016.
[Bibtex]@article{Martinez2016ras, abstract = {The semantic localization problem in robotics consists in determining the place where a robot is located by means of semantic categories. The problem is usually addressed as a supervised classification process, where input data correspond to robot perceptions while classes to semantic categories, like kitchen or corridor. In this paper we propose a framework, implemented in the $\backslash${\{}PCL$\backslash${\}} library, which provides a set of valuable tools to easily develop and evaluate semantic localization systems. The implementation includes the generation of 3D global descriptors following a Bag-of-Words approach. This allows the generation of fixed-dimensionality descriptors from any type of keypoint detector and feature extractor combinations. The framework has been designed, structured and implemented to be easily extended with different keypoint detectors, feature extractors as well as classification models. The proposed framework has also been used to evaluate the performance of a set of already implemented descriptors, when used as input for a specific semantic localization system. The obtained results are discussed paying special attention to the internal parameters of the BoW descriptor generation process. Moreover, we also review the combination of some keypoint detectors with different 3D descriptor generation techniques.}, author = {Martinez-Gomez, Jesus and Morell Gimenez, Vicente and Cazorla, Miguel and Garcia-Varea, Ismael}, journal = {Robotics and Autonomous systems}, pages = {641--648}, title = {{semantic Localization in the PCL library}}, volume = {75, Part B}, year = {2016} }
- J. Navarrete, V. Morell, M. Cazorla, D. Viejo, J. Garcia-Rodriguez, and Sergio Orts-Escolano, «3DCOMET: 3D Compression Methods Test Dataset,» Robotics and autonomous systems, vol. 75, Part B, p. 550–557, 2016.
[Bibtex]@article{Navarrete2016Ras, abstract = {The use of 3D data in mobile robotics applications provides valuable information about the robot's environment. However usually the huge amount of 3D information is difficult to manage due to the fact that the robot storage system and computing capabilities are insufficient. Therefore, a data compression method is necessary to store and process this information while preserving as much information as possible. A few methods have been proposed to compress 3D information. Nevertheless, there does not exist a consistent public benchmark for comparing the results (compression level, distance reconstructed error, etc.) obtained with different methods. In this paper, we propose a dataset composed of a set of 3D point clouds with different structure and texture variability to evaluate the results obtained from 3D data compression methods. We also provide useful tools for comparing compression methods, using as a baseline the results obtained by existing relevant compression methods.}, author = {Javier Navarrete and Vicente Morell and Miguel Cazorla and Diego Viejo and Jose Garcia-Rodriguez and Sergio Orts-Escolano}, journal = {Robotics and Autonomous Systems}, pages = {550--557}, title = {{3DCOMET: 3D Compression Methods Test Dataset}}, volume = {75, Part B}, year = {2016} }
- S. Orts-Escolano, J. Garcia-Rodriguez, V. Morell, M. Cazorla, J. A. Serra-Perez, and A. Garcia-Garcia, «3D surface reconstruction of noisy point clouds using Growing Neural Gas,» Neural processing letters, vol. 43, iss. 2, p. 401–423, 2016.
[Bibtex]@article{Orts2015, abstract = {With the advent of low-cost 3D sensors and 3D printers, scene and object 3D surface reconstruction has become an important research topic in the last years. In this work, we propose an automatic (unsupervised) method for 3D surface reconstruction from raw unorganized point clouds acquired using low-cost 3D sensors. We have modified the Growing Neural Gas (GNG) network, which is a suitable model because of its flexibility, rapid adaptation and excellent quality of representation, to perform 3D surface reconstruction of different real-world objects and scenes. Some improvements have been made on the original algorithm considering colour and surface normal information of input data during the learning stage and creating complete triangular meshes instead of basic wire-frame representations. The proposed method is able to successfully create 3D faces online, whereas existing 3D reconstruction methods based on Self-Organizing Maps (SOMs) required post-processing steps to close gaps and holes produced during the 3D reconstruction process. A set of quantitative and qualitative experiments were carried out to validate the proposed method. The method has been implemented and tested on real data, and has been found to be effective at reconstructing noisy point clouds obtained using low-cost 3D sensors.}, author = {Orts-Escolano, S and Garcia-Rodriguez, J and Morell, V and Cazorla, Miguel and Serra-Perez, J A and Garcia-Garcia, A}, issn = {1370-4621}, journal = {Neural Processing Letters}, number = {2}, pages = {401--423}, title = {{3D surface reconstruction of noisy point clouds using Growing Neural Gas}}, volume = {43}, year = {2016} }
- J. C. Rangel, M. Cazorla, I. Garcia-Varea, J. Martinez-Gomez, E. Fromont, and M. Sebban, «Scene Classification from Semantic Labeling,» Advanced robotics, vol. 30, iss. 11–12, p. 758–769, 2016.
[Bibtex]@article{Rangel2016, abstract = {Finding an appropriate image representation is a crucial problem in robotics. This problem has been classically addressed by means of computer vision techniques, where local and global features are used. The selection or/and combination of different features is carried out by taking into account repeatability and distinctiveness, but also the specific problem to solve. In this article, we propose the generation of image descriptors from general purpose semantic annotations. This approach has been evaluated as source of information for a scene classifier, and specifically using Clarifai as the semantic annotation tool. The experimentation has been carried out using the ViDRILO toolbox as benchmark, which includes a comparison of state-of-the-art global features and tools to make comparisons among them. According to the experimental results, the proposed descriptor performs similarly to well-known domain-specific image descriptors based on global features in a scene classification task. Moreover, the proposed descriptor is based on generalist annotations without any type of problem-oriented parameter tuning.}, author = {Jose Carlos Rangel and Miguel Cazorla and Ismael Garcia-Varea and Jesus Martinez-Gomez and Elisa Fromont and Marc Sebban}, doi = {10.1080/01691864.2016.1164621}, journal = {Advanced Robotics}, number = {11--12}, pages = {758--769}, title = {{Scene Classification from Semantic Labeling}}, volume = {30}, year = {2016} }
- A. Rodriguez, F. Gomez-Donoso, J. Martinez-Gomez, and M. Cazorla, «Building 3d maps with tag information,» in Xvii workshop en agentes físicos (waf 2016), 2016.
[Bibtex]@inproceedings{Rodriguez2016, Author = {Angel Rodriguez and Francisco Gomez-Donoso and Jesus Martinez-Gomez and Miguel Cazorla}, Title = {Building 3D maps with tag information}, Booktitle = {XVII Workshop en Agentes F{\'\i}sicos (WAF 2016)}, Year={2016} }
- M. Saval-Calvo, J. Azorin-Lopez, A. Fuster-Guillo, J. Garcia-Rodriguez, Sergio Orts-Escolano, and A. Garcia-Garcia, «Evaluation of sampling method effects in 3D non-rigid registration,» Neural computing and applications, p. 1–15, 2016.
[Bibtex]@Article{saval2016, author = {Saval-Calvo, Marcelo and Azorin-Lopez, Jorge and Fuster-Guillo, Andres and Garcia-Rodriguez, Jose and Orts-Escolano, Sergio and Garcia-Garcia, Alberto}, title = {{Evaluation of sampling method effects in 3D non-rigid registration}}, journal = {Neural Computing and Applications}, year = {2016}, pages = {1--15} }
- J. Navarrete, D. Viejo, and M. Cazorla, «Color smoothing for rgb-d data using entropy information,» Applied soft computing, vol. 46, p. 361–380, 2016.
[Bibtex]@article{navarrete2016color, title={Color smoothing for RGB-D data using entropy information}, author={Navarrete, Javier and Viejo, Diego and Cazorla, Miguel}, journal={Applied Soft Computing}, volume={46}, pages={361--380}, year={2016}, publisher={Elsevier} }
- E. Martinez-Martin and A. P. del Pobil, «Conflict resolution in robotics: an overview,» Artificial intelligence: concepts, methodologies, tools, and applications, vol. 4, pp. 2623-2638, 2016.
[Bibtex]@article{Martinez-Martin2016,title = {Conflict resolution in robotics: An overview},journal = {Artificial Intelligence: Concepts, Methodologies, Tools, and Applications},year = {2016},volume = {4},pages = {2623-2638},author = {Martinez-Martin, E. and del Pobil, A.P.}}
- G. Gonzalez-Serrano, G. R. Washko, and R. S. J. Estepar, «Automated agatston score computation in a large dataset of non ecg-gated chest computed tomography,» in Proceedings – international symposium on biomedical imaging, 2016, pp. 53-57.
[Bibtex]@inproceedings{Gonzalez2016, abstract = {© 2016 IEEE.The Agatston score, computed from ECG-gated computed tomography (CT), is a well established metric of coronary artery disease. It has been recently shown that the Agatston score computed from chest CT (non ECG-gated) studies is highly correlated with the Agatston score computed from cardiac CT scans. In this work we present an automated method to compute the Agatston score from chest CT images. Coronary arteries calcifications (CACs) are defined as voxels contained within the coronary arteries with a value greater or equal to 130 Hounsfield Units (HU). CACs are automatically detected in chest CT studies by locating the heart, generating a region of interest around it, thresholding the image in such region and applying a set of rules to discriminate CACs from calcifications in the main vessels or from metallic implants. We evaluate the methodology in a large cohort of 1500 patients for whom manual reference standard is available. Our results show that the Pearson correlation coefficient between manual and automated Agatston score is p = 0.86 (p < 0.0001).}, author = {German Gonzalez-Serrano and George R. Washko and Raul San Jose Estepar}, doi = {10.1109/ISBI.2016.7493209}, isbn = {9781479923502}, issn = {19458452}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, keywords = {Agatston score,computed aided detection,heuristics,object detection,segmentation}, pages = {53-57}, pmid = {27974951}, title = {Automated Agatston score computation in a large dataset of non ECG-gated chest computed tomography}, volume = {2016-June}, year = {2016}, }
- L. Fusco, R. Lefort, K. Smith, F. Benmansour, G. Gonzalez-Serrano, C. Barillari, B. Rinn, F. Fleuret, P. Fua, and O. Pertz, "Computer vision profiling of neurite outgrowth dynamics reveals spatiotemporal modularity of rho gtpase signaling," Journal of cell biology, vol. 212, pp. 91-111, 2016.
[Bibtex]@article{Fusco2016, abstract = {© 2016 Fusco et al. Rho guanosine triphosphatases (GTPases) control the cytoskeletal dynamics that power neurite outgrowth. This process consists of dynamic neurite initiation, elongation, retraction, and branching cycles that are likely to be regulated by specific spatiotemporal signaling networks, which cannot be resolved with static, steady-state assays. We present NeuriteTracker, a computer-vision approach to automatically segment and track neuronal morphodynamics in time-lapse datasets. Feature extraction then quantifies dynamic neurite outgrowth phenotypes. We identify a set of stereotypic neurite outgrowth morphodynamic behaviors in a cultured neuronal cell system. Systematic RNA interference perturbation of a Rho GTPase interactome consisting of 219 proteins reveals a limited set of morphodynamic phenotypes. As proof of concept, we show that loss of function of two distinct RhoA-specific GTPase-activating proteins (GAPs) leads to opposite neurite outgrowth phenotypes. Imaging of RhoA activation dynamics indicates that both GAPs regulate different spatiotemporal Rho GTPase pools, with distinct functions. Our results provide a starting point to dissect spatiotemporal Rho GTPase signaling networks that regulate neurite outgrowth.}, author = {L. Fusco and R. Lefort and K. Smith and F. Benmansour and G. Gonzalez-Serrano and C. Barillari and B. Rinn and F. Fleuret and P. Fua and O. Pertz}, doi = {10.1083/jcb.201506018}, issn = {15408140}, issue = {1}, journal = {Journal of Cell Biology}, pages = {91-111}, title = {Computer vision profiling of neurite outgrowth dynamics reveals spatiotemporal modularity of Rho GTPase signaling}, volume = {212}, year = {2016}, }
- K. K. Kumamaru, E. George, A. Aghayev, S. S. Saboo, A. Khandelwal, S. Rodríguez-López, T. Cai, D. Jiménez-Carretero, R. S. J. Estépar, M. J. Ledesma-Carbayo, G. Gonzalez-Serrano, and F. J. Rybicki, "Implementation and performance of automated software for computing right-to-left ventricular diameter ratio from computed tomography pulmonary angiography images," Journal of computer assisted tomography, vol. 40, pp. 387-92, 2016.
[Bibtex]@article{Kumamaru2016, abstract = {© 2016 Wolters Kluwer Health, Inc. All rights reserved. Objective: The aim of this study was to prospectively test the performance and potential for clinical integration of software that automatically calculates the right-to-left ventricular (RV/LV) diameter ratio from computed tomography pulmonary angiography images. Methods: Using 115 computed tomography pulmonary angiography images that were positive for acute pulmonary embolism, we prospectively evaluated RV/LV ratio measurements that were obtained as follows: (1) completely manual measurement (reference standard), (2) completely automated measurement using the software, and (3 and 4) using a customized software interface that allowed 2 independent radiologists to manually adjust the automatically positioned calipers. Results: Automated measurements underestimated (P < 0.001) the reference standard (1.09 [0.25] vs1.03 [0.35] ).With manual correction of the automatically positioned calipers, themean ratio became closer to the reference standard (1.06 [0.29] by read 1 and 1.07 [0.30] by read 2), and the correlation improved (r = 0.675 to 0.872 and 0.887). The mean time required for manual adjustment (37 [20] seconds) was significantly less than the time required to perform measurements entirely manually (100 [23] seconds). Conclusions: Automated CT RV/LV diameter ratio software shows promise for integration into the clinical workflow for patients with acute pulmonary embolism.}, author = {K.K. Kumamaru and E. George and A. Aghayev and S.S. Saboo and A. Khandelwal and S. Rodr\'{i}guez-L\'{o}pez and T. Cai and D. Jim\'{e}nez-Carretero and R.S.J. Est\'{e}par and M.J. Ledesma-Carbayo and German Gonzalez-Serrano and F.J. 
Rybicki}, doi = {10.1097/RCT.0000000000000375}, issn = {15323145}, issue = {3}, journal = {Journal of Computer Assisted Tomography}, keywords = {Computer-aided detection,Diameter ratio,Prognosis,Pulmonary embolism,Right ventricular strain}, pages = {387-92}, title = {Implementation and performance of automated software for computing right-to-left ventricular diameter ratio from computed tomography pulmonary angiography images}, volume = {40}, year = {2016}, }
- F. N. Rahaghi, J. C. Ross, M. Agarwal, G. Gonzalez-Serrano, C. E. Come, A. A. Diaz, G. Vegas-Sánchez-Ferrero, A. Hunsaker, S. J. R. Estépar, A. B. Waxman, and G. R. Washko, "Pulmonary vascular morphology as an imaging biomarker in chronic thromboembolic pulmonary hypertension," Pulmonary circulation, vol. 6, 2016.
[Bibtex]@article{Rahaghi2016, abstract = {© 2016 by the Pulmonary Vascular Research Institute. All rights reserved. Patients with chronic thromboembolic pulmonary hypertension (CTEPH) have morphologic changes to the pulmonary vasculature. These include pruning of the distal vessels, dilation of the proximal vessels, and increased vascular tortuosity. Advances in image p rocessing and computer vision enable objective detection and quantification of these processes in clinically acquired computed tomographic (CT) scans. Three-dimensional reconstructions of the pulmonary vasculature were created from the CT angiograms of 18 patients with CTEPH diagnosed using imaging and hemodynamics as well as 15 control patients referred to our Dyspnea Clinic and found to have no evidence of pulmonary vascular disease. Compared to controls, CTEPH patients exhibited greater pruning of the distal vasculature (median density of small-vessel volume: 2.7 [interquartile range (IQR): 2.5-3.0] vs. 3.2 [3.0-3.8] ; P = 0.008), greater dilation of proximal arteries (median fraction of blood in large arteries: 0.35 [IQR: 0.30-0.41] vs. 0.23 [0.21-0.31] ; P = 0.0005), and increased tortuosity in the pulmonary arterial tree (median: 4.92% [IQR: 4.85%-5.21%] vs. 4.63% [4.39%-4.92%] ; P = 0.004). CTEPH was not associated with dilation of proximal veins or increased tortuosity in the venous system. Distal pruning of the vasculature was correlated with the cardiac index (R = 0.51, P = 0.04). Quantitative models derived from CT scans can be used to measure changes in vascular morphology previously described subjectively in CTEPH. These measurements are also correlated with invasive metrics of pulmonary hemodynamics, suggesting that they may be used to assess disease severity. Further work in a larger cohort may enable the use of such measures as a biomarker for diagnostic, phenotyping, and prognostic purposes.}, author = {F.N. Rahaghi and J.C. Ross and M. Agarwal and German Gonzalez-Serrano and C.E. 
Come and A.A. Diaz and G. Vegas-S\'{a}nchez-Ferrero and A. Hunsaker and R. San Jos\'{e} Est\'{e}par and A.B. Waxman and G.R. Washko}, doi = {10.1086/685081}, issn = {20458940}, issue = {1}, journal = {Pulmonary Circulation}, keywords = {Arterial,Chronic thromboembolic pulmonary hypertension,Computed tomography,Tortuosity}, title = {Pulmonary vascular morphology as an imaging biomarker in chronic thromboembolic pulmonary hypertension}, volume = {6}, year = {2016}, }
2015
- J. C. Rangel, V. Morell, M. Cazorla, Sergio Orts-Escolano, and J. García-Rodríguez, "Object recognition in noisy rgb-d data," in International work-conference on the interplay between natural and artificial computation, 2015, p. 261–270.
[Bibtex]@inproceedings{rangel2015object, title={Object Recognition in Noisy RGB-D Data}, author={Rangel, Jos{\'e} Carlos and Morell, Vicente and Cazorla, Miguel and Orts-Escolano, Sergio and Garc{\'\i}a-Rodr{\'\i}guez, Jos{\'e}}, booktitle={International Work-Conference on the Interplay Between Natural and Artificial Computation}, pages={261--270}, year={2015}, organization={Springer International Publishing} }
- M. Saval-Calvo, Sergio Orts-Escolano, J. Azorin-Lopez, J. Garcia-Rodriguez, A. Fuster-Guillo, V. Morell-Gimenez, and M. Cazorla, "A comparative study of downsampling techniques for non-rigid point set registration using color," in International work-conference on the interplay between natural and artificial computation, 2015, p. 281–290.
[Bibtex]@inproceedings{saval2015comparative, title={A Comparative Study of Downsampling Techniques for Non-rigid Point Set Registration Using Color}, author={Saval-Calvo, Marcelo and Orts-Escolano, Sergio and Azorin-Lopez, Jorge and Garcia-Rodriguez, Jose and Fuster-Guillo, Andres and Morell-Gimenez, Vicente and Cazorla, Miguel}, booktitle={International Work-Conference on the Interplay Between Natural and Artificial Computation}, pages={281--290}, year={2015}, organization={Springer International Publishing} }
- Sergio Orts-Escolano, J. Garcia-Rodriguez, V. Morell, M. Cazorla, A. Garcia-Garcia, and Sergiu Ovidiu-Oprea, "Optimized representation of 3d sequences using neural networks," in International work-conference on the interplay between natural and artificial computation, 2015, p. 251–260.
[Bibtex]@inproceedings{orts2015optimized, title={Optimized Representation of 3D Sequences Using Neural Networks}, author={Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Morell, Vicente and Cazorla, Miguel and Garcia-Garcia, Alberto and Ovidiu-Oprea, Sergiu}, booktitle={International Work-Conference on the Interplay Between Natural and Artificial Computation}, pages={251--260}, year={2015}, organization={Springer International Publishing} }
- J. Gomez, B. Caputo, M. Cazorla, H. Christensen, M. Fornoni, I. Garcia-Varea, and A. Pronobis, "Where Are We After Five Editions?: Robot Vision Challenge, a Competition that Evaluates Solutions for the Visual Place Classification Problem," Robotics automation magazine, ieee, vol. 22, iss. 4, p. 147–156, 2015.
[Bibtex]@article{7349126, author = {Gomez, J and Caputo, B and Cazorla, M and Christensen, H and Fornoni, M and Garcia-Varea, I and Pronobis, A}, doi = {10.1109/MRA.2015.2460931}, issn = {1070-9932}, journal = {Robotics Automation Magazine, IEEE}, keywords = {Benchmark testing;Object recognition;Proposals;Rob}, number = {4}, pages = {147--156}, title = {{Where Are We After Five Editions?: Robot Vision Challenge, a Competition that Evaluates Solutions for the Visual Place Classification Problem}}, volume = {22}, year = {2015} }
- A. Angelopoulou, A. Psarrou, J. Garcia-Rodriguez, Sergio Orts-Escolano, J. Azorin-Lopez, and K. Revett, "3D Reconstruction of Medical Images from Slices Automatically Landmarked with Growing Neural Models," Neurocomputing, vol. 150, Part, p. 16–25, 2015.
[Bibtex]@article{Angelopouloul2015, abstract = {In this study, we utilise a novel approach to segment out the ventricular system in a series of high resolution T1-weighted {MR} images. We present a brain ventricles fast reconstruction method. The method is based on the processing of brain sections and establishing a fixed number of landmarks onto those sections to reconstruct the ventricles 3D surface. Automated landmark extraction is accomplished through the use of the self-organising network, the growing neural gas (GNG), which is able to topographically map the low dimensionality of the network to the high dimensionality of the contour manifold without requiring a priori knowledge of the input space structure. Moreover, our {GNG} landmark method is tolerant to noise and eliminates outliers. Our method accelerates the classical surface reconstruction and filtering processes. The proposed method offers higher accuracy compared to methods with similar efficiency as Voxel Grid.}, author = {Angelopoulou, Anastassia and Psarrou, Alexandra and Garcia-Rodriguez, Jose and Orts-Escolano, Sergio and Azorin-Lopez, Jorge and Revett, Kenneth}, journal = {Neurocomputing}, pages = {16--25}, title = {{3D Reconstruction of Medical Images from Slices Automatically Landmarked with Growing Neural Models}}, volume = {150, Part}, year = {2015} }
- B. J. Boom, Sergio Orts-Escolano, X. X. Ning, Steven McDonagh, P. Sandilands, and R. B. Fisher, "Interactive light source position estimation for augmented reality with an rgb-d camera," Computer animation and virtual worlds, p. n/a–n/a, 2015.
[Bibtex]@Article{Boom2016, author = {Boom, Bastiaan J. and Orts-Escolano, Sergio and Ning, Xin X. and McDonagh, Steven and Sandilands, Peter and Fisher, Robert B.}, title = {Interactive light source position estimation for augmented reality with an RGB-D camera}, journal = {Computer Animation and Virtual Worlds}, year = {2015}, pages = {n/a--n/a}, note = {cav.1686}, doi = {10.1002/cav.1686}, issn = {1546-427X}, keywords = {light source estimation, augmented reality, GPU implementation, RGB-D camera}, url = {http://dx.doi.org/10.1002/cav.1686} }
- M. Cazorla and D. Viejo, "JavaVis: An integrated computer vision library for teaching computer vision," Computer applications in engineering education, vol. 23, iss. 2, p. 258–267, 2015.
[Bibtex]@article{CAE:CAE21594, abstract = { In this article, we present a new framework oriented to teach Computer Vision related subjects called JavaVis. It is a computer vision library divided in three main areas: 2D package is featured for classical computer vision processing; 3D package, which includes a complete 3D geometric toolset, is used for 3D vision computing; Desktop package comprises a tool for graphic designing and testing of new algorithms. JavaVis is designed to be easy to use, both for launching and testing existing algorithms and for developing new ones.}, author = {Cazorla, Miguel and Viejo, Diego}, doi = {10.1002/cae.21594}, issn = {1099-0542}, journal = {Computer Applications in Engineering Education}, keywords = {3D data,Java GUI,computer vision,image processing teaching,open source}, number = {2}, pages = {258--267}, title = {{JavaVis: An integrated computer vision library for teaching computer vision}}, url = {http://dx.doi.org/10.1002/cae.21594}, volume = {23}, year = {2015} }
- M. Cazorla and D. Viejo, "Experiences Using an Open Source Software Library to Teach Computer Vision Subjects," Journal of technology and science education, vol. 4, iss. 3, p. 214–227, 2015.
[Bibtex]@article{cazorla2015, abstract = {Machine vision is an important subject in computer science and engineering degrees. For laboratory experimentation, it is desirable to have a complete and easy-to-use tool. In this work we present a Java library, oriented to teaching computer vision. We have designed and built the library from the scratch with emphasis on readability and understanding rather than on efficiency. However, the library can also be used for research purposes. JavaVis is an open source Java library, oriented to the teaching of Computer Vision. It consists of a framework with several features that meet its demands. It has been designed to be easy to use: the user does not have to deal with internal structures or graphical interface, and should the student need to add a new algorithm it can be done simply enough. Once we sketch the library, we focus on the experience the student gets using this library in several computer vision courses. Our main goal is to find out whether the students understand what they are doing, that is, find out how much the library helps the student in grasping the basic concepts of computer vision. In the last four years we have conducted surveys to assess how much the students have improved their skills by using this library. }, author = {Cazorla, Miguel and Viejo, Diego}, issn = {2014-5349}, journal = {Journal of Technology and Science Education}, keywords = {Computer vision teaching,Open source,engineering}, number = {3}, pages = {214--227}, title = {{Experiences Using an Open Source Software Library to Teach Computer Vision Subjects}}, volume = {4}, year = {2015} }
- M. Cazorla, J. Garcia-Rodriguez, J. M. C. Plaza, I. G. Varea, V. Matellan, F. M. Rico, J. Martinez-Gomez, F. J. R. Lera, C. Suarez Mejias, and M. E. M. Sahuquillo, "SIRMAVED: Development of a comprehensive robotic system for monitoring and interaction for people with acquired brain damage and dependent people," in Xvi conferencia de la asociacion espanola para la inteligencia artificial (caepia), 2015.
[Bibtex]@inproceedings{Cazorla2015Caepia1, author = {Cazorla, Miguel and Garcia-Rodriguez, Jose and Plaza, Jose Maria Canas and Varea, Ismael Garcia and Matellan, Vicente and Rico, Francisco Martin and Martinez-Gomez, Jesus and Lera, Francisco Javier Rodriguez and Mejias, Cristina Suarez and Sahuquillo, Maria Encarnacion Martinez}, booktitle = {XVI Conferencia de la Asociacion Espanola para la Inteligencia Artificial (CAEPIA)}, title = {{SIRMAVED: Development of a comprehensive robotic system for monitoring and interaction for people with acquired brain damage and dependent people}}, year = {2015} }
- F. Gomez-Donoso and M. Cazorla, "Recognizing Schaeffer's Gestures for Robot Interaction," in Actas de la conferencia de la asociacion espanola para la inteligencia artificial (caepia), 2015.
[Bibtex]@inproceedings{Gomez2015, abstract = {In this paper we present a new interaction system for Schaeffer's gesture language recognition. It uses the information provided by an RGBD camera to capture body motion and recognize gestures. Schaeffer's gestures are a reduced set of gestures designed for people with cognitive disabilities. The system is able to send alarms to an assistant or even a robot for human robot interaction.}, author = {Francisco Gomez-Donoso and Miguel Cazorla}, booktitle= {Actas de la Conferencia de la Asociacion Espanola para la Inteligencia Artificial (CAEPIA)}, keywords = {3d gesture recognition,schaeffer's gestures,human robot interaction}, title = {{Recognizing Schaeffer's Gestures for Robot Interaction}}, url = {http://simd.albacete.org/actascaepia15/papers/01045.pdf}, year = {2015} }
- J. Martinez-Gomez, M. Cazorla, I. Garcia-Varea, and C. Romero-Gonzalez, "Object categorization from RGB-D local features and Bag Of Words," in 2nd iberian robotics conference, 2015.
[Bibtex]@inproceedings{MartinezRobot2015, author = {Martinez-Gomez, Jesus and Cazorla, Miguel and Garcia-Varea, Ismael and Romero-Gonzalez, Cristina}, booktitle = {2nd Iberian robotics conference}, title = {{Object categorization from RGB-D local features and Bag Of Words}}, year = {2015} }
- V. Morell, J. Martinez-Gomez, M. Cazorla, and I. Garcia-Varea, "ViDRILO: The Visual and Depth Robot Indoor Localization with Objects information dataset," International journal of robotics research, vol. 34, iss. 14, p. 1681–1687, 2015.
[Bibtex]@article{Morell2015, author = {Vicente Morell and Jesus Martinez-Gomez and Miguel Cazorla and Ismael Garcia-Varea}, journal = {International Journal of Robotics Research}, number = {14}, pages = {1681--1687}, title = {{ViDRILO: The Visual and Depth Robot Indoor Localization with Objects information dataset}}, volume = {34}, year = {2015} }
- S. Orts-Escolano, J. Garcia-Rodriguez, J. A. Serra-Perez, A. Jimeno-Morenilla, A. Garcia-Garcia, V. Morell, and M. Cazorla, "3D Model Reconstruction using Neural Gas Accelerated on GPUs," Applied soft computing, vol. 32, p. 87–100, 2015.
[Bibtex]@Article{Orts-Escolano2015, author = {Orts-Escolano, S and Garcia-Rodriguez, J and Serra-Perez, J A and Jimeno-Morenilla, A and Garcia-Garcia, A and Morell, V and Cazorla, Miguel}, title = {{3D Model Reconstruction using Neural Gas Accelerated on GPUs}}, journal = {Applied Soft Computing}, year = {2015}, volume = {32}, pages = {87--100}, issn = {1568-4946}, abstract = {In this work, we propose the use of the Neural Gas (NG), a neural network with unsupervised competitive Hebbian learning (CHL), to develop a reverse engineering process. This is a simple and accurate method to reconstruct objects from the point cloud obtained from overlapped multiple views using low cost sensors. In contrast to other methods that may need several stages that include downsampling, noise filtering and many other tasks, the NG automatically obtains the 3D model of the scanned objects. The combination of the acquired and reconstructed 3D models with virtual and augmented reality environments allows the users interaction and also permits developing a virtual design and manufacturing system. To demonstrate the validity of our proposal we tested our method with several models and performed a study of the neural network parameterization calculating the quality of representation and also comparing results with other neural methods like Growing Neural Gas and Kohonen maps or classical methods like Voxel Grid. We also reconstructed models acquired by low cost sensors that can be included in virtual and augmented reality environments to redesign or manipulation purpose. Since the NG algorithm has a strong computational cost we propose its acceleration. We have redesigned and implemented the NG learning algorithm to fit it onto a Graphic Processor Unit using CUDA. A speed-up of 180x faster is obtained compared to the sequential CPU version.}, }
- S. Orts-Escolano, J. Garcia-Rodriguez, V. Morell, M. Cazorla, M. Saval-Calvo, and J. Azorin, "Processing Point Cloud Sequences with Growing Neural Gas," in Neural networks (ijcnn), the 2015 international joint conference on, 2015.
[Bibtex]@inproceedings{Orts2015IJCNN, author = {Orts-Escolano, S and Garcia-Rodriguez, J and Morell, V and Cazorla, M and Saval-Calvo, M and Azorin, J}, booktitle = {Neural Networks (IJCNN), The 2015 International Joint Conference on}, title = {{Processing Point Cloud Sequences with Growing Neural Gas}}, year = {2015} }
- M. Saval-Calvo, Sergio Orts-Escolano, J. Azorin-Lopez, J. Garcia-Rodriguez, A. Fuster-Guillo, V. Morell-Gimenez, and M. Cazorla, "Non-rigid point set registration using color and data downsampling," in Neural networks (ijcnn), the 2015 international joint conference on, 2015.
[Bibtex]@InProceedings{saval-Calvo2015, author = {Saval-Calvo, Marcelo and Orts-Escolano, Sergio and Azorin-Lopez, Jorge and Garcia-Rodriguez, Jose and Fuster-Guillo, Andres and Morell-Gimenez, Vicente and Cazorla, Miguel}, title = {{Non-rigid point set registration using color and data downsampling}}, booktitle = {Neural Networks (IJCNN), The 2015 International Joint Conference on}, year = {2015}, }
- Sergio Orts-Escolano, V. Morell, J. Garcia-Rodriguez, M. Cazorla, and R. Fisher, "Real-time 3D semi-local surface patch extraction using GPGPU," Journal of real-time image processing, vol. 10, iss. 4, p. 647–666, 2015.
[Bibtex]@article{Orts-Escolano2015JRTIP, author = {Orts-Escolano, Sergio and Morell, Vicente and Garcia-Rodriguez, Jose and Cazorla, Miguel and Fisher, Robert B.}, doi = {10.1007/s11554-013-0385-7}, issn = {1861-8200}, journal = {Journal of Real-Time Image Processing}, keywords = {Real-time; GPGPU; RGB-D; 3D local shape descriptor}, number = {4}, pages = {647--666}, publisher = {Springer Berlin Heidelberg}, title = {{Real-time 3D semi-local surface patch extraction using GPGPU}}, url = {http://dx.doi.org/10.1007/s11554-013-0385-7}, volume = {10}, year = {2015} }
- J. C. Rangel, V. Morell, M. Cazorla, S. Orts-Escolano, and J. Garcia-Rodriguez, "Using GNG on 3D Object Recognition in Noisy RGB-D data," in Neural networks (ijcnn), the 2015 international joint conference on, 2015.
[Bibtex]@inproceedings{Rangel2015IJCNN, author = {Rangel, J C and Morell, V and Cazorla, M and Orts-Escolano, S and Garcia-Rodriguez, J}, booktitle = {Neural Networks (IJCNN), The 2015 International Joint Conference on}, title = {{Using GNG on 3D Object Recognition in Noisy RGB-D data}}, year = {2015} }
- J. C. Rangel, M. Cazorla, I. G. Varea, J. Martinez-Gomez, E. Fromont, and M. Sebban, "Computing Image Descriptors from Annotations Acquired from External Tools," in 2nd iberian robotics conference, 2015.
[Bibtex]@inproceedings{RangelRobot2015, author = {Rangel, Jose Carlos and Cazorla, Miguel and Varea, Ismael Garcia and Martinez-Gomez, Jesus and Fromont, Elisa and Sebban, Marc}, booktitle = {2nd Iberian robotics conference}, title = {{Computing Image Descriptors from Annotations Acquired from External Tools}}, year = {2015} }
- E. Martinez-Martin and A. P. del Pobil, "Uji hri-bd: a new human-robot interaction benchmark dataset," Human-robot interactions: principles, technologies and challenges, pp. 57-73, 2015.
[Bibtex]@article{Martinez-Martin2015,title = {UJI HRI-BD: A new human-robot interaction benchmark dataset},journal = {Human-Robot Interactions: Principles, Technologies and Challenges},year = {2015},pages = {57--73},author = {Martinez-Martin, E. and del Pobil, A.P.}}
- G. Gonzalez-Serrano, D. Jiménez-Carretero, S. Rodríguez-López, K. K. Kumamaru, E. George, R. S. J. Estépar, F. J. Rybicki, and M. J. Ledesma-Carbayo, "Automated axial right ventricle to left ventricle diameter ratio computation in computed tomography pulmonary angiography," Plos one, vol. 10, 2015.
[Bibtex]@article{Gonzalez2015, abstract = {© 2015 Gonz\'{a}lez et al. This is an open access article distributed under the terms of the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original author and source are credited. Background and Purpose: Right Ventricular to Left Ventricular (RV/LV) diameter ratio has been shown to be a prognostic biomarker for patients suffering from acute Pulmonary Embolism (PE). While Computed Tomography Pulmonary Angiography (CTPA) images used to confirm a clinical suspicion of PE do include information of the heart, a numerical RV/LV diameter ratio is not universally reported, likely because of lack in training, inter-reader variability in the measurements, and additional effort by the radiologist. This study designs and validates a completely automated Computer Aided Detection (CAD) system to compute the axial RV/ LV diameter ratio from CTPA images so that the RV/LV diameter ratio can be a more objective metric that is consistently reported in patients for whom CTPA diagnoses PE. Materials and Methods: The CAD system was designed specifically for RV/LV measurements. The system was tested in 198 consecutive CTPA patients with acute PE. Its accuracy was evaluated using reference standard RV/LV radiologist measurements and its prognostic value was established for 30-day PE-specific mortality and a composite outcome of 30-day PE-specific mortality or the need for intensive therapies. The study was Institutional Review Board (IRB) approved and HIPAA compliant. Results: The CAD system analyzed correctly 92.4%(183/198) of CTPA studies. The mean difference between automated and manually computed axial RV/LV ratios was 0.03±0.22. The correlation between the RV/LV diameter ratio obtained by the CAD system and that obtained by the radiologist was high (r=0.81). 
Compared to the radiologist, the CAD system equally achieved high accuracy for the composite outcome, with areas under the receiver operating characteristic curves of 0.75 vs. 0.78. Similar results were found for 30-days PE-specific mortality, with areas under the curve of 0.72 vs. 0.75. Conclusions: An automated CAD system for determining the CT derived RV/LV diameter ratio in patients with acute PE has high accuracy when compared to manual measurements and similar prognostic significance for two clinical outcomes.}, author = {G. Gonzalez-Serrano and D. Jim\'{e}nez-Carretero and S. Rodr\'{i}guez-L\'{o}pez and K.K. Kumamaru and E. George and R.S.J. Est\'{e}par and F.J. Rybicki and M.J. Ledesma-Carbayo}, doi = {10.1371/journal.pone.0127797}, issn = {19326203}, issue = {5}, journal = {PLoS ONE}, title = {Automated axial right ventricle to left ventricle diameter ratio computation in computed tomography pulmonary angiography}, volume = {10}, year = {2015}, }
- S. Rodriguez-Lopez, D. Jimenez-Carretero, R. S. J. Estepar, E. F. Moreno, K. K. Kumamaru, F. J. Rybicki, M. J. Ledesma-Carbayo, and G. Gonzalez-Serrano, "Automatic ventricle detection in computed tomography pulmonary angiography," in Proceedings - international symposium on biomedical imaging, 2015, pp. 1143-1146.
[Bibtex]@inproceedings{RodriguezLopez2015, abstract = {Automated medical image analysis requires methods to localize anatomic structures in the presence of normal interpatient variability, pathology, and the different protocols used to acquire images for different clinical settings. Recent advances have improved object detection in the context of natural images, but they have not been adapted to the 3D context of medical images. In this paper we present a 2.5D object detector designed to locate, without any user interaction, the left and right heart ventricles in Computed Tomography Pulmonary Angiography (CTPA) images. A 2D object detector is trained to find ventricles on axial slices. Those detections are automatically clustered according to their size and position. The cluster with highest score, representing the 3D location of the ventricle, is then selected. The proposed method is validated in 403 CTPA studies obtained in patients with clinically suspected pulmonary embolism. Both ventricles are properly detected in 94.7% of the cases. The proposed method is very generic and can be easily adapted to detect other structures in medical images. © 2015 IEEE.}, author = {Sara Rodriguez-Lopez and Daniel Jimenez-Carretero and Raul San Jose Estepar and Eduardo Fraile Moreno and Kanako K. Kumamaru and Frank J. Rybicki and Maria J. Ledesma-Carbayo and German Gonzalez-Serrano}, doi = {10.1109/ISBI.2015.7164074}, isbn = {9781479923748}, issn = {19458452}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, keywords = {CTPA,Detection,HOG,Heart Ventricle}, pages = {1143-1146}, title = {Automatic ventricle detection in Computed Tomography Pulmonary Angiography}, volume = {2015-July}, year = {2015}, }
2014
- M. Cazorla, P. Gil, Santiago Puente, J. L. Munoz, and D. Pastor, "An improvement of a SLAM RGB-D method with movement prediction derived from a study of visual features," Advanced robotics, vol. 28, iss. 18, p. 1231–1242, 2014.
[Bibtex]@article{Cazorla2014An, abstract = {This paper presents a method for the fast calculation of a robot's egomotion using visual features. The method is part of a complete system for automatic map building and Simultaneous Location and Mapping (SLAM). The method uses optical flow to determine whether the robot has undergone a movement. If so, some visual features that do not satisfy several criteria are deleted, and then egomotion is calculated. Thus, the proposed method improves the efficiency of the whole process because not all the data is processed. We use a state-of-the-art algorithm (TORO) to rectify the map and solve the SLAM problem. Additionally, a study of different visual detectors and descriptors has been conducted to identify which of them are more suitable for the SLAM problem. Finally, a navigation method is described using the map obtained from the SLAM solution.}, author = {Cazorla, Miguel and Gil, Pablo and Puente, Santiago and Munoz, Jose Luis and Pastor, Daniel}, journal = {Advanced robotics}, keywords = {3D data,RGB-D data,SLAM,visual features}, number = {18}, pages = {1231--1242}, title = {{An improvement of a SLAM RGB-D method with movement prediction derived from a study of visual features}}, volume = {28}, year = {2014} }
- J. Garcia-Rodriguez, S. Orts-Escolano, N. Angelopoulou, A. Psarrou, and J. Azorin-Lopez, "Real time motion estimation using a neural architecture implemented on GPUs," in Journal of real-time image processing, 2014.
[Bibtex]@inproceedings{garcia2014z, author = {J. Garcia-Rodriguez and S. Orts-Escolano and N. Angelopoulou and A. Psarrou and J. Azorin-Lopez}, booktitle = {Journal of Real-Time Image Processing}, title = {{Real time motion estimation using a neural architecture implemented on GPUs}}, year = {2014} }
- D. Gil, J. Garcia-Rodriguez, M. Cazorla, and M. Johnsson, "SARASOM: a supervised architecture based on the recurrent associative SOM," Neural computing and applications, p. 1–13, 2014.
[Bibtex]@article{gil2014, author = {Gil, David and Garcia-Rodriguez, Jose and Cazorla, Miguel and Johnsson, Magnus}, title = {{SARASOM: a supervised architecture based on the recurrent associative SOM}}, journal = {Neural Computing and Applications}, year = {2014}, abstract = {We present and evaluate a novel supervised recurrent neural network architecture, the SARASOM, based on the associative self-organizing map. The performance of the SARASOM is evaluated and compared with the Elman network as well as with a hidden Markov model (HMM) in a number of prediction tasks using sequences of letters, including some experiments with a reduced lexicon of 15 words. The results were very encouraging with the SARASOM learning better and performing with better accuracy than both the Elman network and the HMM.}, url = {http://dx.doi.org/10.1007/s00521-014-1785-8}, pages = {1--13}, doi = {10.1007/s00521-014-1785-8}, issn = {0941-0643}, keywords = {Recurrent associative self-organizing map; supervi}, publisher = {Springer London} }
- Sergio Orts-Escolano, J. Garcia-Rodriguez, V. Morell, M. Cazorla, J. Azorin, and J. M. Garcia-Chamizo, "Parallel Computational Intelligence-Based Multi-Camera Surveillance System," Journal of sensor and actuator networks, vol. 3, iss. 2, p. 95–112, 2014.
[Bibtex]@article{jsan3020095, abstract = {In this work, we present a multi-camera surveillance system based on the use of self-organizing neural networks to represent events on video. The system processes several tasks in parallel using GPUs (graphic processor units). It addresses multiple vision tasks at various levels, such as segmentation, representation or characterization, analysis and monitoring of the movement. These features allow the construction of a robust representation of the environment and interpret the behavior of mobile agents in the scene. It is also necessary to integrate the vision module into a global system that operates in a complex environment by receiving images from multiple acquisition devices at video frequency. Offering relevant information to higher level systems, monitoring and making decisions in real time, it must accomplish a set of requirements, such as: time constraints, high availability, robustness, high processing speed and re-configurability. We have built a system able to represent and analyze the motion in video acquired by a multi-camera network and to process multi-source data in parallel on a multi-GPU architecture.}, author = {Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Morell, Vicente and Cazorla, Miguel and Azorin, Jorge and Garcia-Chamizo, Juan Manuel}, doi = {10.3390/jsan3020095}, issn = {2224-2708}, journal = {Journal of Sensor and Actuator Networks}, keywords = {growing neural gas; camera networks; visual survei}, number = {2}, pages = {95--112}, title = {{Parallel Computational Intelligence-Based Multi-Camera Surveillance System}}, url = {http://www.mdpi.com/2224-2708/3/2/95}, volume = {3}, year = {2014} }
- J. Montoyo, V. Morell, M. Cazorla, J. Garcia-Rodriguez, and Sergio Orts-Escolano, "Registration methods for RGB-D cameras accelerated on GPUs," in International symposium on robotics, isr, 2014.
[Bibtex]@inproceedings{Montoyo20143Registration, author = {Montoyo, Javier and Morell, Vicente and Cazorla, Miguel and Garcia-Rodriguez, Jose and Orts-Escolano, Sergio}, booktitle = {International symposium on robotics, ISR}, title = {{Registration methods for RGB-D cameras accelerated on GPUs}}, year = {2014} }
- V. Morell, Sergio Orts-Escolano, M. Cazorla, and J. Garcia-Rodriguez, "Geometric 3D point cloud compression," Pattern recognition letters, vol. 50, p. 55–62, 2014.
[Bibtex]@Article{Morell2014, author = {Morell, Vicente and Orts-Escolano, Sergio and Cazorla, Miguel and Garcia-Rodriguez, Jose}, title = {{Geometric 3D point cloud compression}}, journal = {Pattern Recognition Letters}, year = {2014}, volume = {50}, pages = {55--62}, abstract = { The use of 3D data in mobile robotics applications provides valuable information about the robot's environment but usually the huge amount of 3D information is unmanageable by the robot storage and computing capabilities. A data compression is necessary to store and manage this information but preserving as much information as possible. In this paper, we propose a 3D lossy compression system based on plane extraction which represent the points of each scene plane as a Delaunay triangulation and a set of points/area information. The compression system can be customized to achieve different data compression or accuracy ratios. It also supports a color segmentation stage to preserve original scene color information and provides a realistic scene reconstruction. The design of the method provides a fast scene reconstruction useful for further visualization or processing tasks. }, doi = {http://dx.doi.org/10.1016/j.patrec.2014.05.016}, issn = {0167-8655}, keywords = {3D data; Compression; Kinect} }
- V. Morell, M. Cazorla, Sergio Orts-Escolano, and J. Garcia-Rodriguez, "3D Maps Representation using GNG," in Neural networks (ijcnn), the 2014 international joint conference on, 2014.
[Bibtex]@InProceedings{Morell20143d, author = {Morell, Vicente and Cazorla, Miguel and Orts-Escolano, Sergio and Garcia-Rodriguez, Jose}, title = {{3D Maps Representation using GNG}}, booktitle = {Neural Networks (IJCNN), The 2014 International Joint Conference on}, year = {2014} }
- V. Morell-Gimenez, M. Saval-Calvo, J. Azorin-Lopez, J. Garcia-Rodriguez, M. Cazorla, Sergio Orts-Escolano, and A. Fuster-Guillo, "A Comparative Study of Registration Methods for RGB-D Video of Static Scenes," Sensors, vol. 14, iss. 5, p. 8547–8576, 2014.
[Bibtex]@article{morell2014comparative, abstract = {The use of RGB-D sensors for mapping and recognition tasks in robotics or, in general, for virtual reconstruction has increased in recent years. The key aspect of these kinds of sensors is that they provide both depth and color information using the same device. In this paper, we present a comparative analysis of the most important methods used in the literature for the registration of subsequent RGB-D video frames in static scenarios. The analysis begins by explaining the characteristics of the registration problem, dividing it into two representative applications: scene modeling and object reconstruction. Then, a detailed experimentation is carried out to determine the behavior of the different methods depending on the application. For both applications, we used standard datasets and a new one built for object reconstruction.}, author = {Morell-Gimenez, Vicente and Saval-Calvo, Marcelo and Azorin-Lopez, Jorge and Garcia-Rodriguez, Jose and Cazorla, Miguel and Orts-Escolano, Sergio and Fuster-Guillo, Andres}, doi = {10.3390/s140508547}, issn = {1424-8220}, journal = {Sensors}, keywords = {RGB-D sensor; registration; robotics mapping; obje}, month = may, number = {5}, pages = {8547--8576}, publisher = {Multidisciplinary Digital Publishing Institute}, title = {{A Comparative Study of Registration Methods for RGB-D Video of Static Scenes}}, url = {http://www.mdpi.com/1424-8220/14/5/8547}, volume = {14}, year = {2014} }
- Sergio Orts-Escolano, J. Garcia-Rodriguez, V. Morella, M. Cazorla, and J. M. Garcia-Chamizo, "3D Colour Object Reconstruction based on Growing Neural Gas," in Neural networks (ijcnn), the 2014 international joint conference on, 2014.
[Bibtex]@inproceedings{Orts20143d, author = {Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Morella, Vicente and Cazorla, Miguel and Garcia-Chamizo, Juan Manuel}, booktitle = {Neural Networks (IJCNN), The 2014 International Joint Conference on}, title = {{3D Colour Object Reconstruction based on Growing Neural Gas}}, year = {2014} }
- D. Viejo, J. Garcia-Rodriguez, and M. Cazorla, "Combining Visual Features and Growing Neural Gas Networks for Robotic 3D SLAM," Information sciences, vol. 276, p. 174–185, 2014.
[Bibtex]@article{viejo2014combining, abstract = {The use of 3D data in mobile robotics provides valuable information about the robot's environment. Traditionally, stereo cameras have been used as a low-cost 3D sensor. However, the lack of precision and texture for some surfaces suggests that the use of other 3D sensors could be more suitable. In this work, we examine the use of two sensors: an infrared SR4000 and a Kinect camera. We use a combination of 3D data obtained by these cameras, along with features obtained from 2D images acquired from these cameras, using a Growing Neural Gas (GNG) network applied to the 3D data. The goal is to obtain a robust egomotion technique. The GNG network is used to reduce the camera error. To calculate the egomotion, we test two methods for 3D registration. One is based on an iterative closest points algorithm, and the other employs random sample consensus. Finally, a simultaneous localization and mapping method is applied to the complete sequence to reduce the global error. The error from each sensor and the mapping results from the proposed method are examined.}, author = {Viejo, Diego and Garcia-Rodriguez, Jose and Cazorla, Miguel}, journal = {Information Sciences}, keywords = {GNG; SLAM; 3D registration}, pages = {174--185}, title = {{Combining Visual Features and Growing Neural Gas Networks for Robotic 3D SLAM}}, volume = {276}, year = {2014} }
- D. Viejo and M. Cazorla, "A robust and fast method for 6DoF motion estimation from generalized 3D data," Autonomous robots, 2014.
[Bibtex]@article{Viejo2014raey, author = {Viejo, Diego and Cazorla, Miguel}, doi = {10.1007/s10514-013-9354-z}, issn = {0929-5593}, journal = {Autonomous Robots}, keywords = {6DoF pose registration; 3D mapping; Mobile robots;}, publisher = {Springer US}, title = {{A robust and fast method for 6DoF motion estimation from generalized 3D data}}, year = {2014} }
- E. Martinez-Martin, A. P. del Pobil, M. Chessa, F. Solari, and P. Silvio Sabatini, "An active system for visually-guided reaching in 3d across binocular fixations," The scientific world journal, vol. 2014, p. 1–16, 2014.
[Bibtex]@article{Martinez_Martin_2014, doi = {10.1155/2014/179391}, url = {http://dx.doi.org/10.1155/2014/179391}, year = 2014, publisher = {Hindawi Publishing Corporation}, volume = {2014}, pages = {1--16}, author = {Ester Martinez-Martin and Angel P. del Pobil and Manuela Chessa and Fabio Solari and Silvio P. Sabatini}, title = {An Active System for Visually-Guided Reaching in 3D across Binocular Fixations}, journal = {The Scientific World Journal} }
- E. Martinez-Martin and A. P. Del Pobil, "Animal social behaviour: a visual analysis," Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics), vol. 8575 LNAI, pp. 320-327, 2014.
[Bibtex]@article{Martinez-Martin2014,title = {Animal social behaviour: A visual analysis},journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},year = {2014},volume = {8575 LNAI},pages = {320--327},author = {Martinez-Martin, E. and Del Pobil, A.P.}}
- G. Gonzalez-Serrano, V. Parot, W. Lo, B. J. Vakoc, and N. J. Durr, "Feature space optimization for virtual chromoendoscopy augmented by topography," in Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics), 2014.
[Bibtex]@inproceedings{Gonzalez2014Chromoendoscopy, abstract = {Optical colonoscopy is the preferred modality for the screening and prevention of colorectal cancer. Chromoendoscopy can increase lesion detection rate by highlighting tissue topography with a colored dye, but is too time-consuming to be adopted in routine colonoscopy screening. We developed a fast and dye-free technique that generates virtual chromoendoscopy images that incorporate topography features acquired from photometric stereo endoscopy. We demonstrate that virtual chromoendoscopy augmented by topography achieves similar image quality to conventional chromoendoscopy in ex-vivo swine colon. © 2014 Springer International Publishing.}, author = {G. Gonzalez-Serrano and V. Parot and W. Lo and B.J. Vakoc and N.J. Durr}, doi = {10.1007/978-3-319-10404-1_80}, isbn = {9783319104034}, issn = {16113349}, issue = {PART 1}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, keywords = {chromoendoscopy,optical colonoscopy,photometric stereo endoscopy,topography}, title = {Feature space optimization for virtual chromoendoscopy augmented by topography}, volume = {8673 LNCS}, year = {2014}, }
- N. J. Durr, G. Gonzalez-Serrano, D. Lim, G. Traverso, N. S. Nishioka, B. J. Vakoc, and V. Parot, "System for clinical photometric stereo endoscopy," in Progress in biomedical optics and imaging - proceedings of spie, 2014.
[Bibtex]@inproceedings{Durr2014, abstract = {Photometric stereo endoscopy is a technique that captures information about the high-spatial-frequency topography of the field of view simultaneously with a conventional color image. Here we describe a system that will enable photometric stereo endoscopy to be clinically evaluated in the large intestine of human patients. The clinical photometric stereo endoscopy system consists of a commercial gastroscope, a commercial video processor, an image capturing and processing unit, custom synchronization electronics, white light LEDs, a set of four fibers with diffusing tips, and an alignment cap. The custom pieces that come into contact with the patient are composed of biocompatible materials that can be sterilized before use. The components can then be assembled in the endoscopy suite before use. The resulting endoscope has the same outer diameter as a conventional colonoscope (14 mm), plugs into a commercial video processor, captures topography and color images at 15 Hz, and displays the conventional color image to the gastroenterologist in real-time. We show that this system can capture a color and topographical video in a tubular colon phantom, demonstrating robustness to complex geometries and motion. The reported system is suitable for in vivo evaluation of photometric stereo endoscopy in the human large intestine.}, author = {N.J. Durr and G. Gonzalez-Serrano and D. Lim and G. Traverso and N.S. Nishioka and B.J. Vakoc and V. Parot}, doi = {10.1117/12.2038119}, isbn = {9780819498489}, issn = {16057422}, booktitle = {Progress in Biomedical Optics and Imaging - Proceedings of SPIE}, keywords = {Colonoscopy,Computer aided detection,Computer vision,Endoscopy,Stereo,Topography}, title = {System for clinical photometric stereo endoscopy}, volume = {8935}, year = {2014}, }
- N. J. Durr, V. J. Parot, G. Traverso, W. P. Puricelli, B. J. Vakoc, N. S. Nishioka, and G. Gonzalez-Serrano, "Imaging colonic surface topography with photometric stereo endoscopy," in Gastrointestinal endoscopy, 2014, p. AB459.
[Bibtex]@inproceedings{Durr2014b, author = {Nicholas J. Durr and Vicente J. Parot and Giovanni Traverso and William P. Puricelli and Benjamin J. Vakoc and Norman S. Nishioka and German Gonzalez-Serrano}, doi = {10.1016/j.gie.2014.02.676}, issn = {00165107}, issue = {5}, booktitle = {Gastrointestinal Endoscopy}, pages = {AB459}, title = {Imaging Colonic Surface Topography With Photometric Stereo Endoscopy}, volume = {79}, url = {http://linkinghub.elsevier.com/retrieve/pii/S0016510714008463}, year = {2014}, }
- R. Rigamonti, V. Lepetit, G. Gonzalez-Serrano, E. Türetken, F. Benmansour, M. Brown, and P. Fua, "On the relevance of sparsity for image classification," Computer vision and image understanding, vol. 125, pp. 115-127, 2014.
[Bibtex]@article{Rigamonti2014, abstract = {In this paper we empirically analyze the importance of sparsifying representations for classification purposes. We focus on those obtained by convolving images with linear filters, which can be either hand designed or learned, and perform extensive experiments on two important Computer Vision problems, image categorization and pixel classification. To this end, we adopt a simple modular architecture that encompasses many recently proposed models. The key outcome of our investigations is that enforcing sparsity constraints on features extracted in a convolutional architecture does not improve classification performance, whereas it does so when redundancy is artificially introduced. This is very relevant for practical purposes, since it implies that the expensive run-time optimization required to sparsify the representation is not always justified, and therefore that computational costs can be drastically reduced. © 2014 Elsevier Inc. All rights reserved.}, author = {Roberto Rigamonti and Vincent Lepetit and Germ\'{a}n Gonzalez-Serrano and Engin Türetken and Fethallah Benmansour and Matthew Brown and Pascal Fua}, doi = {10.1016/j.cviu.2014.03.009}, issn = {1090235X}, journal = {Computer Vision and Image Understanding}, keywords = {Image categorization,Image descriptors,Pixel classification,Sparse representations}, pages = {115-127}, publisher = {Elsevier Inc.}, title = {On the relevance of sparsity for image classification}, volume = {125}, url = {http://dx.doi.org/10.1016/j.cviu.2014.03.009}, year = {2014}, }
- N. J. Durr, G. Gonzalez-Serrano, and V. Parot, "3d imaging techniques for improved colonoscopy," Expert review of medical devices, vol. 11, pp. 105-107, 2014.
[Bibtex]@article{Durr2014c, abstract = {Colonoscopy screening with a conventional 2D colonoscope is known to reduce mortality due to colorectal cancer by half. Unfortunately, the protective value of this procedure is limited by missed lesions. To improve the sensitivity of colonoscopy to precancerous lesions, 3D imaging techniques could be used to highlight their characteristic morphology. While 3D imaging has proved beneficial for laparoscopic procedures, more research is needed to assess how it will improve applications of flexible endoscopy. In this editorial, we discuss the possible uses of 3D technologies in colonoscopy and factors that have hindered the translation of 3D imaging to flexible endoscopy. Emerging 3D imaging technologies for flexible endoscopy have the potential to improve sensitivity, lesion resection, training and automated lesion detection. To maximize the likelihood of clinical adoption, these technologies should require minimal hardware modification while maintaining the robustness and quality of regular 2D imaging. © Informa UK, Ltd.}, author = {Nicholas J. Durr and Germ\'{a}n Gonzalez-Serrano and Vicente Parot}, doi = {10.1586/17434440.2013.868303}, issn = {17434440}, issue = {2}, journal = {Expert Review of Medical Devices}, keywords = {Colorectal cancer,Depth,Endoscopy,Laparoscopy,Morphology,Photometric,Stereo,Topography}, pages = {105-107}, title = {3D imaging techniques for improved colonoscopy}, volume = {11}, year = {2014}, }
2013
- S. Orts-Escolano, V. Morell, J. Garcia-Rodriguez, and M. Cazorla, "Point cloud data filtering and downsampling using growing neural gas," in Neural networks (ijcnn), the 2013 international joint conference on, 2013, p. 1–8.
[Bibtex]@inproceedings{6706719, abstract = {3D sensors provide valuable information for mobile robotic tasks like scene classification or object recognition, but these sensors often produce noisy data that makes impossible applying classical keypoint detection and feature extraction techniques. Therefore, noise removal and downsampling have become essential steps in 3D data processing. In this work, we propose the use of a 3D filtering and downsampling technique based on a Growing Neural Gas (GNG) network. GNG method is able to deal with outliers presents in the input data. These features allows to represent 3D spaces, obtaining an induced Delaunay Triangulation of the input space. Experiments show how GNG method yields better input space adaptation to noisy data than other filtering and downsampling methods like Voxel Grid. It is also demonstrated how the state-of-the-art keypoint detectors improve their performance using filtered data with GNG network. Descriptors extracted on improved keypoints perform better matching in robotics applications as 3D scene registration.}, author = {Orts-Escolano, S and Morell, V and Garcia-Rodriguez, J and Cazorla, M}, booktitle = {Neural Networks (IJCNN), The 2013 International Joint Conference on}, doi = {10.1109/IJCNN.2013.6706719}, issn = {2161-4393}, keywords = {feature extraction;image classification;image registration}, pages = {1--8}, title = {{Point cloud data filtering and downsampling using growing neural gas}}, year = {2013} }
- J. Navarrete, D. Viejo, and M. Cazorla, "Portable 3D laser-camera calibration system with color fusion for SLAM," International journal of automation and smart technology, vol. 3, iss. 1, 2013.
[Bibtex]@article{AUSMT163, abstract = { Nowadays, the use of RGB-D sensors have focused a lot of research in computer vision and robotics. These kinds of sensors, like Kinect, allow to obtain 3D data together with color information. However, their working range is limited to less than 10 meters, making them useless in some robotics applications, like outdoor mapping. In these environments, 3D lasers, working in ranges of 20-80 meters, are better. But 3D lasers do not usually provide color information. A simple 2D camera can be used to provide color information to the point cloud, but a calibration process between camera and laser must be done. In this paper we present a portable calibration system to calibrate any traditional camera with a 3D laser in order to assign color information to the 3D points obtained. Thus, we can use laser precision and simultaneously make use of color information. Unlike other techniques that make use of a three-dimensional body of known dimensions in the calibration process, this system is highly portable because it makes use of small catadioptrics that can be placed in a simple manner in the environment. We use our calibration system in a 3D mapping system, including Simultaneous Location and Mapping (SLAM), in order to get a 3D colored map which can be used in different tasks. We show that an additional problem arises: 2D cameras information is different when lighting conditions change. So when we merge 3D point clouds from two different views, several points in a given neighborhood could have different color information. A new method for color fusion is presented, obtaining correct colored maps. 
The system will be tested by applying it to 3D reconstruction.}, author = {Navarrete, Javier and Viejo, Diego and Cazorla, Miguel}, issn = {2223-9766}, journal = {International Journal of Automation and Smart Technology}, keywords = {2D-3D calibration; RGB-D information; color fusion}, number = {1}, title = {{Portable 3D laser-camera calibration system with color fusion for SLAM}}, url = {http://www.ausmt.org/index.php/AUSMT/article/view/163}, volume = {3}, year = {2013} }
- B. Boom, Sergio Orts-Escolano, X. Ning, Steven McDonagh, P. Sandilands, and R. Fisher, "Point light source estimation based on scenes recorded by a rgb-d camera," in Proceedings of the british machine vision conference, 2013.
[Bibtex]@InProceedings{Boom2013, author = {Bas Boom and Sergio Orts-Escolano and Xi Ning and Steven McDonagh and Peter Sandilands and Robert Fisher }, title = {Point Light Source Estimation based on Scenes Recorded by a RGB-D camera }, booktitle = {Proceedings of the British Machine Vision Conference}, year = {2013}, publisher = {BMVA Press}, editor = {Burghardt, Tilo and Damen, Dima and Mayol-Cuevas, Walterio and Mirmehdi, Majid} }
- B. Caputo, H. Muller, B. Thomee, M. Villegas, R. Paredes, D. Zellhofer, H. Goeau, A. Joly, P. Bonnet, J. M. Gomez, I. G. Varea, and M. Cazorla, "ImageCLEF 2013: The Vision, the Data and the Open Challenges," in Information access evaluation. multilinguality, multimodality, and visualization, P. Forner, H. Muller, R. Paredes, P. Rosso, and B. stein, Eds., Springer berlin heidelberg, 2013, vol. 8138, p. 250–268.
[Bibtex]@incollection{Caputo:2013aa, abstract = {This paper presents an overview of the ImageCLEF 2013 lab. since its first edition in 2003, ImageCLEF has become one of the key initiatives promoting the benchmark evaluation of algorithms for the cross-language annotation and retrieval of images in various domains, such as public and personal images, to data acquired by mobile robot platforms and botanic collections. Over the years, by providing new data collections and challenging tasks to the community of interest, the ImageCLEF lab has achieved an unique position in the multi lingual image annotation and retrieval research landscape. The 2013 edition consisted of three tasks: the photo annotation and retrieval task, the plant identification task and the robot vision task. Furthermore, the medical annotation task, that traditionally has been under the ImageCLEF umbrella and that this year celebrates its tenth anniversary, has been organized in conjunction with AMIA for the first time. The paper describes the tasks and the 2013 competition, giving an unifying perspective of the present activities of the lab while discussion the future challenges and opportunities.}, author = {Barbara Caputo and Henning Muller and Bart Thomee and Mauricio Villegas and Roberto Paredes and David Zellhofer and Herve Goeau and Alexis Joly and Pierre Bonnet and Jesus Martinez Gomez and Ismael Garcia Varea and Miguel Cazorla}, booktitle = {Information Access Evaluation. 
Multilinguality, Multimodality, and Visualization}, doi = {10.1007/978-3-642-40802-1_26}, editor = {Forner, Pamela and Muller, Henning and Paredes, Roberto and Rosso, Paolo and Stein, Benno}, isbn = {978-3-642-40801-4}, keywords = {Language Translation and Linguistics Artificial In}, pages = {250--268}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, title = {{ImageCLEF 2013: The Vision, the Data and the Open Challenges}}, url = {http://dx.doi.org/10.1007/978-3-642-40802-1{\_}26}, volume = {8138}, year = {2013} }
- I. Garcia-Varea, M. Cazorla, J. Martinez-Gomez, and B. Caputo, "Overview of the ImageCLEF 2013 Robot Vision Task," in Working notes, clef 2013, 2013.
[Bibtex]@inproceedings{garcia2013overview, author = {Garcia-Varea, Ismael and Cazorla, Miguel and Martinez-Gomez, Jesus and Caputo, Barbara}, booktitle = {Working Notes, CLEF 2013}, number = {EPFL-CONF-192517}, title = {{Overview of the ImageCLEF 2013 Robot Vision Task}}, year = {2013} }
- J. Garcia-Rodriguez, M. Cazorla, S. Orts-Escolano, and V. Morell, "Improving 3D Keypoint Detection from Noisy Data Using Growing Neural Gas," in Advances in computational intelligence, I. Rojas, G. Joya, and J. Cabestany, Eds., Springer berlin heidelberg, 2013, vol. 7903, p. 480–487.
[Bibtex]@incollection{Garcia-Rodriguez:2013aa, abstract = {3D sensors provides valuable information for mobile robotic tasks like scene classification or object recognition, but these sensors often produce noisy data that makes impossible applying classical keypoint detection and feature extraction techniques. Therefore, noise removal and downsampling have become essential steps in 3D data processing. In this work, we propose the use of a 3D filtering and down-sampling technique based on a Growing Neural Gas (GNG) network. GNG method is able to deal with outliers presents in the input data. These features allows to represent 3D spaces, obtaining an induced Delaunay Triangulation of the input space. Experiments show how the state-of-the-art keypoint detectors improve their performance using GNG output representation as input data. Descriptors extracted on improved keypoints perform better matching in robotics applications as 3D scene registration.}, author = {Garcia-Rodriguez, Jos{\'e} and Cazorla, Miguel and Orts-Escolano, Sergio and Morell, Vicente}, booktitle = {Advances in Computational Intelligence}, doi = {10.1007/978-3-642-38682-4_51}, editor = {Rojas, Ignacio and Joya, Gonzalo and Cabestany, Joan}, isbn = {978-3-642-38681-7}, keywords = {GNG; Noisy Point Cloud; Visual Features; Keypoint}, pages = {480--487}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, title = {{Improving 3D Keypoint Detection from Noisy Data Using Growing Neural Gas}}, url = {http://dx.doi.org/10.1007/978-3-642-38682-4{\_}51}, volume = {7903}, year = {2013} }
- A. Jimeno-Morenilla, J. Garcia-Rodriguez, S. Orts-Escolano, and M. Davia-Aracil, "3d-based reconstruction using growing neural gas landmark: application to rapid prototyping in shoe last manufacturing," The international journal of advanced manufacturing technology, vol. 69, iss. 1, p. 657–668, 2013.
[Bibtex]@Article{Jimeno-Morenilla2013, author = {Jimeno-Morenilla, Antonio and Garcia-Rodriguez, Jose and Orts-Escolano, sergio and Davia-Aracil, Miguel}, title = {3D-based reconstruction using growing neural gas landmark: application to rapid prototyping in shoe last manufacturing}, journal = {The International Journal of Advanced Manufacturing Technology}, year = {2013}, volume = {69}, number = {1}, pages = {657--668}, abstract = {Customizing shoe manufacturing is one of the great challenges in the footwear industry. It is a production model change where design adopts not only the main role, but also the main bottleneck. It is therefore necessary to accelerate this process by improving the accuracy of current methods. Rapid prototyping techniques are based on the reuse of manufactured footwear lasts so that they can be modified with CAD systems leading rapidly to new shoe models. In this work, we present a shoe last fast reconstruction method that fits current design and manufacturing processes. The method is based on the scanning of shoe last obtaining sections and establishing a fixed number of landmarks onto those sections to reconstruct the shoe last 3D surface. Automated landmark extraction is accomplished through the use of the self-organizing network, the growing neural gas (GNG), which is able to topographically map the low dimensionality of the network to the high dimensionality of the contour manifold without requiring a priori knowledge of the input space structure. Moreover, our GNG landmark method is tolerant to noise and eliminates outliers. Our method accelerates up to 12 times the surface reconstruction and filtering processes used by the current shoe last design software. The proposed method offers higher accuracy compared with methods with similar efficiency as voxel grid.}, doi = {10.1007/s00170-013-5061-3}, issn = {1433-3015}, url = {http://dx.doi.org/10.1007/s00170-013-5061-3} }
- A. Romero and M. Cazorla, "Learning Multi-class Topological Mapping using Visual Information.," in Visapp (2), 2013, p. 316–321.
[Bibtex]@inproceedings{Romero2013Learning, author = {Romero, Anna and Cazorla, Miguel}, booktitle = {VISAPP (2)}, pages = {316--321}, title = {{Learning Multi-class Topological Mapping using Visual Information.}}, year = {2013} }
- E. Martinez-Martin and A. P. del Pobil, "Visual object recognition for robot tasks in real-life scenarios," in 2013 10th international conference on ubiquitous robots and ambient intelligence (URAI), 2013.
[Bibtex]@inproceedings{Martinez_Martin_2013,doi = {10.1109/urai.2013.6677413},url = {http://dx.doi.org/10.1109/urai.2013.6677413},year = 2013,month = {oct},publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},author = {Ester Martinez-Martin and Angel P. del Pobil},title = {Visual object recognition for robot tasks in real-life scenarios},booktitle = {2013 10th International Conference on Ubiquitous Robots and Ambient Intelligence ({URAI})}}
- E. Martinez-Martin and A. P. del Pobil, "Visual people detection for safe human-robot interaction," in 2013 IEEE RO-MAN, 2013.
[Bibtex]@InProceedings{Martinez-Martin2013d, author = {Ester Martinez-Martin and Angel P. del Pobil}, title = {Visual people detection for safe Human-Robot Interaction}, booktitle = {2013 {IEEE} {RO}-{MAN}}, year = {2013}, month = {aug}, publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})}, doi = {10.1109/roman.2013.6628478}, url = {http://dx.doi.org/10.1109/roman.2013.6628478}, }
- Â. Costa, E. Martinez-Martin, A. P. del Pobil, R. Simoes, and P. Novais, "Find it - an assistant home agent," Advances in intelligent systems and computing, vol. 221, pp. 121-128, 2013.
[Bibtex]@article{Martinez-Martin2013,title = {Find it - an assistant home agent},journal = {Advances in Intelligent Systems and Computing},year = {2013},volume = {221},pages = {121--128},author = {Costa, {\^A}. and Martinez-Martin, E. and del Pobil, A.P. and Simoes, R. and Novais, P.}}
- E. Martinez-Martin, M. Teresa Escrig, and A. P. del Pobil, "Naming qualitative models based on intervals: a general framework," International journal of artificial intelligence, vol. 11, iss. 13 A, pp. 74-92, 2013.
[Bibtex]@Article{Martinez-Martin2013a, author = {Martinez-Martin, E. and Escrig, M. Teresa and del Pobil, A.P.}, title = {Naming qualitative models based on intervals: A general framework}, journal = {International Journal of Artificial Intelligence}, year = {2013}, volume = {11}, number = {13 A}, pages = {74--92}, }
- E. Martinez-Martin and A. P. del Pobil, "Object recognition in cluttered environments," in Computer graphics and imaging / 798: signal processing, pattern recognition and applications, 2013.
[Bibtex]@InProceedings{Martinez-Martin2013e, author = {Ester Martinez-Martin and Angel P. del Pobil}, title = {Object Recognition in Cluttered Environments}, booktitle = {Computer Graphics and Imaging / 798: Signal Processing, Pattern Recognition and Applications}, year = {2013}, publisher = {{ACTA} Press}, doi = {10.2316/p.2013.798-026}, url = {http://dx.doi.org/10.2316/p.2013.798-026}, }
- E. Martinez-Martin, M. T. Escrig, and A. P. del Pobil, "Qualitative acceleration model: representation, reasoning and application," Advances in intelligent systems and computing, vol. 217, pp. 87-94, 2013.
[Bibtex]@Article{Martinez-Martin2013b, author = {Martinez-Martin, E. and Escrig, M.T. and del Pobil, A.P.}, title = {Qualitative acceleration model: Representation, reasoning and application}, journal = {Advances in Intelligent Systems and Computing}, year = {2013}, volume = {217}, pages = {87--94}, }
- V. Parot, D. Lim, G. Gonzalez-Serrano, G. Traverso, N. S. Nishioka, B. J. Vakoc, and N. J. Durr, "Photometric stereo endoscopy," Journal of biomedical optics, vol. 18, p. 76017, 2013.
[Bibtex]@article{Parot2013, abstract = {While color video endoscopy has enabled wide-field examination of the gastrointestinal tract, it often misses or incorrectly classifies lesions. Many of these missed lesions exhibit characteristic three-dimensional surface topographies. An endoscopic system that adds topographical measurements to conventional color imagery could therefore increase lesion detection and improve classification accuracy. We introduce photometric stereo endoscopy (PSE), a technique which allows high spatial frequency components of surface topography to be acquired simultaneously with conventional two-dimensional color imagery. We implement this technique in an endoscopic form factor and demonstrate that it can acquire the topography of small features with complex geometries and heterogeneous optical properties. PSE imaging of ex vivo human gastrointestinal tissue shows that surface topography measurements enable differentiation of abnormal shapes from surrounding normal tissue. Together, these results confirm that the topographical measurements can be obtained with relatively simple hardware in an endoscopic form factor, and suggest the potential of PSE to improve lesion detection and classification in gastrointestinal imaging.}, author = {Vicente Parot and Daryl Lim and Germ\'{a}n Gonzalez-Serrano and Giovanni Traverso and Norman S. Nishioka and Benjamin J. Vakoc and Nicholas J. 
Durr}, doi = {10.1117/1.JBO.18.7.076017}, isbn = {1560-2281 (Electronic) 1083-3668 (Linking)}, issn = {1083-3668}, number = {7}, journal = {Journal of Biomedical Optics}, keywords = {14,17,18,2013,21,accepted for publication jun,computer vision,endoscopy,online jul,paper 130261r received apr,published,revised manuscript received jun,stereoscopy,three-dimensions,tissues}, pages = {076017}, pmid = {23864015}, title = {Photometric stereo endoscopy}, volume = {18}, url = {http://biomedicaloptics.spiedigitallibrary.org/article.aspx?doi=10.1117/1.JBO.18.7.076017}, year = {2013}, }
- G. Gonzalez-Serrano, L. Fusco, F. Benmansour, P. Fua, O. Pertz, and K. Smith, "Automated quantification of morphodynamics for high-throughput live cell time-lapse datasets," in Proceedings - international symposium on biomedical imaging, 2013, pp. 664-667.
[Bibtex]@inproceedings{Gonzalez2013, abstract = {We present a fully automatic method to track and quantify the morphodynamics of differentiating neurons in fluorescence time-lapse datasets. Previous high-throughput studies have been limited to static analysis or simple behavior. Our approach opens the door to rich dynamic analysis of complex cellular behavior in high-throughput time-lapse data. It is capable of robustly detecting, tracking, and segmenting all the components of the neuron including the nucleus, soma, neurites, and filopodia. It was designed to be efficient enough to handle the massive amount of data from a high-throughput screen. Each image is processed in approximately two seconds on a notebook computer. To validate the approach, we applied our method to over 500 neuronal differentiation videos from a small-scale RNAi screen. Our fully automated analysis of over 7,000 neurons quantifies and confirms with strong statistical significance static and dynamic behaviors that had been previously observed by biologists, but never measured. © 2013 IEEE.}, author = {German Gonzalez-Serrano and Ludovico Fusco and Fethallah Benmansour and Pascal Fua and Olivier Pertz and Kevin Smith}, doi = {10.1109/ISBI.2013.6556562}, isbn = {9781467364546}, issn = {19458452}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, keywords = {Fluorescence microscopy,Image sequence processing,Molecular and cellular screening}, pages = {664-667}, publisher = {IEEE Computer Society}, title = {Automated quantification of morphodynamics for high-throughput live cell time-lapse datasets}, year = {2013}, }
2012
- B. Bonev, M. Cazorla, F. Martin, and V. Matellan, "Portable autonomous walk calibration for 4-legged robots," Applied intelligence, vol. 36, iss. 1, p. 136–147, 2012.
[Bibtex]@article{Bonev:2012aa, abstract = {In the present paper we describe an efficient and portable optimization method for calibrating the walk parameters of a quadruped robot, and its contribution for the robot control and localization. The locomotion of a legged robot presents not only the problem of maximizing the speed, but also the problem of obtaining a precise speed response, and achieving an acceptable odometry information. In this study we use a simulated annealing algorithm for calibrating different parametric sets for different speed ranges, with the goal of avoiding discontinuities. The results are applied to the robot AIBO in the RoboCup domain. Moreover, we outline the relevance of calibration to the control, showing the improvement obtained in odometry and, as a consequence, in robot localization.}, author = {Bonev, Boyan and Cazorla, Miguel and Martin, Francisco and Matellan, Vicente}, doi = {10.1007/s10489-010-0249-9}, issn = {0924-669X}, journal = {Applied Intelligence}, keywords = {Legged locomotion; Walk parameters estimation; Aut}, number = {1}, pages = {136--147}, publisher = {springer Us}, title = {{Portable autonomous walk calibration for 4-legged robots}}, url = {http://dx.doi.org/10.1007/s10489-010-0249-9}, volume = {36}, year = {2012} }
- V. Morell, M. Cazorla, D. Viejo, S. Orts-Escolano, and J. Garcia-Rodriguez, "A study of registration techniques for 6DoF SLAM," in Ccia, 2012, p. 143–150.
[Bibtex]@InProceedings{Morell2012A, author = {Morell, Vicente and Cazorla, Miguel and Viejo, Diego and Orts-Escolano, Sergio and Garcia-Rodriguez, Jose}, title = {{A study of registration techniques for 6DoF SLAM}}, booktitle = {CCIA}, year = {2012}, editor = {Riano, David and Onaindia, Eva and Cazorla, Miguel}, volume = {248}, series = {Frontiers in Artificial Intelligence and Applications}, pages = {143--150}, publisher = {IOS Press}, isbn = {978-1-61499-138-0} }
- J. Munoz, D. Pastor, P. Gil Vazquez, S. Puente Mendez, and M. Cazorla, "A study of 2D features for 3D visual SLAM," in 43th international symposium on robotics, 2012.
[Bibtex]@inproceedings{munoz2012study, author = {Jose Munoz and Daniel Pastor and Pablo {Gil Vazquez} and Santiago {Puente Mendez} and Miguel Cazorla}, booktitle = {43th International Symposium on Robotics}, title = {{A study of 2D features for 3D visual SLAM}}, year = {2012} }
- J. Munoz, D. Pastor, P. Gil, S. T. Puente Mendez, and M. Cazorla, "Using a RGB-D camera for 6DoF SLAM.," in Ccia, 2012, p. 143–150.
[Bibtex]@inproceedings{MunozPGMC12, author = {Munoz, Jose and Pastor, Daniel and Gil, Pablo and {Puente Mendez}, Santiago T. and Cazorla, Miguel}, booktitle = {CCIA}, editor = {Riano, David and Onaindia, Eva and Cazorla, Miguel}, isbn = {978-1-61499-138-0}, keywords = {dblp}, pages = {143--150}, publisher = {IOS Press}, series = {Frontiers in Artificial Intelligence and Applications}, title = {{Using a RGB-D camera for 6DoF SLAM.}}, url = {http://dblp.uni-trier.de/db/conf/ccia/ccia2012.html{\#}MunozPGMC12}, volume = {248}, year = {2012} }
- J. Navarrete-Sanchez, D. Viejo, and M. Cazorla, "Portable 3D laser-camera calibration system with color fusion for SLAM," in International symposium on robotics, isr, 2012.
[Bibtex]@inproceedings{Navarrete2012Portable, author = {Navarrete-Sanchez, Javier and Viejo, Diego and Cazorla, Miguel}, booktitle = {International Symposium on Robotics, ISR}, title = {{Portable 3D laser-camera calibration system with color fusion for SLAM}}, year = {2012} }
- S. Orts-Escolano, J. Garcia-Rodriguez, D. Viejo, M. Cazorla, and V. Morell, "GPGPU implementation of growing neural gas: Application to 3D scene reconstruction," Journal of parallel and distributed computing, vol. 72, iss. 10, p. 1361–1372, 2012.
[Bibtex]@Article{Orts20121361, author = {Orts-Escolano, sergio and Garcia-Rodriguez, Jose and Viejo, Diego and Cazorla, Miguel and Morell, Vicente}, title = {{GPGPU implementation of growing neural gas: Application to 3D scene reconstruction}}, journal = {Journal of Parallel and Distributed Computing}, year = {2012}, volume = {72}, number = {10}, pages = {1361--1372}, abstract = {self-organising neural models have the ability to provide a good representation of the input space. In particular the Growing Neural Gas (GNG) is a suitable model because of its flexibility, rapid adaptation and excellent quality of representation. However, this type of learning is time-consuming, especially for high-dimensional input data. since real applications often work under time constraints, it is necessary to adapt the learning process in order to complete it in a predefined time. This paper proposes a Graphics Processing Unit (GPU) parallel implementation of the {\{}GNG{\}} with Compute Unified Device Architecture (CUDA). In contrast to existing algorithms, the proposed {\{}GPU{\}} implementation allows the acceleration of the learning process keeping a good quality of representation. Comparative experiments using iterative, parallel and hybrid implementations are carried out to demonstrate the effectiveness of {\{}CUDA{\}} implementation. The results show that {\{}GNG{\}} learning with the proposed implementation achieves a speed-up of 6 ?{\{}o{\}} compared with the single-threaded {\{}CPU{\}} implementation. {\{}GPU{\}} implementation has also been applied to a real application with time constraints: acceleration of 3D scene reconstruction for egomotion, in order to validate the proposal. }, doi = {http://dx.doi.org/10.1016/j.jpdc.2012.05.008}, issn = {0743-7315}, keywords = {Egomotion}, url = {http://www.sciencedirect.com/science/article/pii/s0743731512001268} }
- S. Orts-Escolano, J. Garcia-Rodriguez, D. Viejo, M. Cazorla, V. Morell, and J. Serra, "6DoF pose estimation using Growing Neural Gas Network," in Proceedings of 5th international conference on cognitive systems, 2012.
[Bibtex]@inproceedings{Orts2012b, author = {Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Viejo, Diego and Cazorla, Miguel and Morell, Vicente and Serra, Jose}, booktitle = {Proceedings of 5th International Conference on Cognitive Systems}, title = {{6DoF pose estimation using Growing Neural Gas Network}}, year = {2012} }
- A. Romero and M. Cazorla, "Finding nodes into a topological map using visual features," in International symposium on robotics, isr, 2012.
[Bibtex]@inproceedings{Romero2012Finding, author = {Romero, Anna and Cazorla, Miguel}, booktitle = {International Symposium on Robotics, ISR}, title = {{Finding nodes into a topological map using visual features}}, year = {2012} }
- A. Romero and M. Cazorla, "Learning Multi-class Topological Mapping using Visual Information," in Ccia, 2012, p. 143–150.
[Bibtex]@inproceedings{Romero2012Learning, author = {Romero, Anna and Cazorla, Miguel}, booktitle = {CCIA}, editor = {Riano, David and Onaindia, Eva and Cazorla, Miguel}, isbn = {978-1-61499-138-0}, pages = {143--150}, publisher = {IOS Press}, series = {Frontiers in Artificial Intelligence and Applications}, title = {{Learning Multi-class Topological Mapping using Visual Information}}, volume = {248}, year = {2012} }
- A. Romero and M. Cazorla, "Topological visual mapping in robotics," in Proceedings of the 5th international conference on spatial cognition, 2012.
[Bibtex]@inproceedings{Romero2012Topological, author = {Romero, Anna and Cazorla, Miguel}, booktitle = {Proceedings of the 5th International Conference on Spatial Cognition}, title = {{Topological visual mapping in robotics}}, year = {2012} }
- A. Romero and M. Cazorla, "Topological visual mapping in robotics," Cognitive processing, vol. 3, iss. 305–308, 2012.
[Bibtex]@Article{Romero2012Cognitive, author = {Romero, Anna and Cazorla, Miguel}, title = {Topological visual mapping in robotics}, journal = {Cognitive Processing}, year = {2012}, volume = {3}, pages = {305--308} }
- J. salinas, M. de la Iglesia-Vaya, L. Bonmati, R. Valenzuela, and M. Cazorla, "R & D Cloud CEIB: Management system and Knowledge Extraction for Bioimaging in the Cloud," in Distributed computing and artificial intelligence, sigeru Omatu, J. F. {De Paz santana}, R. sara Gonzalez, J. M. Molina, A. M. Bernardos, and J. C. M. Rodriguez, Eds., Springer berlin heidelberg, 2012, vol. 151, p. 331–338.
[Bibtex]@incollection{salinas:2012aa, abstract = {The management system and knowledge extraction of bioimaging in the cloud (R {\&} D Cloud CEIB) which is proposed in this article will use the services offered by the centralization of bioimaging through Valencian Biobank Medical Imaging (GIMC in spanish) as a basis for managing and extracting knowledge from a bioimaging bank, providing that knowledge as services with high added value and expertise to the Electronic Patient History system (HsE), thus bringing the results of R {\&} D to the patient, improving the quality of the information contained therein. R {\&} D Cloud CEIB has four general modules: search engine (sE), manager of clinical trials (GEBID), anonymizer (ANON) and motor knowledge (BIKE). The BIKE is the central module and through its sub modules analyses and generates knowledge to provide to the HsE through services. The technology used in R {\&} D Cloud CEIB is completely based on Open source. Within the BIKE, we focus on the development of the classifier module (BIKEClassifier), which aims to establish a method for the extraction of biomarkers for bioimaging and subsequent analysis to obtain a classification in bioimaging available pools following GIMC diagnostic experience.}, author = {salinas, JoseMaria and de la Iglesia-Vaya, Maria and Bonmati, LuisMarti and Valenzuela, Rosa and Cazorla, Miguel}, booktitle = {Distributed Computing and Artificial Intelligence}, doi = {10.1007/978-3-642-28765-7_39}, editor = {Omatu, sigeru and {De Paz santana}, Juan F and Gonzalez, sara Rodriguez and Molina, Jose M and Bernardos, Ana M and Rodriguez, Juan M Corchado}, isbn = {978-3-642-28764-0}, pages = {331--338}, publisher = {springer Berlin Heidelberg}, series = {Advances in Intelligent and soft Computing}, title = {{R {\&} D Cloud CEIB: Management system and Knowledge Extraction for Bioimaging in the Cloud}}, url = {http://dx.doi.org/10.1007/978-3-642-28765-7{\_}39}, volume = {151}, year = {2012} }
- J. M. Salinas, M. D. {la Iglesia Vaya}, and M. Cazorla, "R & D Cloud CEIB," in Proceedings of the ieee international conference on biomedical engineering and biotechnology, 2012.
[Bibtex]@inproceedings{salinas2012c, author = {Salinas, Jose Maria and {de la Iglesia Vaya}, Maria and Cazorla, Miguel}, booktitle = {Proceedings of the IEEE International Conference on Biomedical Engineering and Biotechnology}, title = {{R {\&} D Cloud CEIB}}, year = {2012} }
- D. Viejo, J. Garcia, M. Cazorla, D. Gil, and M. Johnsson, "Using GNG to improve 3D feature extraction. Application to 6DoF egomotion," Neural networks, vol. 32, p. 138–146, 2012.
[Bibtex]@article{Viejo2012138, abstract = {several recent works deal with 3D data in mobile robotic problems, e.g. mapping or egomotion. Data comes from any kind of sensor such as stereo vision systems, time of flight cameras or 3D lasers, providing a huge amount of unorganized 3D data. In this paper, we describe an efficient method to build complete 3D models from a Growing Neural Gas (GNG). The {\{}GNG{\}} is applied to the 3D raw data and it reduces both the subjacent error and the number of points, keeping the topology of the 3D data. The {\{}GNG{\}} output is then used in a 3D feature extraction method. We have performed a deep study in which we quantitatively show that the use of {\{}GNG{\}} improves the 3D feature extraction method. We also show that our method can be applied to any kind of 3D data. The 3D features obtained are used as input in an Iterative Closest Point (ICP)-like method to compute the 6DoF movement performed by a mobile robot. A comparison with standard {\{}ICP{\}} is performed, showing that the use of {\{}GNG{\}} improves the results. Final results of 3D mapping from the egomotion calculated are also shown. }, annote = {selected Papers from {\{}IJCNN{\}} 2011}, author = {Viejo, Diego and Garcia, Jose and Cazorla, Miguel and Gil, David and Johnsson, Magnus}, doi = {http://dx.doi.org/10.1016/j.neunet.2012.02.014}, issn = {0893-6080}, journal = {Neural Networks}, keywords = {6DoF registration}, number = {0}, pages = {138--146}, title = {{Using GNG to improve 3D feature extraction. Application to 6DoF egomotion}}, url = {http://www.sciencedirect.com/science/article/pii/s0893608012000433}, volume = {32}, year = {2012} }
- D. Viejo, J. Garcia-Rodriguez, and M. Cazorla, "A study of a soft computing based method for 3D scenario reconstruction," Applied soft computing, vol. 12, iss. 10, p. 3158–3164, 2012.
[Bibtex]@article{Viejo20123158, abstract = {several recent works deal with 3D data in mobile robotic problems, e.g., mapping. Data comes from any kind of sensor (time of flight, Kinect or 3D lasers) that provide a huge amount of unorganized 3D data. In this paper we detail an efficient approach to build complete 3D models using a soft computing method, the Growing Neural Gas (GNG). As neural models deal easily with noise, imprecision, uncertainty or partial data, {\{}GNG{\}} provides better results than other approaches. The {\{}GNG{\}} obtained is then applied to a sequence. We present a comprehensive study on {\{}GNG{\}} parameters to ensure the best result at the lowest time cost. From this {\{}GNG{\}} structure, we propose to calculate planar patches and thus obtaining a fast method to compute the movement performed by a mobile robot by means of a 3D models registration algorithm. Final results of 3D mapping are also shown. }, author = {Viejo, Diego and Garcia-Rodriguez, Jose and Cazorla, Miguel}, doi = {http://dx.doi.org/10.1016/j.asoc.2012.05.025}, issn = {1568-4946}, journal = {Applied soft Computing}, keywords = {3D feature extraction}, number = {10}, pages = {3158--3164}, title = {{A study of a soft computing based method for 3D scenario reconstruction}}, url = {http://www.sciencedirect.com/science/article/pii/s1568494612002803}, volume = {12}, year = {2012} }
- D. Viejo and M. Cazorla, "A framework for managing heterogenous sensor data in a single map," in Ieee intelligent vehicles symposium, 2012.
[Bibtex]@inproceedings{Viejo20123A, author = {Viejo, Diego and Cazorla, Miguel}, booktitle = {IEEE Intelligent Vehicles Symposium}, title = {{A framework for managing heterogenous sensor data in a single map}}, year = {2012} }
- E. Martinez-Martin and A. P. del Pobil, "Visual surveillance for human-robot interaction," in 2012 IEEE international conference on systems, man, and cybernetics (SMC), 2012.
[Bibtex]@inproceedings{Martinez_Martin_2012,doi = {10.1109/icsmc.2012.6378306},url = {http://dx.doi.org/10.1109/icsmc.2012.6378306},year = 2012,month = oct,publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},author = {Ester Martinez-Martin and Angel P. del Pobil},title = {Visual surveillance for human-robot interaction},booktitle = {2012 {IEEE} International Conference on Systems, Man, and Cybernetics ({SMC})}}
- E. Martinez-Martin and Á. P. del Pobil, "Applications," in Robust motion detection in real-life scenarios, Springer Science + Business Media, 2012, p. 85–98.
[Bibtex]@incollection{Martinez-Martin_2012,doi = {10.1007/978-1-4471-4216-4_4},url = {http://dx.doi.org/10.1007/978-1-4471-4216-4_4},year = 2012,publisher = {Springer Science + Business Media},pages = {85--98},author = {Ester Martinez-Martin and {\'{A}}ngel P. del Pobil},title = {Applications},booktitle = {Robust Motion Detection in Real-Life Scenarios}}
- E. Martinez-Martin and Á. P. del Pobil, "Computer vision concepts," in Robust motion detection in real-life scenarios, Springer Science + Business Media, 2012, p. 99–108.
[Bibtex]@InCollection{Martinez-Martin2012d, author = {Ester Martinez-Martin and {\'{A}}ngel P. del Pobil}, title = {Computer Vision Concepts}, booktitle = {Robust Motion Detection in Real-Life Scenarios}, publisher = {Springer Science + Business Media}, year = {2012}, pages = {99--108}, doi = {10.1007/978-1-4471-4216-4_5}, url = {http://dx.doi.org/10.1007/978-1-4471-4216-4_5}, }
- E. Martinez-Martin and Á. P. del Pobil, "Introduction," in Robust motion detection in real-life scenarios, Springer Science + Business Media, 2012, p. 1–3.
[Bibtex]@InCollection{Martinez-Martin2012e, author = {Ester Martinez-Martin and {\'{A}}ngel P. del Pobil}, title = {Introduction}, booktitle = {Robust Motion Detection in Real-Life Scenarios}, publisher = {Springer Science + Business Media}, year = {2012}, pages = {1--3}, doi = {10.1007/978-1-4471-4216-4_1}, url = {http://dx.doi.org/10.1007/978-1-4471-4216-4_1}, }
- E. Martinez-Martin and Á. P. del Pobil, "Motion detection in general backgrounds," in Robust motion detection in real-life scenarios, Springer Science + Business Media, 2012, p. 43–84.
[Bibtex]@InCollection{Martinez-Martin2012f, author = {Ester Martinez-Martin and {\'{A}}ngel P. del Pobil}, title = {Motion Detection in General Backgrounds}, booktitle = {Robust Motion Detection in Real-Life Scenarios}, publisher = {Springer Science + Business Media}, year = {2012}, pages = {43--84}, doi = {10.1007/978-1-4471-4216-4_3}, url = {http://dx.doi.org/10.1007/978-1-4471-4216-4_3}, }
- E. Martinez-Martin and Á. P. del Pobil, "Motion detection in static backgrounds," in Robust motion detection in real-life scenarios, Springer Science + Business Media, 2012, p. 5–42.
[Bibtex]@InCollection{Martinez-Martin2012g, author = {Ester Martinez-Martin and {\'{A}}ngel P. del Pobil}, title = {Motion Detection in Static Backgrounds}, booktitle = {Robust Motion Detection in Real-Life Scenarios}, publisher = {Springer Science + Business Media}, year = {2012}, pages = {5--42}, doi = {10.1007/978-1-4471-4216-4_2}, url = {http://dx.doi.org/10.1007/978-1-4471-4216-4_2}, }
2011
- M. Angel Cazorla, V. {Matellan Olivera}, and Others, "Special issue about advances in Physical Agents," , 2011.
[Bibtex]@article{cazorla2011special, author = {Cazorla, Miguel Angel and {Matellan Olivera}, Vicente and others}, publisher = {Red de Agentes Fisicos}, title = {{Special issue about advances in Physical Agents}}, year = {2011} }
- M. Cazorla and A. Romero, "VIDEO LECTURES++: Combining information and interaction in an open source framework," in Inted2011 proceedings, 2011, p. 4034–4040.
[Bibtex]@inproceedings{cazorla2011video, author = {Cazorla, M and Romero, A}, booktitle = {INTED2011 Proceedings}, pages = {4034--4040}, publisher = {IATED}, title = {{VIDEO LECTURES++: Combining information and interaction in an open source framework}}, year = {2011} }
- D. Gil, J. Garcia, M. Cazorla, and M. Johnsson, "Predictions tasks with words and sequences: Comparing a novel recurrent architecture with the Elman network," in Neural networks (ijcnn), the 2011 international joint conference on, 2011, p. 1207–1213.
[Bibtex]@inproceedings{gil2011predictions, author = {Gil, David and Garcia, J and Cazorla, Miguel and Johnsson, Magnus}, booktitle = {Neural Networks (IJCNN), The 2011 International Joint Conference on}, organization = {IEEE}, pages = {1207--1213}, title = {{Predictions tasks with words and sequences: Comparing a novel recurrent architecture with the Elman network}}, year = {2011} }
- D. Viejo, J. Garcia, and M. Cazorla, "6DoF egomotion computing using 3D GNG-based reconstruction," in Advances in computational intelligence, Springer berlin heidelberg, 2011, p. 50–57.
[Bibtex]@incollection{viejo20116dof, author = {Viejo, Diego and Garcia, Jose and Cazorla, Miguel}, booktitle = {Advances in Computational Intelligence}, pages = {50--57}, publisher = {Springer Berlin Heidelberg}, title = {{6DoF egomotion computing using 3D GNG-based reconstruction}}, year = {2011} }
- D. Viejo, J. Garcia, M. Cazorla, D. Gil, and M. Johnsson, "Using 3d gng-based reconstruction for 6dof egomotion," in Neural networks (ijcnn), the 2011 international joint conference on, 2011, p. 1042–1048.
[Bibtex]@inproceedings{viejo2011using, author = {Viejo, Diego and Garcia, Jose and Cazorla, Miguel and Gil, David and Johnsson, Magnus}, booktitle = {Neural Networks (IJCNN), The 2011 International Joint Conference on}, organization = {IEEE}, pages = {1042--1048}, title = {{Using 3d gng-based reconstruction for 6dof egomotion}}, year = {2011} }
- D. Viejo, J. Garcia, and M. Cazorla, "Visual features extraction based egomotion calculation from a infrared time-of-flight camera," in Advances in computational intelligence, Springer berlin heidelberg, 2011, p. 9–16.
[Bibtex]@incollection{viejo2011visual, author = {Viejo, Diego and Garcia, Jose and Cazorla, Miguel}, booktitle = {Advances in Computational Intelligence}, pages = {9--16}, publisher = {Springer Berlin Heidelberg}, title = {{Visual features extraction based egomotion calculation from a infrared time-of-flight camera}}, year = {2011} }
- E. Türetken, G. Gonzalez-Serrano, C. Blum, and P. Fua, "Automated reconstruction of dendritic and axonal trees by global optimization with geometric priors," Neuroinformatics, vol. 9, pp. 279-302, 2011.
[Bibtex]@article{Turetken2011, abstract = {We present a novel probabilistic approach to fully automated delineation of tree structures in noisy 2D images and 3D image stacks. Unlike earlier methods that rely mostly on local evidence, ours builds a set of candidate trees over many different subsets of points likely to belong to the optimal tree and then chooses the best one according to a global objective function that combines image evidence with geometric priors. Since the best tree does not necessarily span all the points, the algorithm is able to eliminate false detections while retaining the correct tree topology. Manually annotated brightfield micrographs, retinal scans and the DIADEM challenge datasets are used to evaluate the performance of our method. We used the DIADEM metric to quantitatively evaluate the topological accuracy of the reconstructions and showed that the use of the geometric regularization yields a substantial improvement.}, author = {Engin Türetken and Germ\'{a}n Gonzalez-Serrano and Christian Blum and Pascal Fua}, doi = {10.1007/s12021-011-9122-1}, isbn = {1559-0089 (Electronic)\r1539-2791 (Linking)}, issn = {15392791}, issue = {2-3}, journal = {Neuroinformatics}, keywords = {Ant colony optimization,DIADEM,Global optimization,Minimum arborescence,Tree reconstruction,k-MST}, pages = {279-302}, pmid = {21573886}, title = {Automated reconstruction of dendritic and axonal trees by global optimization with geometric priors}, volume = {9}, year = {2011}, }
2010
- M. Cazorla, D. Viejo, A. Hernandez, J. Nieto, and E. Nebot, "Large scale Egomotion and Error Analysis with Visual Features," Journal of physical agents, vol. 4, p. 19–24, 2010.
[Bibtex]@article{Cazorla10, author = {Cazorla, Miguel and Viejo, Diego and Hernandez, Andres and Nieto, Juan and Nebot, Eduardo}, journal = {Journal of Physical Agents}, pages = {19--24}, title = {{Large scale Egomotion and Error Analysis with Visual Features}}, volume = {4}, year = {2010} }
- M. Cazorla, D. V. Hernando, A. H. Gutierrez, J. Nieto, and E. Nebot, "Large scale egomotion and error analysis with visual features," Journal of physical agents, vol. 4, iss. 1, p. 19–24, 2010.
[Bibtex]@article{CazorlaLargescale2010, author = {Miguel Cazorla and Diego Viejo Hernando and Andres Hernandez Gutierrez and Juan Nieto and Eduardo Nebot}, title = {Large scale egomotion and error analysis with visual features}, journal = {Journal of Physical Agents}, volume = {4}, number = {1}, year = {2010}, keywords = {Computer vision; Mobile robotics}, abstract = {Several works deal with 3D data in SLAM problem but many of them are focused on short scale maps. In this paper, we propose a method that can be used for computing the 6DoF trajectory performed by a robot from the stereo images captured during a large scale trajectory. The method transforms robust 2D features extracted from the reference stereo images to the 3D space. These 3D features are then used for obtaining the correct robot movement. Both SIFT and SURF methods for feature extraction have been used. Also, a comparison between our method and the results of the ICP algorithm have been performed. We have also made a study about errors in stereo cameras.}, pages = {19--24}, doi = {10.14198/JoPha.2010.4.1.04}, url = {http://www.jopha.net/article/view/2010-v4-n1-large-scale-egomotion-and-error-analysis-with-visual-features} }
- M. Cazorla and D. Viejo, "EXPERIENCES USING AN OPEN SOURCE SOFTWARE LIBRARY TO TEACH A COMPUTER VISION SUBJECT," in Inted2010 proceedings, 2010, p. 4514–4522.
[Bibtex]@inproceedings{cazorla2010experiences, author = {Cazorla, M and Viejo, D}, booktitle = {INTED2010 Proceedings}, pages = {4514--4522}, publisher = {IATED}, title = {{EXPERIENCES USING AN OPEN SOURCE SOFTWARE LIBRARY TO TEACH A COMPUTER VISION SUBJECT}}, year = {2010} }
- M. Cazorla and A. Romero, "A NEW FRAMEWORK IN VIDEO LECTURES: ADDING INTERACTION AND ADDITIONAL INFORMATION," in Iceri2010 proceedings, 2010, p. 4593–4598.
[Bibtex]@inproceedings{cazorla2010new, author = {Cazorla, M and Romero, A}, booktitle = {ICERI2010 Proceedings}, pages = {4593--4598}, publisher = {IATED}, title = {{A NEW FRAMEWORK IN VIDEO LECTURES: ADDING INTERACTION AND ADDITIONAL INFORMATION}}, year = {2010} }
- M. Cazorla, D. Viejo, and C. Pomares, "Study of the SR4000 camera," in Workshop of physical agents, 2010.
[Bibtex]@inproceedings{cazorla2010study, author = {Cazorla, Miguel and Viejo, Diego and Pomares, Cristina}, booktitle = {Workshop of Physical Agents}, organization = {Red de Agentes Fisicos}, title = {{Study of the SR4000 camera}}, year = {2010} }
- M. Cazorla and B. Bonev, "Large scale Environment Partitioning in Mobile Robotics Recognition Tasks," Journal of physical agents, vol. 4, iss. 2, 2010.
[Bibtex]@article{JoPhA71, abstract = {In this paper we present a scalable machine learning approach to mobile robots visual localization. The applicability of machine learning approaches is constrained by the complexity and size of the problem's domain. Thus, dividing the problem becomes necessary and two essential questions arise: which partition set is optimal for the problem and how to integrate the separate results into a single solution. The novelty of this work is the use of Information Theory for partitioning high-dimensional data. In the presented experiments the domain of the problem is a large sequence of omnidirectional images, each one of them providing a high number of features. A robot which follows the same trajectory has to answer which is the most similar image from the sequence. The sequence is divided so that each partition is suitable for building a simple classifier. The partitions are established on the basis of the information divergence peaks among the images. Measuring the divergence has usually been considered unfeasible in high-dimensional data spaces. We overcome this problem by estimating the Jensen-Renyi divergence with an entropy approximation based on entropic spanning graphs. Finally, the responses of the different classifiers provide a multimodal hypothesis for each incoming image. As the robot is moving, a particle filter is used for attaining the convergence to a unimodal hypothesis.}, author = {Cazorla, Miguel and Bonev, Boyan}, issn = {1888-0258}, journal = {Journal of Physical Agents}, keywords = {Jensen-Renyi divergence,Visual localization,classifier,entropy,particle filter}, number = {2}, title = {{Large scale Environment Partitioning in Mobile Robotics Recognition Tasks}}, url = {http://www.jopha.net/index.php/jopha/article/view/71}, volume = {4}, year = {2010} }
- A. Romero and M. Cazorla, "An Improvement of Topological Mapping Using a Graph-Matching Based Method with Omnidirectional Images.," in Ccia, 2010, p. 311–320.
[Bibtex]@inproceedings{romero2010improvement, author = {Romero, Anna and Cazorla, Miguel}, booktitle = {CCIA}, pages = {311--320}, title = {{An Improvement of Topological Mapping Using a Graph-Matching Based Method with Omnidirectional Images.}}, year = {2010} }
- A. Romero and M. Cazorla, "Testing image segmentation for topological SLAM with omnidirectional images," in Advances in artificial intelligence, Springer Berlin Heidelberg, 2010, p. 266–277.
[Bibtex]@incollection{romero2010testing, author = {Romero, Anna and Cazorla, Miguel}, booktitle = {Advances in Artificial Intelligence}, pages = {266--277}, publisher = {Springer Berlin Heidelberg}, title = {{Testing image segmentation for topological SLAM with omnidirectional images}}, year = {2010} }
- A. Romero and M. Cazorla, "Topological slam using omnidirectional images: Merging feature detectors and graph-matching," in Advanced concepts for intelligent vision systems, Springer berlin heidelberg, 2010, p. 464–475.
[Bibtex]@incollection{romero2010topological, author = {Romero, Anna and Cazorla, Miguel}, booktitle = {Advanced Concepts for Intelligent Vision Systems}, pages = {464--475}, publisher = {Springer Berlin Heidelberg}, title = {{Topological SLAM using omnidirectional images: Merging feature detectors and graph-matching}}, year = {2010} }
- A. M. {Romero Cortijo}, M. Angel Cazorla, and Others, "Topological SLAM using a graph-matching based method on omnidirectional images," in Advanced concepts for intelligent vision systems - 12th international conference, 2010.
[Bibtex]@inproceedings{romero2010topological1, author = {{Romero Cortijo}, Anna Maria and Cazorla, Miguel Angel and others}, booktitle = {Advanced Concepts for Intelligent Vision Systems - 12th International Conference}, doi = {10.1007/978-3-642-17688-3_43}, title = {{Topological SLAM using a graph-matching based method on omnidirectional images}}, year = {2010} }
- E. Martinez-Martin and A. P. del Pobil, "A panoramic vision system for human-robot interaction," in 2010 5th ACM/IEEE international conference on human-robot interaction (HRI), 2010.
[Bibtex]@inproceedings{Martinez_2010,doi = {10.1109/hri.2010.5453211},url = {http://dx.doi.org/10.1109/hri.2010.5453211},year = 2010,month = {mar},publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},author = {Ester Martinez-Martin and Angel P. del Pobil},title = {A panoramic vision system for human-robot interaction},booktitle = {2010 5th {ACM}/{IEEE} International Conference on Human-Robot Interaction ({HRI})}}
- E. Martinez-Martin and A. P. del Pobil, "A hybrid algorithm for motion segmentation," in Signal and image processing, 2010.
[Bibtex]@inproceedings{Martinez-Martin_2010,doi = {10.2316/p.2010.710-010},url = {http://dx.doi.org/10.2316/p.2010.710-010},year = 2010,publisher = {{ACTA} Press},author = {E. Martinez-Martin and A.P. del Pobil},title = {A Hybrid Algorithm for Motion Segmentation},booktitle = {Signal and Image Processing}}
- E. Martinez-Martin and A. P. del Pobil, "A panoramic vision system for human-robot interaction," in Proceeding of the 5th ACM/IEEE international conference on human-robot interaction - HRI \textquotesingle10, 2010.
[Bibtex]@inproceedings{Mart_nez_2010,doi = {10.1145/1734454.1734528},url = {http://dx.doi.org/10.1145/1734454.1734528},year = 2010,publisher = {Association for Computing Machinery ({ACM})},author = {Ester Martinez-Martin and Angel P. del Pobil},title = {A panoramic vision system for human-robot interaction},booktitle = {Proceeding of the 5th {ACM}/{IEEE} international conference on Human-robot interaction - {HRI} {\textquotesingle}10}}
- G. Gonzalez-Serrano, E. Türetken, F. Fleuret, and P. Fua, "Delineating trees in noisy 2d images and 3d image-stacks," in Proceedings of the ieee computer society conference on computer vision and pattern recognition, 2010.
[Bibtex]@inproceedings{Gonzalez2010, abstract = {We present a novel approach to fully automated delineation of tree structures in noisy 2D images and 3D image stacks. Unlike earlier methods that rely mostly on local evidence, our method builds a set of candidate trees over many different subsets of points likely to belong to the final one and then chooses the best one according to a global objective function. Since we are not systematically trying to span all nodes, our algorithm is able to eliminate noise while retaining the right tree structure. Manually annotated dendrite micrographs and retinal scans are used to evaluate the performance of our method, which is shown to be able to reject noise while retaining the tree structure. ©2010 IEEE.}, author = {G. Gonzalez-Serrano and E. Türetken and F. Fleuret and P. Fua}, doi = {10.1109/CVPR.2010.5540010}, isbn = {9781424469840}, issn = {10636919}, booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition}, title = {Delineating trees in noisy 2D images and 3D image-stacks}, year = {2010}, }
- E. Türetken, C. Blum, G. Gonzalez-Serrano, and P. Fua, "Reconstructing geometrically consistent tree structures from noisy images," in Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics), 2010, pp. 291-299.
[Bibtex]@inproceedings{Turetken2010, abstract = {We present a novel approach to fully automated reconstruction of tree structures in noisy 2D images. Unlike in earlier approaches, we explicitly handle crossovers and bifurcation points, and impose geometric constraints while optimizing a global cost function. We use manually annotated retinal scans to evaluate our method and demonstrate that it brings about a very substantial improvement.}, author = {Engin Türetken and Christian Blum and Germ\'{a}n Gonzalez-Serrano and Pascal Fua}, doi = {10.1007/978-3-642-15705-9_36}, isbn = {3642157041}, issn = {03029743}, issue = {PART 1}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, pages = {291-299}, pmid = {20879243}, title = {Reconstructing geometrically consistent tree structures from noisy images}, volume = {6361 LNCS}, year = {2010}, }
2009
- M. A. Lozano, F. Escolano, B. Bonev, P. Suau, W. Aguilar, J. M. Saez, and M. A. Cazorla, "Region and Constellations Based Categorization of Images with Unsupervised Graph Learning," Image vision comput., vol. 27, iss. 7, p. 960–978, 2009.
[Bibtex]@article{Lozano:2009:RCB:1534927.1534960, address = {Newton, MA, USA}, author = {Lozano, M A and Escolano, F and Bonev, B and Suau, P and Aguilar, W and Saez, J M and Cazorla, M A}, doi = {10.1016/j.imavis.2008.09.011}, issn = {0262-8856}, journal = {Image Vision Comput.}, keywords = {Clustering of graphs,EM algorithms,Image categorization}, number = {7}, pages = {960--978}, publisher = {Butterworth-Heinemann}, title = {{Region and Constellations Based Categorization of Images with Unsupervised Graph Learning}}, url = {http://dx.doi.org/10.1016/j.imavis.2008.09.011}, volume = {27}, year = {2009} }
- J. M. C. Plaza, M. Cazorla, and V. Matellan, "Uso de simuladores en Docencia de Robotica Movil.," Ieee-rita, vol. 4, iss. 4, p. 269–278, 2009.
[Bibtex]@article{Plaza2009Uso, author = {Plaza, Jose Maria Canyas and Cazorla, Miguel and Matellan, Vicente}, journal = {IEEE-RITA}, number = {4}, pages = {269--278}, title = {{Uso de simuladores en Docencia de Robotica Movil.}}, volume = {4}, year = {2009} }
- A. M. Romero Cortijo, M. Angel Cazorla, and Others, "Comparativa de detectores de caracteristicas visuales y su aplicacion al SLAM." 2009.
[Bibtex]@inproceedings{romero2009comparativa, author = {Romero Cortijo, Anna Maria and Cazorla, Miguel Angel and others}, title = {{Comparativa de detectores de caracteristicas visuales y su aplicacion al SLAM}}, year = {2009} }
- E. Martinez-Martin and A. P. del Pobil, "Safety for human-robot interaction in dynamic environments," in 2009 IEEE international symposium on assembly and manufacturing, 2009.
[Bibtex]@inproceedings{Martinez_2009,doi = {10.1109/isam.2009.5376949},url = {http://dx.doi.org/10.1109/isam.2009.5376949},year = 2009,month = {nov},publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},author = {Ester Martinez-Martin and Angel P. del Pobil},title = {Safety for human-robot interaction in dynamic environments},booktitle = {2009 {IEEE} International Symposium on Assembly and Manufacturing}}
- G. Gonzalez-Serrano, F. Fleuret, and P. Fua, "Learning rotational features for filament detection," in 2009 ieee computer society conference on computer vision and pattern recognition workshops, cvpr workshops 2009, 2009, pp. 1582-1589.
[Bibtex]@inproceedings{Gonzalez2009, abstract = {State-of-the-art approaches for detecting filament-like structures in noisy images rely on filters optimized for signals of a particular shape, such as an ideal edge or ridge. While these approaches are optimal when the image conforms to these ideal shapes, their performance quickly degrades on many types of real data where the image deviates from the ideal model, and when noise processes violate a Gaussian assumption. In this paper, we show that by learning rotational features, we can outperform state-of-the-art filament detection techniques on many different kinds of imagery. More specifically, we demonstrate superior performance for the detection of blood vessel in retinal scans, neurons in brightfield microscopy imagery, and streets in satellite imagery.}, author = {German Gonzalez-Serrano and Francois Fleuret and Pascal Fua}, doi = {10.1109/CVPRW.2009.5206511}, isbn = {9781424439935}, issn = {9781424439935}, booktitle = {2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops, CVPR Workshops 2009}, pages = {1582-1589}, title = {Learning rotational features for filament detection}, year = {2009}, }
2008
- B. Bonev, F. Escolano, and M. Cazorla, "Feature selection, mutual information, and the classification of high-dimensional patterns," Pattern analysis and applications, vol. 11, iss. 3-4, p. 309–319, 2008.
[Bibtex]@article{Bonev:2008aa, abstract = {We propose a novel feature selection filter for supervised learning, which relies on the efficient estimation of the mutual information between a high-dimensional set of features and the classes. We bypass the estimation of the probability density function with the aid of the entropic-graphs approximation of R{\'e}nyi entropy, and the subsequent approximation of the Shannon entropy. Thus, the complexity does not depend on the number of dimensions but on the number of patterns/samples, and the curse of dimensionality is circumvented. We show that it is then possible to outperform algorithms which individually rank features, as well as a greedy algorithm based on the maximal relevance and minimal redundancy criterion. We successfully test our method both in the contexts of image classification and microarray data classification. For most of the tested data sets, we obtain better classification results than those reported in the literature.}, author = {Bonev, Boyan and Escolano, Francisco and Cazorla, Miguel}, doi = {10.1007/s10044-008-0107-0}, issn = {1433-7541}, journal = {Pattern Analysis and Applications}, keywords = {Filter feature selection; Mutual information; Entropy}, number = {3-4}, pages = {309--319}, publisher = {Springer-Verlag}, title = {{Feature selection, mutual information, and the classification of high-dimensional patterns}}, url = {http://dx.doi.org/10.1007/s10044-008-0107-0}, volume = {11}, year = {2008} }
- D. Viejo and M. Cazorla, "3D Model Based Map Building," in International symposium on robotics, isr 2008, 2008.
[Bibtex]@inproceedings{Viejo2008, author = {Viejo, Diego and Cazorla, Miguel}, booktitle = {International Symposium on Robotics, ISR 2008}, title = {{3D Model Based Map Building}}, year = {2008} }
- D. {Viejo Hernando}, M. Angel Cazorla, and Others, "3D Feature Extraction and Modelling for SLAM." 2008.
[Bibtex]@inproceedings{viejo20083d1, author = {{Viejo Hernando}, Diego and Cazorla, Miguel Angel and others}, title = {{3D Feature Extraction and Modelling for SLAM}}, year = {2008} }
- M. Prats, E. Martinez-Martin, P. J. Sanz, and A. P. del Pobil, "The UJI librarian robot," Intelligent service robotics, vol. 1, iss. 4, p. 321–335, 2008.
[Bibtex]@article{Prats_2008,doi = {10.1007/s11370-008-0028-1},url = {http://dx.doi.org/10.1007/s11370-008-0028-1},year = 2008,month = {jul},publisher = {Springer Science + Business Media},volume = {1},number = {4},pages = {321--335},author = {Mario Prats and Ester Martinez-Martin and Pedro J. Sanz and Angel P. del Pobil},title = {The {UJI} librarian robot},journal = {Intelligent Service Robotics}}
- E. Cervera, N. Garcia-Aracil, E. Martinez-Martin, L. Nomdedeu, and A. P. del Pobil, "Safety for a robot arm moving amidst humans by using panoramic vision," in 2008 IEEE international conference on robotics and automation, 2008.
[Bibtex]@inproceedings{Cervera_2008,doi = {10.1109/robot.2008.4543530},url = {http://dx.doi.org/10.1109/robot.2008.4543530},year = 2008,month = {may},publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},author = {Enric Cervera and Nicolas Garcia-Aracil and Ester Martinez-Martin and Leo Nomdedeu and Angel P. del Pobil},title = {Safety for a robot arm moving amidst humans by using panoramic vision},booktitle = {2008 {IEEE} International Conference on Robotics and Automation}}
- M. Prats, P. J. Sanz, E. Martinez-Martin, R. Marín, and A. P. del Pobil, "Manipulación autónoma multipropósito en el robot de servicios jaume-2," Revista iberoamericana de automática e informática industrial RIAI, vol. 5, iss. 2, p. 25–37, 2008.
[Bibtex]@Article{Prats2008, author = {Mario Prats and Pedro J. Sanz and Ester Martinez-Martin and Ra{\'{u}}l Mar{\'{\i}}n and Angel P. del Pobil}, title = {Manipulaci{\'{o}}n aut{\'{o}}noma multiprop{\'{o}}sito en el robot de servicios jaume-2}, journal = {Revista Iberoamericana de Autom{\'{a}}tica e Inform{\'{a}}tica Industrial {RIAI}}, year = {2008}, volume = {5}, number = {2}, pages = {25--37}, month = {apr}, doi = {10.1016/S1697-7912(08)70142-0}, publisher = {Elsevier {BV}}, url = {http://dx.doi.org/10.1016/S1697-7912(08)70142-0}, }
- G. Gonzalez-Serrano, F. Fleuret, and P. Fua, "Automated delineation of dendritic networks in noisy image stacks," in Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics), 2008, pp. 214-227.
[Bibtex]@inproceedings{Gonzalez2008, abstract = {We present a novel approach to 3D delineation of dendritic networks in noisy image stacks. We achieve a level of automation beyond that of state- of-the-art systems, which model dendrites as continuous tubular structures and postulate simple appearance models. Instead, we learn models from the data it- self, which make them better suited to handle noise and deviations fromexpected appearance. From very little expert-labeled ground truth, we train both a classifier to recog- nize individual dendrite voxels and a density model to classify segments connect- ing pairs of points as dendrite-like or not. Given these models, we can then trace the dendritic trees of neurons automatically by enforcing the tree structure of the resulting graph. We will show that our approach performs better than traditional techniques on brighfield image stacks.}, author = {Germ\'{a}n Gonzalez-Serrano and François Fleuret and Pascal Fua}, doi = {10.1007/978-3-540-88693-8-16}, isbn = {3540886923}, issn = {03029743}, issue = {PART 4}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, pages = {214-227}, title = {Automated delineation of dendritic networks in noisy image stacks}, volume = {5305 LNCS}, year = {2008}, }
2007
- B. Bonev, F. Escolano, M. A. Lozano, P. Suau, M. Cazorla, and W. Aguilar, "Constellations and the unsupervised learning of graphs," in Graph-based representations in pattern recognition, Springer Berlin Heidelberg, 2007, p. 340–350.
[Bibtex]@incollection{bonev2007constellations, author = {Bonev, Boyan and Escolano, Francisco and Lozano, Miguel A and Suau, Pablo and Cazorla, Miguel and Aguilar, Wendy}, booktitle = {Graph-Based Representations in Pattern Recognition}, pages = {340--350}, publisher = {Springer Berlin Heidelberg}, title = {{Constellations and the unsupervised learning of graphs}}, year = {2007} }
- B. Bonev, F. Escolano, and M. Cazorla, "A novel information theory method for filter feature selection," in Micai 2007: advances in artificial intelligence, Springer berlin heidelberg, 2007, p. 431–440.
[Bibtex]@incollection{bonev2007novel, author = {Bonev, Boyan and Escolano, Francisco and Cazorla, Miguel}, booktitle = {MICAI 2007: Advances in Artificial Intelligence}, pages = {431--440}, publisher = {Springer Berlin Heidelberg}, title = {{A novel information theory method for filter feature selection}}, year = {2007} }
- B. Bonev, M. Cazorla, and F. Escolano Ruiz, "Robot navigation behaviors based on omnidirectional vision and information theory," Journal of physical agents, vol. 1, iss. 1, 2007.
[Bibtex]@article{bonev2007robot, author = {Bonev, Boyan and Cazorla, Miguel and Escolano Ruiz, Francisco }, journal = {Journal of Physical Agents}, publisher = {Red de Agentes Fisicos}, title = {{Robot navigation behaviors based on omnidirectional vision and information theory}}, volume=1, number=1, year = {2007} }
- F. Escolano, B. Bonev, P. Suau, W. Aguilar, Y. Frauel, J. M. Saez, and M. Cazorla, "Contextual visual localization: cascaded submap classification, optimized saliency detection, and fast view matching," in Intelligent robots and systems, 2007. iros 2007. ieee/rsj international conference on, 2007, p. 1715–1722.
[Bibtex]@inproceedings{escolano2007contextual, author = {Escolano, Francisco and Bonev, Boyan and Suau, Pablo and Aguilar, Wendy and Frauel, Yann and Saez, Juan Manuel and Cazorla, Miguel}, booktitle = {Intelligent Robots and Systems, 2007. IROS 2007. IEEE/RSJ International Conference on}, organization = {IEEE}, pages = {1715--1722}, title = {{Contextual visual localization: cascaded submap classification, optimized saliency detection, and fast view matching}}, year = {2007} }
- D. Viejo and M. Cazorla, "3D plane-based egomotion for SLAM on semi-structured environment," in Intelligent robots and systems, 2007. iros 2007. ieee/rsj international conference on, 2007, p. 2761–2766.
[Bibtex]@inproceedings{Viejo2007, author = {Viejo, Diego and Cazorla, Miguel}, booktitle = {Intelligent Robots and Systems, 2007. IROS 2007. IEEE/RSJ International Conference on}, doi = {10.1109/IROS.2007.4399138}, pages = {2761--2766}, title = {{3D plane-based egomotion for SLAM on semi-structured environment}}, year = {2007} }
- D. Viejo and M. Cazorla, "Pose Registration Model Improvement: Crease Detection." 2007.
[Bibtex]@inproceedings{viejo2007pose, abstract = {Several works deal with 3D data in SLAM problem. Data come from a 3D laser sweeping unit or a stereo camera, both providing a huge amount of data. In this paper, we detail an efficient method to find out creases from 3D raw data. This information can be used together with planar patches extracted from 3D raw data in order to build a complete 3D model of the scene. Some promising results are shown for both outdoor and indoor environments.}, author = {Viejo, Diego and Cazorla, Miguel}, title = {{Pose Registration Model Improvement: Crease Detection}}, year = {2007} }
- O. C. Jenkins, G. Gonzalez-Serrano, and M. M. Loper, "Tracking human motion and actions for interactive robots," in Hri 2007 - proceedings of the 2007 acm/ieee conference on human-robot interaction - robot as team member, 2007.
[Bibtex]@inproceedings{Jenkins2007, abstract = {A method is presented for kinematic pose estimation and action recognition from monocular robot vision through the use of dynamical human motion vocabularies. We propose the utilization of dynamical motion vocabularies towards bridging the decision making of observed humans and information from robot sensing. Our motion vocabulary is comprised of learned primitives that structure the action space for decision making and describe human movement dynamics. Given image observations over time, each primitive infers on pose independently using its prediction density on movement dynamics in the context of a particle filter. Pose estimates from a set of primitives inferencing in parallel are arbitrated to estimate the action being performed. The efficacy of our approach is demonstrated through tracking and action recognition over extended motion trials. Results evidence the robustness of the algorithm with respect to unsegmented multi-action movement, movement speed, and camera viewpoint. Copyright 2007 ACM.}, author = {O.C. Jenkins and G. Gonzalez-Serrano and M.M. Loper}, doi = {10.1145/1228716.1228765}, isbn = {1595936173}, booktitle = {HRI 2007 - Proceedings of the 2007 ACM/IEEE Conference on Human-Robot Interaction - Robot as Team Member}, keywords = {Action recognition,Human tracking,Human-robot interaction,Markerless motion capture}, title = {Tracking human motion and actions for interactive robots}, year = {2007}, }
- O. C. Jenkins, G. Gonzalez-Serrano, and M. M. Loper, "Interactive human pose and action recognition using dynamical motion primitives," International journal of humanoid robotics, vol. 04, pp. 365-385, 2007.
[Bibtex]@article{Jenkins2007b, abstract = {There is currently a division between real-world human performance and the decision making of socially interactive robots. This circumstance is partially due to the difficulty in estimating human cues, such as pose and gesture, from robot sensing. Towards bridging this division, we present a method for kinematic pose estimation and action recognition from monocular robot vision through the use of dynamical human motion vocabularies. Our notion of a motion vocabulary is comprised of movement primitives that structure a human's action space for decision making and predict human movement dynamics. Through prediction, such primitives can be used to both generate motor commands for specific actions and perceive humans performing those actions. In this paper, we focus specifically on the perception of human pose and performed actions using a known vocabulary of primitives. Given image observations over time, each primitive infers pose independently using its expected dynamics in the context of a particle filter...}, author = {Odest Chadwicke Jenkins and German Gonzalez-Serrano and Matthew M. Loper}, doi = {10.1142/S0219843607001060}, isbn = {0219-8436}, issn = {0219-8436}, issue = {02}, journal = {International Journal of Humanoid Robotics}, pages = {365-385}, title = {Interactive human pose and action recognition using dynamical motion primitives}, volume = {04}, url = {http://www.worldscientific.com/doi/abs/10.1142/S0219843607001060}, year = {2007}, }
2006
- B. Bonev and M. Cazorla, "Towards autonomous adaptation in visual tasks.," in Workshop de agentes fisicos, 2006, p. 59–66.
[Bibtex]@inproceedings{bonev2006towards, author = {Bonev, Boyan and Cazorla, Miguel}, booktitle = {Workshop de Agentes Fisicos}, pages = {59--66}, title = {{Towards autonomous adaptation in visual tasks.}}, year = {2006} }
- B. Bonev, M. Cazorla, and H. Martinez, "Walk calibration in a four-legged robot," in Climbing and walking robots, Springer berlin heidelberg, 2006, p. 493–500.
[Bibtex]@incollection{bonev2006walk, author = {Bonev, Boyan and Cazorla, Miguel and Martinez, Humberto}, booktitle = {Climbing and Walking Robots}, pages = {493--500}, publisher = {Springer Berlin Heidelberg}, title = {{Walk calibration in a four-legged robot}}, year = {2006} }
- D. Herrero-Perez, F. Bas-Esparza, H. Martinez-Barbera, F. Martin, C. E. Aguero, V. M. Gomez, V. Matellan, and M. Cazorla, "Team Chaos 2006," in Robotics symposium, 2006. lars'06. ieee 3rd latin american, 2006, p. 208–213.
[Bibtex]@inproceedings{herrero2006team, author = {Herrero-Perez, D and Bas-Esparza, F and Martinez-Barbera, H and Martin, F and Aguero, C E and Gomez, V M and Matellan, V and Cazorla, M}, booktitle = {Robotics symposium, 2006. LARS'06. IEEE 3rd Latin American}, organization = {IEEE}, pages = {208--213}, title = {{Team Chaos 2006}}, year = {2006} }
- J. M. {Perez Torres}, D. {Viejo Hernando}, P. {Suau Perez}, M. Angel {Lozano Ortega}, O. {Colomina Pardo}, M. Angel Cazorla, F. {Escolano Ruiz}, and Others, "Una concepcion moderna de Tecnicas de Inteligencia Artificial en la Universidad de Alicante." 2006.
[Bibtex]@inproceedings{perez2006concepcion, author = {{Perez Torres}, Jose Manuel and {Viejo Hernando}, Diego and {Suau Perez}, Pablo and {Lozano Ortega}, Miguel Angel and {Colomina Pardo}, Otto and Cazorla, Miguel Angel and {Escolano Ruiz}, Francisco and others}, publisher = {Thomson-Paraninfo}, title = {{Una concepcion moderna de Tecnicas de Inteligencia Artificial en la Universidad de Alicante}}, year = {2006} }
- D. Viejo and M. Cazorla, "Extraction and error modeling of 3D data: application to SLAM.," in Workshop de agentes fisicos, 2006, p. 153–158.
[Bibtex]@inproceedings{viejo2006extraction, author = {Viejo, Diego and Cazorla, Miguel}, booktitle = {Workshop de Agentes Fisicos}, pages = {153--158}, title = {{Extraction and error modeling of 3D data: application to SLAM.}}, year = {2006} }
- D. Viejo and M. Cazorla, "Plane extraction and error modeling of 3d data," in International symposium on robotics and automation, 2006.
[Bibtex]@inproceedings{viejo2006plane, author = {Viejo, Diego and Cazorla, Miguel}, booktitle = {International symposium on Robotics and Automation}, title = {{Plane extraction and error modeling of 3d data}}, year = {2006} }
- O. Jenkins, G. Gonzalez-Serrano, and M. Loper, "Monocular virtual trajectory estimation with dynamical primitives," in American association for artificial intelligence workshop on cognitive robotics (aaai), 2006.
[Bibtex]@inproceedings{Jenkins2006, abstract = {We present a method for monocular kinematic pose estimation and activity recognition from video for movement imitation. Learned vocabularies of kinematic motion primitives are used emulate the function of hypothesized neuroscientific models for spinal fields and mirror neurons in the process of imitation. For imitation, we assume the movement of a demonstrator is produced through a virtual trajectory specifying desired body poses over time. Each pose represents a decomposition into mirror neuron firing coefficients that specify the attractive dynamics to this configuration through a linear combination of primitives. Each primitive is a nonlinear dynamical system that predicts expected motion with respect to an underlying activity. Our aim is to invert this process by estimating a demonstrator's virtual trajectory from monocular image observations in a bottom-up fashion. At our lowest level, pose estimates are inferred in a modular fashion through the use of a particle filter with each primitive. We hypothesize the likelihood of these pose estimates over time emulate the firing of mirror neurons from the formation of the virtual trajectory. We present preliminary results our method applied to video composed of multiple activities performed at various speeds and viewpoints.}, author = {O. Jenkins and G. Gonzalez-Serrano and M. Loper}, isbn = {1577352858}, booktitle = {American Association for Artificial Intelligence Workshop on Cognitive Robotics (AAAI)}, title = {Monocular virtual trajectory estimation with dynamical primitives}, url = {https://www.aaai.org/Papers/Workshops/2006/WS-06-03/WS06-03-016.pdf}, year = {2006}, }
- O. C. Jenkins, G. Gonzalez-Serrano, and M. Loper, "Dynamical motion vocabularies for kinematic tracking and activity recognition," Proceedings of the ieee computer society conference on computer vision and pattern recognition, vol. 2006, 2006.
[Bibtex]@article{Jenkins2006b, abstract = {We present a method for 3D monocular kinematic pose estimation and activity recognition through the use of dynamical human motion vocabularies. A motion vocabulary is comprised as a set of primitives that each describe the movement dynamics of an activity in a low-dimensional space. Given image observations over time, each primitive is used to infer the pose independently using its expected dynamics in the context of a particle filter. Pose estimates from a set of primitives are inferred in parallel and arbitrated to estimate the activity being performed. The approach presented is evaluated through tracking and activity recognition over extended motion trials. The results suggest robustness with respect to multi-activity movement, movement speed, and camera viewpoint.}, author = {Odest Chadwicke Jenkins and Germ\'{a}n Gonzalez-Serrano and Matthew Loper}, doi = {10.1109/CVPRW.2006.67}, isbn = {0769526462}, issn = {10636919}, journal = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition}, title = {Dynamical motion vocabularies for kinematic tracking and activity recognition}, volume = {2006}, year = {2006}, }
2005
- B. Bonev, M. Cazorla, and H. Martinez-Barbera, "Parameters optimization for quadruped walking," in Proc. of the vi workshop de agentes fisicos, granada, spain, 2005.
[Bibtex]@inproceedings{bonev2005parameters, author = {Bonev, Boyan and Cazorla, Miguel and Martinez-Barbera, H}, booktitle = {Proc. of the VI Workshop de agentes fisicos, Granada, Spain}, title = {{Parameters optimization for quadruped walking}}, year = {2005} }
- M. Cazorla and F. Escolano, "Feature Extraction and Grouping for Robot Vision Tasks," in Cutting edge robotics, I-tech, 2005, p. 91.
[Bibtex]@incollection{cazorla2005feature, author = {Cazorla, Miguel and Escolano, Francisco}, booktitle = {Cutting Edge Robotics}, pages = {91}, publisher = {I-Tech}, title = {{Feature Extraction and Grouping for Robot Vision Tasks}}, year = {2005} }
- H. Martinez, V. Matellan, M. A. Cazorla, A. Saffiotti, D. Herrero, F. Martin, B. Bonev, and K. LeBlanc, "Team Chaos 2005." 2005.
[Bibtex]@inproceedings{martinezteam, author = {Martinez, H and Matellan, V and Cazorla, M A and Saffiotti, A and Herrero, D and Mart{\'\i}n, F and Bonev, B and LeBlanc, K}, title = {{Team Chaos 2005}}, year = {2005} }
- D. Viejo, J. M. Saez, M. A. Cazorla, and F. Escolano, "Active stereo based compact mapping," in Intelligent robots and systems, 2005.(iros 2005). 2005 ieee/rsj international conference on, 2005, p. 529–534.
[Bibtex]@inproceedings{viejo2005active, author = {Viejo, Diego and Saez, Juan Manuel and Cazorla, Miguel Angel and Escolano, Francisco}, booktitle = {Intelligent Robots and Systems, 2005.(IROS 2005). 2005 IEEE/RSJ International Conference on}, organization = {IEEE}, pages = {529--534}, title = {{Active stereo based compact mapping}}, year = {2005} }
2004
- D. Viejo and M. A. Cazorla, "Construccion de mapas 3D y extraccion de primitivas geometricas del entorno," in Proc of 5th workshop de agentes fisicos, 2004.
[Bibtex]@inproceedings{viejo2004construccion, author = {Viejo, Diego and Cazorla, M A}, booktitle = {Proc of 5th Workshop de Agentes Fisicos}, title = {{Construccion de mapas 3D y extraccion de primitivas geometricas del entorno}}, year = {2004} }
- D. Viejo and M. Cazorla, "Unconstrained 3D-mesh generation applied to map building," in Progress in pattern recognition, image analysis and applications, Springer berlin heidelberg, 2004, p. 241–248.
[Bibtex]@incollection{viejo2004unconstrained, abstract = {3D map building is a complex robotics task which needs mathematical robust models. From a 3D point cloud, we can use the normal vectors to these points to do feature extraction. In this paper, we will present a robust method for normal estimation and unconstrained 3D-mesh generation from a not-uniformly distributed point cloud.}, author = {Viejo, Diego and Cazorla, Miguel}, booktitle = {Progress in Pattern Recognition, Image Analysis and Applications}, pages = {241--248}, publisher = {Springer Berlin Heidelberg}, title = {{Unconstrained 3D-mesh generation applied to map building}}, year = {2004} }
2003
- M. Cazorla and F. Escolano, "Two Bayesian methods for junction classification," Image processing, ieee transactions on, vol. 12, iss. 3, p. 317–327, 2003.
[Bibtex]@article{1197837, abstract = {We propose two Bayesian methods for junction classification which evolve from the Kona method: a region-based method and an edge-based method. Our region-based method computes a one-dimensional (1-D) profile where wedges are mapped to intervals with homogeneous intensity. These intervals are found through a growing-and-merging algorithm driven by a greedy rule. On the other hand, our edge-based method computes a different profile which maps wedge limits to peaks of contrast, and these peaks are found through thresholding followed by nonmaximum suppression. Experimental results show that both methods are more robust and efficient than the Kona method, and also that the edge-based method outperforms the region-based one.}, author = {Cazorla, M and Escolano, F}, doi = {10.1109/TIP.2002.806242}, issn = {1057-7149}, journal = {Image Processing, IEEE Transactions on}, keywords = {Bayes methods;edge detection;image classification;}, number = {3}, pages = {317--327}, title = {{Two Bayesian methods for junction classification}}, volume = {12}, year = {2003} }
2002
- M. Cazorla, F. Escolano, D. Gallardo, and R. Rizo, "Junction detection and grouping with probabilistic edge models and Bayesian A*," Pattern recognition, vol. 35, iss. 9, p. 1869–1881, 2002.
[Bibtex]@article{Cazorla20021869, abstract = {In this paper, we propose and integrate two Bayesian methods, one of them for junction detection, and the other one for junction grouping. Our junction detection method relies on a probabilistic edge model and a log-likelihood test. Our junction grouping method relies on finding connecting paths between pairs of junctions. Path searching is performed by applying a Bayesian A* algorithm. Such algorithm uses both an intensity and geometric model for defining the rewards of a partial path and prunes those paths with low rewards. We have extended such a pruning with an additional rule which favors the stability of longer paths against shorter ones. We have tested experimentally the efficiency and robustness of the methods in an indoor image sequence. In this paper, we propose and integrate two Bayesian methods, one of them for junction detection, and the other one for junction grouping. Our junction detection method relies on a probabilistic edge model and a log-likelihood test. Our junction grouping method relies on finding connecting paths between pairs of junctions. Path searching is performed by applying a Bayesian A* algorithm. Such algorithm uses both an intensity and geometric model for defining the rewards of a partial path and prunes those paths with low rewards. We have extended such a pruning with an additional rule which favors the stability of longer paths against shorter ones. 
We have tested experimentally the efficiency and robustness of the methods in an indoor image sequence.}, author = {Cazorla, M and Escolano, F and Gallardo, D and Rizo, R}, doi = {10.1016/S0031-3203(01)00150-9}, issn = {0031-3203}, journal = {Pattern Recognition}, keywords = {Bayesian inference}, number = {9}, pages = {1869--1881}, title = {{Junction detection and grouping with probabilistic edge models and Bayesian A*}}, url = {http://www.sciencedirect.com/science/article/pii/S0031320301001509}, volume = {35}, year = {2002} }
2001
- M. Angel Cazorla, O. Colomina Pardo, P. Compan Rosique, F. Escolano Ruiz, J. L. Zamora, and Others, "JavaVis: Una libreria para vision artificial en Java." 2001.
[Bibtex]@inproceedings{cazorla2001javavis, author = {Cazorla, Miguel Angel and Colomina Pardo, Otto and Compan Rosique, Patricia and Escolano Ruiz, Francisco and Zamora, Jose Luis and others}, publisher = {Universitat de les Illes Balears. Servei de Publicacions i Intercanvi Cientific}, title = {{JavaVis: Una libreria para vision artificial en Java}}, year = {2001} }
1999
- M. Angel Cazorla, F. Escolano Ruiz, D. Gallardo Lopez, O. Colomina Pardo, and Others, "A competition-based deformable template for junction extraction." 1999.
[Bibtex]@inproceedings{cazorla1999competition, author = {Cazorla, Miguel Angel and Escolano Ruiz, Francisco and Gallardo Lopez, Domingo and Colomina Pardo, Otto and others}, title = {{A competition-based deformable template for junction extraction}}, year = {1999} }
- M. Cazorla, F. Escolano, D. Gallardo, and R. Rizo, "Bayesian Models for Finding and Grouping Junctions," in Proc of the emmcvpr99, 1999.
[Bibtex]@inproceedings{Cazorla99a, author = {Cazorla, M and Escolano, F and Gallardo, D and Rizo, R}, booktitle = {Proc of the EMMCVPR99}, publisher = {Lectures Notes in Computer science}, title = {{Bayesian Models for Finding and Grouping Junctions}}, year = {1999} }
1998
- F. Escolano, M. Cazorla, D. Gallardo, F. Llorens, R. Satorre, and R. Rizo, "A combined probabilistic framework for learning gestures and actions," in Tasks and methods in applied artificial intelligence, Springer berlin heidelberg, 1998, p. 658–667.
[Bibtex]@incollection{escolano1998combined, author = {Escolano, Francisco and Cazorla, Miguel and Gallardo, Domingo and Llorens, Faraon and Satorre, Rosana and Rizo, Ramon}, booktitle = {Tasks and Methods in Applied Artificial Intelligence}, pages = {658--667}, publisher = {Springer Berlin Heidelberg}, title = {{A combined probabilistic framework for learning gestures and actions}}, year = {1998} }
- D. Gallardo, F. Escolano, R. Rizo, O. Colomina, and M. Cazorla, "Estimacion bayesiana de caracteristicas en robots moviles mediante muestreo de la densidad a posteriori," in Actas del primer congrés català d'intelligència artificial, 1998.
[Bibtex]@inproceedings{gallardo1998estimacion, author = {Gallardo, Domingo and Escolano, Francisco and Rizo, Ramon and Colomina, Otto and Cazorla, M}, booktitle = {Actas del Primer Congr{\'e}s Catal{\`a} d'Intellig{\`e}ncia Artificial}, title = {{Estimacion bayesiana de caracteristicas en robots moviles mediante muestreo de la densidad a posteriori}}, year = {1998} }
1997
- F. Escolano, M. Cazorla, D. Gallardo, and R. Rizo, "Deformable templates for tracking and analysis of intravascular ultrasound sequences," in Energy minimization methods in computer vision and pattern recognition, Springer berlin heidelberg, 1997, p. 521–534.
[Bibtex]@incollection{escolano1997deformable, author = {Escolano, Francisco and Cazorla, Miguel and Gallardo, Domingo and Rizo, Ramon}, booktitle = {Energy Minimization Methods in Computer Vision and Pattern Recognition}, pages = {521--534}, publisher = {Springer Berlin Heidelberg}, title = {{Deformable templates for tracking and analysis of intravascular ultrasound sequences}}, year = {1997} }
- F. Escolano Ruiz, M. Cazorla, and Others, "Estimacion del movimiento coherente: computacion evolutiva como alternativa al annealing determinista." 1997.
[Bibtex]@inproceedings{escolano1997estimacion, author = {Escolano Ruiz, Francisco and Cazorla, Miguel and others}, title = {{Estimacion del movimiento coherente: computacion evolutiva como alternativa al annealing determinista}}, year = {1997} }
- F. Escolano Ruiz, M. Angel Cazorla, D. {Gallardo Lopez}, F. Llorens Largo, R. Satorre Cuerda, R. Rizo Aldeguer, and Others, "Plantillas deformables espacio-temporales para el tracking y reconocimiento gestual." 1997.
[Bibtex]@inproceedings{escolano1997plantillas, author = {Escolano Ruiz, Francisco and Cazorla, Miguel Angel and {Gallardo Lopez}, Domingo and Llorens Largo, Faraon and Satorre Cuerda, Rosana and Rizo Aldeguer, Ramon and others}, title = {{Plantillas deformables espacio-temporales para el tracking y reconocimiento gestual}}, year = {1997} }
- I. Sabuco, F. {Escolano Ruiz}, M. Angel Cazorla, D. {Gallardo Lopez}, R. {Rizo Aldeguer}, and Others, "Snakes based tracking and texture analysis of microscopic images." 1997.
[Bibtex]@inproceedings{sabuco1997snakes, author = {Sabuco, Isabel and {Escolano Ruiz}, Francisco and Cazorla, Miguel Angel and {Gallardo Lopez}, Domingo and {Rizo Aldeguer}, Ramon and others}, title = {{Snakes based tracking and texture analysis of microscopic images}}, year = {1997} }
1995
- M. Cazorla, P. Caceres, F. Escolano, D. Gallardo, and R. Rizo, "Deteccion automatica con snakes y Representacion 3D sobre imagenes cerebrales," in Vi caepia, 1995, p. 331–340.
[Bibtex]@inproceedings{cazorla1995deteccion, author = {Cazorla, M and Caceres, Pedro and Escolano, Francisco and Gallardo, Domingo and Rizo, Ramon}, booktitle = {VI CAEPIA}, pages = {331--340}, title = {{Deteccion automatica con snakes y Representacion 3D sobre imagenes cerebrales}}, year = {1995} }
- E. Martinez-Martin and A. P. del Pobil, "Conflict resolution in robotics." IGI global, p. 263–278.
[Bibtex]@incollection{Martinez_Martin,doi = {10.4018/978-1-5225-0245-6.ch015},url = {http://dx.doi.org/10.4018/978-1-5225-0245-6.ch015},publisher = {{IGI} Global},pages = {263--278},author = {Ester Martinez-Martin and Angel P. del Pobil},title = {Conflict Resolution in Robotics}}