Publications



2024

  • L. J. Marhuenda, F. Gomez-Donoso, and M. Cazorla, «Harto: human activity recognition through optical flow architecture,» in The ieee world congress on computational intelligence, 2024.
    [Bibtex]
    @inproceedings{Marhuenda2024,
    author = {Marhuenda, Luis Jesus and Gomez-Donoso, Francisco and Cazorla, Miguel},
    booktitle = {The IEEE World Congress on Computational Intelligence},
    title = {HARTO: Human Activity Recognition Through Optical Flow Architecture},
    year = {2024}
    }
  • B. Dominguez-Dager, F. Gomez-Donoso, R. Roig-Vila, F. Escalona, and M. Cazorla, «Holograms for seamless integration of remote students in the classroom,» Virtual reality, vol. 28, iss. 1, p. 24, 2024.
    [Bibtex]
    @article{dominguez2024holograms,
    title={Holograms for seamless integration of remote students in the classroom},
    author={Dominguez-Dager, Bessie and Gomez-Donoso, Francisco and Roig-Vila, Rosabel and Escalona, Felix and Cazorla, Miguel},
    journal={Virtual Reality},
    volume={28},
    number={1},
    pages={24},
    year={2024},
    publisher={Springer}
    }

2023

  • L. Marquez-Carpintero, M. Pina-Navarro, S. Suescun-Ferrandiz, F. Escalona, F. Gomez-Donoso, R. Roig-Vila, and M. Cazorla, «Artificial intelligence-based system for detecting attention levels in students,» Journal of visualized experiments: jove, iss. 202, 2023.
    [Bibtex]
    @article{marquez2023artificial,
    title={Artificial Intelligence-based System for Detecting Attention Levels in Students},
    author={Marquez-Carpintero, Luis and Pina-Navarro, Monica and Suescun-Ferrandiz, Sergio and Escalona, Felix and Gomez-Donoso, Francisco and Roig-Vila, Rosabel and Cazorla, Miguel},
    journal={Journal of visualized experiments: JoVE},
    number={202},
    year={2023}
    }
  • F. Gomez-Donoso, B. Dominguez-Dager, F. Escalona, J. Montoyo-Bojo, and M. Cazorla, «Hypergaze: gaze tracking with a color camera,» in The 23rd international workshop de agentes físicos (waf), 2023.
    [Bibtex]
    @inproceedings{Gomez-DonosoWAF2023,
    author = {Francisco Gomez-Donoso and Bessie Dominguez-Dager and Félix Escalona and Javier Montoyo-Bojo and Miguel Cazorla},
    title = {HyperGaze: Gaze Tracking with a Color Camera},
    booktitle = {The 23rd International Workshop de Agentes Físicos (WAF)},
    year = {2023}
    }
  • S. Suescun, F. Gomez-Donoso, L. Marquez-Carpintero, and M. Cazorla, «Genetic algorithms for self-driving drones,» in The 23rd international workshop de agentes físicos (waf), 2023.
    [Bibtex]
    @inproceedings{SuescunWAF2023,
    author = {Sergio Suescun and Francisco Gomez-Donoso and Luis Marquez-Carpintero and Miguel Cazorla},
    title = {Genetic Algorithms for Self-Driving Drones},
    booktitle = {The 23rd International Workshop de Agentes Físicos (WAF)},
    year = {2023}
    }
  • E. Cruz, E. Gomez, A. M. Acosta-Reyes, F. Gomez-Donoso, M. Cazorla, and J. C. Rangel, «Enhancing poultry management practices: automated chicken counting and flock monitoring using yolo,» in The 23rd international workshop de agentes físicos (waf), 2023.
    [Bibtex]
    @inproceedings{CruzWAF2023,
    author = {  Edmanuel Cruz and Edgar Gomez and Adiz Mariel Acosta-Reyes and Francisco Gomez-Donoso and Miguel Cazorla and Jose Carlos Rangel},
    title = {Enhancing Poultry Management Practices: Automated Chicken Counting and Flock Monitoring using YOLO},
    booktitle = {The 23rd International Workshop de Agentes Físicos (WAF)},
    year = {2023}
    }
  • E. Alvarez, R. Alvarez Sanchez, and M. Cazorla, «Exploring transferability on adversarial attacks,» Ieee access, vol. Accepted, 2023.
    [Bibtex]
    @ARTICLE{alvarez2023,
    author={Alvarez, Enrique and Alvarez Sanchez, Rafael and Cazorla, Miguel },
    title={Exploring Transferability on Adversarial Attacks},
    journal={IEEE Access},
    year={2023},
    volume={Accepted},
    number={},
    }
  • M. Torres Mendoza, R. Alvarez Sanchez, and M. Cazorla, «A malware detection approach based on feature engineering and behavior analysis,» Ieee access, vol. Accepted, 2023.
    [Bibtex]
    @ARTICLE{torres2023,
    author={Torres Mendoza, Manuel and Alvarez Sanchez, Rafael and Cazorla, Miguel },
    title={A Malware Detection Approach Based on Feature Engineering and Behavior Analysis},
    journal={IEEE Access},
    year={2023},
    volume={Accepted},
    number={},
    }
  • C. Mejia-Escobar, M. Cazorla, and E. Martinez-Martin, «Improving facial expression recognition through data preparation & merging,» Ieee access, vol. Accepted, 2023.
    [Bibtex]
    @ARTICLE{mejia2023,
    author={Mejia-Escobar, Christian and Cazorla, Miguel and Martinez-Martin, Ester},
    title={Improving Facial Expression Recognition through Data Preparation & Merging},
    journal={IEEE Access},
    year={2023},
    volume={Accepted},
    number={},
    }
  • F. Escalona, F. Gomez-Donoso, F. Morillas-Espejo, M. Pina-Navarro, L. M. Carpintero, and M. Cazorla, «Aatiende: automatic attention evaluation on a non-invasive device,» in Iwann 2023, 2023.
    [Bibtex]
    @INPROCEEDINGs{EscalonaIWANN2023,
    Author = { F. Escalona and F. Gomez-Donoso and Francisco Morillas-Espejo and Mónica Pina-Navarro and Luis Márquez Carpintero and Miguel Cazorla},
    Title = {AATiENDe: Automatic ATtention Evaluation on a Non-invasive Device},
    Booktitle = {IWANN 2023},
    Year = {2023}
    }
  • R. Martinez-Roig, M. Cazorla, and J. M. Esteve Faubel, «Social robotics in music education: a systematic review,» Frontiers in education, vol. Accepted, 2023.
    [Bibtex]
    @ARTICLE{martinez2023,
    author={Martinez-Roig, Rosabel and Cazorla, Miguel and Esteve Faubel, Jose Maria},
    journal={Frontiers in Education},
    title={Social robotics in music education: a systematic review},
    year={2023},
    volume={Accepted},
    number={},
    }
  • F. Romero-Ramirez, R. Munyoz-Salinas, M. Marín, M. Cazorla, and R. Medina-Carnicer, «Sslam: speeded up visual slam mixing artificial markers and temporary keypoints,» Sensors, vol. Accepted, 2023.
    [Bibtex]
    @ARTICLE{romero2023,
    author={Romero-Ramirez, Francisco and Munyoz-Salinas, Rafael and Marín, Manuel and Cazorla, Miguel and Medina-Carnicer, Rafael},
    journal={Sensors},
    title={sSLAM: Speeded Up visual SLAM mixing artificial markers and temporary keypoints },
    year={2023},
    volume={Accepted},
    number={},
    }
  • F. Gomez-Donoso, J. Castano, F. Escalona-Moncholi, and M. Cazorla, «Three-dimensional reconstruction using sfm for actual pedestrian classification,» Expert systems with applications, vol. 213, 2023.
    [Bibtex]
    @ARTICLE{gomezDonoso2022c,
    author={Gomez-Donoso, Francisco and Castano, Julio and Escalona-Moncholi, Felix and Cazorla, Miguel},
    journal={Expert Systems with Applications},
    title={Three-Dimensional Reconstruction Using SFM for Actual Pedestrian Classification},
    year={2023},
    volume={213},
    number={},
    }
  • C. Mejia, M. Cazorla, and E. Martinez-Martin, «Towards a better performance in facial expression recognition: a data-centric approach,» Computational intelligence and neuroscience, vol. 2023, 2023.
    [Bibtex]
    @ARTICLE{christian2022a,
    author={Mejia, Christian and Cazorla, Miguel and Martinez-Martin, Ester},
    journal={Computational Intelligence and Neuroscience},
    title={Towards a better performance in facial expression recognition: a data-centric approach
    },
    year={2023},
    volume={2023},
    number={},
    }

2022

  • F. Gomez-Donoso, F. Escalona-Moncholi, and M. Cazorla, «Vfkd: voxelized fractal keypoint detector,» in Proc. of international joint conference on neural networks (ijcnn), 2022.
    [Bibtex]
    @INPROCEEDINGs{GomezDonoso2022IJCNN,
    Author = { Francisco Gomez-Donoso and Felix Escalona-Moncholi and Miguel Cazorla},
    Title = {VFKD: Voxelized Fractal Keypoint Detector},
    Booktitle = {Proc. of International Joint Conference on Neural Networks (IJCNN)},
    Year = {2022}
    }
  • [DOI] F. Gomez-Donoso, F. Escalona, S. Orts-Escolano, A. Garcia-Garcia, J. Garcia-Rodriguez, and M. Cazorla, «3dslicelenet: recognizing 3d objects using a slice-representation,» Ieee access, vol. 10, pp. 15378-15392, 2022.
    [Bibtex]
    @ARTICLE{9701312,
    author={Gomez-Donoso, Francisco and Escalona, Felix and Orts-Escolano, Sergio and Garcia-Garcia, Alberto and Garcia-Rodriguez, Jose and Cazorla, Miguel},
    journal={IEEE Access},
    title={3DSliceLeNet: Recognizing 3D Objects Using a Slice-Representation},
    year={2022},
    volume={10},
    number={},
    pages={15378-15392},
    doi={10.1109/ACCESS.2022.3148387}}
  • F. Gomez-Donoso, M. Moreno-Martineza, and M. Cazorla, «Uroac: urban objects in any-light conditions,» Data in brief, vol. Accepted, 2022.
    [Bibtex]
    @ARTICLE{donoso2022b,
    author={F. Gomez-Donoso and Marcos Moreno-Martineza and Miguel Cazorla},
    journal={Data in brief},
    title={UrOAC: Urban Objects in Any-light Conditions},
    year={2022},
    volume={Accepted},
    number={}}
  • J. C. Rangel, E. Cruz, and M. Cazorla, «Automatic understanding and mapping of regions in cities using google,» Applied sciences, vol. 12, iss. 6, 2022.
    [Bibtex]
    @ARTICLE{Rangel2022,
    author={Jose Carlos Rangel and Edmanuel Cruz and Miguel Cazorla},
    journal={Applied Sciences},
    title={Automatic understanding and mapping of regions in cities using Google},
    year={2022},
    volume={12},
    number={6}}

2021

  • C. Mejia, E. Martinez-Martin, and M. Cazorla, «Webpage categorization using deep learning,» in Proc. of the 16th international conference on soft computing models in industrial and environmental applications (soco), 2021.
    [Bibtex]
    @INPROCEEDINGs{SOCOChristian2021,
    Author = { Christian Mejia and Ester Martinez-Martin and Miguel Cazorla},
    Title = {Webpage Categorization using Deep Learning},
    Booktitle = {Proc. of the 16th International Conference on Soft Computing Models in Industrial and Environmental Applications (SOCO)},
    Year = {2021}
    }
  • Z. Bauer, Z. Li, S. Orts, M. Cazorla, M. Pollefeys, and M. Oswald, «Nvs-monodepth: improving monocular depth prediction with novel view synthesis,» in Proceedings of the international conference on 3d vision (3dv), 2021.
    [Bibtex]
    @INPROCEEDINGs{3DVZuria2021,
    Author = { Zuria Bauer and Zuoyue Li and Sergio Orts and Miguel Cazorla and Marc Pollefeys and Martin Oswald },
    Title = {NVS-MonoDepth: Improving Monocular Depth Prediction with Novel View Synthesis},
    Booktitle = {Proceedings of the International Conference on 3D Vision (3DV)},
    Year = {2021}
    }
  • E. Alvarez, R. Alvarez, and M. Cazorla, «Studying the transferability of non-targeted adversarial attacks,» in Proc. of international joint conference on neural networks (ijcnn), 2021.
    [Bibtex]
    @INPROCEEDINGs{IJCNNEnrique2021,
    Author = { Enrique Alvarez and Rafael Alvarez and Miguel Cazorla},
    Title = {Studying the Transferability of Non-Targeted Adversarial Attacks},
    Booktitle = {Proc. of International Joint Conference on Neural Networks (IJCNN)},
    Year = {2021}
    }
  • [DOI] E. Martinez-Martin and F. Morillas-Espejo, «Deep learning techniques for spanish sign language interpretation,» Computational intelligence and neuroscience, vol. 2021, 2021.
    [Bibtex]
    @ARTICLE{martinez-martin2021a,
    author={Ester Martinez-Martin and Francisco Morillas-Espejo},
    journal={Computational Intelligence and Neuroscience},
    title={Deep Learning Techniques for Spanish Sign Language Interpretation},
    year={2021},
    volume={2021},
    doi={https://doi.org/10.1155/2021/5532580}
    }
  • [DOI] E. Martinez-Martin and A. Costa, «Assistive technology for elderly care: an overview,» Ieee access, vol. 9, pp. 92420-92430, 2021.
    [Bibtex]
    @ARTICLE{martinez-martin2021b,
    author={Ester Martinez-Martin and Angelo Costa},
    journal={IEEE Access},
    title={Assistive Technology for Elderly Care: An Overview},
    year={2021},
    volume={9},
    pages={92420-92430},
    doi={10.1109/ACCESS.2021.3092407}
    }
  • [DOI] E. Martinez-Martin and A. P. del Pobil, «Robot vision for manipulation: a trip to real-world applications,» Ieee access, vol. 9, pp. 3471-3481, 2021.
    [Bibtex]
    @ARTICLE{martinez-martin2021c,
    author={Ester Martinez-Martin and Angel~P. del~Pobil},
    journal={IEEE Access},
    title={Robot Vision for Manipulation: A Trip to Real-World Applications},
    year={2021},
    volume={9},
    number={},
    pages={3471-3481},
    doi={10.1109/ACCESS.2020.3048053}
    }
  • [DOI] E. Martinez-Martin, E. Ferrer, I. Vasilev, and A. P. del Pobil, «The uji aerial librarian robot: a quadcopter for visual library inventory and book localisation,» Sensors, vol. 21, iss. 4, 2021.
    [Bibtex]
    @Article{martinez-martin2021d,
    AUTHOR = {Martinez-Martin, Ester and Ferrer, Eric and Vasilev, Ilia and del Pobil, Angel P.},
    TITLE = {The UJI Aerial Librarian Robot: A Quadcopter for Visual Library Inventory and Book Localisation},
    JOURNAL = {Sensors},
    VOLUME = {21},
    YEAR = {2021},
    NUMBER = {4},
    ISSN = {1424-8220},
    DOI = {10.3390/s21041079}
    }
  • E. Martinez-Martin, E. Ferrer, I. Vasilev, and A. P. del Pobil, «An autonomous drone for image-based inspection of bookshelves,» in 29th european signal processing conference – eusipco 2021, 2021.
    [Bibtex]
    @inproceedings{martinez-martin2021e,
    author = {Martinez-Martin, Ester and Ferrer, Eric and Vasilev, Ilia and del Pobil, Angel P.},
    title = {An Autonomous Drone for Image-Based Inspection of Bookshelves},
    booktitle = {29th European Signal Processing Conference - EUSIPCO 2021},
    year = {2021}
    }
  • E. Caldwell, M. Cazorla, and J. M. C. Plaza, «Designing a cyber-physical robotic platform to assist speech-language pathologists,» Assistive technologies, vol. Accepted, 2021.
    [Bibtex]
    @ARTICLE{Eldon2021,
    author={Eldon Caldwell and Miguel Cazorla and Jose Maria Cañas Plaza},
    journal={Assistive Technologies},
    title={Designing a Cyber-physical Robotic Platform to assist Speech-Language Pathologists},
    year={2021},
    volume={Accepted},
    number={}}
  • F. Gomez-Donoso, F. Escalona, N. Nasri, and M. Cazorla, «A hand motor skills rehabilitation for the injured implemented on a social robot,» Applied sciences, vol. 11, iss. 7, 2021.
    [Bibtex]
    @ARTICLE{Donoso2021b,
    author={F. Gomez-Donoso and F. Escalona and N. Nasri and M. Cazorla},
    journal={Applied Sciences},
    title={A Hand Motor Skills Rehabilitation for the Injured Implemented on a Social Robot},
    year={2021},
    volume={11},
    number={7}}
  • F. Gomez-Donoso, F. Escalona, F. Pérez-Esteve, and M. Cazorla, «Accurate multilevel classification for wildlife images,» Computational intelligence and neuroscience, vol. 2021, 2021.
    [Bibtex]
    @ARTICLE{Donoso2021a,
    author={Gomez-Donoso, F. and Escalona, F. and Pérez-Esteve, F. and Cazorla, M.},
    journal={Computational Intelligence and Neuroscience},
    title={Accurate Multilevel Classification for Wildlife Images},
    year={2021},
    volume={2021},
    number={},
    pages={},}

2020

  • F. Gomez-Donoso, F. Escalona, A. Bañuls, D. Abellan, and M. Cazorla, «Monocular 3d hand pose estimation for teleoperating low-cost actuators,» in The 21st international workshop de agentes físicos (waf), 2020.
    [Bibtex]
    @inproceedings{Cruz2018c,
    author = {Francisco Gomez-Donoso and Félix Escalona and Alejandro Bañuls and Daniel Abellan and Miguel Cazorla},
    title = {Monocular 3D Hand Pose Estimation for Teleoperating Low-cost Actuators},
    booktitle = {The 21st International Workshop de Agentes Físicos (WAF)},
    year = {2020}
    }
  • J. F. Domenech, F. Escalona, F. Gomez-Donoso, and M. Cazorla, «A voxelized fractal descriptor for 3d object recognition,» Ieee access, vol. 8, p. 161958–161968, 2020.
    [Bibtex]
    @ARTICLE{Domenech2020,
    author={J.F. Domenech and F. {Escalona} and F. {Gomez-Donoso} and M. {Cazorla}},
    journal={IEEE Access},
    title={A Voxelized Fractal Descriptor for 3D Object Recognition},
    year={2020},
    volume={8},
    pages={161958--161968}}
  • N. Nasri, S. Orts-Escolano, and M. Cazorla, «A semg-controlled 3d game for rehabilitation therapies: real-time time hand gesture recognition using deep learning techniques,» Sensors, vol. 20, iss. 22, 2020.
    [Bibtex]
    @ARTICLE{Nadia2020,
    author={Nadia Nasri and Sergio Orts-Escolano and Miguel Cazorla},
    journal={Sensors},
    title={A sEMG-controlled 3D game for rehabilitation therapies: real-time time
    hand gesture recognition using deep learning techniques},
    year={2020},
    volume={20},
    number={22}}
  • E. Martinez-Martin, M. Cazorla, and S. Orts-Escolano, «Machine learning techniques for assistive robotics,» Electronics, vol. 9, iss. 5, 2020.
    [Bibtex]
    @Article{Gomez-Donoso20,
    author="Ester Martinez-Martin and Miguel Cazorla and Sergio Orts-Escolano",
    title="Machine Learning Techniques for Assistive Robotics”,
    journal="Electronics",
    year="2020",
    volume="9",
    number="5"
    }
  • F. Gomez-Donoso, F. Escalona, and M. Cazorla, «Par3dnet: using 3dcnns for object recognition on tridimensional partial views,» Applied sciences, vol. 10, iss. 10, 2020.
    [Bibtex]
    @Article{Gomez-Donoso20,
    author="Francisco Gomez-Donoso and Felix Escalona and Miguel Cazorla",
    title=" Par3DNet: Using 3DCNNs for Object Recognition on Tridimensional Partial Views”,
    journal="Applied Sciences",
    year="2020",
    volume="10",
    number="10"
    }
  • W. Zhou, E. Cruz, S. Worrall, F. Gomez-Donoso, M. Cazorla, and E. Nebot, «Weakly-supervised road condition classification using automatically generated labels,» in Proc. of the 23rd ieee international conference on intelligent transportation systems (itsc), 2020.
    [Bibtex]
    @INPROCEEDINGs{ITSC2020,
    Author = { Wei Zhou and Edmanuel Cruz and Stewart Worrall and Francisco Gomez-Donoso and Miguel Cazorla and Eduardo Nebot},
    Title = {Weakly-supervised Road Condition Classification Using Automatically Generated Labels},
    Booktitle = {Proc. of The 23rd IEEE International Conference on Intelligent Transportation Systems (ITSC)},
    Year = {2020}
    }
  • C. Cano-Espinosa, M. Cazorla, and G. González-Serrano, «Computer aided detection of pulmonary embolism using multi-slice multi-axial segmentation,» Applied sciences, vol. 10, iss. 8, 2020.
    [Bibtex]
    @Article{Cano-Espinosa20,
    author="Carlos Cano-Espinosa and Miguel Cazorla and German González-Serrano",
    title="Computer Aided Detection of Pulmonary Embolism Using Multi-slice
    Multi-axial Segmentation”,
    journal="Applied Sciences",
    year="2020",
    volume="10",
    number="8"
    }
  • F. Martin-Rico, F. G. Donoso, F. Escalona, J. G. Rodriguez, and M. Cazorla, «Semantic visual recognition in a cognitive architecture for social robots,» Integrated computer-aided engineering, vol. 50, iss. 1, pp. 14-28, 2020.
    [Bibtex]
    @Article{Martin-Rico2020,
    author="Francisco Martin-Rico and Francisco Gomez Donoso and Felix Escalona and Jose Garcia Rodriguez and Miguel Cazorla",
    title="Semantic Visual Recognition in a Cognitive Architecture for Social Robots”,
    journal="Integrated Computer-Aided Engineering",
    year="2020",
    volume="50",
    number="1",
    pages="14-28"
    }
  • F. Gomez-Donoso, E. Cruz, M. Cazorla, S. Worrall, and E. Nebot, «Using a 3d cnn for rejecting false positives on pedestrian detection,» in Proc. of international joint conference on neural networks (ijcnn), 2020.
    [Bibtex]
    @INPROCEEDINGs{IJCNNFran2020,
    Author = { Francisco Gomez-Donoso and Edmanuel Cruz and Miguel Cazorla and Stewart Worrall and Eduardo Nebot},
    Title = {Using a 3D CNN for Rejecting False Positives on Pedestrian Detection},
    Booktitle = {Proc. of International Joint Conference on Neural Networks (IJCNN)},
    Year = {2020}
    }
  • F. Escalona, D. Viejo, R. Fisher, and M. Cazorla, «Nurbsnet: a nurbs approach for 3d object recognition,» in Proc. of international joint conference on neural networks (ijcnn), 2020.
    [Bibtex]
    @INPROCEEDINGs{IJCNNFelix2020,
    Author = {Felix Escalona and Diego Viejo and Robert Fisher and Miguel Cazorla},
    Title = {NurbsNet: A Nurbs approach for 3d object recognition},
    Booktitle = {Proc. of International Joint Conference on Neural Networks (IJCNN)},
    Year = {2020}
    }
  • E. Martínez-Martín, F. Escalona, and M. Cazorla, «Socially assistive robots for disabled people: a survey,» Electronics, vol. 9, iss. 2, 2020.
    [Bibtex]
    @Article{Rangel2020Electronics,
    author="Ester Martínez-Martín and Félix Escalona and Miguel Cazorla",
    title="Socially Assistive Robots for Disabled People: A survey”,
    journal="Electronics",
    year="2020",
    volume="9",
    number="2"
    }
  • C. Cano-Espinosa, G. Gonzalez, G. R. Washko, M. Cazorla, and R. S. J. Estepar, «Biomarker localization from deep learning regression networks,» Ieee transactions on medical imaging, vol. 39, iss. 6, p. 2121–2132, 2020.
    [Bibtex]
    @Article{Espinosa20,
    author="Carlos Cano-Espinosa and German Gonzalez and George R. Washko and Miguel Cazorla and Raul San Jose Estepar",
    title="Biomarker Localization from Deep Learning Regression Networks",
    journal="IEEE Transactions on Medical Imaging",
    year="2020",
    volume="39",
    number="6",
    pages="2121--2132"
    }
  • [DOI] F. Escalona, E. Martinez-Martin, E. Cruz, M. Cazorla, and F. Gomez-Donoso, «Eva: evaluating at-home rehabilitation exercises using augmented reality and low-cost sensors,» Virtual reality, vol. 24, p. 567–581, 2020.
    [Bibtex]
    @Article{Escalona2019,
    author="Escalona, Felix
    and Martinez-Martin, Ester
    and Cruz, Edmanuel
    and Cazorla, Miguel
    and Gomez-Donoso, Francisco",
    title="EVA: EVAluating at-home rehabilitation exercises using augmented reality and low-cost sensors",
    journal="Virtual Reality",
    volume = "24",
    year="2020",
    pages="567--581",
    abstract="Over one billion people in the world live with some form of disability. This is incessantly increasing due to aging population and chronic diseases. Among the emerging social needs, rehabilitation services are the most required. However, they are scarce and expensive what considerably limits access to them. In this paper, we propose EVA, an augmented reality platform to engage and supervise rehabilitation sessions at home using low-cost sensors. It also stores the user's statistics and allows therapists to tailor the exercise programs according to their performance. This system has been evaluated in both qualitative and quantitative ways obtaining very promising results.",
    issn="1434-9957",
    doi="10.1007/s10055-019-00419-4"
    }
  • E. Cruz, J. C. Rangel, F. Gomez-Donoso, and M. Cazorla, «How to add new knowledge to already trained deep learning models applied to semantic localization,» Applied intelligence, vol. 50, iss. 1, pp. 14-28, 2020.
    [Bibtex]
    @article{Cruz2020AI,
    title = "How to add new knowledge to already trained deep learning models applied to semantic localization”,
    journal = "Applied Intelligence",
    volume = "50",
    number = "1",
    pages = "14-28",
    year = "2020",
    author = "Edmanuel Cruz and José Carlos Rangel and Francisco Gomez-Donoso and Miguel Cazorla"
    }
  • Z. Bauer, A. Dominguez, E. Cruz, F. Gomez-Donoso, S. Orts-Escolano, and M. Cazorla, «Enhancing perception for the visually impaired with deep learning techniques and low-cost wearable sensors,» Pattern recognition letters, vol. 137, p. 27–36, 2020.
    [Bibtex]
    @article{rompetechos2019,
    title = "Enhancing Perception for the Visually Impaired with Deep Learning Techniques and Low-cost Wearable Sensors",
    journal = "Pattern Recognition Letters",
    volume = "137",
    year = "2020",
    pages = "27--36",
    author = "Zuria Bauer and Alejandro Dominguez and Edmanuel Cruz and Francisco Gomez-Donoso and Sergio Orts-Escolano and Miguel Cazorla"
    }
  • J. Garcia-Rodriguez, F. Gomez-Donoso, A. Garcia-Garcia, M. Cazorla, S. Orts-Escolano, S. Oprea, Z. Bauer, J. Castro-Vargas, P. Martinez-Gonzalez, D. Ivorra-Piqueres, F. Escalona-Moncholí, E. Aguirre, M. Garcia-Silviente, M. Garcia-Perez, J. M. Cañas, F. Martin-Rico, J. Gines, and F. Rivas-Montero, «Combaho: a deep learning system for integrating brain injury patients in society,» Pattern recognition letters, vol. 137, p. 80–90, 2020.
    [Bibtex]
    @article{combaho2019,
    title = "COMBAHO: A Deep Learning system for Integrating Brain Injury Patients in society",
    journal = "Pattern Recognition Letters",
    volume = "137",
    pages = "80--90",
    year = "2020",
    author = "Jose Garcia-Rodriguez and Francisco Gomez-Donoso and Alberto Garcia-Garcia and Miguel Cazorla and sergio Orts-Escolano and sergiu Oprea and Zuria Bauer and John Castro-Vargas and Pablo Martinez-Gonzalez and David Ivorra-Piqueres and Félix Escalona-Moncholí and Eugenio Aguirre and Miguel Garcia-silviente and Marcelo Garcia-Perez and Jose M Cañas and Francisco Martin-Rico and Jonathan Gines and Francisco Rivas-Montero"
    }

2019

  • F. Gomez-Donoso, F. Escalona, F. M. Rivas, J. M. Cañas, and M. Cazorla, «Enhancing the ambient assisted living capabilities with a mobile robot,» Computational intelligence and neuroscience, vol. 2019, 2019.
    [Bibtex]
    @article{gomez2019enhancing,
    title={Enhancing the ambient assisted living capabilities with a mobile robot},
    author={Gomez-Donoso, Francisco and Escalona, F{\'e}lix and Rivas, Francisco Miguel and Ca{\~n}as, Jose Maria and Cazorla, Miguel},
    journal={Computational intelligence and neuroscience},
    volume={2019},
    year={2019},
    publisher={Hindawi}
    }
  • Z. Bauer, F. Escalona, E. Cruz, M. Cazorla, and F. Gomez-Donoso, «Refining the fusion of pepper robot and estimated depth maps method for improved 3d perception,» Ieee access, vol. 7, pp. 185076-185085, 2019.
    [Bibtex]
    @ARTICLE{8936685,
    author={Z. {Bauer} and F. {Escalona} and E. {Cruz} and M. {Cazorla} and F. {Gomez-Donoso}},
    journal={IEEE Access},
    title={Refining the Fusion of Pepper Robot and Estimated Depth Maps Method for Improved 3D Perception},
    year={2019},
    volume={7},
    number={},
    pages={185076-185085},}
  • J. M. Torres-Camara, F. Escalona, F. Gomez-Donoso, and M. Cazorla, «Map slammer: densifying scattered kslam 3d maps with estimated depth,» in Proc. of the robot’2019: fourth iberian robotics conference, 2019.
    [Bibtex]
    @INPROCEEDINGs{robot2019b,
    Author = {Jose Miguel Torres-Camara and Felix Escalona and Francisco Gomez-Donoso and Miguel Cazorla},
    Title = {Map slammer: Densifying scattered KSLAM 3D Maps with Estimated Depth},
    Booktitle = {Proc. of the ROBOT'2019: Fourth Iberian Robotics Conference},
    month={November},
    Year = {2019}
    }
  • D. Azuar, G. Gallud, F. Escalona, F. Gomez-Donoso, and M. Cazorla, «A story-telling social robot with emotion recognition capabilities for the intellectually challenged,» in Proc. of the robot’2019: fourth iberian robotics conference, 2019.
    [Bibtex]
    @INPROCEEDINGs{robot2019,
    Author = {David Azuar and Guillermo Gallud and Felix Escalona and Francisco Gomez-Donoso and Miguel Cazorla},
    Title = {A story-telling social Robot with Emotion Recognition Capabilities for the Intellectually Challenged},
    Booktitle = {Proc. of the ROBOT'2019: Fourth Iberian Robotics Conference},
    month={November},
    Year = {2019}
    }
  • E. Martinez-Martin, A. Costa, and M. Cazorla, «Pharos 2.0 – a physical assistant robot system improved,» Sensors, vol. 2019, 2019.
    [Bibtex]
    @article{Martinez2019Sensors,
    author = {Ester Martinez-Martin and Angelo Costa and Miguel Cazorla},
    year = {2019},
    title = {PHAROS 2.0 – A PHysical Assistant RObot System improved},
    journal = {Sensors},
    volume = {2019},
    year = {2019}
    }
  • [DOI] Z. Bauer, F. Gomez-Donoso, E. Cruz, S. Orts-Escolano, and M. Cazorla, «Uasol, a large-scale high-resolution outdoor stereo dataset,» Scientific data, vol. 6, iss. 1, p. 1–14, 2019.
    [Bibtex]
    @article{bauer2019uasol,
    author = {Bauer, Zuria and Gomez-Donoso, Francisco and Cruz, Edmanuel and Orts-Escolano, Sergio and Cazorla, Miguel},
    year = {2019},
    title = {UASOL, a large-scale high-resolution outdoor stereo dataset},
    journal = {Scientific Data},
    publisher = {Nature Publishing Group},
    issn = {2052-4463},
    doi = {10.1038/s41597-019-0168-5},
    volume = {6},
    month = {8},
    pages = {1--14},
    number = {1},
    url = {https://doi.org/10.1038/s41597-019-0168-5},
    abstract = {In this paper, we propose a new dataset for outdoor depth estimation from single and stereo RGB images. The dataset was acquired from the point of view of a pedestrian. Currently, the most novel approaches take advantage of deep learning-based techniques, which have proven to outperform traditional state-of-the-art computer vision methods. Nonetheless, these methods require large amounts of reliable ground-truth data. Despite there already existing several datasets that could be used for depth estimation, almost none of them are outdoor-oriented from an egocentric point of view. Our dataset introduces a large number of high-definition pairs of color frames and corresponding depth maps from a human perspective. In addition, the proposed dataset also features human interaction and great variability of data, as shown in this work. Machine-accessible metadata file describing the reported data (ISA-Tab format)}
    }
  • F. Gomez-Donoso, S. Orts-Escolano, and M. Cazorla, «Accurate and efficient 3d hand pose regression for robot hand teleoperation using a monocular rgb camera,» Expert systems with applications, vol. 136, p. 327–337, 2019.
    [Bibtex]
    @article{Donoso2019ESWA,
    title = "Accurate and Efficient 3D Hand Pose Regression for Robot Hand Teleoperation using a Monocular RGB Camera”,
    journal = "Expert systems With Applications",
    volume = "136",
    pages = "327--337",
    year = "2019",
    author = "Francisco Gomez-Donoso and sergio Orts-Escolano and Miguel Cazorla"
    }
  • E. Martinez-Martin and M. Cazorla, «A socially assistive robot for elderly exercise promotion,» Ieee access, vol. 7, pp. 75515-75529, 2019.
    [Bibtex]
    @article{Martinez-Martin2019Access,
    title = "A socially Assistive Robot for Elderly Exercise Promotion”,
    journal = "IEEE Access",
    volume = "7",
    pages = "75515-75529",
    year = "2019",
    author = "Ester Martinez-Martin and Miguel Cazorla"
    }
  • E. Martinez-Martin and M. Cazorla, «Rehabilitation technology: assistance from hospital to home,» Computational intelligence and neuroscience, vol. 2019, 2019.
    [Bibtex]
    @article{Martin2019Review,
    title = "Rehabilitation Technology: Assistance From Hospital to Home”,
    journal = "Computational Intelligence and Neuroscience",
    volume = "2019",
    year = "2019",
    author = "Ester Martinez-Martin and Miguel Cazorla"
    }
  • N. Nasri, S. Orts-Escolano, F. Gomez-Donoso, and M. Cazorla, «Using inferred gestures from semg signal to teleoperate a domestic robot for the disabled,» in Proc. of the 15th international work-conference on artificial neural networks, 2019.
    [Bibtex]
    @INPROCEEDINGS{Iwwan2019,
    Author = {Nadia Nasri and Sergio Orts-Escolano and Francisco Gomez-Donoso and Miguel Cazorla},
    Title = {Using Inferred Gestures from sEMG Signal to Teleoperate a Domestic Robot for the Disabled},
    Booktitle = {Proc. of the 15th International Work-Conference on Artificial Neural Networks},
    Year = {2019}
    }
  • F. Martin-Rico, F. Gomez-Donoso, F. Escalona, M. Cazorla, and J. Garcia-Rodriguez, «Artificial semantic memory with autonomous learning applied to social robots,» in Proc. of international work-conference on the interplay between natural and artificial computation, 2019.
    [Bibtex]
    @INPROCEEDINGs{Iwinac2019,
    Author = {Francisco Martin-Rico and Francisco Gomez-Donoso and Felix Escalona and Miguel Cazorla and Jose Garcia-Rodriguez},
    Title = {Artificial Semantic Memory with Autonomous Learning Applied to Social Robots},
    Booktitle = {Proc. of International Work-Conference on the Interplay Between Natural and Artificial Computation},
    Year = {2019}
    }
  • F. Gomez-Donoso, F. Escalona, F. Rivas-Montero, J. M. Cañas, and M. Cazorla, «Enhancing the ambient assisted living capabilities with a mobile robot,» Computational intelligence and neuroscience, vol. 2019, 2019.
    [Bibtex]
    @article{cinMadrid2019,
    title = "Enhancing the Ambient Assisted Living Capabilities with a Mobile Robot",
    journal = "Computational Intelligence and Neuroscience",
    volume = "2019",
    year = "2019",
    author = "Francisco Gomez-Donoso and Felix Escalona and Francisco Rivas-Montero and José M. Cañas and Miguel Cazorla"
    }
  • N. Nasri, S. Orts-Escolano, F. Gomez-Donoso, and M. Cazorla, «Inferring static hand poses from a low-cost non-intrusive semg sensor,» Sensors, vol. 2019, 2019.
    [Bibtex]
    @article{Nadia2019sensors,
    title = "INFERRING sTATIC HAND POsEs FROM A LOW-COsT NON-INTRUsIVE sEMG sENsOR",
    journal = "sensors",
    volume = "2019",
    year = "2019",
    author = "Nadia Nasri and sergio Orts-Escolano and Francisco Gomez-Donoso and Miguel Cazorla"
    }
  • F. Gomez-Donoso, S. Orts-Escolano, and M. Cazorla, «Large-scale multiview 3d hand pose dataset,» Image and vision computing, vol. 81, pp. 25-33, 2019.
    [Bibtex]
    @article{DonosoIMAVIs2018,
    title = "Large-scale Multiview 3D Hand Pose Dataset",
    journal = "Image and Vision Computing",
    volume = "81",
    pages = "25-33",
    year = "2019",
    author = "Francisco Gomez-Donoso and sergio Orts-Escolano and Miguel Cazorla"
    }
  • J. Navarrete, F. Gomez-Donoso, D. Viejo, and M. Cazorla, «Multilevel classification using a taxonomy applied to recognizing diptera images,» in Proc. of international joint conference on neural networks (ijcnn), 2019.
    [Bibtex]
    @INPROCEEDINGs{IJCNNNavarrete2019,
    Author = {Javier Navarrete and Francisco Gomez-Donoso and Diego Viejo and Miguel Cazorla},
    Title = {Multilevel Classification using a Taxonomy Applied to Recognizing Diptera Images},
    Booktitle = {Proc. of International Joint Conference on Neural Networks (IJCNN)},
    Year = {2019}
    }
  • C. C. Espinosa, G. Gonzalez, G. R. Washko, M. Cazorla, and R. S. J. Estepar, «Localizing image-based biomarker regression without training masks: a new approach to biomarker discovery,» in Proc. of ieee 16th international symposium on biomedical imaging (isbi), 2019.
    [Bibtex]
    @INPROCEEDINGs{isbiCano2019,
    Author = {Carlos Cano Espinosa and German Gonzalez and George R. Washko and Miguel Cazorla and Raul San Jose Estepar},
    Title = {Localizing Image-Based Biomarker Regression without Training Masks: A New Approach to Biomarker Discovery},
    Booktitle = {Proc. of IEEE 16th International Symposium on Biomedical Imaging (ISBI)},
    Year = {2019}
    }
  • J. C. Rangel, M. Cazorla, I. García-Varea, C. Romero-González, and J. Martínez-Gómez, «Automatic semantic maps generation from lexical annotations,» Autonomous robots, vol. 43, iss. 3, p. 697–712, 2019.
    [Bibtex]
    @Article{Rangel2018b,
    author=" Jose Carlos Rangel and Miguel Cazorla and Ismael García-Varea and Cristina Romero-González and Jesús Martínez-Gómez",
    title="Automatic semantic Maps Generation from Lexical Annotations",
    journal="Autonomous robots",
    year="2019",
    volume="43",
    number="3",
    pages = "697--712"
    }
  • E. Cruz, S. Orts-Escolano, F. Gómez-Donoso, C. Rizo, J. C. Rangel, H. Mora, and M. Cazorla, «An augmented reality application for improving shopping experience in large retail stores,» Virtual reality, vol. 23, iss. 3, pp. 281-291, 2019.
    [Bibtex]
    @Article{Cruz2018,
    author="Edmanuel Cruz and Sergio Orts-Escolano and Francisco Gómez-Donoso and Carlos Rizo and Jose Carlos Rangel and Higinio Mora,and Miguel Cazorla",
    title="An augmented reality application for improving shopping experience in large retail stores”,
    journal="Virtual Reality",
    year="2019",
    volume="23",
    number=”3”,
    pages=”281--291”
    }

2018

  • C. Cano-Espinosa, G. González, G. R. Washko, M. Cazorla, and R. S. J. Estépar, «On the relevance of the loss function in the agatston score regression from non-ecg gated ct scans,» in Image analysis for moving organ, breast, and thoracic images, Springer, 2018, p. 326–334.
    [Bibtex]
    @incollection{cano2018relevance,
    title={On the Relevance of the Loss Function in the Agatston Score Regression from Non-ECG Gated CT Scans},
    author={Cano-Espinosa, Carlos and Gonz{\'a}lez, Germ{\'a}n and Washko, George R and Cazorla, Miguel and Est{\'e}par, Ra{\'u}l San Jos{\'e}},
    booktitle={Image Analysis for Moving Organ, Breast, and Thoracic Images},
    pages={326--334},
    year={2018},
    publisher={Springer}
    }
  • A. Dominguez-Sanchez, M. Cazorla, and S. Orts-Escolano, «A new dataset and performance evaluation of a region-based cnn for urban object detection,» Electronics, vol. 7, iss. 11, 2018.
    [Bibtex]
    @article{Dominguez2018b,
    title = "A New Dataset and Performance Evaluation of a Region-based CNN for Urban Object Detection”,
    journal = "Electronics",
    volume = "7",
    number="11",
    year = "2018",
    author = "Alejandro Dominguez-sanchez and Miguel Cazorla and sergio Orts-Escolano"
    }
  • E. Cruz, F. Escalona, Z. Bauer, M. Cazorla, J. Garcia-Rodriguez, E. Martinez-Martin, J. C. Rangel, and F. Gomez-Donoso, «Geoffrey: an automated schedule system on a social robot for the intellectually challenged,» Computational intelligence and neuroscience, vol. 2018, 2018.
    [Bibtex]
    @article{Cruz2018Geoffrey,
    title = "Geoffrey: An Automated schedule system on a social Robot for the Intellectually Challenged”,
    journal = "Computational Intelligence and Neuroscience",
    volume = "2018",
    year = "2018",
    author = "Edmanuel Cruz and Felix Escalona and Zuria Bauer and Miguel Cazorla and Jose Garcia-Rodriguez and Ester Martinez-Martin and Jose Carlos Rangel and Francisco Gomez-Donoso "
    }
  • A. Garcia, P. Martinez-Gonzalez, S. Oprea, J. A. Castro-Vargas, S. Orts-Escolano, J. Garcia-Rodriguez, and A. Jover-Alvarez, «The robotrix: an extremely photorealistic and very-large-scale indoor dataset of sequences with robot trajectories and interactions.,» in Proc. of the international conference on intelligent robots and systems (iros), 2018.
    [Bibtex]
    @INPROCEEDINGS{agarcia-robotrix18,
    Author = {A. Garcia and P. Martinez-Gonzalez and S. Oprea and J.A. Castro-Vargas and S. Orts-Escolano and J. Garcia-Rodriguez and A. Jover-Alvarez},
    Title = {The RobotriX: An eXtremely Photorealistic and Very-Large-Scale Indoor Dataset of Sequences with Robot Trajectories and Interactions.},
    Booktitle = {Proc. of The International Conference on Intelligent Robots and Systems (IROS)},
    Year = {2018}
    }
  • [DOI] A. Garcia-Garcia, S. Orts-Escolano, S. Oprea, V. Villena-Martinez, P. Martinez-Gonzalez, and J. G. Rodriguez, «A survey on deep learning techniques for image and video semantic segmentation,» Appl. soft comput., vol. 70, p. 41–65, 2018.
    [Bibtex]
    @article{Garcia-Garcia18,
    author = {Alberto Garcia-Garcia and Sergio Orts-Escolano and Sergiu Oprea and Victor Villena-Martinez and Pablo Martinez-Gonzalez and Jose Garcia Rodriguez},
    title = {A survey on deep learning techniques for image and video semantic segmentation},
    journal = {Appl. Soft Comput.},
    volume = {70},
    pages = {41--65},
    year = {2018},
    doi = {10.1016/j.asoc.2018.05.018}
    }
  • E. Cruz, Z. Bauer, J. C. Rangel, M. Cazorla, and F. Gomez-Donoso, «Semantic localization of a robot in a real home,» in Workshop de agentes físicos (waf), 2018.
    [Bibtex]
    @inproceedings{Cruz2018c,
    author = {Edmanuel Cruz and Zuria Bauer and
    Jos{\'{e}} Carlos Rangel and Miguel Cazorla and Francisco Gomez-Donoso},
    title = {Semantic localization of a robot in a real home},
    booktitle = {Workshop de Agentes Físicos (WAF)},
    year = {2018}
    }
  • Z. Bauer, F. Escalona, E. Cruz, M. Cazorla, and F. Gomez-Donoso, «Improving 3d estimation for the pepper robot using monocular depth prediction,» in Workshop de agentes físicos (waf), 2018.
    [Bibtex]
    @inproceedings{Bauer2018,
    author = {Zuria Bauer and Felix Escalona and Edmanuel Cruz and Miguel Cazorla and Francisco Gomez-Donoso},
    title = {Improving 3D estimation for the Pepper robot using monocular depth prediction},
    booktitle = {Workshop de Agentes Físicos (WAF)},
    year = {2018}
    }
  • [DOI] A. Costa, E. Martinez-Martin, M. Cazorla, and V. Julian, «Pharos—physical assistant robot system,» Sensors, vol. 18, iss. 8, pp. 95-107, 2018.
    [Bibtex]
    @article{Costa2018,
    title = {PHAROS—PHysical Assistant RObot System},
    journal = {Sensors},
    volume = {18},
    number = {8},
    pages = {95 - 107},
    year = {2018},
    author = {Angelo Costa and Ester Martinez-Martin and Miguel Cazorla and Vicente Julian},
    ISSN = {1424-8220},
    DOI = {10.3390/s18082633}
    }
  • J. Navarrete-Sanchez, D. Viejo, and M. Cazorla, «Compression and registration of 3d point clouds using gmms,» Pattern recognition letters, vol. 110, pp. 8-15, 2018.
    [Bibtex]
    @Article{Navarrete2018,
    author="Javier Navarrete-sanchez and Diego Viejo and Miguel Cazorla",
    title="Compression and Registration of 3D Point Clouds Using GMMs",
    journal="Pattern Recognition Letters",
    year="2018",
    volume="110",
    pages="8-15"
    }
  • E. Cruz, J. C. Rangel, F. Gomez-Donoso, and M. Cazorla, «Finding the place how to train and use convolutional neural networks for a dynamically learning robot,» in International joint conference on neural networks (ijcnn), 2018.
    [Bibtex]
    @INPROCEEDINGs{Cruz2018b,
    author={Edmanuel Cruz and Jose Carlos Rangel and Francisco Gomez-Donoso and Miguel Cazorla},
    title={Finding the place how to train and use convolutional neural networks for a dynamically learning robot},
    booktitle={International Joint Conference on Neural Networks (IJCNN)},
    year={2018}
    }
  • A. Dominguez, S. Orts-Escolano, and M. Cazorla, «A new dataset and performance evaluation of a region-based cnn for urban object detection,» in International joint conference on neural networks (ijcnn), 2018.
    [Bibtex]
    @INPROCEEDINGs{Alex2018,
    author={Alejandro Dominguez and Sergio Orts-Escolano and Miguel Cazorla},
    title={A New Dataset and Performance Evaluation of a Region-based CNN for Urban Object Detection},
    booktitle={International Joint Conference on Neural Networks (IJCNN)},
    year={2018}
    }
  • [DOI] J. C. Rangel, M. J. Gomez, R. C. Gonzalez, G. I. Varea, and M. Cazorla, «Semi-supervised 3d object recognition through cnn labeling,» Applied soft computing, vol. 65, pp. 603-613, 2018.
    [Bibtex]
    @Article{Rangel2018,
    author="J.C. Rangel and J. Martinez Gomez and C. Romero Gonzalez and I. Garcia Varea and M. Cazorla",
    title="semi-supervised 3D Object Recognition through CNN Labeling",
    journal="Applied soft Computing",
    year="2018",
    volume="65",
    pages="603-613",
    doi="https://doi.org/10.1016/j.asoc.2018.02.005"
    }
  • C. C. Espinosa, G. González, G. R. Washko, M. Cazorla, and R. S. J. Estépar, «Automated agatston score computation in non-ecg gated ct scans using deep learning,» in Proceedings of the spie: medical imaging 2018, 2018.
    [Bibtex]
    @INPROCEEDINGS{Cano2018sPIE,
    author={Carlos Cano Espinosa and Germán González and George R. Washko and Miguel Cazorla and Raúl San José Estépar},
    booktitle={Proceedings of the SPIE: Medical Imaging 2018},
    title={Automated Agatston score computation in non-ECG gated CT scans using deep learning},
    year={2018},
    month={February}}
  • [DOI] S. Orts-Escolano, J. Garcia-Rodriguez, M. Cazorla, V. Morell, J. Azorin, M. Saval, A. Garcia-Garcia, and V. Villena, «Bioinspired point cloud representation: 3d object tracking,» Neural computing and applications, vol. 29, iss. 9, p. 663–672, 2018.
    [Bibtex]
    @article{Orts-Escolano2016NCAA,
    title = "Bioinspired Point Cloud Representation: 3D object tracking",
    journal = "Neural Computing and Applications",
    volume = "29",
    number = "9",
    pages = "663--672",
    year = "2018",
    note = "",
    doi = "http://dx.doi.org/doi:10.1007/s00521-016-2585-0",
    author = " sergio Orts-Escolano and Jose Garcia-Rodriguez and Miguel Cazorla and Vicente
    Morell and Jorge Azorin and Marcelo saval and Alberto Garcia-Garcia and Victor Villena"
    }
  • [DOI] A. Angelopoulou, J. G. Rodriguez, S. Orts-Escolano, G. Gupta, and A. Psarrou, «Fast 2d/3d object representation with growing neural gas,» Neural computing and applications, vol. 29, iss. 10, p. 903–919, 2018.
    [Bibtex]
    @article{Angelopoulou2016NCAA,
    title = "Fast 2D/3D Object Representation with Growing Neural Gas",
    journal = "Neural Computing and Applications",
    volume = "29",
    number = "10",
    pages = "903--919",
    year = "2018",
    note = "",
    doi = "http://dx.doi.org/doi:10.1007/s00521-016-2579-y",
    author = " Anastassia Angelopoulou and Jose Garcia Rodriguez and sergio Orts-Escolano and Gaurav Gupta and Alexandra Psarrou",
    }
  • [DOI] A. Garcia-Garcia, S. Orts-Escolano, J. Garcia-Rodriguez, and M. Cazorla, «Interactive 3d object recognition pipeline on mobile gpgpu computing platforms using low-cost rgb-d sensors,» Journal of real-time image processing, vol. 14, iss. 3, p. 585–604, 2018.
    [Bibtex]
    @article{Garcia2016RTIP,
    title = "Interactive 3D object recognition pipeline on mobile GPGPU computing platforms using low-cost RGB-D sensors",
    journal = "Journal of Real-Time Image Processing",
    volume = "14",
    number = "3",
    pages = "585--604",
    year = "2018",
    note = "",
    doi = "10.1007/s11554-016-0607-x",
    author = "Albert Garcia-Garcia and sergio Orts-Escolano and Jose Garcia-Rodriguez and Miguel Cazorla",
    }
  • [DOI] E. Martinez-Martin and A. P. del Pobil, «A biologically inspired approach for robot depth estimation,» Computational intelligence and neuroscience, 2018.
    [Bibtex]
    @article{Ester_Martinez-Martin47670761,
    title={A Biologically Inspired Approach for Robot Depth Estimation},
    journal={Computational Intelligence and Neuroscience},
    author={Ester Martinez-Martin and Angel P. del Pobil},
    doi={10.1155/2018/9179462},
    url={http://doi.org/10.1155/2018/9179462},
    year={2018}
    }
  • E. Martinez-Martin and A. P. del Pobil, «Personal robot assistants for elderly care: an overview,» Intelligent systems reference library, vol. 132, pp. 77-91, 2018.
    [Bibtex]
    @article{Martinez-Martin2018,
    title = {Personal robot assistants for elderly care: An overview},
    journal = {Intelligent Systems Reference Library},
    year = {2018},
    volume = {132},
    pages = {77-91},
    author = {Martinez-Martin, E. and del Pobil, A.P.}
    }

2017

  • E. Cruz, J. C. Rangel, and M. Cazorla, «Robot semantic localization through CNN descriptors,» in ROBOT 2017: third iberian robotics conference – volume 1, seville, spain, november 22-24, 2017, 2017, p. 567–578.
    [Bibtex]
    @inproceedings{DBLP:conf/robot/CruzRC17,
    author = {Edmanuel Cruz and
    Jos{\'{e}} Carlos Rangel and
    Miguel Cazorla},
    title = {Robot Semantic Localization Through {CNN} Descriptors},
    booktitle = {{ROBOT} 2017: Third Iberian Robotics Conference - Volume 1, Seville,
    Spain, November 22-24, 2017},
    pages = {567--578},
    year = {2017}
    }
  • F. Escalona, F. Gomez-Donoso, and M. Cazorla, «3d object mapping using a labelling system,» in ROBOT 2017: third iberian robotics conference – volume 1, seville, spain, november 22-24, 2017, 2017, p. 579–590.
    [Bibtex]
    @inproceedings{DBLP:conf/robot/EscalonaGC17,
    author = {F{\'{e}}lix Escalona and
    Francisco Gomez{-}Donoso and
    Miguel Cazorla},
    title = {3D Object Mapping Using a Labelling System},
    booktitle = {{ROBOT} 2017: Third Iberian Robotics Conference - Volume 1, Seville,
    Spain, November 22-24, 2017},
    pages = {579--590},
    year = {2017}
    }
  • F. Gomez-Donoso, S. Orts-Escolano, and M. Cazorla, «Robust hand pose regression using convolutional neural networks,» in ROBOT 2017: third iberian robotics conference – volume 1, seville, spain, november 22-24, 2017, 2017, p. 591–602.
    [Bibtex]
    @inproceedings{DBLP:conf/robot/Gomez-DonosoOC17,
    author = {Francisco Gomez{-}Donoso and
    Sergio Orts{-}Escolano and
    Miguel Cazorla},
    title = {Robust Hand Pose Regression Using Convolutional Neural Networks},
    booktitle = {{ROBOT} 2017: Third Iberian Robotics Conference - Volume 1, Seville,
    Spain, November 22-24, 2017},
    pages = {591--602},
    year = {2017}
    }
  • A. Dominguez-Sanchez, M. Cazorla, and S. Orts-Escolano, «Pedestrian movement direction recognition using convolutional neural networks,» Ieee transactions on intelligent transportation systems, vol. 18, iss. 12, pp. 3540-3548, 2017.
    [Bibtex]
    @Article{Dominguez2017,
    author="Alejandro Dominguez-sanchez and Miguel Cazorla and sergio Orts-Escolano",
    title="Pedestrian movement direction recognition using convolutional neural networks",
    journal="IEEE Transactions on Intelligent Transportation systems",
    year="2017",
    volume="18",
    number="12",
    pages="3540 - 3548"
    }
  • A. G. García, J. G. Rodríguez, S. Orts-Escolano, S. Oprea, F. Gomez-Donoso, and M. Cazorla, «A study of the effect of noise and occlusion on the accuracy of convolutional neural networks applied to 3d object recognition,» Computer vision and image understanding, vol. 164, p. 124–134, 2017.
    [Bibtex]
    @Article{GomezGarcia2017CVIU,
    author="Alberto García García and
    Jose García Rodríguez and
    sergio Orts-Escolano and
    sergiu Oprea and
    Francisco Gomez-Donoso
    and Miguel Cazorla",
    title="A study of the Effect of Noise and Occlusion on the Accuracy of Convolutional Neural Networks applied to 3D Object Recognition",
    journal="Computer Vision and Image Understanding",
    year="2017",
    volume="164",
    pages="124--134"
    }
  • F. Gomez-Donoso, S. Orts Escolano, M. Cazorla, A. Garcia-Garcia, J. Garcia-Rodriguez, J. Castro-Vargas, and S. Ovidiu-Oprea, «A robotic platform for customized and interactive rehabilitation of persons with disabilities,» Pattern recognition letters, vol. 99, p. 105–113, 2017.
    [Bibtex]
    @Article{GomezDonoso2017PRL,
    author={Gomez-Donoso, Francisco and Orts Escolano, Sergio and Cazorla, Miguel and Garcia-Garcia, Alberto and Garcia-Rodriguez, Jose and Castro-Vargas, John and Ovidiu-Oprea, Sergiu},
    title={A robotic platform for customized and interactive rehabilitation of persons with disabilities},
    journal={Pattern Recognition Letters},
    year={2017},
    volume={99},
    pages={105--113}
    }
  • [DOI] J. Garcia-Rodriguez, I. Guyon, S. Escalera, A. Psarrou, A. Lewis, and M. Cazorla, «Editorial special issue on computational intelligence for vision and robotics,» Neural computing and applications, vol. 28, iss. 5, p. 853–854, 2017.
    [Bibtex]
    @Article{Garcia-Rodriguez2017,
    author={Garcia-Rodriguez, Jose and Guyon, Isabelle and Escalera, Sergio and Psarrou, Alexandra and Lewis, Andrew and Cazorla, Miguel},
    title={Editorial special issue on computational intelligence for vision and robotics},
    journal={Neural Computing and Applications},
    year={2017},
    volume={28},
    number={5},
    pages={853--854},
    issn={1433-3058},
    doi={10.1007/s00521-016-2330-8},
    }
  • A. Dominguez-Sanchez, S. Orts-Escolano, and M. Cazorla, «Pedestrian direction recognition using convolutional neural networks,» in 14th international work-conference on artificial neural networks, 2017.
    [Bibtex]
    @INPROCEEDINGs{Dominguez2017IWANN,
    author={Alex Dominguez-Sanchez and Sergio Orts-Escolano and Miguel Cazorla},
    booktitle={14th International Work-Conference on Artificial Neural Networks},
    title={Pedestrian Direction Recognition using Convolutional Neural Networks},
    year={2017},
    month={June},}
  • M. Zamora, E. Caldwell, J. Garcia-Rodriguez, J. Azorin-Lopez, and M. Cazorla, «Machine learning improves human-robot interaction in productive environments: a review,» in 14th international work-conference on artificial neural networks, 2017.
    [Bibtex]
    @INPROCEEDINGs{Zamora2017IWANN,
    author={Mauricio Zamora and Eldon Caldwell and Jose Garcia-Rodriguez and Jorge Azorin-Lopez and Miguel Cazorla},
    booktitle={14th International Work-Conference on Artificial Neural Networks},
    title={Machine learning improves human-robot interaction in productive environments: A review},
    year={2017},
    month={June},}
  • F. Escalona, Á. Rodríguez, F. Gómez-Donoso, J. Martínez-Gómez, and M. Cazorla, «3d object detection with deep learning,» Journal of physical agents, vol. 8, iss. 1, 2017.
    [Bibtex]
    @article{Donoso2017,
    abstract = {Finding an appropriate environment representation is a crucial problem in robotics. 3D data has been recently used thanks to the advent of low cost RGB-D cameras. We propose a new way to represent a 3D map based on the information provided by an expert. Namely, the expert is the output of a Convolutional Neural Network trained with deep learning techniques. Relying on such information, we propose the generation of 3D maps using individual semantic labels, which are associated with environment objects or semantic labels. so, for each label we are provided with a partial 3D map whose data belong to the 3D perceptions, namely point clouds, which have an associated probability above a given threshold. The final map is obtained by registering and merging all these partial maps. The use of semantic labels provide us a with way to build the map while recognizing objects.},
    author = {F\'elix Escalona and \'Angel Rodr\'iguez and Francisco G\'omez-Donoso and Jesus Mart\'inez-G\'omez and Miguel Cazorla},
    issn = {1888-0258},
    journal = {Journal of Physical Agents},
    keywords = {semantic mapping, 3D point cloud, deep learning},
    number = {1},
    title = {3D object detection with deep learning},
    volume = {8},
    year = {2017}
    }
  • F. Gomez-Donoso, A. Garcia-Garcia, S. Orts-Escolano, J. Garcia-Rodriguez, and M. Cazorla, «Lonchanet: a sliced-based cnn architecture for real-time 3d object recognition,» in 2017 international joint conference on neural networks (ijcnn), 2017.
    [Bibtex]
    @INPROCEEDINGs{Garcia2017,
    author={F. Gomez-Donoso and A. Garcia-Garcia and S. Orts-Escolano and J. Garcia-Rodriguez and M. Cazorla},
    booktitle={2017 International Joint Conference on Neural Networks (IJCNN)},
    title={LonchaNet: A sliced-based CNN Architecture for Real-time 3D Object Recognition},
    year={2017},
    month={May},}
  • S. Oprea, A. Garcia-Garcia, S. Orts-Escolano, J. Garcia-Rodriguez, and M. Cazorla, «A recurrent neural network based schaeffer gesture recognition system,» in 2017 international joint conference on neural networks (ijcnn), 2017.
    [Bibtex]
    @INPROCEEDINGs{Oprea2017,
    author={Sergiu Oprea and A. Garcia-Garcia and S. Orts-Escolano and J. Garcia-Rodriguez and M. Cazorla},
    booktitle={2017 International Joint Conference on Neural Networks (IJCNN)},
    title={A Recurrent Neural Network based Schaeffer Gesture Recognition System},
    year={2017},
    month={May},}
  • [DOI] A. Garcia-Garcia, S. Orts-Escolano, S. Oprea, J. Garcia-Rodriguez, J. Azorin-Lopez, M. Saval-Calvo, and M. Cazorla, «Multi-sensor 3D Object Dataset for Object Recognition with Full Pose Estimation,» Neural computing and applications, vol. 28, iss. 5, p. 941–952, 2017.
    [Bibtex]
    @Article{Garcia-Garcia2017,
    author = {Garcia-Garcia, Alberto and Orts-Escolano, Sergio and Oprea, Sergiu and Garcia-Rodriguez, Jose and Azorin-Lopez, Jorge and Saval-Calvo, Marcelo and Cazorla, Miguel},
    title = {{Multi-sensor 3D Object Dataset for Object Recognition with Full Pose Estimation}},
    journal = {Neural Computing and Applications},
    year = {2017},
    volume = {28},
    number = {5},
    pages = {941--952},
    issn = {1433-3058},
    abstract = {In this work, we propose a new dataset for 3D object recognition using the new high-resolution Kinect V2 sensor and some other popular low-cost devices like Primesense Carmine. since most already existing datasets for 3D object recognition lack some features such as 3D pose information about objects in the scene, per pixel segmentation or level of occlusion, we propose a new one combining all this information in a single dataset that can be used to validate existing and new 3D object recognition algorithms. Moreover, with the advent of the new Kinect V2 sensor we are able to provide high-resolution data for RGB and depth information using a single sensor, whereas other datasets had to combine multiple sensors. In addition, we will also provide semiautomatic segmentation and semantic labels about the different parts of the objects so that the dataset could be used for testing robot grasping and scene labeling systems as well as for object recognition.},
    doi = {10.1007/s00521-016-2224-9},
    }
  • [DOI] J. C. Rangel, J. Martínez-Gomez, I. García-Varea, and M. Cazorla, «Lextomap: lexical-based topological mapping,» Advanced robotics, vol. 31, iss. 5, pp. 268-281, 2017.
    [Bibtex]
    @article{Rangel2016b,
    author = {José Carlos Rangel and Jesus Martínez-Gomez and Ismael García-Varea and Miguel Cazorla},
    title = {LexToMap: lexical-based topological mapping},
    journal = {Advanced Robotics},
    volume = {31},
    number = {5},
    pages = {268-281},
    year = {2017},
    doi = {10.1080/01691864.2016.1261045},
    url = {http://dx.doi.org/10.1080/01691864.2016.1261045},
    eprint = {http://dx.doi.org/10.1080/01691864.2016.1261045},
    abstract = { Any robot should be provided with a proper representation of its environment in order to perform navigation and other tasks. In addition to metrical approaches, topological mapping generates graph representations in which nodes and edges correspond to locations and transitions. In this article, we present LexToMap, a topological mapping procedure that relies on image annotations. These annotations, represented in this work by lexical labels, are obtained from pre-trained deep learning models, namely CNNs, and are used to estimate image similarities. Moreover, the lexical labels contribute to the descriptive capabilities of the topological maps. The proposal has been evaluated using the KTH-IDOL 2 data-set, which consists of image sequences acquired within an indoor environment under three different lighting conditions. The generality of the procedure as well as the descriptive capabilities of the generated maps validate the proposal. }
    }
  • [DOI] J. C. Rangel, V. Morell, M. Cazorla, S. Orts-Escolano, and J. Garcia-Rodriguez, «Object recognition in noisy rgb-d data using gng,» Pattern analysis and applications, vol. 20, iss. 4, p. 1061–1076, 2017.
    [Bibtex]
    @Article{Rangel2016PAAA,
    author={Rangel, Jose Carlos and Morell, Vicente and Cazorla, Miguel and Orts-Escolano, sergio and Garcia-Rodriguez, Jose},
    title={Object recognition in noisy RGB-D data using GNG},
    journal={Pattern Analysis and Applications},
    year={2017},
    pages={1061–1076},
    volume={20},
    number={4},
    doi={10.1007/s10044-016-0546-y}
    }
  • E. Martinez-Martin, D. Fischinger, M. Vincze, and A. P. del Pobil, «An rgb-d visual application for error detection in robot grasping tasks,» Advances in intelligent systems and computing, vol. 531, pp. 243-254, 2017.
    [Bibtex]
    @article{Martinez-Martin2017,
    title = {An RGB-D visual application for error detection in robot grasping tasks},
    journal = {Advances in Intelligent Systems and Computing},
    year = {2017},
    volume = {531},
    pages = {243-254},
    author = {Martinez-Martin, E. and Fischinger, D. and Vincze, M. and del Pobil, A.P.}
    }
  • E. Martinez-Martin and A. P. Del Pobil, «Object detection and recognition for assistive robots: experimentation and implementation,» Ieee robotics and automation magazine, vol. 24, iss. 3, pp. 123-138, 2017.
    [Bibtex]
    @Article{Martinez-Martin2017a,
    author = {Martinez-Martin, E. and Del Pobil, A.P.},
    title = {Object detection and recognition for assistive robots: Experimentation and implementation},
    journal = {IEEE Robotics and Automation Magazine},
    year = {2017},
    volume = {24},
    number = {3},
    pages = {123-138},
    }
  • E. Martinez-Martin and A. P. Del Pobil, «Object recognition for robot tasks: an overview,» Robotics: new research, pp. 1-24, 2017.
    [Bibtex]
    @Article{Martinez-Martin2017b,
    author = {Martinez-Martin, E. and Del Pobil, A.P.},
    title = {Object recognition for robot tasks: An overview},
    journal = {Robotics: New Research},
    year = {2017},
    pages = {1-24},
    }
  • E. Martinez-Martin and A. P. Del Pobil, «Robust motion detection and tracking for human-robot interaction,» Acm/ieee international conference on human-robot interaction, pp. 401-402, 2017.
    [Bibtex]
    @Article{Martinez-Martin2017c,
    author = {Martinez-Martin, E. and Del Pobil, A.P.},
    title = {Robust motion detection and tracking for human-robot interaction},
    journal = {ACM/IEEE International Conference on Human-Robot Interaction},
    year = {2017},
    pages = {401-402},
    }
  • A. P. Del Pobil, M. Kassawat, A. J. Duran, M. A. Arias, N. Nechyporenko, A. Mallick, E. Cervera, D. Subedi, I. Vasilev, D. Cardin, E. Sansebastiano, E. Martinez-Martin, A. Morales, G. A. Casan, A. Arenal, B. Goriatcheff, C. Rubert, and G. Recatala, «Uji robinlab’s approach to the amazon robotics challenge 2017,» Ieee international conference on multisensor fusion and integration for intelligent systems, vol. 2017-November, pp. 318-323, 2017.
    [Bibtex]
    @Article{DelPobil2017,
    author = {Del Pobil, A.P. and Kassawat, M. and Duran, A.J. and Arias, M.A. and Nechyporenko, N. and Mallick, A. and Cervera, E. and subedi, D. and Vasilev, I. and Cardin, D. and sansebastiano, E. and Martinez-Martin, E. and Morales, A. and Casan, G.A. and Arenal, A. and Goriatcheff, B. and Rubert, C. and Recatala, G.},
    title = {UJI RobInLab{'}s approach to the Amazon Robotics Challenge 2017},
    journal = {IEEE International Conference on Multisensor Fusion and Integration for Intelligent systems},
    year = {2017},
    volume = {2017-November},
    pages = {318-323},
    }
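A short illustrative note on the LexToMap entry above (Rangel et al., 2017): the topological map is built from lexical image annotations rather than metric data, with image similarity estimated from the labels themselves. The sketch below is only a toy illustration of that idea under assumed inputs (hand-picked label sets, Jaccard similarity, and an arbitrary 0.4 threshold); it is not the authors' implementation.

    # Illustrative sketch (not the LexToMap code): build a topological graph from
    # per-image lexical labels, merging consecutive images whose label sets stay
    # similar and opening a new node when the similarity drops.

    def jaccard(a, b):
        """Set similarity between two collections of lexical labels."""
        a, b = set(a), set(b)
        return len(a & b) / len(a | b) if (a | b) else 0.0

    def build_topological_map(label_sequence, new_node_threshold=0.4):
        nodes, edges = [], []
        for i, labels in enumerate(label_sequence):
            if nodes and jaccard(labels, nodes[-1]["labels"]) >= new_node_threshold:
                nodes[-1]["images"].append(i)       # same place: extend current node
                nodes[-1]["labels"] |= set(labels)  # enrich the node description
            else:
                if nodes:
                    edges.append((len(nodes) - 1, len(nodes)))  # transition edge
                nodes.append({"images": [i], "labels": set(labels)})
        return nodes, edges

    if __name__ == "__main__":
        sequence = [["desk", "monitor", "chair"],
                    ["desk", "chair", "keyboard"],
                    ["corridor", "door"],
                    ["corridor", "door", "sign"]]
        nodes, edges = build_topological_map(sequence)
        print(len(nodes), "nodes;", "edges:", edges)   # 2 nodes; edges: [(0, 1)]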

2016

  • A. Garcia-Garcia, F. Gomez-Donoso, J. Garcia-Rodriguez, S. Orts-Escolano, M. Cazorla, and J. Azorin-Lopez, «Pointnet: a 3d convolutional neural network for real-time object class recognition,» in 2016 international joint conference on neural networks (ijcnn), 2016, pp. 1578-1584.
    [Bibtex]
    @INPROCEEDINGS{Garcia2016,
    author={A. Garcia-Garcia and F. Gomez-Donoso and J. Garcia-Rodriguez and S. Orts-Escolano and M. Cazorla and J. Azorin-Lopez},
    booktitle={2016 International Joint Conference on Neural Networks (IJCNN)},
    title={PointNet: A 3D Convolutional Neural Network for real-time object class recognition},
    year={2016},
    pages={1578-1584},
    keywords={CAD;computer vision;data structures;learning (artificial intelligence);neural net architecture;object recognition;3D shapeNets;3D convolutional neural network;ModelNet;PointNet;VoxNet;computer vision;deep learning techniques;density occupancy grids representations;large-scale 3D CAD model dataset;real-time object class recognition;supervised convolutional neural network architecture;Computer architecture;Machine learning;Neural networks;Object recognition;solid modeling;Three-dimensional displays;Two dimensional displays},
    month={July},}
  • C. Loop, Q. Cai, P. Chou, and S. Orts-Escolano, «A closed-form bayesian fusion equation using occupancy probabilities,» in 2016 international conference on 3d vision, 3dv 2016, stanford, usa, october 25-28, 2016, 2016.
    [Bibtex]
    @InProceedings{DBLP:conf/3dim/Loop2016,
    author = {Charles Loop and Qin Cai and Philip Chou and sergio Orts-Escolano},
    title = {A Closed-form Bayesian Fusion Equation using Occupancy Probabilities},
    booktitle = {2016 International Conference on 3D Vision, 3DV 2016, {stanford}, UsA, October 25-28, 2016},
    year = {2016}
    }
  • [DOI] M. Dou, S. Khamis, Y. Degtyarev, P. Davidson, S. R. Fanello, A. Kowdle, S. Orts-Escolano, C. Rhemann, D. Kim, J. Taylor, P. Kohli, V. Tankovich, and S. Izadi, «Fusion4d: real-time performance capture of challenging scenes,» Acm trans. graph., vol. 35, iss. 4, p. 114:1–114:13, 2016.
    [Bibtex]
    @Article{Dou2016,
    author = {Dou, Mingsong and Khamis, sameh and Degtyarev, Yury and Davidson, Philip and Fanello, sean Ryan and Kowdle, Adarsh and Orts-Escolano, sergio and Rhemann, Christoph and Kim, David and Taylor, Jonathan and Kohli, Pushmeet and Tankovich, Vladimir and Izadi, shahram},
    title = {Fusion4D: Real-time Performance Capture of Challenging scenes},
    journal = {ACM Trans. Graph.},
    year = {2016},
    volume = {35},
    number = {4},
    pages = {114:1--114:13},
    month = jul,
    acmid = {2925969},
    address = {New York, NY, UsA},
    articleno = {114},
    doi = {10.1145/2897824.2925969},
    issn = {0730-0301},
    issue_date = {July 2016},
    keywords = {4D reconstruction, multi-view, nonrigid, real-time},
    numpages = {13},
    publisher = {ACM},
    url = {http://doi.acm.org/10.1145/2897824.2925969}
    }
  • S. R. Fanello, C. Rhemann, V. Tankovich, A. Kowdle, S. Orts-Escolano, D. Kim, and S. Izadi, «HyperDepth: Learning Depth From Structured Light Without Matching,» in The ieee conference on computer vision and pattern recognition (cvpr), 2016.
    [Bibtex]
    @InProceedings{Fanello_2016_CVPR,
    author = {Ryan Fanello, sean and Rhemann, Christoph and Tankovich, Vladimir and Kowdle, Adarsh and Orts-Escolano, sergio and Kim, David and Izadi, shahram},
    title = {{HyperDepth: Learning Depth From structured Light Without Matching}},
    booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
    year = {2016},
    month = {June}
    }
  • A. Garcia-Garcia, F. Gomez-Donoso, J. Garcia-Rodriguez, S. Orts-Escolano, M. Cazorla, and J. Azorin-Lopez, «PointNet: A 3D Convolutional Neural Network for Real-Time Object Class Recognition,» in The ieee world congress on computational intelligence, 2016.
    [Bibtex]
    @inproceedings{Garcia2016a,
    author = {Garcia-Garcia, Albert and Gomez-Donoso, Francisco and Garcia-Rodriguez, Jose and Orts-Escolano, sergio and Cazorla, Miguel and Azorin-Lopez, Jorge},
    booktitle = {The IEEE World Congress on Computational Intelligence},
    title = {{PointNet: A 3D Convolutional Neural Network for Real-Time Object Class Recognition}},
    year = {2016}
    }
  • F. Gomez-Donoso, M. Cazorla, A. Garcia-Garcia, and J. Garcia Rodriguez, «Automatic Schaeffer’s gestures recognition system,» Expert systems, vol. 33, iss. 5, p. 480–488, 2016.
    [Bibtex]
    @article{Gomez-Donoso2016,
    abstract = {schaeffer's sign language consists of a reduced set of gestures designed to help children with autism or cognitive learning disabilities to develop adequate communication skills. Our automatic recognition system for schaeffer's gesture language uses the information provided by an RGB-D camera to capture body motion and recognize gestures using Dynamic Time Warping combined with k-Nearest Neighbors methods. The learning process is reinforced by the interaction with the proposed system that accelerates learning itself thus helping both children and educators. To demonstrate the validity of the system, a set of qualitative experiments with children were carried out. As a result, a system which is able to recognize a subset of 11 gestures of schaeffer's sign language online was achieved.},
    author = {Gomez-Donoso, Francisco and Cazorla, Miguel and Garcia-Garcia, Alberto and Garcia Rodriguez, Jose},
    journal = {Expert Systems},
    title = {Automatic Schaeffer's Gestures Recognition System},
    volume={33},
    number={5},
    pages={480--488},
    year = {2016}
    }
  • [DOI] S. Orts-Escolano, C. Rhemann, S. Fanello, D. Kim, A. Kowdle, W. Chang, Y. Degtyarev, P. L. Davidson, S. Khamis, M. Dou, V. Tankovich, C. Loop, Q. Cai, P. A. Chou, S. Mennicken, J. Valentin, V. Pradeep, S. Wang, S. B. Kang, P. Kohli, Y. Lutchyn, C. Keskin, and S. Izadi, «Holoportation: virtual 3d teleportation in real-time,» in 29th acm user interface software and technology symposium (uist), 2016.
    [Bibtex]
    @InProceedings{holoportation2016,
    author = {sergio Orts-Escolano and Christoph Rhemann and sean Fanello and David Kim and Adarsh Kowdle and Wayne Chang and Yury Degtyarev and Philip L Davidson and sameh Khamis and Mingsong Dou and Vladimir Tankovich and Charles Loop and Qin Cai and Philip A Chou and sarah Mennicken and Julien Valentin and Vivek Pradeep and shenlong Wang and sing Bing Kang and Pushmeet Kohli and Yuliya Lutchyn and Cem Keskin and shahram Izadi},
    title = {Holoportation: Virtual 3D Teleportation in Real-time},
    booktitle = {29th ACM User Interface software and Technology symposium (UIsT)},
    year = {2016},
    doi = {10.1145/2984511.2984517},
    url = {http://dl.acm.org/citation.cfm?id=2984517}
    }
  • [DOI] A. Jimeno-Morenilla, J. Garcia-Rodriguez, S. Orts-Escolano, and M. Davia-Aracil, «Gng based foot reconstruction for custom footwear manufacturing,» Computers in industry, vol. 75, pp. 116-126, 2016.
    [Bibtex]
    @Article{JimenoMorenilla2016116,
    author = {Antonio Jimeno-Morenilla and Jose Garcia-Rodriguez and sergio Orts-Escolano and Miguel Davia-Aracil},
    title = {GNG based foot reconstruction for custom footwear manufacturing },
    journal = {Computers in Industry },
    year = {2016},
    volume = {75},
    pages = {116 - 126},
    doi = {http://dx.doi.org/10.1016/j.compind.2015.06.002},
    issn = {0166-3615},
    keywords = {Custom footwear manufacturing},
    url = {http://www.sciencedirect.com/science/article/pii/S0166361515300075}
    }
  • J. Martinez-Gomez, V. Morell Gimenez, M. Cazorla, and I. Garcia-Varea, «Semantic Localization in the PCL library,» Robotics and autonomous systems, vol. 75, Part B, p. 641–648, 2016.
    [Bibtex]
    @article{Martinez2016ras,
    abstract = {The semantic localization problem in robotics consists in determining the place where a robot is located by means of semantic categories. The problem is usually addressed as a supervised classification process, where input data correspond to robot perceptions while classes to semantic categories, like kitchen or corridor. In this paper we propose a framework, implemented in the PCL library, which provides a set of valuable tools to easily develop and evaluate semantic localization systems. The implementation includes the generation of 3D global descriptors following a Bag-of-Words approach. This allows the generation of fixed-dimensionality descriptors from any type of keypoint detector and feature extractor combinations. The framework has been designed, structured and implemented to be easily extended with different keypoint detectors, feature extractors as well as classification models. The proposed framework has also been used to evaluate the performance of a set of already implemented descriptors, when used as input for a specific semantic localization system. The obtained results are discussed paying special attention to the internal parameters of the BoW descriptor generation process. Moreover, we also review the combination of some keypoint detectors with different 3D descriptor generation techniques.},
    author = {Martinez-Gomez, Jesus and Morell Gimenez, Vicente and Cazorla, Miguel and Garcia-Varea, Ismael},
    journal = {Robotics and Autonomous systems},
    pages = {641--648},
    title = {{Semantic Localization in the PCL library}},
    volume = {75, Part B},
    year = {2016}
    }
  • J. Navarrete, V. Morell, M. Cazorla, D. Viejo, J. Garcia-Rodriguez, and S. Orts-Escolano, «3DCOMET: 3D Compression Methods Test Dataset,» Robotics and autonomous systems, vol. 75, Part B, p. 550–557, 2016.
    [Bibtex]
    @article{Navarrete2016Ras,
    abstract = {The use of 3D data in mobile robotics applications provides valuable information about the robot's environment. However usually the huge amount of 3D information is difficult to manage due to the fact that the robot storage system and computing capabilities are insufficient. Therefore, a data compression method is necessary to store and process this information while preserving as much information as possible. A few methods have been proposed to compress 3D information. Nevertheless, there does not exist a consistent public benchmark for comparing the results (compression level, distance reconstructed error, etc.) obtained with different methods. In this paper, we propose a dataset composed of a set of 3D point clouds with different structure and texture variability to evaluate the results obtained from 3D data compression methods. We also provide useful tools for comparing compression methods, using as a baseline the results obtained by existing relevant compression methods.},
    author = {Javier Navarrete and Vicente Morell and Miguel Cazorla and Diego Viejo and Jose Garcia-Rodriguez and sergio Orts-Escolano},
    journal = {Robotics and Autonomous systems},
    pages = {550--557},
    title = {{3DCOMET: 3D Compression Methods Test Dataset}},
    volume = {75, Part B},
    year = {2016}
    }
  • S. Orts-Escolano, J. Garcia-Rodriguez, V. Morell, M. Cazorla, J. A. Serra-Perez, and A. Garcia-Garcia, «3D surface reconstruction of noisy point clouds using Growing Neural Gas,» Neural processing letters, vol. 43, iss. 2, p. 401–423, 2016.
    [Bibtex]
    @article{Orts2015,
    abstract = {With the advent of low-cost 3D sensors and 3D printers, scene and object 3D surface reconstruction has become an important research topic in the last years. In this work, we propose an automatic (unsupervised) method for 3D surface reconstruction from raw unorganized point clouds acquired using low-cost 3D sensors. We have modified the Growing Neural Gas (GNG) network, which is a suitable model because of its flexibility, rapid adaptation and excellent quality of representation, to perform 3D surface reconstruction of different real-world objects and scenes. Some improvements have been made on the original algorithm considering colour and surface normal information of input data during the learning stage and creating complete triangular meshes instead of basic wire-frame representations. The proposed method is able to successfully create 3D faces online, whereas existing 3D reconstruction methods based on Self-Organizing Maps (SOMs) required post-processing steps to close gaps and holes produced during the 3D reconstruction process. A set of quantitative and qualitative experiments were carried out to validate the proposed method. The method has been implemented and tested on real data, and has been found to be effective at reconstructing noisy point clouds obtained using low-cost 3D sensors.},
    author = {Orts-Escolano, s and Garcia-Rodriguez, J and Morell, V and Cazorla, Miguel and serra-Perez, J A and Garcia-Garcia, A},
    issn = {1370-4621},
    journal = {Neural Processing Letters},
    number = {2},
    pages = {401--423},
    title = {{3D surface reconstruction of noisy point clouds using Growing Neural Gas}},
    volume = {43},
    year = {2016}
    }
  • [DOI] J. C. Rangel, M. Cazorla, I. Garcia-Varea, J. Martinez-Gomez, E. Fromont, and M. Sebban, «Scene Classification from Semantic Labeling,» Advanced robotics, vol. 30, iss. 11–12, p. 758–769, 2016.
    [Bibtex]
    @article{Rangel2016,
    abstract = {Finding an appropriate image representation is a crucial problem in robotics. This problem has been classically addressed by means of computer vision techniques, where local and global features are used. The selection or/and combination of different features is carried out by taking into account repeatability and distinctiveness, but also the specific problem to solve. In this article, we propose the generation of image descriptors from general purpose semantic annotations. This approach has been evaluated as source of information for a scene classifier, and specifically using Clarifai as the semantic annotation tool. The experimentation has been carried out using the ViDRILO toolbox as benchmark, which includes a comparison of state-of-the-art global features and tools to make comparisons among them. According to the experimental results, the proposed descriptor performs similarly to well-known domain-specific image descriptors based on global features in a scene classification task. Moreover, the proposed descriptor is based on generalist annotations without any type of problem-oriented parameter tuning.},
    author = {Jose Carlos Rangel and Miguel Cazorla and Ismael Garcia-Varea and Jesus Martinez-Gomez and Elisa Fromont and Marc sebban},
    doi = {10.1080/01691864.2016.1164621},
    journal = {Advanced Robotics},
    number = {11--12},
    pages = {758--769},
    title = {{Scene Classification from Semantic Labeling}},
    volume = {30},
    year = {2016}
    }
  • A. Rodriguez, F. Gomez-Donoso, J. Martinez-Gomez, and M. Cazorla, «Building 3d maps with tag information,» in Xvii workshop en agentes físicos (waf 2016), 2016.
    [Bibtex]
    @inproceedings{Rodriguez2016,
    Author = {Angel Rodriguez and Francisco Gomez-Donoso and Jesus Martinez-Gomez and Miguel Cazorla},
    Title = {Building 3D maps with tag information},
    Booktitle = {XVII Workshop en Agentes Físicos (WAF 2016)},
    Year={2016}
    }
  • M. Saval-Calvo, J. Azorin-Lopez, A. Fuster-Guillo, J. Garcia-Rodriguez, S. Orts-Escolano, and A. Garcia-Garcia, «Evaluation of sampling method effects in 3D non-rigid registration,» Neural computing and applications, iss. 1-15, 2016.
    [Bibtex]
    @Article{saval2016,
    author = {Saval-Calvo, Marcelo and Azorin-Lopez, Jorge and Fuster-Guillo, Andres and Garcia-Rodriguez, Jose and Orts-Escolano, Sergio and Garcia-Garcia, Alberto},
    title = {{Evaluation of sampling method effects in 3D non-rigid registration}},
    journal = {Neural Computing and Applications},
    year = {2016},
    number = {1-15}
    }
  • J. Navarrete, D. Viejo, and M. Cazorla, «Color smoothing for rgb-d data using entropy information,» Applied soft computing, vol. 46, p. 361–380, 2016.
    [Bibtex]
    @article{navarrete2016color,
    title={Color smoothing for RGB-D data using entropy information},
    author={Navarrete, Javier and Viejo, Diego and Cazorla, Miguel},
    journal={Applied soft Computing},
    volume={46},
    pages={361--380},
    year={2016},
    publisher={Elsevier}
    }
  • E. Martinez-Martin and A. P. del Pobil, «Conflict resolution in robotics: an overview,» Artificial intelligence: concepts, methodologies, tools, and applications, vol. 4, pp. 2623-2638, 2016.
    [Bibtex]
    @article{Martinez-Martin2016,
    title = {Conflict resolution in robotics: An overview},
    journal = {Artificial Intelligence: Concepts, Methodologies, Tools, and Applications},
    year = {2016},
    volume = {4},
    pages = {2623-2638},
    author = {Martinez-Martin, E. and del Pobil, A.P.}
    }
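Before the 2015 list, a brief aside on the Schaeffer-gesture entry above (Gomez-Donoso et al., 2016): recognition there is described as Dynamic Time Warping combined with k-Nearest Neighbors over RGB-D body-motion data. The following minimal sketch shows that combination on toy 1-D trajectories; the template gestures and labels are invented for illustration, and this is not the published system.

    # Minimal DTW + 1-nearest-neighbour gesture classifier (illustrative sketch).
    # Real input would be per-frame skeleton joint coordinates from an RGB-D camera.
    import numpy as np

    def dtw_distance(a, b):
        """Classic O(len(a)*len(b)) dynamic-time-warping distance; sequence
        elements may be scalars or equally sized vectors."""
        n, m = len(a), len(b)
        D = np.full((n + 1, m + 1), np.inf)
        D[0, 0] = 0.0
        for i in range(1, n + 1):
            for j in range(1, m + 1):
                cost = np.linalg.norm(np.atleast_1d(a[i - 1]) - np.atleast_1d(b[j - 1]))
                D[i, j] = cost + min(D[i - 1, j], D[i, j - 1], D[i - 1, j - 1])
        return D[n, m]

    def classify(query, templates):
        """templates: list of (label, sequence); returns the 1-NN label under DTW."""
        return min(templates, key=lambda t: dtw_distance(query, t[1]))[0]

    if __name__ == "__main__":
        templates = [("wave",  [0, 1, 0, -1, 0, 1, 0]),
                     ("raise", [0, 1, 2, 3, 3, 3, 3])]
        print(classify([0, 0, 1, 0, -1, 0, 1], templates))   # expected: wave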

2015

  • J. C. Rangel, V. Morell, M. Cazorla, S. Orts-Escolano, and J. García-Rodríguez, «Object recognition in noisy rgb-d data,» in International work-conference on the interplay between natural and artificial computation, 2015, p. 261–270.
    [Bibtex]
    @inproceedings{rangel2015object,
    title={Object Recognition in Noisy RGB-D Data},
    author={Rangel, Jos{\'e} Carlos and Morell, Vicente and Cazorla, Miguel and Orts-Escolano, sergio and Garc{\'\i}a-Rodr{\'\i}guez, Jos{\'e}},
    booktitle={International Work-Conference on the Interplay Between Natural and Artificial Computation},
    pages={261--270},
    year={2015},
    organization={springer International Publishing}
    }
  • M. Saval-Calvo, S. Orts-Escolano, J. Azorin-Lopez, J. Garcia-Rodriguez, A. Fuster-Guillo, V. Morell-Gimenez, and M. Cazorla, «A comparative study of downsampling techniques for non-rigid point set registration using color,» in International work-conference on the interplay between natural and artificial computation, 2015, p. 281–290.
    [Bibtex]
    @inproceedings{saval2015comparative,
    title={A Comparative study of Downsampling Techniques for Non-rigid Point set Registration Using Color},
    author={saval-Calvo, Marcelo and Orts-Escolano, sergio and Azorin-Lopez, Jorge and Garcia-Rodriguez, Jose and Fuster-Guillo, Andres and Morell-Gimenez, Vicente and Cazorla, Miguel},
    booktitle={International Work-Conference on the Interplay Between Natural and Artificial Computation},
    pages={281--290},
    year={2015},
    organization={springer International Publishing}
    }
  • S. Orts-Escolano, J. Garcia-Rodriguez, V. Morell, M. Cazorla, A. Garcia-Garcia, and S. Ovidiu-Oprea, «Optimized representation of 3d sequences using neural networks,» in International work-conference on the interplay between natural and artificial computation, 2015, p. 251–260.
    [Bibtex]
    @inproceedings{orts2015optimized,
    title={Optimized Representation of 3D sequences Using Neural Networks},
    author={Orts-Escolano, sergio and Garcia-Rodriguez, Jose and Morell, Vicente and Cazorla, Miguel and Garcia-Garcia, Alberto and Ovidiu-Oprea, sergiu},
    booktitle={International Work-Conference on the Interplay Between Natural and Artificial Computation},
    pages={251--260},
    year={2015},
    organization={springer International Publishing}
    }
  • [DOI] J. Gomez, B. Caputo, M. Cazorla, H. Christensen, M. Fornoni, I. Garcia-Varea, and A. Pronobis, «Where Are We After Five Editions?: Robot Vision Challenge, a Competition that Evaluates Solutions for the Visual Place Classification Problem,» Robotics automation magazine, ieee, vol. 22, iss. 4, p. 147–156, 2015.
    [Bibtex]
    @article{7349126,
    author = {Gomez, J and Caputo, B and Cazorla, M and Christensen, H and Fornoni, M and Garcia-Varea, I and Pronobis, A},
    doi = {10.1109/MRA.2015.2460931},
    issn = {1070-9932},
    journal = {Robotics Automation Magazine, IEEE},
    keywords = {Benchmark testing;Object recognition;Proposals;Rob},
    number = {4},
    pages = {147--156},
    title = {{Where Are We After Five Editions?: Robot Vision Challenge, a Competition that Evaluates Solutions for the Visual Place Classification Problem}},
    volume = {22},
    year = {2015}
    }
  • A. Angelopoulou, A. Psarrou, J. Garcia-Rodriguez, S. Orts-Escolano, J. Azorin-Lopez, and K. Revett, «3D Reconstruction of Medical Images from Slices Automatically Landmarked with Growing Neural Models,» Neurocomputing, vol. 150, Part, p. 16–25, 2015.
    [Bibtex]
    @article{Angelopouloul2015,
    abstract = {In this study, we utilise a novel approach to segment out the ventricular system in a series of high resolution T1-weighted MR images. We present a brain ventricles fast reconstruction method. The method is based on the processing of brain sections and establishing a fixed number of landmarks onto those sections to reconstruct the ventricles 3D surface. Automated landmark extraction is accomplished through the use of the self-organising network, the growing neural gas (GNG), which is able to topographically map the low dimensionality of the network to the high dimensionality of the contour manifold without requiring a priori knowledge of the input space structure. Moreover, our GNG landmark method is tolerant to noise and eliminates outliers. Our method accelerates the classical surface reconstruction and filtering processes. The proposed method offers higher accuracy compared to methods with similar efficiency as Voxel Grid.},
    author = {Angelopoulou, Anastassia and Psarrou, Alexandra and Garcia-Rodriguez, Jose and Orts-Escolano, Sergio and Azorin-Lopez, Jorge and Revett, Kenneth},
    journal = {Neurocomputing},
    pages = {16--25},
    title = {{3D Reconstruction of Medical Images from Slices Automatically Landmarked with Growing Neural Models}},
    volume = {150, Part},
    year = {2015}
    }
  • [DOI] B. J. Boom, S. Orts-Escolano, X. X. Ning, S. McDonagh, P. Sandilands, and R. B. Fisher, «Interactive light source position estimation for augmented reality with an rgb-d camera,» Computer animation and virtual worlds, p. n/a–n/a, 2015.
    [Bibtex]
    @Article{Boom2016,
    author = {Boom, Bastiaan J. and Orts-Escolano, sergio and Ning, Xin X. and McDonagh, steven and sandilands, Peter and Fisher, Robert B.},
    title = {Interactive light source position estimation for augmented reality with an RGB-D camera},
    journal = {Computer Animation and Virtual Worlds},
    year = {2015},
    pages = {n/a--n/a},
    note = {cav.1686},
    doi = {10.1002/cav.1686},
    issn = {1546-427X},
    keywords = {light source estimation, augmented reality, GPU implementation, RGB-D camera},
    url = {http://dx.doi.org/10.1002/cav.1686}
    }
  • [DOI] M. Cazorla and D. Viejo, «JavaVis: An integrated computer vision library for teaching computer vision,» Computer applications in engineering education, vol. 23, iss. 2, p. 258–267, 2015.
    [Bibtex]
    @article{CAE:CAE21594,
    abstract = {
    In this article, we present a new framework oriented to teach Computer Vision related subjects called JavaVis. It is a computer vision library divided in three main areas: 2D package is featured for classical computer vision processing; 3D package, which includes a complete 3D geometric toolset, is used for 3D vision computing; Desktop package comprises a tool for graphic designing and testing of new algorithms. JavaVis is designed to be easy to use, both for launching and testing existing algorithms and for developing new ones.},
    author = {Cazorla, Miguel and Viejo, Diego},
    doi = {10.1002/cae.21594},
    issn = {1099-0542},
    journal = {Computer Applications in Engineering Education},
    keywords = {3D data,Java GUI,computer vision,image processing teaching,open source},
    number = {2},
    pages = {258--267},
    title = {{JavaVis: An integrated computer vision library for teaching computer vision}},
    url = {http://dx.doi.org/10.1002/cae.21594},
    volume = {23},
    year = {2015}
    }
  • M. Cazorla and D. Viejo, «Experiences Using an Open Source Software Library to Teach Computer Vision Subjects,» Journal of technology and science education, vol. 4, iss. 3, p. 214–227, 2015.
    [Bibtex]
    @article{cazorla2015,
    abstract = {Machine vision is an important subject in computer science and engineering degrees. For laboratory experimentation, it is desirable to have a complete and easy-to-use tool. In this work we present a Java library, oriented to teaching computer vision. We have designed and built the library from scratch with emphasis on readability and understanding rather than on efficiency. However, the library can also be used for research purposes.
    JavaVis is an open source Java library, oriented to the teaching of Computer Vision. It consists of a framework with several features that meet its demands. It has been designed to be easy to use: the user does not have to deal with internal structures or graphical interface, and should the student need to add a new algorithm it can be done simply enough.
    Once we sketch the library, we focus on the experience the student gets using this library in several computer vision courses. Our main goal is to find out whether the students understand what they are doing, that is, find out how much the library helps the student in grasping the basic concepts of computer vision. In the last four years we have conducted surveys to assess how much the students have improved their skills by using this library.
    },
    author = {Cazorla, Miguel and Viejo, Diego},
    issn = {2014-5349},
    journal = {Journal of Technology and science Education},
    keywords = {Computer vision teaching,Open source,engineering},
    number = {3},
    pages = {214--227},
    title = {{Experiences Using an Open Source Software Library to Teach Computer Vision Subjects}},
    volume = {4},
    year = {2015}
    }
  • M. Cazorla, J. Garcia-Rodriguez, J. M. C. Plaza, I. G. Varea, V. Matellan, F. M. Rico, J. Martinez-Gomez, F. J. R. Lera, C. Suarez Mejias, and M. E. M. Sahuquillo, «SIRMAVED: Development of a comprehensive robotic system for monitoring and interaction for people with acquired brain damage and dependent people,» in Xvi conferencia de la asociacion espanola para la inteligencia artificial (caepia), 2015.
    [Bibtex]
    @inproceedings{Cazorla2015Caepia1,
    author = {Cazorla, Miguel and Garcia-Rodriguez, Jose and Plaza, Jose Maria Canas and Varea, Ismael Garcia and Matellan, Vicente and Rico, Francisco Martin and Martinez-Gomez, Jesus and Lera, Francisco Javier Rodriguez and Mejias, Cristina suarez and sahuquillo, Maria Encarnacion Martinez},
    booktitle = {XVI Conferencia de la Asociacion Espanola para la Inteligencia Artificial (CAEPIA)},
    title = {{SIRMAVED: Development of a comprehensive robotic system for monitoring and interaction for people with acquired brain damage and dependent people}},
    year = {2015}
    }
  • F. Gomez-Donoso and M. Cazorla, «Recognizing Schaeffer’s Gestures for Robot Interaction,» in Actas de la conferencia de la asociacion espanola para la inteligencia artificial (caepia), 2015.
    [Bibtex]
    @inproceedings{Gomez2015,
    abstract = {In this paper we present a new interaction system for schaeffer's gesture language recognition. It uses the information provided by an RGBD camera to capture body motion and recognize gestures. schaeffer's gestures are a reduced set of gestures designed for people with cognitive disabilities. The system is able to send alarms to an assistant or even a robot for human robot interaction.},
    author = {Francisco Gomez-Donoso and Miguel Cazorla},
    booktitle= {Actas de la Conferencia de la Asociacion Espanola para la Inteligencia Artificial (CAEPIA)},
    keywords = {3d gesture recognition,Schaeffer's gestures,human robot interaction},
    title = {{Recognizing Schaeffer's Gestures for Robot Interaction}},
    url = {http://simd.albacete.org/actascaepia15/papers/01045.pdf},
    year = {2015}
    }
  • J. Martinez-Gomez, M. Cazorla, I. Garcia-Varea, and C. Romero-Gonzalez, «Object categorization from RGB-D local features and Bag Of Words,» in 2nd iberian robotics conference, 2015.
    [Bibtex]
    @inproceedings{MartinezRobot2015,
    author = {Martinez-Gomez, Jesus and Cazorla, Miguel and Garcia-Varea, Ismael and Romero-Gonzalez, Cristina},
    booktitle = {2nd Iberian robotics conference},
    title = {{Object categorization from RGB-D local features and Bag Of Words}},
    year = {2015}
    }
  • V. Morell, J. Martinez-Gomez, M. Cazorla, and I. Garcia-Varea, «ViDRILO: The Visual and Depth Robot Indoor Localization with Objects information dataset,» International journal of robotics research, vol. 34, iss. 14, p. 1681–1687, 2015.
    [Bibtex]
    @article{Morell2015,
    author = {Vicente Morell and Jesus Martinez-Gomez and Miguel Cazorla and Ismael Garcia-Varea},
    journal = {International Journal of Robotics Research},
    number = {14},
    pages = {1681--1687},
    title = {{ViDRILO: The Visual and Depth Robot Indoor Localization with Objects information dataset}},
    volume = {34},
    year = {2015}
    }
  • S. Orts-Escolano, J. Garcia-Rodriguez, J. A. Serra-Perez, A. Jimeno-Morenilla, A. Garcia-Garcia, V. Morell, and M. Cazorla, «3D Model Reconstruction using Neural Gas Accelerated on GPUs,» Applied soft computing, vol. 32, p. 87–100, 2015.
    [Bibtex]
    @Article{Orts-Escolano2015,
    author = {Orts-Escolano, s and Garcia-Rodriguez, J and serra-Perez, J A and Jimeno-Morenilla, A and Garcia-Garcia, A and Morell, V and Cazorla, Miguel},
    title = {{3D Model Reconstruction using Neural Gas Accelerated on GPUs}},
    journal = {Applied soft Computing},
    year = {2015},
    volume = {32},
    pages = {87--100},
    issn = {1568-4946},
    abstract = {In this work, we propose the use of the Neural Gas (NG), a neural network with unsupervised competitive hebbian learning (CHL), to develop a reverse engineering process. This is a simple and accurate method to reconstruct objects from the point cloud obtained from overlapped multiple views using low cost sensors. In contrast to other methods that may need several stages that include downsampling, noise filtering and many other tasks, the NG automatically obtains the 3D model of the scanned objects. The combination of the acquired and reconstructed 3D models with virtual and augmented reality environments allows the users interaction and also permits developing a virtual design and manufacturing system.
    To demonstrate the validity of our proposal we tested our method with several models and performed a study of the neural network parameterization calculating the quality of representation and also comparing results with other neural methods like Growing Neural Gas and Kohonen maps or classical methods like Voxel Grid. We also reconstructed models acquired by low cost sensors that can be included in virtual and augmented reality environments for redesign or manipulation purposes. Since the NG algorithm has a strong computational cost, we propose its acceleration. We have redesigned and implemented the NG learning algorithm to fit it onto a Graphic Processor Unit using CUDA. A speed-up of 180x is obtained compared to the sequential CPU version.},
    }
  • S. Orts-Escolano, J. Garcia-Rodriguez, V. Morell, M. Cazorla, M. Saval-Calvo, and J. Azorin, «Processing Point Cloud Sequences with Growing Neural Gas,» in Neural networks (ijcnn), the 2015 international joint conference on, 2015.
    [Bibtex]
    @inproceedings{Orts2015IJCNN,
    author = {Orts-Escolano, s and Garcia-Rodriguez, J and Morell, V and Cazorla, M and saval-Calvo, M and Azorin, J},
    booktitle = {Neural Networks (IJCNN), The 2015 International Joint Conference on},
    title = {{Processing Point Cloud Sequences with Growing Neural Gas}},
    year = {2015}
    }
  • M. Saval-Calvo, S. Orts-Escolano, J. Azorin-Lopez, J. Garcia-Rodriguez, A. Fuster-Guillo, V. Morell-Gimenez, and M. Cazorla, «Non-rigid point set registration using color and data downsampling,» in Neural networks (ijcnn), the 2015 international joint conference on, 2015.
    [Bibtex]
    @InProceedings{saval-Calvo2015,
    author = {saval-Calvo, Marcelo and Orts-Escolano, sergio and Azorin-Lopez, Jorge and Garcia-Rodriguez, Jose and Fuster-Guillo, Andres and Morell-Gimenez, Vicente and Cazorla, Miguel},
    title = {{Non-rigid point set registration using color and data downsampling}},
    booktitle = {Neural Networks (IJCNN), The 2015 International Joint Conference on},
    year = {2015},
    }
  • [DOI] S. Orts-Escolano, V. Morell, J. Garcia-Rodriguez, M. Cazorla, and R. Fisher, «Real-time 3D semi-local surface patch extraction using GPGPU,» Journal of real-time image processing, vol. 10, iss. 4, p. 647–666, 2015.
    [Bibtex]
    @article{Orts-Escolano2015JRTIP,
    author = {Orts-Escolano, Sergio and Morell, Vicente and Garcia-Rodriguez, Jose and Cazorla, Miguel and Fisher, Robert B.},
    doi = {10.1007/s11554-013-0385-7},
    issn = {1861-8200},
    journal = {Journal of Real-Time Image Processing},
    keywords = {Real-time; GPGPU; RGB-D; 3D local shape descriptor},
    number = {4},
    pages = {647--666},
    publisher = {springer Berlin Heidelberg},
    title = {{Real-time 3D semi-local surface patch extraction using GPGPU}},
    url = {http://dx.doi.org/10.1007/s11554-013-0385-7},
    volume = {10},
    year = {2015}
    }
  • J. C. Rangel, V. Morell, M. Cazorla, S. Orts-Escolano, and J. Garcia-Rodriguez, «Using GNG on 3D Object Recognition in Noisy RGB-D data,» in Neural networks (ijcnn), the 2015 international joint conference on, 2015.
    [Bibtex]
    @inproceedings{Rangel2015IJCNN,
    author = {Rangel, J C and Morell, V and Cazorla, M and Orts-Escolano, s and Garcia-Rodriguez, J},
    booktitle = {Neural Networks (IJCNN), The 2015 International Joint Conference on},
    title = {{Using GNG on 3D Object Recognition in Noisy RGB-D data}},
    year = {2015}
    }
  • J. C. Rangel, M. Cazorla, I. G. Varea, J. Martinez-Gomez, E. Fromont, and M. Sebban, «Computing Image Descriptors from Annotations Acquired from External Tools,» in 2nd iberian robotics conference, 2015.
    [Bibtex]
    @inproceedings{RangelRobot2015,
    author = {Rangel, Jose Carlos and Cazorla, Miguel and Varea, Ismael Garcia and Martinez-Gomez, Jesus and Fromont, Elisa and sebban, Marc},
    booktitle = {2nd Iberian robotics conference},
    title = {{Computing Image Descriptors from Annotations Acquired from External Tools}},
    year = {2015}
    }
  • E. Martinez-Martin and A. P. del Pobil, «Uji hri-bd: a new human-robot interaction benchmark dataset,» Human-robot interactions: principles, technologies and challenges, pp. 57-73, 2015.
    [Bibtex]
    @article{Martinez-Martin2015,
    title = {UJI HRI-BD: A new human-robot interaction benchmark dataset},
    journal = {Human-Robot Interactions: Principles, Technologies and Challenges},
    year = {2015},
    pages = {57-73},
    author = {Martinez-Martin, E. and del Pobil, A.P.}
    }
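Closing the 2015 list, a note on the annotation-based descriptors used in «Computing Image Descriptors from Annotations Acquired from External Tools» (Rangel et al., 2015): each image's (label, confidence) annotations from an external tool are turned into a fixed-length vector over a shared vocabulary. The sketch below illustrates only that construction; the labels, confidences and max-pooling choice are assumptions, not the paper's exact procedure.

    # Illustrative sketch: fixed-dimensionality descriptors from (label, confidence)
    # annotations returned by an external annotation tool.

    def build_vocabulary(annotations_per_image):
        """Sorted label -> index map over every label seen in the collection."""
        labels = {label for annotations in annotations_per_image
                  for label, _ in annotations}
        return {label: index for index, label in enumerate(sorted(labels))}

    def annotation_descriptor(annotations, vocab):
        """One fixed-length vector per image: best confidence per vocabulary label."""
        descriptor = [0.0] * len(vocab)
        for label, confidence in annotations:
            descriptor[vocab[label]] = max(descriptor[vocab[label]], confidence)
        return descriptor

    if __name__ == "__main__":
        images = [[("kitchen", 0.9), ("table", 0.7)],
                  [("corridor", 0.8), ("door", 0.6)]]
        vocab = build_vocabulary(images)
        for annotations in images:
            print(annotation_descriptor(annotations, vocab))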

2014

  • M. Cazorla, P. Gil, S. Puente, J. L. Munoz, and D. Pastor, «An improvement of a SLAM RGB-D method with movement prediction derived from a study of visual features,» Advanced robotics, vol. 28, iss. 18, p. 1231–1242, 2014.
    [Bibtex]
    @article{Cazorla2014An,
    abstract = {This paper presents a method for the fast calculation of a robot's egomotion using visual features. The method is part of a complete system for automatic map building and simultaneous Location and Mapping (sLAM). The method uses optical flow to determine whether the robot has undergone a movement. If so, some visual features that do not satisfy several criteria are deleted, and then egomotion is calculated. Thus, the proposed method improves the efficiency of the whole process because not all the data is processed. We use a state-of-the-art algorithm (TORO) to rectify the map and solve the sLAM problem. Additionally, a study of different visual detectors and descriptors has been conducted to identify which of them are more suitable for the sLAM problem. Finally, a navigation method is described using the map obtained from the sLAM solution.},
    author = {Cazorla, Miguel and Gil, Pablo and Puente, santiago and Munoz, Jose Luis and Pastor, Daniel},
    journal = {Advanced robotics},
    keywords = {3D data,RGB-D data,SLAM,visual features},
    number = {18},
    pages = {1231--1242},
    title = {{An improvement of a SLAM RGB-D method with movement prediction derived from a study of visual features}},
    volume = {28},
    year = {2014}
    }
  • J. Garcia-Rodriguez, S. Orts-Escolano, N. Angelopoulou, A. Psarrou, and J. Azorin-Lopez, «Real time motion estimation using a neural architecture implemented on GPUs,» in Journal of real-time image processing, 2014.
    [Bibtex]
    @inproceedings{garcia2014z,
    author = {J. Garcia-Rodriguez and s. Orts-Escolano and N. Angelopoulou and A. Psarrou and J. Azorin-Lopez},
    booktitle = {Journal of Real-Time Image Processing},
    title = {{Real time motion estimation using a neural architecture implemented on GPUs}},
    year = {2014}
    }
  • [DOI] D. Gil, J. Garcia-Rodriguez, M. Cazorla, and M. Johnsson, «SARASOM: a supervised architecture based on the recurrent associative SOM,» Neural computing and applications, p. 1–13, 2014.
    [Bibtex]
    @article{gil2014,
    author = {Gil, David and Garcia-Rodriguez, Jose and Cazorla, Miguel and Johnsson, Magnus},
    title = {{SARASOM: a supervised architecture based on the recurrent associative SOM}},
    journal = {Neural Computing and Applications},
    year = {2014},
    abstract = {We present and evaluate a novel supervised recurrent neural network architecture, the sARAsOM, based on the associative self-organizing map. The performance of the sARAsOM is evaluated and compared with the Elman network as well as with a hidden Markov model (HMM) in a number of prediction tasks using sequences of letters, including some experiments with a reduced lexicon of 15 words. The results were very encouraging with the sARAsOM learning better and performing with better accuracy than both the Elman network and the HMM.},
    url = {http://dx.doi.org/10.1007/s00521-014-1785-8},
    pages = {1--13},
    doi = {10.1007/s00521-014-1785-8},
    issn = {0941-0643},
    keywords = {Recurrent associative self-organizing map; supervi},
    publisher = {springer London}
    }
  • [DOI] S. Orts-Escolano, J. Garcia-Rodriguez, V. Morell, M. Cazorla, J. Azorin, and J. M. Garcia-Chamizo, «Parallel Computational Intelligence-Based Multi-Camera Surveillance System,» Journal of sensor and actuator networks, vol. 3, iss. 2, p. 95–112, 2014.
    [Bibtex]
    @article{jsan3020095,
    abstract = {In this work, we present a multi-camera surveillance system based on the use of self-organizing neural networks to represent events on video. The system processes several tasks in parallel using GPUs (graphic processor units). It addresses multiple vision tasks at various levels, such as segmentation, representation or characterization, analysis and monitoring of the movement. These features allow the construction of a robust representation of the environment and interpret the behavior of mobile agents in the scene. It is also necessary to integrate the vision module into a global system that operates in a complex environment by receiving images from multiple acquisition devices at video frequency. Offering relevant information to higher level systems, monitoring and making decisions in real time, it must accomplish a set of requirements, such as: time constraints, high availability, robustness, high processing speed and re-configurability. We have built a system able to represent and analyze the motion in video acquired by a multi-camera network and to process multi-source data in parallel on a multi-GPU architecture.},
    author = {Orts-Escolano, sergio and Garcia-Rodriguez, Jose and Morell, Vicente and Cazorla, Miguel and Azorin, Jorge and Garcia-Chamizo, Juan Manuel},
    doi = {10.3390/jsan3020095},
    issn = {2224-2708},
    journal = {Journal of sensor and Actuator Networks},
    keywords = {growing neural gas; camera networks; visual survei},
    number = {2},
    pages = {95--112},
    title = {{Parallel Computational Intelligence-Based Multi-Camera Surveillance System}},
    url = {http://www.mdpi.com/2224-2708/3/2/95},
    volume = {3},
    year = {2014}
    }
  • J. Montoyo, V. Morell, M. Cazorla, J. Garcia-Rodriguez, and S. Orts-Escolano, «Registration methods for RGB-D cameras accelerated on GPUs,» in International symposium on robotics, isr, 2014.
    [Bibtex]
    @inproceedings{Montoyo20143Registration,
    author = {Montoyo, Javier and Morell, Vicente and Cazorla, Miguel and Garcia-Rodriguez, Jose and Orts-Escolano, sergio},
    booktitle = {International symposium on robotics, IsR},
    title = {{Registration methods for RGB-D cameras accelerated on GPUs}},
    year = {2014}
    }
  • [DOI] V. Morell, S. Orts-Escolano, M. Cazorla, and J. Garcia-Rodriguez, «Geometric 3D point cloud compression,» Pattern recognition letters, vol. 50, p. 55–62, 2014.
    [Bibtex]
    @Article{Morell2014,
    author = {Morell, Vicente and Orts-Escolano, sergio and Cazorla, Miguel and Garcia-Rodriguez, Jose},
    title = {{Geometric 3D point cloud compression}},
    journal = {Pattern Recognition Letters},
    year = {2014},
    volume = {50},
    pages = {55--62},
    abstract = { The use of 3D data in mobile robotics applications provides valuable information about the robot's environment but usually the huge amount of 3D information is unmanageable by the robot storage and computing capabilities. A data compression is necessary to store and manage this information but preserving as much information as possible. In this paper, we propose a 3D lossy compression system based on plane extraction which represent the points of each scene plane as a Delaunay triangulation and a set of points/area information. The compression system can be customized to achieve different data compression or accuracy ratios. It also supports a color segmentation stage to preserve original scene color information and provides a realistic scene reconstruction. The design of the method provides a fast scene reconstruction useful for further visualization or processing tasks. },
    doi = {http://dx.doi.org/10.1016/j.patrec.2014.05.016},
    issn = {0167-8655},
    keywords = {3D data; Compression; Kinect}
    }
  • V. Morell, M. Cazorla, S. Orts-Escolano, and J. Garcia-Rodriguez, «3D Maps Representation using GNG,» in Neural networks (ijcnn), the 2014 international joint conference on, 2014.
    [Bibtex]
    @InProceedings{Morell20143d,
    author = {Morell, Vicente and Cazorla, Miguel and Orts-Escolano, sergio and Garcia-Rodriguez, Jose},
    title = {{3D Maps Representation using GNG}},
    booktitle = {Neural Networks (IJCNN), The 2014 International Joint Conference on},
    year = {2014}
    }
  • [DOI] V. Morell-Gimenez, M. Saval-Calvo, J. Azorin-Lopez, J. Garcia-Rodriguez, M. Cazorla, S. Orts-Escolano, and A. Fuster-Guillo, «A Comparative Study of Registration Methods for RGB-D Video of Static Scenes,» Sensors, vol. 14, iss. 5, p. 8547–8576, 2014.
    [Bibtex]
    @article{morell2014comparative,
    abstract = {The use of RGB-D sensors for mapping and recognition tasks in robotics or, in general, for virtual reconstruction has increased in recent years. The key aspect of these kinds of sensors is that they provide both depth and color information using the same device. In this paper, we present a comparative analysis of the most important methods used in the literature for the registration of subsequent RGB-D video frames in static scenarios. The analysis begins by explaining the characteristics of the registration problem, dividing it into two representative applications: scene modeling and object reconstruction. Then, a detailed experimentation is carried out to determine the behavior of the different methods depending on the application. For both applications, we used standard datasets and a new one built for object reconstruction.},
    author = {Morell-Gimenez, Vicente and saval-Calvo, Marcelo and Azorin-Lopez, Jorge and Garcia-Rodriguez, Jose and Cazorla, Miguel and Orts-Escolano, sergio and Fuster-Guillo, Andres},
    doi = {10.3390/s140508547},
    issn = {1424-8220},
    journal = {sensors},
    keywords = {RGB-D sensor; registration; robotics mapping; obje},
    month = {may},
    number = {5},
    pages = {8547--8576},
    publisher = {Multidisciplinary Digital Publishing Institute},
    title = {{A Comparative Study of Registration Methods for RGB-D Video of Static Scenes}},
    url = {http://www.mdpi.com/1424-8220/14/5/8547},
    volume = {14},
    year = {2014}
    }
  • S. Orts-Escolano, J. Garcia-Rodriguez, V. Morella, M. Cazorla, and J. M. Garcia-Chamizo, «3D Colour Object Reconstruction based on Growing Neural Gas,» in Neural networks (ijcnn), the 2014 international joint conference on, 2014.
    [Bibtex]
    @inproceedings{Orts20143d,
    author = {Orts-Escolano, sergio and Garcia-Rodriguez, Jose and Morella, Vicente and Cazorla, Miguel and Garcia-Chamizo, Juan Manuel},
    booktitle = {Neural Networks (IJCNN), The 2014 International Joint Conference on},
    title = {{3D Colour Object Reconstruction based on Growing Neural Gas}},
    year = {2014}
    }
  • D. Viejo, J. Garcia-Rodriguez, and M. Cazorla, «Combining Visual Features and Growing Neural Gas Networks for Robotic 3D SLAM,» Information sciences, vol. 276, p. 174–185, 2014.
    [Bibtex]
    @article{viejo2014combining,
    abstract = {The use of 3D data in mobile robotics provides valuable information about the robot's environment. Traditionally, stereo cameras have been used as a low-cost 3D sensor. However, the lack of precision and texture for some surfaces suggests that the use of other 3D sensors could be more suitable. In this work, we examine the use of two sensors: an infrared sR4000 and a Kinect camera. We use a combination of 3D data obtained by these cameras, along with features obtained from 2D images acquired from these cameras, using a Growing Neural Gas (GNG) network applied to the 3D data. The goal is to obtain a robust egomotion technique. The GNG network is used to reduce the camera error. To calculate the egomotion, we test two methods for 3D registration. One is based on an iterative closest points algorithm, and the other employs random sample consensus. Finally, a simultaneous localization and mapping method is applied to the complete sequence to reduce the global error. The error from each sensor and the mapping results from the proposed method are examined.},
    author = {Viejo, Diego and Garcia-Rodriguez, Jose and Cazorla, Miguel},
    journal = {Information sciences},
    keywords = {GNG; SLAM; 3D registration},
    pages = {174--185},
    title = {{Combining Visual Features and Growing Neural Gas Networks for Robotic 3D SLAM}},
    volume = {276},
    year = {2014}
    }
  • [DOI] D. Viejo and M. Cazorla, «A robust and fast method for 6DoF motion estimation from generalized 3D data,» Autonomous robots, 2014.
    [Bibtex]
    @article{Viejo2014raey,
    author = {Viejo, Diego and Cazorla, Miguel},
    doi = {10.1007/s10514-013-9354-z},
    issn = {0929-5593},
    journal = {Autonomous Robots},
    keywords = {6DoF pose registration; 3D mapping; Mobile robots;},
    publisher = {springer Us},
    title = {{A robust and fast method for 6DoF motion estimation from generalized 3D data}},
    year = {2014}
    }
  • [DOI] E. Martinez-Martin, A. P. del Pobil, M. Chessa, F. Solari, and S. P. Sabatini, «An active system for visually-guided reaching in 3d across binocular fixations,» The scientific world journal, vol. 2014, p. 1–16, 2014.
    [Bibtex]
    @article{Martinez_Martin_2014,
    doi = {10.1155/2014/179391},
    url = {http://dx.doi.org/10.1155/2014/179391},
    year = 2014,
    publisher = {Hindawi Publishing Corporation},
    volume = {2014},
    pages = {1--16},
    author = {Ester Martinez-Martin and Angel P. del Pobil and Manuela Chessa and Fabio Solari and Silvio P. Sabatini},
    title = {An Active System for Visually-Guided Reaching in 3D across Binocular Fixations},
    journal = {The scientific World Journal}
    }
  • E. Martinez-Martin and A. P. Del Pobil, «Animal social behaviour: a visual analysis,» Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics), vol. 8575 LNAI, pp. 320-327, 2014.
    [Bibtex]
    @article{Martinez-Martin2014,
    title = {Animal social behaviour: A visual analysis},
    journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
    year = {2014},
    volume = {8575 LNAI},
    pages = {320-327},
    author = {Martinez-Martin, E. and Del Pobil, A.P.}
    }
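Closing the 2014 list, a note on the plane-based compression in «Geometric 3D point cloud compression» (Morell et al., 2014): the pipeline starts by extracting the dominant scene planes before triangulating and encoding them. The sketch below covers only a generic RANSAC plane-extraction step under assumed thresholds and synthetic data; the Delaunay-triangulation and encoding stages of the paper are not reproduced.

    # Illustrative RANSAC plane extraction (first stage only of a plane-based
    # compression pipeline; thresholds and data are assumptions).
    import numpy as np

    def fit_plane(p0, p1, p2):
        """Unit normal n and offset d of the plane through three points, or None."""
        n = np.cross(p1 - p0, p2 - p0)
        norm = np.linalg.norm(n)
        if norm < 1e-9:
            return None                      # degenerate (collinear) sample
        n = n / norm
        return n, -float(n @ p0)

    def ransac_plane(points, iterations=200, distance_threshold=0.01, seed=0):
        """Boolean inlier mask of the best plane found by random sampling."""
        rng = np.random.default_rng(seed)
        best_inliers = np.zeros(len(points), dtype=bool)
        for _ in range(iterations):
            sample = points[rng.choice(len(points), 3, replace=False)]
            model = fit_plane(*sample)
            if model is None:
                continue
            n, d = model
            inliers = np.abs(points @ n + d) < distance_threshold
            if inliers.sum() > best_inliers.sum():
                best_inliers = inliers
        return best_inliers

    if __name__ == "__main__":
        rng = np.random.default_rng(1)
        plane = np.column_stack([rng.uniform(0, 1, 500),
                                 rng.uniform(0, 1, 500),
                                 np.zeros(500)])            # synthetic z = 0 plane
        clutter = rng.uniform(0, 1, (100, 3))               # off-plane points
        cloud = np.vstack([plane, clutter])
        print("plane inliers:", ransac_plane(cloud).sum())  # close to 500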

2013

  • [DOI] S. Orts-Escolano, V. Morell, J. Garcia-Rodriguez, and M. Cazorla, «Point cloud data filtering and downsampling using growing neural gas,» in Neural networks (ijcnn), the 2013 international joint conference on, 2013, p. 1–8.
    [Bibtex]
    @inproceedings{6706719,
    abstract = {3D sensors provide valuable information for mobile robotic tasks like scene classification or object recognition, but these sensors often produce noisy data that makes it impossible to apply classical keypoint detection and feature extraction techniques. Therefore, noise removal and downsampling have become essential steps in 3D data processing. In this work, we propose the use of a 3D filtering and downsampling technique based on a Growing Neural Gas (GNG) network. The GNG method is able to deal with outliers present in the input data. These features allow representing 3D spaces, obtaining an induced Delaunay Triangulation of the input space. Experiments show how the GNG method yields better input space adaptation to noisy data than other filtering and downsampling methods like Voxel Grid. It is also demonstrated how state-of-the-art keypoint detectors improve their performance using data filtered with the GNG network. Descriptors extracted on improved keypoints perform better matching in robotics applications such as 3D scene registration.},
    author = {Orts-Escolano, s and Morell, V and Garcia-Rodriguez, J and Cazorla, M},
    booktitle = {Neural Networks (IJCNN), The 2013 International Joint Conference on},
    doi = {10.1109/IJCNN.2013.6706719},
    issn = {2161-4393},
    keywords = {feature extraction;image classification;image registration},
    pages = {1--8},
    title = {{Point cloud data filtering and downsampling using growing neural gas}},
    year = {2013}
    }
  • J. Navarrete, D. Viejo, and M. Cazorla, «Portable 3D laser-camera calibration system with color fusion for SLAM,» International journal of automation and smart technology, vol. 3, iss. 1, 2013.
    [Bibtex]
    @article{AUsMT163,
    abstract = { Nowadays, the use of RGB-D sensors have focused a lot of research in computer vision and robotics. These kinds of sensors, like Kinect, allow to obtain 3D data together with color information. However, their working range is limited to less than 10 meters, making them useless in some robotics applications, like outdoor mapping. In these environments, 3D lasers, working in ranges of 20-80 meters, are better. But 3D lasers do not usually provide color information. A simple 2D camera can be used to provide color information to the point cloud, but a calibration process between camera and laser must be done. In this paper we present a portable calibration system to calibrate any traditional camera with a 3D laser in order to assign color information to the 3D points obtained. Thus, we can use laser precision and simultaneously make use of color information. Unlike other techniques that make use of a three-dimensional body of known dimensions in the calibration process, this system is highly portable because it makes use of small catadioptrics that can be placed in a simple manner in the environment. We use our calibration system in a 3D mapping system, including simultaneous Location and Mapping (sLAM), in order to get a 3D colored map which can be used in different tasks. We show that an additional problem arises: 2D cameras information is different when lighting conditions change. so when we merge 3D point clouds from two different views, several points in a given neighborhood could have different color information. A new method for color fusion is presented, obtaining correct colored maps. The system will be tested by applying it to 3D reconstruction.},
    author = {Navarrete, Javier and Viejo, Diego and Cazorla, Miguel},
    issn = {2223-9766},
    journal = {International Journal of Automation and Smart Technology},
    keywords = {2D-3D calibration; RGB-D information; color fusion},
    number = {1},
    title = {{Portable 3D laser-camera calibration system with color fusion for SLAM}},
    url = {http://www.ausmt.org/index.php/AUsMT/article/view/163},
    volume = {3},
    year = {2013}
    }
  • B. Boom, Sergio Orts-Escolano, X. Ning, Steven McDonagh, P. Sandilands, and R. Fisher, «Point light source estimation based on scenes recorded by a rgb-d camera,» in Proceedings of the british machine vision conference, 2013.
    [Bibtex]
    @InProceedings{Boom2013,
    author = {Bas Boom and Sergio Orts-Escolano and Xi Ning and Steven McDonagh and Peter Sandilands and Robert Fisher },
    title = {Point Light Source Estimation based on Scenes Recorded by a RGB-D camera },
    booktitle = {Proceedings of the British Machine Vision Conference},
    year = {2013},
    publisher = {BMVA Press},
    editor = {Burghardt, Tilo and Damen, Dima and Mayol-Cuevas, Walterio and Mirmehdi, Majid}
    }
  • [DOI] B. Caputo, H. Muller, B. Thomee, M. Villegas, R. Paredes, D. Zellhofer, H. Goeau, A. Joly, P. Bonnet, J. M. Gomez, I. G. Varea, and M. Cazorla, «ImageCLEF 2013: The Vision, the Data and the Open Challenges,» in Information access evaluation. multilinguality, multimodality, and visualization, P. Forner, H. Muller, R. Paredes, P. Rosso, and B. Stein, Eds., Springer berlin heidelberg, 2013, vol. 8138, p. 250–268.
    [Bibtex]
    @incollection{Caputo:2013aa,
    abstract = {This paper presents an overview of the ImageCLEF 2013 lab. Since its first edition in 2003, ImageCLEF has become one of the key initiatives promoting the benchmark evaluation of algorithms for the cross-language annotation and retrieval of images in various domains, such as public and personal images, to data acquired by mobile robot platforms and botanic collections. Over the years, by providing new data collections and challenging tasks to the community of interest, the ImageCLEF lab has achieved an unique position in the multi lingual image annotation and retrieval research landscape. The 2013 edition consisted of three tasks: the photo annotation and retrieval task, the plant identification task and the robot vision task. Furthermore, the medical annotation task, that traditionally has been under the ImageCLEF umbrella and that this year celebrates its tenth anniversary, has been organized in conjunction with AMIA for the first time. The paper describes the tasks and the 2013 competition, giving an unifying perspective of the present activities of the lab while discussion the future challenges and opportunities.},
    author = {Barbara Caputo and Henning Muller and Bart Thomee and Mauricio Villegas and Roberto Paredes and David Zellhofer and Herve Goeau and Alexis Joly and Pierre Bonnet and Jesus Martinez Gomez and Ismael Garcia Varea and Miguel Cazorla},
    booktitle = {Information Access Evaluation. Multilinguality, Multimodality, and Visualization},
    doi = {10.1007/978-3-642-40802-1_26},
    editor = {Forner, Pamela and Muller, Henning and Paredes, Roberto and Rosso, Paolo and Stein, Benno},
    isbn = {978-3-642-40801-4},
    keywords = {Language Translation and Linguistics Artificial In},
    pages = {250--268},
    publisher = {Springer Berlin Heidelberg},
    series = {Lecture Notes in Computer Science},
    title = {{ImageCLEF 2013: The Vision, the Data and the Open Challenges}},
    url = {http://dx.doi.org/10.1007/978-3-642-40802-1{\_}26},
    volume = {8138},
    year = {2013}
    }
  • I. Garcia-Varea, M. Cazorla, J. Martinez-Gomez, and B. Caputo, «Overview of the ImageCLEF 2013 Robot Vision Task,» in Working notes, clef 2013, 2013.
    [Bibtex]
    @inproceedings{garcia2013overview,
    author = {Garcia-Varea, Ismael and Cazorla, Miguel and Martinez-Gomez, Jesus and Caputo, Barbara},
    booktitle = {Working Notes, CLEF 2013},
    number = {EPFL-CONF-192517},
    title = {{Overview of the ImageCLEF 2013 Robot Vision Task}},
    year = {2013}
    }
  • [DOI] J. Garcia-Rodriguez, M. Cazorla, Sergio Orts-Escolano, and V. Morell, «Improving 3D Keypoint Detection from Noisy Data Using Growing Neural Gas,» in Advances in computational intelligence, I. Rojas, G. Joya, and J. Cabestany, Eds., Springer berlin heidelberg, 2013, vol. 7903, p. 480–487.
    [Bibtex]
    @incollection{Garcia-Rodriguez:2013aa,
    abstract = {3D sensors provides valuable information for mobile robotic tasks like scene classification or object recognition, but these sensors often produce noisy data that makes impossible applying classical keypoint detection and feature extraction techniques. Therefore, noise removal and downsampling have become essential steps in 3D data processing. In this work, we propose the use of a 3D filtering and down-sampling technique based on a Growing Neural Gas (GNG) network. GNG method is able to deal with outliers presents in the input data. These features allows to represent 3D spaces, obtaining an induced Delaunay Triangulation of the input space. Experiments show how the state-of-the-art keypoint detectors improve their performance using GNG output representation as input data. Descriptors extracted on improved keypoints perform better matching in robotics applications as 3D scene registration.},
    author = {Garcia-Rodriguez, Jose and Cazorla, Miguel and Orts-Escolano, Sergio and Morell, Vicente},
    booktitle = {Advances in Computational Intelligence},
    doi = {10.1007/978-3-642-38682-4_51},
    editor = {Rojas, Ignacio and Joya, Gonzalo and Cabestany, Joan},
    isbn = {978-3-642-38681-7},
    keywords = {GNG; Noisy Point Cloud; Visual Features; Keypoint},
    pages = {480--487},
    publisher = {Springer Berlin Heidelberg},
    series = {Lecture Notes in Computer Science},
    title = {{Improving 3D Keypoint Detection from Noisy Data Using Growing Neural Gas}},
    url = {http://dx.doi.org/10.1007/978-3-642-38682-4{\_}51},
    volume = {7903},
    year = {2013}
    }
  • [DOI] A. Jimeno-Morenilla, J. Garcia-Rodriguez, Sergio Orts-Escolano, and M. Davia-Aracil, «3d-based reconstruction using growing neural gas landmark: application to rapid prototyping in shoe last manufacturing,» The international journal of advanced manufacturing technology, vol. 69, iss. 1, p. 657–668, 2013.
    [Bibtex]
    @Article{Jimeno-Morenilla2013,
    author = {Jimeno-Morenilla, Antonio and Garcia-Rodriguez, Jose and Orts-Escolano, Sergio and Davia-Aracil, Miguel},
    title = {3D-based reconstruction using growing neural gas landmark: application to rapid prototyping in shoe last manufacturing},
    journal = {The International Journal of Advanced Manufacturing Technology},
    year = {2013},
    volume = {69},
    number = {1},
    pages = {657--668},
    abstract = {Customizing shoe manufacturing is one of the great challenges in the footwear industry. It is a production model change where design adopts not only the main role, but also the main bottleneck. It is therefore necessary to accelerate this process by improving the accuracy of current methods. Rapid prototyping techniques are based on the reuse of manufactured footwear lasts so that they can be modified with CAD systems leading rapidly to new shoe models. In this work, we present a shoe last fast reconstruction method that fits current design and manufacturing processes. The method is based on the scanning of shoe last obtaining sections and establishing a fixed number of landmarks onto those sections to reconstruct the shoe last 3D surface. Automated landmark extraction is accomplished through the use of the self-organizing network, the growing neural gas (GNG), which is able to topographically map the low dimensionality of the network to the high dimensionality of the contour manifold without requiring a priori knowledge of the input space structure. Moreover, our GNG landmark method is tolerant to noise and eliminates outliers. Our method accelerates up to 12 times the surface reconstruction and filtering processes used by the current shoe last design software. The proposed method offers higher accuracy compared with methods with similar efficiency as voxel grid.},
    doi = {10.1007/s00170-013-5061-3},
    issn = {1433-3015},
    url = {http://dx.doi.org/10.1007/s00170-013-5061-3}
    }
  • A. Romero and M. Cazorla, «Learning Multi-class Topological Mapping using Visual Information.,» in Visapp (2), 2013, p. 316–321.
    [Bibtex]
    @inproceedings{Romero2013Learning,
    author = {Romero, Anna and Cazorla, Miguel},
    booktitle = {VISAPP (2)},
    pages = {316--321},
    title = {{Learning Multi-class Topological Mapping using Visual Information.}},
    year = {2013}
    }
  • [DOI] E. Martinez-Martin and A. P. del Pobil, «Visual object recognition for robot tasks in real-life scenarios,» in 2013 10th international conference on ubiquitous robots and ambient intelligence (URAI), 2013.
    [Bibtex]
    @inproceedings{Martinez_Martin_2013,doi = {10.1109/urai.2013.6677413},url = {http://dx.doi.org/10.1109/urai.2013.6677413},year = 2013,month = {oct},publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},author = {Ester Martinez-Martin and Angel P. del Pobil},title = {Visual object recognition for robot tasks in real-life scenarios},booktitle = {2013 10th International Conference on Ubiquitous Robots and Ambient Intelligence ({URAI})}}
  • [DOI] E. Martinez-Martin and A. P. del Pobil, «Visual people detection for safe human-robot interaction,» in 2013 IEEE RO-MAN, 2013.
    [Bibtex]
    @InProceedings{Martinez-Martin2013d,
    author = {Ester Martinez-Martin and Angel P. del Pobil},
    title = {Visual people detection for safe Human-Robot Interaction},
    booktitle = {2013 {IEEE} {RO}-{MAN}},
    year = {2013},
    month = {aug},
    publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},
    doi = {10.1109/roman.2013.6628478},
    url = {http://dx.doi.org/10.1109/roman.2013.6628478},
    }
  • Â. Costa, E. Martinez-Martin, A. P. del Pobil, R. Simoes, and P. Novais, «Find it – an assistant home agent,» Advances in intelligent systems and computing, vol. 221, pp. 121-128, 2013.
    [Bibtex]
    @article{Martinez-Martin2013,title = {Find it - an assistant home agent},journal = {Advances in Intelligent Systems and Computing},year = {2013},volume = {221},pages = {121-128},author = {Costa, {\^A}. and Martinez-Martin, E. and del Pobil, A.P. and Simoes, R. and Novais, P.}}
  • E. Martinez-Martin, M. Teresa Escrig, and A. P. del Pobil, «Naming qualitative models based on intervals: a general framework,» International journal of artificial intelligence, vol. 11, iss. 13 A, pp. 74-92, 2013.
    [Bibtex]
    @Article{Martinez-Martin2013a,
    author = {Martinez-Martin, E. and Teresa Escrig, M. and del Pobil, A.P.},
    title = {Naming qualitative models based on intervals: A general framework},
    journal = {International Journal of Artificial Intelligence},
    year = {2013},
    volume = {11},
    number = {13 A},
    pages = {74-92},
    }
  • [DOI] E. Martinez-Martin and A. P. del Pobil, «Object recognition in cluttered environments,» in Computer graphics and imaging / 798: signal processing, pattern recognition and applications, 2013.
    [Bibtex]
    @InProceedings{Martinez-Martin2013e,
    author = {Ester Martinez-Martin and Angel P. del Pobil},
    title = {Object Recognition in Cluttered Environments},
    booktitle = {Computer Graphics and Imaging / 798: Signal Processing, Pattern Recognition and Applications},
    year = {2013},
    publisher = {{ACTA} Press},
    doi = {10.2316/p.2013.798-026},
    url = {http://dx.doi.org/10.2316/p.2013.798-026},
    }
  • E. Martinez-Martin, M. T. Escrig, and A. P. del Pobil, «Qualitative acceleration model: representation, reasoning and application,» Advances in intelligent systems and computing, vol. 217, pp. 87-94, 2013.
    [Bibtex]
    @Article{Martinez-Martin2013b,
    author = {Martinez-Martin, E. and Escrig, M.T. and del Pobil, A.P.},
    title = {Qualitative acceleration model: Representation, reasoning and application},
    journal = {Advances in Intelligent Systems and Computing},
    year = {2013},
    volume = {217},
    pages = {87-94},
    }

2012

  • [DOI] B. Bonev, M. Cazorla, F. Martin, and V. Matellan, «Portable autonomous walk calibration for 4-legged robots,» Applied intelligence, vol. 36, iss. 1, p. 136–147, 2012.
    [Bibtex]
    @article{Bonev:2012aa,
    abstract = {In the present paper we describe an efficient and portable optimization method for calibrating the walk parameters of a quadruped robot, and its contribution for the robot control and localization. The locomotion of a legged robot presents not only the problem of maximizing the speed, but also the problem of obtaining a precise speed response, and achieving an acceptable odometry information. In this study we use a simulated annealing algorithm for calibrating different parametric sets for different speed ranges, with the goal of avoiding discontinuities. The results are applied to the robot AIBO in the RoboCup domain. Moreover, we outline the relevance of calibration to the control, showing the improvement obtained in odometry and, as a consequence, in robot localization.},
    author = {Bonev, Boyan and Cazorla, Miguel and Martin, Francisco and Matellan, Vicente},
    doi = {10.1007/s10489-010-0249-9},
    issn = {0924-669X},
    journal = {Applied Intelligence},
    keywords = {Legged locomotion; Walk parameters estimation; Aut},
    number = {1},
    pages = {136--147},
    publisher = {Springer US},
    title = {{Portable autonomous walk calibration for 4-legged robots}},
    url = {http://dx.doi.org/10.1007/s10489-010-0249-9},
    volume = {36},
    year = {2012}
    }
  • V. Morell, M. Cazorla, D. Viejo, Sergio Orts-Escolano, and J. Garcia-Rodriguez, «A study of registration techniques for 6DoF SLAM,» in Ccia, 2012, p. 143–150.
    [Bibtex]
    @InProceedings{Morell2012A,
    author = {Morell, Vicente and Cazorla, Miguel and Viejo, Diego and Orts-Escolano, Sergio and Garcia-Rodriguez, Jose},
    title = {{A study of registration techniques for 6DoF SLAM}},
    booktitle = {CCIA},
    year = {2012},
    editor = {Riano, David and Onaindia, Eva and Cazorla, Miguel},
    volume = {248},
    series = {Frontiers in Artificial Intelligence and Applications},
    pages = {143--150},
    publisher = {IOS Press},
    isbn = {978-1-61499-138-0}
    }
  • J. Munoz, D. Pastor, P. Gil Vazquez, Santiago Puente Mendez, and M. Cazorla, «A study of 2D features for 3D visual SLAM,» in 43rd international symposium on robotics, 2012.
    [Bibtex]
    @inproceedings{munoz2012study,
    author = {Jose Munoz and Daniel Pastor and Pablo {Gil Vazquez} and Santiago {Puente Mendez} and Miguel Cazorla},
    booktitle = {43rd International Symposium on Robotics},
    title = {{A study of 2D features for 3D visual SLAM}},
    year = {2012}
    }
  • J. Munoz, D. Pastor, P. Gil, P. T. Santiago Mendez, and M. Cazorla, «Using a RGB-D camera for 6DoF SLAM.,» in Ccia, 2012, p. 143–150.
    [Bibtex]
    @inproceedings{MunozPGMC12,
    author = {Munoz, Jose and Pastor, Daniel and Gil, Pablo and Mendez, Santiago T Puente and Cazorla, Miguel},
    booktitle = {CCIA},
    editor = {Riano, David and Onaindia, Eva and Cazorla, Miguel},
    isbn = {978-1-61499-138-0},
    keywords = {dblp},
    pages = {143--150},
    publisher = {IOS Press},
    series = {Frontiers in Artificial Intelligence and Applications},
    title = {{Using a RGB-D camera for 6DoF SLAM.}},
    url = {http://dblp.uni-trier.de/db/conf/ccia/ccia2012.html{\#}MunozPGMC12},
    volume = {248},
    year = {2012}
    }
  • J. Navarrete-Sanchez, D. Viejo, and M. Cazorla, «Portable 3D laser-camera calibration system with color fusion for SLAM,» in International symposium on robotics, isr, 2012.
    [Bibtex]
    @inproceedings{Navarrete2012Portable,
    author = {Navarrete-Sanchez, Javier and Viejo, Diego and Cazorla, Miguel},
    booktitle = {International Symposium on Robotics, ISR},
    title = {{Portable 3D laser-camera calibration system with color fusion for SLAM}},
    year = {2012}
    }
  • [DOI] Sergio Orts-Escolano, J. Garcia-Rodriguez, D. Viejo, M. Cazorla, and V. Morell, «GPGPU implementation of growing neural gas: Application to 3D scene reconstruction,» Journal of parallel and distributed computing, vol. 72, iss. 10, p. 1361–1372, 2012.
    [Bibtex]
    @Article{Orts20121361,
    author = {Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Viejo, Diego and Cazorla, Miguel and Morell, Vicente},
    title = {{GPGPU implementation of growing neural gas: Application to 3D scene reconstruction}},
    journal = {Journal of Parallel and Distributed Computing},
    year = {2012},
    volume = {72},
    number = {10},
    pages = {1361--1372},
    abstract = {Self-organising neural models have the ability to provide a good representation of the input space. In particular the Growing Neural Gas (GNG) is a suitable model because of its flexibility, rapid adaptation and excellent quality of representation. However, this type of learning is time-consuming, especially for high-dimensional input data. Since real applications often work under time constraints, it is necessary to adapt the learning process in order to complete it in a predefined time. This paper proposes a Graphics Processing Unit (GPU) parallel implementation of the {\{}GNG{\}} with Compute Unified Device Architecture (CUDA). In contrast to existing algorithms, the proposed {\{}GPU{\}} implementation allows the acceleration of the learning process keeping a good quality of representation. Comparative experiments using iterative, parallel and hybrid implementations are carried out to demonstrate the effectiveness of {\{}CUDA{\}} implementation. The results show that {\{}GNG{\}} learning with the proposed implementation achieves a speed-up of 6x compared with the single-threaded {\{}CPU{\}} implementation. {\{}GPU{\}} implementation has also been applied to a real application with time constraints: acceleration of 3D scene reconstruction for egomotion, in order to validate the proposal. },
    doi = {http://dx.doi.org/10.1016/j.jpdc.2012.05.008},
    issn = {0743-7315},
    keywords = {Egomotion},
    url = {http://www.sciencedirect.com/science/article/pii/s0743731512001268}
    }
  • Sergio Orts-Escolano, J. Garcia-Rodriguez, D. Viejo, M. Cazorla, V. Morell, and J. Serra, «6DoF pose estimation using Growing Neural Gas Network,» in Proceedings of 5th international conference on cognitive systems, 2012.
    [Bibtex]
    @inproceedings{Orts2012b,
    author = {Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Viejo, Diego and Cazorla, Miguel and Morell, Vicente and Serra, Jose},
    booktitle = {Proceedings of 5th International Conference on Cognitive Systems},
    title = {{6DoF pose estimation using Growing Neural Gas Network}},
    year = {2012}
    }
  • A. Romero and M. Cazorla, «Finding nodes into a topological map using visual features,» in International symposium on robotics, isr, 2012.
    [Bibtex]
    @inproceedings{Romero2012Finding,
    author = {Romero, Anna and Cazorla, Miguel},
    booktitle = {International Symposium on Robotics, ISR},
    title = {{Finding nodes into a topological map using visual features}},
    year = {2012}
    }
  • A. Romero and M. Cazorla, «Learning Multi-class Topological Mapping using Visual Information,» in Ccia, 2012, p. 143–150.
    [Bibtex]
    @inproceedings{Romero2012Learning,
    author = {Romero, Anna and Cazorla, Miguel},
    booktitle = {CCIA},
    editor = {Riano, David and Onaindia, Eva and Cazorla, Miguel},
    isbn = {978-1-61499-138-0},
    pages = {143--150},
    publisher = {IOS Press},
    series = {Frontiers in Artificial Intelligence and Applications},
    title = {{Learning Multi-class Topological Mapping using Visual Information}},
    volume = {248},
    year = {2012}
    }
  • A. Romero and M. Cazorla, «Topological visual mapping in robotics,» in Proceedings of the 5th international conference on spatial cognition, 2012.
    [Bibtex]
    @inproceedings{Romero2012Topological,
    author = {Romero, Anna and Cazorla, Miguel},
    booktitle = {Proceedings of the 5th International Conference on Spatial Cognition},
    title = {{Topological visual mapping in robotics}},
    year = {2012}
    }
  • A. Romero and M. Cazorla, «Topological visual mapping in robotics,» Cognitive processing, vol. 3, iss. 305–308, 2012.
    [Bibtex]
    @Article{Romero2012Cognitive,
    author = {Romero, Anna and Cazorla, Miguel},
    title = {Topological visual mapping in robotics},
    journal = {Cognitive Processing},
    year = {2012},
    volume = {3},
    number = {305--308}
    }
  • [DOI] J. Salinas, M. de la Iglesia-Vaya, L. Bonmati, R. Valenzuela, and M. Cazorla, «R & D Cloud CEIB: Management System and Knowledge Extraction for Bioimaging in the Cloud,» in Distributed computing and artificial intelligence, Sigeru Omatu, J. F. {De Paz Santana}, R. Sara Gonzalez, J. M. Molina, A. M. Bernardos, and J. C. M. Rodriguez, Eds., Springer berlin heidelberg, 2012, vol. 151, p. 331–338.
    [Bibtex]
    @incollection{salinas:2012aa,
    abstract = {The management system and knowledge extraction of bioimaging in the cloud (R {\&} D Cloud CEIB) which is proposed in this article will use the services offered by the centralization of bioimaging through Valencian Biobank Medical Imaging (GIMC in Spanish) as a basis for managing and extracting knowledge from a bioimaging bank, providing that knowledge as services with high added value and expertise to the Electronic Patient History System (HSE), thus bringing the results of R {\&} D to the patient, improving the quality of the information contained therein. R {\&} D Cloud CEIB has four general modules: search engine (SE), manager of clinical trials (GEBID), anonymizer (ANON) and motor knowledge (BIKE). The BIKE is the central module and through its sub modules analyses and generates knowledge to provide to the HSE through services. The technology used in R {\&} D Cloud CEIB is completely based on Open Source.
    Within the BIKE, we focus on the development of the classifier module (BIKEClassifier), which aims to establish a method for the extraction of biomarkers for bioimaging and subsequent analysis to obtain a classification in bioimaging available pools following GIMC diagnostic experience.},
    author = {Salinas, JoseMaria and de la Iglesia-Vaya, Maria and Bonmati, LuisMarti and Valenzuela, Rosa and Cazorla, Miguel},
    booktitle = {Distributed Computing and Artificial Intelligence},
    doi = {10.1007/978-3-642-28765-7_39},
    editor = {Omatu, Sigeru and {De Paz Santana}, Juan F and Gonzalez, Sara Rodriguez and Molina, Jose M and Bernardos, Ana M and Rodriguez, Juan M Corchado},
    isbn = {978-3-642-28764-0},
    pages = {331--338},
    publisher = {Springer Berlin Heidelberg},
    series = {Advances in Intelligent and Soft Computing},
    title = {{R {\&} D Cloud CEIB: Management System and Knowledge Extraction for Bioimaging in the Cloud}},
    url = {http://dx.doi.org/10.1007/978-3-642-28765-7{\_}39},
    volume = {151},
    year = {2012}
    }
  • J. M. Salinas, M. D. {la Iglesia Vaya}, and M. Cazorla, «R & D Cloud CEIB,» in Proceedings of the ieee international conference on biomedical engineering and biotechnology, 2012.
    [Bibtex]
    @inproceedings{salinas2012c,
    author = {Salinas, Jose Maria and {la Iglesia Vaya}, Maria De and Cazorla, Miguel},
    booktitle = {Proceedings of the IEEE International Conference on Biomedical Engineering and Biotechnology},
    title = {{R {\&} D Cloud CEIB}},
    year = {2012}
    }
  • [DOI] D. Viejo, J. Garcia, M. Cazorla, D. Gil, and M. Johnsson, «Using GNG to improve 3D feature extraction. Application to 6DoF egomotion,» Neural networks, vol. 32, p. 138–146, 2012.
    [Bibtex]
    @article{Viejo2012138,
    abstract = {Several recent works deal with 3D data in mobile robotic problems, e.g. mapping or egomotion. Data comes from any kind of sensor such as stereo vision systems, time of flight cameras or 3D lasers, providing a huge amount of unorganized 3D data. In this paper, we describe an efficient method to build complete 3D models from a Growing Neural Gas (GNG). The {\{}GNG{\}} is applied to the 3D raw data and it reduces both the subjacent error and the number of points, keeping the topology of the 3D data. The {\{}GNG{\}} output is then used in a 3D feature extraction method. We have performed a deep study in which we quantitatively show that the use of {\{}GNG{\}} improves the 3D feature extraction method. We also show that our method can be applied to any kind of 3D data. The 3D features obtained are used as input in an Iterative Closest Point (ICP)-like method to compute the 6DoF movement performed by a mobile robot. A comparison with standard {\{}ICP{\}} is performed, showing that the use of {\{}GNG{\}} improves the results. Final results of 3D mapping from the egomotion calculated are also shown. },
    annote = {Selected Papers from {\{}IJCNN{\}} 2011},
    author = {Viejo, Diego and Garcia, Jose and Cazorla, Miguel and Gil, David and Johnsson, Magnus},
    doi = {http://dx.doi.org/10.1016/j.neunet.2012.02.014},
    issn = {0893-6080},
    journal = {Neural Networks},
    keywords = {6DoF registration},
    number = {0},
    pages = {138--146},
    title = {{Using GNG to improve 3D feature extraction. Application to 6DoF egomotion}},
    url = {http://www.sciencedirect.com/science/article/pii/s0893608012000433},
    volume = {32},
    year = {2012}
    }
  • [DOI] D. Viejo, J. Garcia-Rodriguez, and M. Cazorla, «A study of a soft computing based method for 3D scenario reconstruction,» Applied soft computing, vol. 12, iss. 10, p. 3158–3164, 2012.
    [Bibtex]
    @article{Viejo20123158,
    abstract = {Several recent works deal with 3D data in mobile robotic problems, e.g., mapping. Data comes from any kind of sensor (time of flight, Kinect or 3D lasers) that provide a huge amount of unorganized 3D data. In this paper we detail an efficient approach to build complete 3D models using a soft computing method, the Growing Neural Gas (GNG). As neural models deal easily with noise, imprecision, uncertainty or partial data, {\{}GNG{\}} provides better results than other approaches. The {\{}GNG{\}} obtained is then applied to a sequence. We present a comprehensive study on {\{}GNG{\}} parameters to ensure the best result at the lowest time cost. From this {\{}GNG{\}} structure, we propose to calculate planar patches and thus obtaining a fast method to compute the movement performed by a mobile robot by means of a 3D models registration algorithm. Final results of 3D mapping are also shown. },
    author = {Viejo, Diego and Garcia-Rodriguez, Jose and Cazorla, Miguel},
    doi = {http://dx.doi.org/10.1016/j.asoc.2012.05.025},
    issn = {1568-4946},
    journal = {Applied Soft Computing},
    keywords = {3D feature extraction},
    number = {10},
    pages = {3158--3164},
    title = {{A study of a soft computing based method for 3D scenario reconstruction}},
    url = {http://www.sciencedirect.com/science/article/pii/s1568494612002803},
    volume = {12},
    year = {2012}
    }
  • D. Viejo and M. Cazorla, «A framework for managing heterogenous sensor data in a single map,» in Ieee intelligent vehicles symposium, 2012.
    [Bibtex]
    @inproceedings{Viejo20123A,
    author = {Viejo, Diego and Cazorla, Miguel},
    booktitle = {IEEE Intelligent Vehicles Symposium},
    title = {{A framework for managing heterogenous sensor data in a single map}},
    year = {2012}
    }
  • [DOI] E. Martinez-Martin and A. P. del Pobil, «Visual surveillance for human-robot interaction,» in 2012 IEEE international conference on systems, man, and cybernetics (SMC), 2012.
    [Bibtex]
    @inproceedings{Martinez_Martin_2012,doi = {10.1109/icsmc.2012.6378306},url = {http://dx.doi.org/10.1109/icsmc.2012.6378306},year = 2012,month = {oct},publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},author = {Ester Martinez-Martin and Angel P. del Pobil},title = {Visual surveillance for human-robot interaction},booktitle = {2012 {IEEE} International Conference on Systems, Man, and Cybernetics ({SMC})}}
  • [DOI] E. Martinez-Martin and Á. P. del Pobil, «Applications,» in Robust motion detection in real-life scenarios, Springer science + business media, 2012, p. 85–98.
    [Bibtex]
    @incollection{Martinez-Martin_2012,doi = {10.1007/978-1-4471-4216-4_4},url = {http://dx.doi.org/10.1007/978-1-4471-4216-4_4},year = 2012,publisher = {Springer Science $\mathplus$ Business Media},pages = {85--98},author = {Ester Martinez-Martin and {\'{A}}ngel P. del Pobil},title = {Applications},booktitle = {Robust Motion Detection in Real-Life Scenarios}}
  • [DOI] E. Martinez-Martin and Á. P. del Pobil, «Computer vision concepts,» in Robust motion detection in real-life scenarios, Springer science + business media, 2012, p. 99–108.
    [Bibtex]
    @InCollection{Martinez-Martin2012d,
    author = {Ester Martinez-Martin and {\'{A}}ngel P. del Pobil},
    title = {Computer Vision Concepts},
    booktitle = {Robust Motion Detection in Real-Life Scenarios},
    publisher = {Springer Science $\mathplus$ Business Media},
    year = {2012},
    pages = {99--108},
    doi = {10.1007/978-1-4471-4216-4_5},
    url = {http://dx.doi.org/10.1007/978-1-4471-4216-4_5},
    }
  • [DOI] E. Martinez-Martin and Á. P. del Pobil, «Introduction,» in Robust motion detection in real-life scenarios, Springer science + business media, 2012, p. 1–3.
    [Bibtex]
    @InCollection{Martinez-Martin2012e,
    author = {Ester Martinez-Martin and {\'{A}}ngel P. del Pobil},
    title = {Introduction},
    booktitle = {Robust Motion Detection in Real-Life Scenarios},
    publisher = {Springer Science $\mathplus$ Business Media},
    year = {2012},
    pages = {1--3},
    doi = {10.1007/978-1-4471-4216-4_1},
    url = {http://dx.doi.org/10.1007/978-1-4471-4216-4_1},
    }
  • [DOI] E. Martinez-Martin and Á. P. del Pobil, «Motion detection in general backgrounds,» in Robust motion detection in real-life scenarios, Springer science + business media, 2012, p. 43–84.
    [Bibtex]
    @InCollection{Martinez-Martin2012f,
    author = {Ester Martinez-Martin and {\'{A}}ngel P. del Pobil},
    title = {Motion Detection in General Backgrounds},
    booktitle = {Robust Motion Detection in Real-Life Scenarios},
    publisher = {Springer Science $\mathplus$ Business Media},
    year = {2012},
    pages = {43--84},
    doi = {10.1007/978-1-4471-4216-4_3},
    url = {http://dx.doi.org/10.1007/978-1-4471-4216-4_3},
    }
  • [DOI] E. Martinez-Martin and Á. P. del Pobil, «Motion detection in static backgrounds,» in Robust motion detection in real-life scenarios, Springer science + business media, 2012, p. 5–42.
    [Bibtex]
    @InCollection{Martinez-Martin2012g,
    author = {Ester Martinez-Martin and {\'{A}}ngel P. del Pobil},
    title = {Motion Detection in Static Backgrounds},
    booktitle = {Robust Motion Detection in Real-Life Scenarios},
    publisher = {Springer Science $\mathplus$ Business Media},
    year = {2012},
    pages = {5--42},
    doi = {10.1007/978-1-4471-4216-4_2},
    url = {http://dx.doi.org/10.1007/978-1-4471-4216-4_2},
    }

2011

  • M. Angel Cazorla, V. {Matellan Olivera}, and Others, «Special issue about advances in Physical Agents,» 2011.
    [Bibtex]
    @article{cazorla2011special,
    author = {Cazorla, Miguel Angel and {Matellan Olivera}, Vicente and Others},
    publisher = {Red de Agentes Fisicos},
    title = {{Special issue about advances in Physical Agents}},
    year = {2011}
    }
  • M. Cazorla and A. Romero, «VIDEO LECTURES++: Combining information and interaction in an open source framework,» in Inted2011 proceedings, 2011, p. 4034–4040.
    [Bibtex]
    @inproceedings{cazorla2011video,
    author = {Cazorla, M and Romero, A},
    booktitle = {INTED2011 Proceedings},
    pages = {4034--4040},
    publisher = {IATED},
    title = {{VIDEO LECTURES++: Combining information and interaction in an open source framework}},
    year = {2011}
    }
  • D. Gil, J. Garcia, M. Cazorla, and M. Johnsson, «Predictions tasks with words and sequences: Comparing a novel recurrent architecture with the Elman network,» in Neural networks (ijcnn), the 2011 international joint conference on, 2011, p. 1207–1213.
    [Bibtex]
    @inproceedings{gil2011predictions,
    author = {Gil, David and Garcia, J and Cazorla, Miguel and Johnsson, Magnus},
    booktitle = {Neural Networks (IJCNN), The 2011 International Joint Conference on},
    organization = {IEEE},
    pages = {1207--1213},
    title = {{Predictions tasks with words and sequences: Comparing a novel recurrent architecture with the Elman network}},
    year = {2011}
    }
  • D. Viejo, J. Garcia, and M. Cazorla, «6DoF egomotion computing using 3D GNG-based reconstruction,» in Advances in computational intelligence, Springer berlin heidelberg, 2011, p. 50–57.
    [Bibtex]
    @incollection{viejo20116dof,
    author = {Viejo, Diego and Garcia, Jose and Cazorla, Miguel},
    booktitle = {Advances in Computational Intelligence},
    pages = {50--57},
    publisher = {Springer Berlin Heidelberg},
    title = {{6DoF egomotion computing using 3D GNG-based reconstruction}},
    year = {2011}
    }
  • D. Viejo, J. Garcia, M. Cazorla, D. Gil, and M. Johnsson, «Using 3d gng-based reconstruction for 6dof egomotion,» in Neural networks (ijcnn), the 2011 international joint conference on, 2011, p. 1042–1048.
    [Bibtex]
    @inproceedings{viejo2011using,
    author = {Viejo, Diego and Garcia, Jose and Cazorla, Miguel and Gil, David and Johnsson, Magnus},
    booktitle = {Neural Networks (IJCNN), The 2011 International Joint Conference on},
    organization = {IEEE},
    pages = {1042--1048},
    title = {{Using 3d gng-based reconstruction for 6dof egomotion}},
    year = {2011}
    }
  • D. Viejo, J. Garcia, and M. Cazorla, «Visual features extraction based egomotion calculation from a infrared time-of-flight camera,» in Advances in computational intelligence, Springer berlin heidelberg, 2011, p. 9–16.
    [Bibtex]
    @incollection{viejo2011visual,
    author = {Viejo, Diego and Garcia, Jose and Cazorla, Miguel},
    booktitle = {Advances in Computational Intelligence},
    pages = {9--16},
    publisher = {Springer Berlin Heidelberg},
    title = {{Visual features extraction based egomotion calculation from a infrared time-of-flight camera}},
    year = {2011}
    }

2010

  • [DOI] M. Cazorla, D. V. Hernando, A. H. Gutierrez, J. Nieto, and E. Nebot, «Large scale egomotion and error analysis with visual features,» Journal of physical agents, vol. 4, iss. 1, p. 19–24, 2010.
    [Bibtex]
    @article{CazorlaLargescale2010,
    author = {Miguel Cazorla and Diego Viejo Hernando and Andres Hernandez Gutierrez and Juan Nieto and Eduardo Nebot},
    title = {Large scale egomotion and error analysis with visual features},
    journal = {Journal of Physical Agents},
    volume = {4},
    number = {1},
    year = {2010},
    keywords = {Computer vision; Mobile robotics},
    abstract = {Several works deal with 3D data in SLAM problem but many of them are focused on short scale maps. In this paper, we propose a method that can be used for computing the 6DoF trajectory performed by a robot from the stereo images captured during a large scale trajectory. The method transforms robust 2D features extracted from the reference stereo images to the 3D space. These 3D features are then used for obtaining the correct robot movement. Both SIFT and SURF methods for feature extraction have been used. Also, a comparison between our method and the results of the ICP algorithm have been performed. We have also made a study about errors in stereo cameras.},
    pages = {19--24},
    doi = {10.14198/JoPha.2010.4.1.04},
    url = {http://www.jopha.net/article/view/2010-v4-n1-large-scale-egomotion-and-error-analysis-with-visual-features}
    }
  • M. Cazorla and D. Viejo, «EXPERIENCES USING AN OPEN SOURCE SOFTWARE LIBRARY TO TEACH A COMPUTER VISION SUBJECT,» in Inted2010 proceedings, 2010, p. 4514–4522.
    [Bibtex]
    @inproceedings{cazorla2010experiences,
    author = {Cazorla, M and Viejo, D},
    booktitle = {INTED2010 Proceedings},
    pages = {4514--4522},
    publisher = {IATED},
    title = {{EXPERIENCES USING AN OPEN SOURCE SOFTWARE LIBRARY TO TEACH A COMPUTER VISION SUBJECT}},
    year = {2010}
    }
  • M. Cazorla and A. Romero, «A NEW FRAMEWORK IN VIDEO LECTURES: ADDING INTERACTION AND ADDITIONAL INFORMATION,» in Iceri2010 proceedings, 2010, p. 4593–4598.
    [Bibtex]
    @inproceedings{cazorla2010new,
    author = {Cazorla, M and Romero, A},
    booktitle = {ICERI2010 Proceedings},
    pages = {4593--4598},
    publisher = {IATED},
    title = {{A NEW FRAMEWORK IN VIDEO LECTURES: ADDING INTERACTION AND ADDITIONAL INFORMATION}},
    year = {2010}
    }
  • M. Cazorla, D. Viejo, and C. Pomares, «Study of the SR4000 camera,» in Workshop of physical agents, 2010.
    [Bibtex]
    @inproceedings{cazorla2010study,
    author = {Cazorla, Miguel and Viejo, Diego and Pomares, Cristina},
    booktitle = {Workshop of Physical Agents},
    organization = {Red de Agentes Fisicos},
    title = {{Study of the SR4000 camera}},
    year = {2010}
    }
  • M. Cazorla and B. Bonev, «Large scale Environment Partitioning in Mobile Robotics Recognition Tasks,» Journal of physical agents, vol. 4, iss. 2, 2010.
    [Bibtex]
    @article{JoPhA71,
    abstract = {In this paper we present a scalable machine learning approach to mobile robots visual localization. The applicability of machine learning approaches is constrained by the complexity and size of the problem's domain. Thus, dividing the problem becomes necessary and two essential questions arise: which partition set is optimal for the problem and how to integrate the separate results into a single solution. The novelty of this work is the use of Information Theory for partitioning high-dimensional data. In the presented experiments the domain of the problem is a large sequence of omnidirectional images, each one of them providing a high number of features. A robot which follows the same trajectory has to answer which is the most similar image from the sequence. The sequence is divided so that each partition is suitable for building a simple classifier. The partitions are established on the basis of the information divergence peaks among the images. Measuring the divergence has usually been considered unfeasible in high-dimensional data spaces. We overcome this problem by estimating the Jensen-Renyi divergence with an entropy approximation based on entropic spanning graphs. Finally, the responses of the different classifiers provide a multimodal hypothesis for each incoming image. As the robot is moving, a particle filter is used for attaining the convergence to a unimodal hypothesis.},
    author = {Cazorla, Miguel and Bonev, Boyan},
    issn = {1888-0258},
    journal = {Journal of Physical Agents},
    keywords = {Jensen-Renyi divergence,Visual localization,classifier,entropy,particle filter},
    number = {2},
    title = {{Large scale Environment Partitioning in Mobile Robotics Recognition Tasks}},
    url = {http://www.jopha.net/index.php/jopha/article/view/71},
    volume = {4},
    year = {2010}
    }
  • A. Romero and M. Cazorla, «An Improvement of Topological Mapping Using a Graph-Matching Based Method with Omnidirectional Images.,» in Ccia, 2010, p. 311–320.
    [Bibtex]
    @inproceedings{romero2010improvement,
    author = {Romero, Anna and Cazorla, Miguel},
    booktitle = {CCIA},
    pages = {311--320},
    title = {{An Improvement of Topological Mapping Using a Graph-Matching Based Method with Omnidirectional Images.}},
    year = {2010}
    }
  • A. Romero and M. Cazorla, «Testing image segmentation for topological SLAM with omnidirectional images,» in Advances in artificial intelligence, Springer berlin heidelberg, 2010, p. 266–277.
    [Bibtex]
    @incollection{romero2010testing,
    author = {Romero, Anna and Cazorla, Miguel},
    booktitle = {Advances in Artificial Intelligence},
    pages = {266--277},
    publisher = {Springer Berlin Heidelberg},
    title = {{Testing image segmentation for topological SLAM with omnidirectional images}},
    year = {2010}
    }
  • A. Romero and M. Cazorla, «Topological slam using omnidirectional images: Merging feature detectors and graph-matching,» in Advanced concepts for intelligent vision systems, Springer berlin heidelberg, 2010, p. 464–475.
    [Bibtex]
    @incollection{romero2010topological,
    author = {Romero, Anna and Cazorla, Miguel},
    booktitle = {Advanced Concepts for Intelligent Vision Systems},
    pages = {464--475},
    publisher = {Springer Berlin Heidelberg},
    title = {{Topological slam using omnidirectional images: Merging feature detectors and graph-matching}},
    year = {2010}
    }
  • [DOI] A. M. {Romero Cortijo}, M. Angel Cazorla, and Others, «Topological SLAM using a graph-matching based method on omnidirectional images,» in Advanced concepts for intelligent vision systems – 12th international conference, 2010.
    [Bibtex]
    @inproceedings{romero2010topological1,
    author = {{Romero Cortijo}, Anna Maria and Cazorla, Miguel Angel and Others},
    booktitle = {Advanced Concepts for Intelligent Vision Systems - 12th International Conference},
    doi = {10.1007/978-3-642-17688-3_43},
    title = {{Topological SLAM using a graph-matching based method on omnidirectional images}},
    year = {2010}
    }
  • [DOI] E. Martinez-Martin and A. P. del Pobil, «A panoramic vision system for human-robot interaction,» in 2010 5th ACM/IEEE international conference on human-robot interaction (HRI), 2010.
    [Bibtex]
    @inproceedings{Martinez_2010,doi = {10.1109/hri.2010.5453211},url = {http://dx.doi.org/10.1109/hri.2010.5453211},year = 2010,month = {mar},publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},author = {Ester Martinez-Martin and Angel P. del Pobil},title = {A panoramic vision system for human-robot interaction},booktitle = {2010 5th {ACM}/{IEEE} International Conference on Human-Robot Interaction ({HRI})}}
  • [DOI] E. Martinez-Martin and A. P. del Pobil, «A hybrid algorithm for motion segmentation,» in Signal and image processing, 2010.
    [Bibtex]
    @inproceedings{Martinez-Martin_2010,doi = {10.2316/p.2010.710-010},url = {http://dx.doi.org/10.2316/p.2010.710-010},year = 2010,publisher = {{ACTA} Press},author = {E. Martinez-Martin and A.P. del Pobil},title = {A Hybrid Algorithm for Motion Segmentation},booktitle = {Signal and Image Processing}}
  • [DOI] E. Martinez-Martin and A. P. del Pobil, «A panoramic vision system for human-robot interaction,» in Proceeding of the 5th ACM/IEEE international conference on human-robot interaction – HRI '10, 2010.
    [Bibtex]
    @inproceedings{Mart_nez_2010,doi = {10.1145/1734454.1734528},url = {http://dx.doi.org/10.1145/1734454.1734528},year = 2010,publisher = {Association for Computing Machinery ({ACM})},author = {Ester Martinez-Martin and Angel P. del Pobil},title = {A panoramic vision system for human-robot interaction},booktitle = {Proceeding of the 5th {ACM}/{IEEE} international conference on Human-robot interaction - {HRI} {\textquotesingle}10}}

2009

  • [DOI] M. A. Lozano, F. Escolano, B. Bonev, P. Suau, W. Aguilar, J. M. Saez, and M. A. Cazorla, «Region and Constellations Based Categorization of Images with Unsupervised Graph Learning,» Image vision comput., vol. 27, iss. 7, p. 960–978, 2009.
    [Bibtex]
    @article{Lozano:2009:RCB:1534927.1534960,
    address = {Newton, MA, USA},
    author = {Lozano, M A and Escolano, F and Bonev, B and Suau, P and Aguilar, W and Saez, J M and Cazorla, M A},
    doi = {10.1016/j.imavis.2008.09.011},
    issn = {0262-8856},
    journal = {Image Vision Comput.},
    keywords = {Clustering of graphs,EM algorithms,Image categorization},
    number = {7},
    pages = {960--978},
    publisher = {Butterworth-Heinemann},
    title = {{Region and Constellations Based Categorization of Images with Unsupervised Graph Learning}},
    url = {http://dx.doi.org/10.1016/j.imavis.2008.09.011},
    volume = {27},
    year = {2009}
    }
  • J. M. C. Plaza, M. Cazorla, and V. Matellan, «Uso de simuladores en Docencia de Robotica Movil.,» Ieee-rita, vol. 4, iss. 4, p. 269–278, 2009.
    [Bibtex]
    @article{Plaza2009Uso,
    author = {Plaza, Jose Maria Canyas and Cazorla, Miguel and Matellan, Vicente},
    journal = {IEEE-RITA},
    number = {4},
    pages = {269--278},
    title = {{Uso de simuladores en Docencia de Robotica Movil.}},
    volume = {4},
    year = {2009}
    }
  • A. M. Romero Cortijo, M. Angel Cazorla, and Others, «Comparativa de detectores de caracteristicas visuales y su aplicacion al SLAM.» 2009.
    [Bibtex]
    @inproceedings{romero2009comparativa,
    author = {Romero Cortijo, Anna Maria and Cazorla, Miguel Angel and Others},
    title = {{Comparativa de detectores de caracteristicas visuales y su aplicacion al SLAM}},
    year = {2009}
    }
  • [DOI] E. Martinez-Martin and A. P. del Pobil, «Safety for human-robot interaction in dynamic environments,» in 2009 IEEE international symposium on assembly and manufacturing, 2009.
    [Bibtex]
    @inproceedings{Martinez_2009,doi = {10.1109/isam.2009.5376949},url = {http://dx.doi.org/10.1109/isam.2009.5376949},year = 2009,month = {nov},publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},author = {Ester Martinez-Martin and Angel P. del Pobil},title = {Safety for human-robot interaction in dynamic environments},booktitle = {2009 {IEEE} International Symposium on Assembly and Manufacturing}}

2008

  • [DOI] B. Bonev, F. Escolano, and M. Cazorla, «Feature selection, mutual information, and the classification of high-dimensional patterns,» Pattern analysis and applications, vol. 11, iss. 3-4, p. 309–319, 2008.
    [Bibtex]
    @article{Bonev:2008aa,
    abstract = {We propose a novel feature selection filter for supervised learning, which relies on the efficient estimation of the mutual information between a high-dimensional set of features and the classes. We bypass the estimation of the probability density function with the aid of the entropic-graphs approximation of Renyi entropy, and the subsequent approximation of the Shannon entropy. Thus, the complexity does not depend on the number of dimensions but on the number of patterns/samples, and the curse of dimensionality is circumvented. We show that it is then possible to outperform algorithms which individually rank features, as well as a greedy algorithm based on the maximal relevance and minimal redundancy criterion. We successfully test our method both in the contexts of image classification and microarray data classification. For most of the tested data sets, we obtain better classification results than those reported in the literature.},
    author = {Bonev, Boyan and Escolano, Francisco and Cazorla, Miguel},
    doi = {10.1007/s10044-008-0107-0},
    issn = {1433-7541},
    journal = {Pattern Analysis and Applications},
    keywords = {Filter feature selection; Mutual information; Entropy},
    number = {3-4},
    pages = {309--319},
    publisher = {Springer-Verlag},
    title = {{Feature selection, mutual information, and the classification of high-dimensional patterns}},
    url = {http://dx.doi.org/10.1007/s10044-008-0107-0},
    volume = {11},
    year = {2008}
    }
  • D. Viejo and M. Cazorla, «3D Model Based Map Building,» in International symposium on robotics, isr 2008, 2008.
    [Bibtex]
    @inproceedings{Viejo2008,
    author = {Viejo, Diego and Cazorla, Miguel},
    booktitle = {International Symposium on Robotics, ISR 2008},
    title = {{3D Model Based Map Building}},
    year = {2008}
    }
  • D. {Viejo Hernando}, M. Angel Cazorla, and Others, «3D Feature Extraction and Modelling for SLAM.» 2008.
    [Bibtex]
    @inproceedings{viejo20083d1,
    author = {{Viejo Hernando}, Diego and Cazorla, Miguel Angel and Others},
    title = {{3D Feature Extraction and Modelling for SLAM}},
    year = {2008}
    }
  • [DOI] M. Prats, E. Martinez-Martin, P. J. Sanz, and A. P. del Pobil, «The UJI librarian robot,» Intelligent service robotics, vol. 1, iss. 4, p. 321–335, 2008.
    [Bibtex]
    @article{Prats_2008,doi = {10.1007/s11370-008-0028-1},url = {http://dx.doi.org/10.1007/s11370-008-0028-1},year = 2008,month = {jul},publisher = {Springer Science $\mathplus$ Business Media},volume = {1},number = {4},pages = {321--335},author = {Mario Prats and Ester Martinez-Martin and Pedro J. Sanz and Angel P. del Pobil},title = {The {UJI} librarian robot},journal = {Intelligent Service Robotics}}
  • [DOI] E. Cervera, N. Garcia-Aracil, E. Martinez-Martin, L. Nomdedeu, and A. P. del Pobil, «Safety for a robot arm moving amidst humans by using panoramic vision,» in 2008 IEEE international conference on robotics and automation, 2008.
    [Bibtex]
    @inproceedings{Cervera_2008,doi = {10.1109/robot.2008.4543530},url = {http://dx.doi.org/10.1109/robot.2008.4543530},year = 2008,month = {may},publisher = {Institute of Electrical {\&} Electronics Engineers ({IEEE})},author = {Enric Cervera and Nicolas Garcia-Aracil and Ester Martinez-Martin and Leo Nomdedeu and Angel P. del Pobil},title = {Safety for a robot arm moving amidst humans by using panoramic vision},booktitle = {2008 {IEEE} International Conference on Robotics and Automation}}
  • [DOI] M. Prats, P. J. Sanz, E. Martinez-Martin, R. Marín, and A. P. del Pobil, «Manipulación autónoma multipropósito en el robot de servicios jaume-2,» Revista iberoamericana de automática e informática industrial RIAI, vol. 5, iss. 2, p. 25–37, 2008.
    [Bibtex]
    @Article{Prats2008,
    author = {Mario Prats and Pedro J. Sanz and Ester Martinez-Martin and Ra{\'{u}}l Mar{\'{\i}}n and Angel P. del Pobil},
    title = {Manipulaci{\'{o}}n aut{\'{o}}noma multiprop{\'{o}}sito en el robot de servicios jaume-2},
    journal = {Revista Iberoamericana de Autom{\'{a}}tica e Inform{\'{a}}tica Industrial {RIAI}},
    year = {2008},
    volume = {5},
    number = {2},
    pages = {25--37},
    month = {apr},
    doi = {10.1016/s1697-7912(08)70142-0},
    publisher = {Elsevier {BV}},
    url = {http://dx.doi.org/10.1016/s1697-7912(08)70142-0},
    }

2007

  • B. Bonev, F. Escolano, M. A. Lozano, P. Suau, M. Cazorla, and W. Aguilar, «Constellations and the unsupervised learning of graphs,» in Graph-based representations in pattern recognition, Springer berlin heidelberg, 2007, p. 340–350.
    [Bibtex]
    @incollection{bonev2007constellations,
    author = {Bonev, Boyan and Escolano, Francisco and Lozano, Miguel A and Suau, Pablo and Cazorla, Miguel and Aguilar, Wendy},
    booktitle = {Graph-Based Representations in Pattern Recognition},
    pages = {340--350},
    publisher = {Springer Berlin Heidelberg},
    title = {{Constellations and the unsupervised learning of graphs}},
    year = {2007}
    }
  • B. Bonev, F. Escolano, and M. Cazorla, «A novel information theory method for filter feature selection,» in Micai 2007: advances in artificial intelligence, Springer berlin heidelberg, 2007, p. 431–440.
    [Bibtex]
    @incollection{bonev2007novel,
    author = {Bonev, Boyan and Escolano, Francisco and Cazorla, Miguel},
    booktitle = {MICAI 2007: Advances in Artificial Intelligence},
    pages = {431--440},
    publisher = {Springer Berlin Heidelberg},
    title = {{A novel information theory method for filter feature selection}},
    year = {2007}
    }
  • B. Bonev, M. Cazorla, and F. Escolano Ruiz, «Robot navigation behaviors based on omnidirectional vision and information theory,» Journal of physical agents, vol. 1, iss. 1, 2007.
    [Bibtex]
    @article{bonev2007robot,
    author = {Bonev, Boyan and Cazorla, Miguel and Escolano Ruiz, Francisco },
    journal = {Journal of Physical Agents},
    publisher = {Red de Agentes Fisicos},
    title = {{Robot navigation behaviors based on omnidirectional vision and information theory}},
    volume=1,
    number=1,
    year = {2007}
    }
  • F. Escolano, B. Bonev, P. Suau, W. Aguilar, Y. Frauel, J. M. Saez, and M. Cazorla, «Contextual visual localization: cascaded submap classification, optimized saliency detection, and fast view matching,» in Intelligent robots and systems, 2007. iros 2007. ieee/rsj international conference on, 2007, p. 1715–1722.
    [Bibtex]
    @inproceedings{escolano2007contextual,
    author = {Escolano, Francisco and Bonev, Boyan and Suau, Pablo and Aguilar, Wendy and Frauel, Yann and Saez, Juan Manuel and Cazorla, Miguel},
    booktitle = {Intelligent Robots and Systems, 2007. IROS 2007. IEEE/RSJ International Conference on},
    organization = {IEEE},
    pages = {1715--1722},
    title = {{Contextual visual localization: cascaded submap classification, optimized saliency detection, and fast view matching}},
    year = {2007}
    }
  • [DOI] D. Viejo and M. Cazorla, «3D plane-based egomotion for SLAM on semi-structured environment,» in Intelligent robots and systems, 2007. iros 2007. ieee/rsj international conference on, 2007, p. 2761–2766.
    [Bibtex]
    @inproceedings{Viejo2007,
    author = {Viejo, Diego and Cazorla, Miguel},
    booktitle = {Intelligent Robots and Systems, 2007. IROS 2007. IEEE/RSJ International Conference on},
    doi = {10.1109/IROS.2007.4399138},
    pages = {2761--2766},
    title = {{3D plane-based egomotion for SLAM on semi-structured environment}},
    year = {2007}
    }
  • D. Viejo and M. Cazorla, «Pose Registration Model Improvement: Crease Detection.» 2007.
    [Bibtex]
    @inproceedings{viejo2007pose,
    abstract = {Several works deal with 3D data in SLAM problem. Data come from a 3D laser sweeping unit or a stereo camera, both providing a huge amount of data. In this paper, we detail an efficient method to find out creases from 3D raw data. This information can be used together with planar patches extracted from 3D raw data in order to build a complete 3D model of the scene. Some promising results are shown for both outdoor and indoor environments.},
    author = {Viejo, Diego and Cazorla, Miguel},
    title = {{Pose Registration Model Improvement: Crease Detection}},
    year = {2007}
    }

2006

  • B. Bonev and M. Cazorla, «Towards autonomous adaptation in visual tasks.,» in Workshop de agentes fisicos, 2006, p. 59–66.
    [Bibtex]
    @inproceedings{bonev2006towards,
    author = {Bonev, Boyan and Cazorla, Miguel},
    booktitle = {Workshop de Agentes Fisicos},
    pages = {59--66},
    title = {{Towards autonomous adaptation in visual tasks.}},
    year = {2006}
    }
  • B. Bonev, M. Cazorla, and H. Martinez, «Walk calibration in a four-legged robot,» in Climbing and walking robots, Springer berlin heidelberg, 2006, p. 493–500.
    [Bibtex]
    @incollection{bonev2006walk,
    author = {Bonev, Boyan and Cazorla, Miguel and Martinez, Humberto},
    booktitle = {Climbing and Walking Robots},
    pages = {493--500},
    publisher = {Springer Berlin Heidelberg},
    title = {{Walk calibration in a four-legged robot}},
    year = {2006}
    }
  • D. Herrero-Perez, F. Bas-Esparza, H. Martinez-Barbera, F. Martin, C. E. Aguero, V. M. Gomez, V. Matellan, and M. Cazorla, «Team Chaos 2006,» in Robotics symposium, 2006. lars’06. ieee 3rd latin american, 2006, p. 208–213.
    [Bibtex]
    @inproceedings{herrero2006team,
    author = {Herrero-Perez, D and Bas-Esparza, F and Martinez-Barbera, H and Martin, F and Aguero, C E and Gomez, V M and Matellan, V and Cazorla, M},
    booktitle = {Robotics Symposium, 2006. LARS'06. IEEE 3rd Latin American},
    organization = {IEEE},
    pages = {208--213},
    title = {{Team Chaos 2006}},
    year = {2006}
    }
  • J. M. {Perez Torres}, D. {Viejo Hernando}, P. {Suau Perez}, M. Angel {Lozano Ortega}, O. {Colomina Pardo}, M. Angel Cazorla, F. {Escolano Ruiz}, and Others, «Una concepcion moderna de Tecnicas de Inteligencia Artificial en la Universidad de Alicante.» 2006.
    [Bibtex]
    @inproceedings{perez2006concepcion,
    author = {{Perez Torres}, Jose Manuel and {Viejo Hernando}, Diego and {Suau Perez}, Pablo and {Lozano Ortega}, Miguel Angel and {Colomina Pardo}, Otto and Cazorla, Miguel Angel and {Escolano Ruiz}, Francisco and Others},
    publisher = {Thomson-Paraninfo},
    title = {{Una concepcion moderna de Tecnicas de Inteligencia Artificial en la Universidad de Alicante}},
    year = {2006}
    }
  • D. Viejo and M. Cazorla, «Extraction and error modeling of 3D data: application to SLAM,» in Workshop de agentes fisicos, 2006, p. 153–158.
    [Bibtex]
    @inproceedings{viejo2006extraction,
    author = {Viejo, Diego and Cazorla, Miguel},
    booktitle = {Workshop de Agentes Fisicos},
    pages = {153--158},
    title = {{Extraction and error modeling of 3D data: application to SLAM.}},
    year = {2006}
    }
  • D. Viejo and M. Cazorla, «Plane extraction and error modeling of 3d data,» in International symposium on robotics and automation, 2006.
    [Bibtex]
    @inproceedings{viejo2006plane,
    author = {Viejo, Diego and Cazorla, Miguel},
    booktitle = {International Symposium on Robotics and Automation},
    title = {{Plane extraction and error modeling of 3d data}},
    year = {2006}
    }

2005

  • B. Bonev, M. Cazorla, and H. Martinez-Barbera, «Parameters optimization for quadruped walking,» in Proc. of the vi workshop de agentes fisicos, granada, spain, 2005.
    [Bibtex]
    @inproceedings{bonev2005parameters,
    author = {Bonev, Boyan and Cazorla, Miguel and Martinez-Barbera, H},
    booktitle = {Proc. of the VI Workshop de Agentes Fisicos, Granada, Spain},
    title = {{Parameters optimization for quadruped walking}},
    year = {2005}
    }
  • M. Cazorla and F. Escolano, «Feature Extraction and Grouping for Robot Vision Tasks,» in Cutting edge robotics, I-tech, 2005, p. 91.
    [Bibtex]
    @incollection{cazorla2005feature,
    author = {Cazorla, Miguel and Escolano, Francisco},
    booktitle = {Cutting Edge Robotics},
    pages = {91},
    publisher = {I-Tech},
    title = {{Feature Extraction and Grouping for Robot Vision Tasks}},
    year = {2005}
    }
  • H. Martinez, V. Matellan, M. A. Cazorla, A. Saffiotti, D. Herrero, F. Martin, B. Bonev, and K. LeBlanc, «Team Chaos 2005.» 2005.
    [Bibtex]
    @inproceedings{martinezteam,
    author = {Martinez, H and Matellan, V and Cazorla, M A and Saffiotti, A and Herrero, D and Martin, F and Bonev, B and LeBlanc, K},
    title = {{Team Chaos 2005}},
    year = {2005}
    }
  • D. Viejo, J. M. Saez, M. A. Cazorla, and F. Escolano, «Active stereo based compact mapping,» in Intelligent robots and systems, 2005. (iros 2005). 2005 ieee/rsj international conference on, 2005, p. 529–534.
    [Bibtex]
    @inproceedings{viejo2005active,
    author = {Viejo, Diego and Saez, Juan Manuel and Cazorla, Miguel Angel and Escolano, Francisco},
    booktitle = {Intelligent Robots and Systems, 2005. (IROS 2005). 2005 IEEE/RSJ International Conference on},
    organization = {IEEE},
    pages = {529--534},
    title = {{Active stereo based compact mapping}},
    year = {2005}
    }

2004

  • D. Viejo and M. A. Cazorla, «Construccion de mapas 3D y extraccion de primitivas geometricas del entorno,» in Proc of 5th workshop de agentes fisicos, 2004.
    [Bibtex]
    @inproceedings{viejo2004construccion,
    author = {Viejo, Diego and Cazorla, M A},
    booktitle = {Proc of 5th Workshop de Agentes Fisicos},
    title = {{Construccion de mapas 3D y extraccion de primitivas geometricas del entorno}},
    year = {2004}
    }
  • D. Viejo and M. Cazorla, «Unconstrained 3D-mesh generation applied to map building,» in Progress in pattern recognition, image analysis and applications, Springer berlin heidelberg, 2004, p. 241–248.
    [Bibtex]
    @incollection{viejo2004unconstrained,
    abstract = {3D map building is a complex robotics task which needs mathematical robust models. From a 3D point cloud, we can use the normal vectors to these points to do feature extraction. In this paper, we will present a robust method for normal estimation and unconstrained 3D-mesh generation from a not-uniformly distributed point cloud.},
    author = {Viejo, Diego and Cazorla, Miguel},
    booktitle = {Progress in Pattern Recognition, Image Analysis and Applications},
    pages = {241--248},
    publisher = {Springer Berlin Heidelberg},
    title = {{Unconstrained 3D-mesh generation applied to map building}},
    year = {2004}
    }

2003

  • [DOI] M. Cazorla and F. Escolano, «Two Bayesian methods for junction classification,» Image processing, ieee transactions on, vol. 12, iss. 3, p. 317–327, 2003.
    [Bibtex]
    @article{1197837,
    abstract = {We propose two Bayesian methods for junction classification which evolve from the Kona method: a region-based method and an edge-based method. Our region-based method computes a one-dimensional (1-D) profile where wedges are mapped to intervals with homogeneous intensity. These intervals are found through a growing-and-merging algorithm driven by a greedy rule. On the other hand, our edge-based method computes a different profile which maps wedge limits to peaks of contrast, and these peaks are found through thresholding followed by nonmaximum suppression. Experimental results show that both methods are more robust and efficient than the Kona method, and also that the edge-based method outperforms the region-based one.},
    author = {Cazorla, M and Escolano, F},
    doi = {10.1109/TIP.2002.806242},
    issn = {1057-7149},
    journal = {Image Processing, IEEE Transactions on},
    keywords = {Bayes methods;edge detection;image classification;},
    number = {3},
    pages = {317--327},
    title = {{Two Bayesian methods for junction classification}},
    volume = {12},
    year = {2003}
    }

2002

  • [DOI] M. Cazorla, F. Escolano, D. Gallardo, and R. Rizo, «Junction detection and grouping with probabilistic edge models and Bayesian A*,» Pattern recognition, vol. 35, iss. 9, p. 1869–1881, 2002.
    [Bibtex]
    @article{Cazorla20021869,
    abstract = {In this paper, we propose and integrate two Bayesian methods, one of them for junction detection, and the other one for junction grouping. Our junction detection method relies on a probabilistic edge model and a log-likelihood test. Our junction grouping method relies on finding connecting paths between pairs of junctions. Path searching is performed by applying a Bayesian A* algorithm. Such an algorithm uses both an intensity and geometric model for defining the rewards of a partial path and prunes those paths with low rewards. We have extended such a pruning with an additional rule which favors the stability of longer paths against shorter ones. We have tested experimentally the efficiency and robustness of the methods in an indoor image sequence.},
    author = {Cazorla, M and Escolano, F and Gallardo, D and Rizo, R},
    doi = {http://dx.doi.org/10.1016/s0031-3203(01)00150-9},
    issn = {0031-3203},
    journal = {Pattern Recognition},
    keywords = {Bayesian inference},
    number = {9},
    pages = {1869--1881},
    title = {{Junction detection and grouping with probabilistic edge models and Bayesian A*}},
    url = {http://www.sciencedirect.com/science/article/pii/s0031320301001509},
    volume = {35},
    year = {2002}
    }

2001

  • M. Angel Cazorla, O. Colomina Pardo, P. Compan Rosique, F. Escolano Ruiz, J. L. Zamora, and Others, «JavaVis: Una libreria para vision artificial en Java.» 2001.
    [Bibtex]
    @inproceedings{cazorla2001javavis,
    author = {Cazorla, Miguel Angel and Colomina Pardo, Otto and Compan Rosique, Patricia and Escolano Ruiz, Francisco and Zamora, Jose Luis and Others},
    publisher = {Universitat de les Illes Balears. Servei de Publicacions i Intercanvi Cientific},
    title = {{JavaVis: Una libreria para vision artificial en Java}},
    year = {2001}
    }

1999

  • M. Angel Cazorla, F. Escolano Ruiz, D. Gallardo Lopez, O. Colomina Pardo, and Others, «A competition-based deformable template for junction extraction.» 1999.
    [Bibtex]
    @inproceedings{cazorla1999competition,
    author = {Cazorla, Miguel Angel and Escolano Ruiz, Francisco and Gallardo Lopez, Domingo and Colomina Pardo, Otto and Others},
    title = {{A competition-based deformable template for junction extraction}},
    year = {1999}
    }
  • M. Cazorla, F. Escolano, D. Gallardo, and R. Rizo, «Bayesian Models for Finding and Grouping Junctions,» in Proc of the emmcvpr99, 1999.
    [Bibtex]
    @inproceedings{Cazorla99a,
    author = {Cazorla, M and Escolano, F and Gallardo, D and Rizo, R},
    booktitle = {Proc of the EMMCVPR99},
    publisher = {Lecture Notes in Computer Science},
    title = {{Bayesian Models for Finding and Grouping Junctions}},
    year = {1999}
    }

1998

  • F. Escolano, M. Cazorla, D. Gallardo, F. Llorens, R. Satorre, and R. Rizo, «A combined probabilistic framework for learning gestures and actions,» in Tasks and methods in applied artificial intelligence, Springer berlin heidelberg, 1998, p. 658–667.
    [Bibtex]
    @incollection{escolano1998combined,
    author = {Escolano, Francisco and Cazorla, Miguel and Gallardo, Domingo and Llorens, Faraon and Satorre, Rosana and Rizo, Ramon},
    booktitle = {Tasks and Methods in Applied Artificial Intelligence},
    pages = {658--667},
    publisher = {Springer Berlin Heidelberg},
    title = {{A combined probabilistic framework for learning gestures and actions}},
    year = {1998}
    }
  • D. Gallardo, F. Escolano, R. Rizo, O. Colomina, and M. Cazorla, «Estimacion bayesiana de caracteristicas en robots moviles mediante muestreo de la densidad a posteriori,» in Actas del primer congrés català d'intel·ligència artificial, 1998.
    [Bibtex]
    @inproceedings{gallardo1998estimacion,
    author = {Gallardo, Domingo and Escolano, Francisco and Rizo, Ramon and Colomina, Otto and Cazorla, M},
    booktitle = {Actas del Primer Congrés Català d'Intel·ligència Artificial},
    title = {{Estimacion bayesiana de caracteristicas en robots moviles mediante muestreo de la densidad a posteriori}},
    year = {1998}
    }

1997

  • F. Escolano, M. Cazorla, D. Gallardo, and R. Rizo, «Deformable templates for tracking and analysis of intravascular ultrasound sequences,» in Energy minimization methods in computer vision and pattern recognition, Springer berlin heidelberg, 1997, p. 521–534.
    [Bibtex]
    @incollection{escolano1997deformable,
    author = {Escolano, Francisco and Cazorla, Miguel and Gallardo, Domingo and Rizo, Ramon},
    booktitle = {Energy Minimization Methods in Computer Vision and Pattern Recognition},
    pages = {521--534},
    publisher = {Springer Berlin Heidelberg},
    title = {{Deformable templates for tracking and analysis of intravascular ultrasound sequences}},
    year = {1997}
    }
  • F. Escolano Ruiz, M. Cazorla, and Others, «Estimacion del movimiento coherente: computacion evolutiva como alternativa al annealing determinista.» 1997.
    [Bibtex]
    @inproceedings{escolano1997estimacion,
    author = {Escolano Ruiz, Francisco and Cazorla, Miguel and Others},
    title = {{Estimacion del movimiento coherente: computacion evolutiva como alternativa al annealing determinista}},
    year = {1997}
    }
  • F. Escolano Ruiz, M. Angel Cazorla, D. Gallardo Lopez, F. Llorens Largo, R. Satorre Cuerda, R. Rizo Aldeguer, and Others, «Plantillas deformables espacio-temporales para el tracking y reconocimiento gestual.» 1997.
    [Bibtex]
    @inproceedings{escolano1997plantillas,
    author = {Escolano Ruiz, Francisco and Cazorla, Miguel Angel and {Gallardo Lopez}, Domingo and Llorens Largo, Faraon and Satorre Cuerda, Rosana and Rizo Aldeguer, Ramon and Others},
    title = {{Plantillas deformables espacio-temporales para el tracking y reconocimiento gestual}},
    year = {1997}
    }
  • I. Sabuco, F. Escolano Ruiz, M. Angel Cazorla, D. Gallardo Lopez, R. Rizo Aldeguer, and Others, «Snakes based tracking and texture analysis of microscopic images.» 1997.
    [Bibtex]
    @inproceedings{sabuco1997snakes,
    author = {Sabuco, Isabel and {Escolano Ruiz}, Francisco and Cazorla, Miguel Angel and {Gallardo Lopez}, Domingo and {Rizo Aldeguer}, Ramon and Others},
    title = {{Snakes based tracking and texture analysis of microscopic images}},
    year = {1997}
    }

1995

  • M. Cazorla, P. Caceres, F. Escolano, D. Gallardo, and R. Rizo, «Deteccion automatica con Snakes y Representacion 3D sobre imagenes cerebrales,» in Vi caepia, 1995, p. 331–340.
    [Bibtex]
    @inproceedings{cazorla1995deteccion,
    author = {Cazorla, M and Caceres, Pedro and Escolano, Francisco and Gallardo, Domingo and Rizo, Ramon},
    booktitle = {VI CAEPIA},
    pages = {331--340},
    title = {{Deteccion automatica con Snakes y Representacion 3D sobre imagenes cerebrales}},
    year = {1995}
    }
  • [DOI] E. Martinez-Martin and A. P. del Pobil, «Conflict resolution in robotics,» IGI Global, p. 263–278.
    [Bibtex]
    @incollection{Martinez_Martin,
    author = {Ester Martinez-Martin and Angel P. del Pobil},
    doi = {10.4018/978-1-5225-0245-6.ch015},
    pages = {263--278},
    publisher = {{IGI} Global},
    title = {{Conflict Resolution in Robotics}},
    url = {http://dx.doi.org/10.4018/978-1-5225-0245-6.ch015}
    }