Publications


Authors: Type:

2017

  • A. Dominguez-Sanchez, M. Cazorla, and S. Orts-Escolano, “Pedestrian movement direction recognition using convolutional neural networks,” IEEE Transactions on Intelligent Transportation Systems, vol. Accepted, 2017.
    [Bibtex]
    @article{Dominguez2017,
      author  = {Dominguez-Sanchez, Alejandro and Cazorla, Miguel and Orts-Escolano, Sergio},
      title   = {Pedestrian Movement Direction Recognition Using Convolutional Neural Networks},
      journal = {IEEE Transactions on Intelligent Transportation Systems},
      year    = {2017},
      note    = {Accepted for publication},
    }
  • A. G. García, J. G. Rodríguez, S. Orts-Escolano, S. Oprea, F. G. Donoso, and M. Cazorla, “A study of the effect of noise and occlusion on the accuracy of convolutional neural networks applied to 3D object recognition,” Computer Vision and Image Understanding, vol. Accepted, 2017.
    [Bibtex]
    @article{GomezGarcia2017CVIU,
      author  = {García García, Alberto and García Rodríguez, Jose and Orts-Escolano, Sergio and Oprea, Sergiu and Gómez Donoso, Francisco and Cazorla, Miguel},
      title   = {A Study of the Effect of Noise and Occlusion on the Accuracy of Convolutional Neural Networks Applied to {3D} Object Recognition},
      journal = {Computer Vision and Image Understanding},
      year    = {2017},
      note    = {Accepted for publication},
    }
  • F. Gomez-Donoso, S. Orts Escolano, M. Cazorla, A. Garcia-Garcia, J. Garcia-Rodriguez, J. A. Castro-Vargas, and S. Ovidiu-Oprea, “A robotic platform for customized and interactive rehabilitation of persons with disabilities,” Pattern recognition letters, vol. Accepted, 2017.
    [Bibtex]
    @article{GomezDonoso2017PRL,
      author  = {Gomez-Donoso, Francisco and Orts Escolano, Sergio and Cazorla, Miguel and Garcia-Garcia, Alberto and Garcia-Rodriguez, Jose and Castro-Vargas, John A. and Ovidiu-Oprea, Sergiu},
      title   = {A Robotic Platform for Customized and Interactive Rehabilitation of Persons with Disabilities},
      journal = {Pattern Recognition Letters},
      year    = {2017},
      note    = {Accepted for publication},
    }
  • [DOI] J. Garcia-Rodriguez, I. Guyon, S. Escalera, A. Psarrou, A. Lewis, and M. Cazorla, “Editorial: special issue on computational intelligence for vision and robotics,” Neural computing and applications, vol. 28, iss. 5, pp. 853-854, 2017.
    [Bibtex]
    @article{Garcia-Rodriguez2017,
      author  = {Garcia-Rodriguez, Jose and Guyon, Isabelle and Escalera, Sergio and Psarrou, Alexandra and Lewis, Andrew and Cazorla, Miguel},
      title   = {Editorial: special issue on computational intelligence for vision and robotics},
      journal = {Neural Computing and Applications},
      year    = {2017},
      volume  = {28},
      number  = {5},
      pages   = {853--854},
      issn    = {1433-3058},
      doi     = {10.1007/s00521-016-2330-8},
      url     = {http://dx.doi.org/10.1007/s00521-016-2330-8},
    }
  • A. Dominguez-Sanchez, S. Orts-Escolano, and M. Cazorla, “Pedestrian direction recognition using convolutional neural networks,” in 14th International Work-Conference on Artificial Neural Networks, 2017.
    [Bibtex]
    @inproceedings{Dominguez2017IWANN,
      author    = {Dominguez-Sanchez, Alex and Orts-Escolano, Sergio and Cazorla, Miguel},
      title     = {Pedestrian Direction Recognition using Convolutional Neural Networks},
      booktitle = {14th International Work-Conference on Artificial Neural Networks},
      year      = {2017},
      month     = jun,
    }
  • M. Zamora, E. Caldwell, J. Garcia-Rodriguez, J. Azorin-Lopez, and M. Cazorla, “Machine learning improves human-robot interaction in productive environments: a review,” in 14th International Work-Conference on Artificial Neural Networks, 2017.
    [Bibtex]
    @inproceedings{Zamora2017IWANN,
      author    = {Zamora, Mauricio and Caldwell, Eldon and Garcia-Rodriguez, Jose and Azorin-Lopez, Jorge and Cazorla, Miguel},
      title     = {Machine learning improves human-robot interaction in productive environments: A review},
      booktitle = {14th International Work-Conference on Artificial Neural Networks},
      year      = {2017},
      month     = jun,
    }
  • F. Escalona, Á. Rodríguez, F. Gómez-Donoso, J. Martínez-Gómez, and M. Cazorla, “3d object detection with deep learning,” Journal of physical agents, vol. 8, iss. 1, 2017.
    [Bibtex]
    @article{Donoso2017,
      author   = {Escalona, F{\'e}lix and Rodr{\'\i}guez, {\'A}ngel and G{\'o}mez-Donoso, Francisco and Mart{\'\i}nez-G{\'o}mez, Jesus and Cazorla, Miguel},
      title    = {{3D} object detection with deep learning},
      journal  = {Journal of Physical Agents},
      year     = {2017},
      volume   = {8},
      number   = {1},
      issn     = {1888-0258},
      keywords = {semantic mapping, 3D point cloud, deep learning},
      abstract = {Finding an appropriate environment representation is a crucial problem in robotics. 3D data has been recently used thanks to the advent of low cost RGB-D cameras. We propose a new way to represent a 3D map based on the information provided by an expert. Namely, the expert is the output of a Convolutional Neural Network trained with deep learning techniques. Relying on such information, we propose the generation of 3D maps using individual semantic labels, which are associated with environment objects or semantic labels. So, for each label we are provided with a partial 3D map whose data belong to the 3D perceptions, namely point clouds, which have an associated probability above a given threshold. The final map is obtained by registering and merging all these partial maps. The use of semantic labels provide us a with way to build the map while recognizing objects.},
    }
  • F. Gomez-Donoso, A. Garcia-Garcia, S. Orts-Escolano, J. Garcia-Rodriguez, and M. Cazorla, “LonchaNet: a sliced-based CNN architecture for real-time 3D object recognition,” in 2017 International Joint Conference on Neural Networks (IJCNN), 2017.
    [Bibtex]
    @inproceedings{Garcia2017,
      author    = {Gomez-Donoso, F. and Garcia-Garcia, A. and Orts-Escolano, S. and Garcia-Rodriguez, J. and Cazorla, M.},
      title     = {{LonchaNet}: A Sliced-based {CNN} Architecture for Real-time {3D} Object Recognition},
      booktitle = {2017 International Joint Conference on Neural Networks ({IJCNN})},
      year      = {2017},
      month     = may,
    }
  • S. Oprea, A. Garcia-Garcia, S. Orts-Escolano, J. Garcia-Rodriguez, and M. Cazorla, “A recurrent neural network based Schaeffer gesture recognition system,” in 2017 International Joint Conference on Neural Networks (IJCNN), 2017.
    [Bibtex]
    @inproceedings{Oprea2017,
      author    = {Oprea, Sergiu and Garcia-Garcia, A. and Orts-Escolano, S. and Garcia-Rodriguez, J. and Cazorla, M.},
      title     = {A Recurrent Neural Network based {Schaeffer} Gesture Recognition System},
      booktitle = {2017 International Joint Conference on Neural Networks ({IJCNN})},
      year      = {2017},
      month     = may,
    }
  • [DOI] A. Garcia-Garcia, S. Orts-Escolano, S. Oprea, J. Garcia-Rodriguez, J. Azorin-Lopez, M. Saval-Calvo, and M. Cazorla, “Multi-sensor 3D Object Dataset for Object Recognition with Full Pose Estimation,” Neural Computing and Applications, vol. 28, iss. 5, pp. 941-952, 2017.
    [Bibtex]
    Note: key renamed from Garcia2016, which collided with the 2016 IJCNN entry of the same key.
    @article{Garcia2017NCAA,
      author   = {Garcia-Garcia, Alberto and Orts-Escolano, Sergio and Oprea, Sergiu and Garcia-Rodriguez, Jose and Azorin-Lopez, Jorge and Saval-Calvo, Marcelo and Cazorla, Miguel},
      title    = {Multi-sensor {3D} Object Dataset for Object Recognition with Full Pose Estimation},
      journal  = {Neural Computing and Applications},
      year     = {2017},
      volume   = {28},
      number   = {5},
      pages    = {941--952},
      issn     = {1433-3058},
      doi      = {10.1007/s00521-016-2224-9},
      abstract = {In this work, we propose a new dataset for 3D object recognition using the new high-resolution Kinect V2 sensor and some other popular low-cost devices like PrimeSense Carmine. Since most already existing datasets for 3D object recognition lack some features such as 3D pose information about objects in the scene, per pixel segmentation or level of occlusion, we propose a new one combining all this information in a single dataset that can be used to validate existing and new 3D object recognition algorithms. Moreover, with the advent of the new Kinect V2 sensor we are able to provide high-resolution data for RGB and depth information using a single sensor, whereas other datasets had to combine multiple sensors. In addition, we will also provide semiautomatic segmentation and semantic labels about the different parts of the objects so that the dataset could be used for testing robot grasping and scene labeling systems as well as for object recognition.},
    }
  • [DOI] J. C. Rangel, J. Martínez-Gómez, I. García-Varea, and M. Cazorla, “Lextomap: lexical-based topological mapping,” Advanced robotics, vol. 31, iss. 5, pp. 268-281, 2017.
    [Bibtex]
    @article{Rangel2016b,
      author   = {Rangel, José Carlos and Martínez-Gómez, Jesus and García-Varea, Ismael and Cazorla, Miguel},
      title    = {{LexToMap}: lexical-based topological mapping},
      journal  = {Advanced Robotics},
      year     = {2017},
      volume   = {31},
      number   = {5},
      pages    = {268--281},
      doi      = {10.1080/01691864.2016.1261045},
      url      = {http://dx.doi.org/10.1080/01691864.2016.1261045},
      abstract = {Any robot should be provided with a proper representation of its environment in order to perform navigation and other tasks. In addition to metrical approaches, topological mapping generates graph representations in which nodes and edges correspond to locations and transitions. In this article, we present LexToMap, a topological mapping procedure that relies on image annotations. These annotations, represented in this work by lexical labels, are obtained from pre-trained deep learning models, namely CNNs, and are used to estimate image similarities. Moreover, the lexical labels contribute to the descriptive capabilities of the topological maps. The proposal has been evaluated using the KTH-IDOL 2 data-set, which consists of image sequences acquired within an indoor environment under three different lighting conditions. The generality of the procedure as well as the descriptive capabilities of the generated maps validate the proposal.},
    }

2016

  • A. Garcia-Garcia, F. Gomez-Donoso, J. Garcia-Rodriguez, S. Orts-Escolano, M. Cazorla, and J. Azorin-Lopez, “PointNet: a 3D convolutional neural network for real-time object class recognition,” in 2016 International Joint Conference on Neural Networks (IJCNN), 2016, pp. 1578-1584.
    [Bibtex]
    @inproceedings{Garcia2016,
      author    = {Garcia-Garcia, A. and Gomez-Donoso, F. and Garcia-Rodriguez, J. and Orts-Escolano, S. and Cazorla, M. and Azorin-Lopez, J.},
      title     = {{PointNet}: A {3D} Convolutional Neural Network for real-time object class recognition},
      booktitle = {2016 International Joint Conference on Neural Networks ({IJCNN})},
      year      = {2016},
      month     = jul,
      pages     = {1578--1584},
      keywords  = {CAD;computer vision;data structures;learning (artificial intelligence);neural net architecture;object recognition;3D ShapeNets;3D convolutional neural network;ModelNet;PointNet;VoxNet;computer vision;deep learning techniques;density occupancy grids representations;large-scale 3D CAD model dataset;real-time object class recognition;supervised convolutional neural network architecture;Computer architecture;Machine learning;Neural networks;Object recognition;Solid modeling;Three-dimensional displays;Two dimensional displays},
    }
  • C. Loop, Q. Cai, P. Chou, and S. Orts-Escolano, “A closed-form bayesian fusion equation using occupancy probabilities,” in 2016 International Conference on 3D Vision, 3DV 2016, Stanford, USA, October 25-28, 2016, 2016.
    [Bibtex]
    @inproceedings{DBLP:conf/3dim/Loop2016,
      author    = {Charles Loop and Qin Cai and Philip Chou and Sergio Orts-Escolano},
      title     = {A Closed-form Bayesian Fusion Equation using Occupancy Probabilities},
      booktitle = {2016 International Conference on 3D Vision, 3DV 2016, {Stanford}, USA, October 25-28, 2016},
      year      = {2016},
    }
  • [DOI] M. Dou, S. Khamis, Y. Degtyarev, P. Davidson, S. R. Fanello, A. Kowdle, S. Orts-Escolano, C. Rhemann, D. Kim, J. Taylor, P. Kohli, V. Tankovich, and S. Izadi, “Fusion4D: real-time performance capture of challenging scenes,” ACM Trans. Graph., vol. 35, iss. 4, p. 114:1–114:13, 2016.
    [Bibtex]
    @article{Dou2016,
      author     = {Dou, Mingsong and Khamis, Sameh and Degtyarev, Yury and Davidson, Philip and Fanello, Sean Ryan and Kowdle, Adarsh and Orts-Escolano, Sergio and Rhemann, Christoph and Kim, David and Taylor, Jonathan and Kohli, Pushmeet and Tankovich, Vladimir and Izadi, Shahram},
      title      = {Fusion4D: Real-time Performance Capture of Challenging Scenes},
      journal    = {ACM Trans. Graph.},
      year       = {2016},
      volume     = {35},
      number     = {4},
      pages      = {114:1--114:13},
      month      = jul,
      doi        = {10.1145/2897824.2925969},
      issn       = {0730-0301},
      acmid      = {2925969},
      address    = {New York, NY, USA},
      articleno  = {114},
      issue_date = {July 2016},
      keywords   = {4D reconstruction, multi-view, nonrigid, real-time},
      numpages   = {13},
      publisher  = {ACM},
      url        = {http://doi.acm.org/10.1145/2897824.2925969},
    }
  • S. Ryan Fanello, C. Rhemann, V. Tankovich, A. Kowdle, S. Orts-Escolano, D. Kim, and S. Izadi, “HyperDepth: Learning Depth From Structured Light Without Matching,” in The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016.
    [Bibtex]
    @inproceedings{Fanello_2016_CVPR,
      author    = {Ryan Fanello, Sean and Rhemann, Christoph and Tankovich, Vladimir and Kowdle, Adarsh and Orts-Escolano, Sergio and Kim, David and Izadi, Shahram},
      title     = {{HyperDepth}: Learning Depth From Structured Light Without Matching},
      booktitle = {The {IEEE} Conference on Computer Vision and Pattern Recognition ({CVPR})},
      year      = {2016},
      month     = jun,
    }
  • A. Garcia-Garcia, F. Gomez-Donoso, J. Garcia-Rodriguez, S. Orts-Escolano, M. Cazorla, and J. Azorin-Lopez, “PointNet: A 3D Convolutional Neural Network for Real-Time Object Class Recognition,” in The IEEE World Congress on Computational Intelligence, 2016.
    [Bibtex]
    @inproceedings{Garcia2016a,
      author    = {Garcia-Garcia, Alberto and Gomez-Donoso, Francisco and Garcia-Rodriguez, Jose and Orts-Escolano, Sergio and Cazorla, Miguel and Azorin-Lopez, Jorge},
      title     = {{PointNet}: A {3D} Convolutional Neural Network for Real-Time Object Class Recognition},
      booktitle = {The {IEEE} World Congress on Computational Intelligence},
      year      = {2016},
    }
  • F. Gomez-Donoso, M. Cazorla, A. Garcia-Garcia, and J. Garcia Rodriguez, “Automatic Schaeffer’s gestures recognition system,” Expert Systems, vol. 33, iss. 5, pp. 480-488, 2016.
    [Bibtex]
    @article{Gomez-Donoso2016,
      author   = {Gomez-Donoso, Francisco and Cazorla, Miguel and Garcia-Garcia, Alberto and Garcia Rodriguez, Jose},
      title    = {Automatic Schaeffer's Gestures Recognition System},
      journal  = {Expert Systems},
      year     = {2016},
      volume   = {33},
      number   = {5},
      pages    = {480--488},
      abstract = {Schaeffer's sign language consists of a reduced set of gestures designed to help children with autism or cognitive learning disabilities to develop adequate communication skills. Our automatic recognition system for Schaeffer's gesture language uses the information provided by an RGB-D camera to capture body motion and recognize gestures using Dynamic Time Warping combined with k-Nearest Neighbors methods. The learning process is reinforced by the interaction with the proposed system that accelerates learning itself thus helping both children and educators. To demonstrate the validity of the system, a set of qualitative experiments with children were carried out. As a result, a system which is able to recognize a subset of 11 gestures of Schaeffer's sign language online was achieved.},
    }
  • [DOI] S. Orts-Escolano, C. Rhemann, S. Fanello, D. Kim, A. Kowdle, W. Chang, Y. Degtyarev, P. L. Davidson, S. Khamis, M. Dou, V. Tankovich, C. Loop, Q. Cai, P. A. Chou, S. Mennicken, J. Valentin, V. Pradeep, S. Wang, S. B. Kang, P. Kohli, Y. Lutchyn, C. Keskin, and S. Izadi, “Holoportation: virtual 3D teleportation in real-time,” in 29th ACM User Interface Software and Technology Symposium (UIST), 2016.
    [Bibtex]
    @inproceedings{holoportation2016,
      author    = {Sergio Orts-Escolano and Christoph Rhemann and Sean Fanello and David Kim and Adarsh Kowdle and Wayne Chang and Yury Degtyarev and Philip L Davidson and Sameh Khamis and Mingsong Dou and Vladimir Tankovich and Charles Loop and Qin Cai and Philip A Chou and Sarah Mennicken and Julien Valentin and Vivek Pradeep and Shenlong Wang and Sing Bing Kang and Pushmeet Kohli and Yuliya Lutchyn and Cem Keskin and Shahram Izadi},
      title     = {Holoportation: Virtual 3D Teleportation in Real-time},
      booktitle = {29th ACM User Interface Software and Technology Symposium (UIST)},
      year      = {2016},
      doi       = {10.1145/2984511.2984517},
      url       = {http://dl.acm.org/citation.cfm?id=2984517},
    }
  • [DOI] A. Jimeno-Morenilla, J. Garcia-Rodriguez, S. Orts-Escolano, and M. Davia-Aracil, “GNG based foot reconstruction for custom footwear manufacturing,” Computers in Industry, vol. 75, pp. 116-126, 2016.
    [Bibtex]
    @article{JimenoMorenilla2016116,
      author   = {Antonio Jimeno-Morenilla and Jose Garcia-Rodriguez and Sergio Orts-Escolano and Miguel Davia-Aracil},
      title    = {{GNG} based foot reconstruction for custom footwear manufacturing},
      journal  = {Computers in Industry},
      year     = {2016},
      volume   = {75},
      pages    = {116--126},
      doi      = {10.1016/j.compind.2015.06.002},
      issn     = {0166-3615},
      keywords = {Custom footwear manufacturing},
      url      = {http://www.sciencedirect.com/science/article/pii/S0166361515300075},
    }
  • J. Martinez-Gomez, V. Morell Gimenez, M. Cazorla, and I. Garcia-Varea, “Semantic Localization in the PCL library,” Robotics and Autonomous Systems, vol. 75, Part B, pp. 641-648, 2016.
    [Bibtex]
    @article{Martinez2016ras,
      author   = {Martinez-Gomez, Jesus and Morell Gimenez, Vicente and Cazorla, Miguel and Garcia-Varea, Ismael},
      title    = {Semantic Localization in the {PCL} library},
      journal  = {Robotics and Autonomous Systems},
      year     = {2016},
      volume   = {75, Part B},
      pages    = {641--648},
      abstract = {The semantic localization problem in robotics consists in determining the place where a robot is located by means of semantic categories. The problem is usually addressed as a supervised classification process, where input data correspond to robot perceptions while classes to semantic categories, like kitchen or corridor. In this paper we propose a framework, implemented in the {PCL} library, which provides a set of valuable tools to easily develop and evaluate semantic localization systems. The implementation includes the generation of 3D global descriptors following a Bag-of-Words approach. This allows the generation of fixed-dimensionality descriptors from any type of keypoint detector and feature extractor combinations. The framework has been designed, structured and implemented to be easily extended with different keypoint detectors, feature extractors as well as classification models. The proposed framework has also been used to evaluate the performance of a set of already implemented descriptors, when used as input for a specific semantic localization system. The obtained results are discussed paying special attention to the internal parameters of the BoW descriptor generation process. Moreover, we also review the combination of some keypoint detectors with different 3D descriptor generation techniques.},
    }
  • J. Navarrete, V. Morell, M. Cazorla, D. Viejo, J. Garcia-Rodriguez, and S. Orts-Escolano, “3DCOMET: 3D Compression Methods Test Dataset,” Robotics and Autonomous Systems, vol. 75, Part B, pp. 550-557, 2016.
    [Bibtex]
    @article{Navarrete2016Ras,
      author   = {Javier Navarrete and Vicente Morell and Miguel Cazorla and Diego Viejo and Jose Garcia-Rodriguez and Sergio Orts-Escolano},
      title    = {{3DCOMET}: {3D} Compression Methods Test Dataset},
      journal  = {Robotics and Autonomous Systems},
      year     = {2016},
      volume   = {75, Part B},
      pages    = {550--557},
      abstract = {The use of 3D data in mobile robotics applications provides valuable information about the robot's environment. However usually the huge amount of 3D information is difficult to manage due to the fact that the robot storage system and computing capabilities are insufficient. Therefore, a data compression method is necessary to store and process this information while preserving as much information as possible. A few methods have been proposed to compress 3D information. Nevertheless, there does not exist a consistent public benchmark for comparing the results (compression level, distance reconstructed error, etc.) obtained with different methods. In this paper, we propose a dataset composed of a set of 3D point clouds with different structure and texture variability to evaluate the results obtained from 3D data compression methods. We also provide useful tools for comparing compression methods, using as a baseline the results obtained by existing relevant compression methods.},
    }
  • S. Orts-Escolano, J. Garcia-Rodriguez, V. Morell, M. Cazorla, J. A. Serra-Perez, and A. Garcia-Garcia, “3D surface reconstruction of noisy point clouds using Growing Neural Gas,” Neural processing letters, vol. 43, iss. 2, pp. 401-423, 2016.
    [Bibtex]
    @article{Orts2015,
      author   = {Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Morell, Vicente and Cazorla, Miguel and Serra-Perez, J. A. and Garcia-Garcia, Alberto},
      title    = {{3D} surface reconstruction of noisy point clouds using {Growing Neural Gas}},
      journal  = {Neural Processing Letters},
      year     = {2016},
      volume   = {43},
      number   = {2},
      pages    = {401--423},
      issn     = {1370-4621},
      abstract = {With the advent of low-cost 3D sensors and 3D printers, scene and object 3D surface reconstruction has become an important research topic in the last years. In this work, we propose an automatic (unsupervised) method for 3D surface reconstruction from raw unorganized point clouds acquired using low-cost 3D sensors. We have modified the Growing Neural Gas (GNG) network, which is a suitable model because of its flexibility, rapid adaptation and excellent quality of representation, to perform 3D surface reconstruction of different real-world objects and scenes. Some improvements have been made on the original algorithm considering colour and surface normal information of input data during the learning stage and creating complete triangular meshes instead of basic wire-frame representations. The proposed method is able to successfully create 3D faces online, whereas existing 3D reconstruction methods based on Self-Organizing Maps (SOMs) required post-processing steps to close gaps and holes produced during the 3D reconstruction process. A set of quantitative and qualitative experiments were carried out to validate the proposed method. The method has been implemented and tested on real data, and has been found to be effective at reconstructing noisy point clouds obtained using low-cost 3D sensors.},
    }
  • [DOI] J. C. Rangel, M. Cazorla, I. Garcia-Varea, J. Martinez-Gomez, E. Fromont, and M. Sebban, “Scene Classification from Semantic Labeling,” Advanced robotics, vol. 30, iss. 11–12, pp. 758-769, 2016.
    [Bibtex]
    @article{Rangel2016,
      author   = {Jose Carlos Rangel and Miguel Cazorla and Ismael Garcia-Varea and Jesus Martinez-Gomez and Elisa Fromont and Marc Sebban},
      title    = {Scene Classification from Semantic Labeling},
      journal  = {Advanced Robotics},
      year     = {2016},
      volume   = {30},
      number   = {11--12},
      pages    = {758--769},
      doi      = {10.1080/01691864.2016.1164621},
      abstract = {Finding an appropriate image representation is a crucial problem in robotics. This problem has been classically addressed by means of computer vision techniques, where local and global features are used. The selection or/and combination of different features is carried out by taking into account repeatability and distinctiveness, but also the specific problem to solve. In this article, we propose the generation of image descriptors from general purpose semantic annotations. This approach has been evaluated as source of information for a scene classifier, and specifically using Clarifai as the semantic annotation tool. The experimentation has been carried out using the ViDRILO toolbox as benchmark, which includes a comparison of state-of-the-art global features and tools to make comparisons among them. According to the experimental results, the proposed descriptor performs similarly to well-known domain-specific image descriptors based on global features in a scene classification task. Moreover, the proposed descriptor is based on generalist annotations without any type of problem-oriented parameter tuning.},
    }
  • [DOI] S. Orts-Escolano, J. Garcia-Rodriguez, M. Cazorla, Vicente Morell, J. Azorin, M. Saval, A. Garcia-Garcia, and V. Villena, “Bioinspired point cloud representation: 3d object tracking,” Neural computing and applications, vol. Accepted, 2016.
    [Bibtex]
    @article{Orts-Escolano2016NCAA,
      author   = {Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Cazorla, Miguel and Morell, Vicente and Azorin, Jorge and Saval, Marcelo and Garcia-Garcia, Alberto and Villena, Victor},
      title    = {Bioinspired Point Cloud Representation: {3D} object tracking},
      journal  = {Neural Computing and Applications},
      year     = {2016},
      note     = {Accepted for publication},
      doi      = {10.1007/s00521-016-2585-0},
      abstract = {We consider the problem of processing point cloud sequences. In particular, we represent and track objects in dynamic scenes acquired using low-cost sensors such as the Kinect. An efficient neural network based approach is proposed to represent and estimate 3D objects motion. This system addresses multiple computer vision tasks such as object segmentation, representation, motion analysis and tracking. The use of a neural network allows the unsupervised estimation of motion and the representation of objects in the scene. This proposal avoids the problem of finding corresponding features while tracking moving objects. A set of experiments are presented that demonstrate the validity of our method to track 3D objects. Moreover, an optimization strategy is applied to achieve real-time processing rates. Favorable results are presented demonstrating the capabilities of the GNG-based algorithm for this task. Some videos of the proposed system are available on the project website.},
    }
  • [DOI] A. Angelopoulou, J. G. Rodriguez, S. Orts-Escolano, G. Gupta, and A. Psarrou, “Fast 2d/3d object representation with growing neural gas,” Neural computing and applications, vol. Accepted, 2016.
    [Bibtex]
    @article{Angelopoulou2016NCAA,
      author   = {Angelopoulou, Anastassia and Garcia Rodriguez, Jose and Orts-Escolano, Sergio and Gupta, Gaurav and Psarrou, Alexandra},
      title    = {Fast {2D}/{3D} Object Representation with Growing Neural Gas},
      journal  = {Neural Computing and Applications},
      year     = {2016},
      note     = {Accepted for publication},
      doi      = {10.1007/s00521-016-2579-y},
      abstract = {We consider the problem of processing point cloud sequences. In particular, we represent and track objects in dynamic scenes acquired using low-cost sensors such as the Kinect. An efficient neural network based approach is proposed to represent and estimate 3D objects motion. This system addresses multiple computer vision tasks such as object segmentation, representation, motion analysis and tracking. The use of a neural network allows the unsupervised estimation of motion and the representation of objects in the scene. This proposal avoids the problem of finding corresponding features while tracking moving objects. A set of experiments are presented that demonstrate the validity of our method to track 3D objects. Moreover, an optimization strategy is applied to achieve real-time processing rates. Favorable results are presented demonstrating the capabilities of the GNG-based algorithm for this task. Some videos of the proposed system are available on the project website.},
    }
  • [DOI] A. Garcia-Garcia, S. Orts-Escolano, J. Garcia-Rodriguez, and M. Cazorla, “Interactive 3D object recognition pipeline on mobile GPGPU computing platforms using low-cost RGB-D sensors,” Journal of Real-Time Image Processing, vol. Accepted, 2016.
    [Bibtex]
    @article{Garcia2016RTIP,
      author   = {Garcia-Garcia, Alberto and Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Cazorla, Miguel},
      title    = {Interactive {3D} object recognition pipeline on mobile {GPGPU} computing platforms using low-cost {RGB-D} sensors},
      journal  = {Journal of Real-Time Image Processing},
      year     = {2016},
      note     = {Accepted for publication},
      doi      = {10.1007/s11554-016-0607-x},
      abstract = {In this work, we propose the implementation of a 3D object recognition system which will be optimized to operate under demanding time constraints. The system must be robust so that objects can be recognized properly in poor light conditions and cluttered scenes with significant levels of occlusion. An important requirement must be met: the system must exhibit a reasonable performance running on a low power consumption mobile GPU computing platform (NVIDIA Jetson TK1) so that it can be integrated in mobile robotics systems, ambient intelligence or ambient assisted living applications. The acquisition system is based on the use of color and depth (RGB-D) data streams provided by low-cost 3D sensors like Microsoft Kinect or PrimeSense Carmine. The resulting system is able to recognize objects in a scene in less than 7 seconds, offering an interactive frame rate and thus allowing its deployment on a mobile robotic platform. Because of that, the system has many possible applications, ranging from mobile robot navigation and semantic scene labeling to human-computer interaction (HCI) systems based on visual information. A video showing the proposed system while performing online object recognition in various scenes is available on our project website.},
    }
  • A. Rodriguez, F. Gomez-Donoso, J. Martinez-Gomez, and M. Cazorla, “Building 3D maps with tag information,” in XVII Workshop en Agentes Físicos (WAF 2016), 2016.
    [Bibtex]
    @inproceedings{Rodriguez2016,
      author    = {Rodriguez, Angel and Gomez-Donoso, Francisco and Martinez-Gomez, Jesus and Cazorla, Miguel},
      title     = {Building {3D} maps with tag information},
      booktitle = {{XVII} Workshop en Agentes F{\'\i}sicos ({WAF} 2016)},
      year      = {2016},
    }
  • M. Saval-Calvo, J. Azorin-Lopez, A. Fuster-Guillo, J. Garcia-Rodriguez, S. Orts-Escolano, and A. Garcia-Garcia, “Evaluation of sampling method effects in 3D non-rigid registration,” Neural Computing and Applications, pp. 1-15, 2016.
    [Bibtex]
    @article{Saval2016,
      author  = {Saval-Calvo, Marcelo and Azorin-Lopez, Jorge and Fuster-Guillo, Andres and Garcia-Rodriguez, Jose and Orts-Escolano, Sergio and Garcia-Garcia, Alberto},
      title   = {Evaluation of sampling method effects in {3D} non-rigid registration},
      journal = {Neural Computing and Applications},
      year    = {2016},
      pages   = {1--15},
    }
  • J. Navarrete, D. Viejo, and M. Cazorla, “Color smoothing for RGB-D data using entropy information,” Applied Soft Computing, vol. 46, pp. 361-380, 2016.
    [Bibtex]
    @article{navarrete2016color,
      author    = {Navarrete, Javier and Viejo, Diego and Cazorla, Miguel},
      title     = {Color smoothing for RGB-D data using entropy information},
      journal   = {Applied Soft Computing},
      year      = {2016},
      volume    = {46},
      pages     = {361--380},
      publisher = {Elsevier},
    }
  • [DOI] J. C. Rangel, V. Morell, M. Cazorla, S. Orts-Escolano, and J. Garcia-Rodriguez, “Object recognition in noisy rgb-d data using gng,” Pattern analysis and applications, pp. 1-16, 2016.
    [Bibtex]
    @article{Rangel2016PAAA,
    author = {Rangel, Jose Carlos and Morell, Vicente and Cazorla, Miguel and Orts-Escolano, Sergio and Garcia-Rodriguez, Jose},
    title = {Object recognition in noisy {RGB-D} data using {GNG}},
    journal = {Pattern Analysis and Applications},
    year = {2016},
    pages = {1--16},
    issn = {1433-755X},
    doi = {10.1007/s10044-016-0546-y},
    abstract = {Object recognition in 3D scenes is a research field in which there is intense activity guided by the problems related to the use of 3D point clouds. Some of these problems are influenced by the presence of noise in the cloud that reduces the effectiveness of a recognition process. This work proposes a method for dealing with the noise present in point clouds by applying the growing neural gas (GNG) network filtering algorithm. This method is able to represent the input data with the desired number of neurons while preserving the topology of the input space. The GNG obtained results which were compared with a Voxel grid filter to determine the efficacy of our approach. Moreover, since a stage of the recognition process includes the detection of keypoints in a cloud, we evaluated different keypoint detectors to determine which one produces the best results in the selected pipeline. Experiments show how the GNG method yields better recognition results than other filtering algorithms when noise is present.}
    }

2015

  • J. C. Rangel, V. Morell, M. Cazorla, S. Orts-Escolano, and J. García-Rodríguez, “Object recognition in noisy RGB-D data,” in International Work-Conference on the Interplay between Natural and Artificial Computation, 2015, pp. 261-270.
    [Bibtex]
    @inproceedings{rangel2015object,
    author = {Rangel, Jos{\'e} Carlos and Morell, Vicente and Cazorla, Miguel and Orts-Escolano, Sergio and Garc{\'\i}a-Rodr{\'\i}guez, Jos{\'e}},
    title = {Object Recognition in Noisy {RGB-D} Data},
    booktitle = {International Work-Conference on the Interplay Between Natural and Artificial Computation},
    pages = {261--270},
    year = {2015},
    publisher = {Springer International Publishing}
    }
  • M. Saval-Calvo, S. Orts-Escolano, J. Azorin-Lopez, J. Garcia-Rodriguez, A. Fuster-Guillo, V. Morell-Gimenez, and M. Cazorla, “A comparative ſtudy of downsampling techniques for non-rigid point ſet registration using color,” in International work-conference on the ınterplay between natural and artificial computation, 2015, pp. 281-290.
    [Bibtex]
    @inproceedings{saval2015comparative,
    author = {Saval-Calvo, Marcelo and Orts-Escolano, Sergio and Azorin-Lopez, Jorge and Garcia-Rodriguez, Jose and Fuster-Guillo, Andres and Morell-Gimenez, Vicente and Cazorla, Miguel},
    title = {A Comparative Study of Downsampling Techniques for Non-rigid Point Set Registration Using Color},
    booktitle = {International Work-Conference on the Interplay Between Natural and Artificial Computation},
    pages = {281--290},
    year = {2015},
    publisher = {Springer International Publishing}
    }
  • S. Orts-Escolano, J. Garcia-Rodriguez, V. Morell, M. Cazorla, A. Garcia-Garcia, and S. Ovidiu-Oprea, “Optimized representation of 3d ſequences using neural networks,” in International work-conference on the ınterplay between natural and artificial computation, 2015, pp. 251-260.
    [Bibtex]
    @inproceedings{orts2015optimized,
    author = {Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Morell, Vicente and Cazorla, Miguel and Garcia-Garcia, Alberto and Ovidiu-Oprea, Sergiu},
    title = {Optimized Representation of {3D} Sequences Using Neural Networks},
    booktitle = {International Work-Conference on the Interplay Between Natural and Artificial Computation},
    pages = {251--260},
    year = {2015},
    publisher = {Springer International Publishing}
    }
  • [DOI] J. Gomez, B. Caputo, M. Cazorla, H. Christensen, M. Fornoni, I. Garcia-Varea, and A. Pronobis, “Where Are We After Five Editions?: Robot Vision Challenge, a Competition that Evaluates Solutions for the Visual Place Classification Problem,” IEEE Robotics and Automation Magazine, vol. 22, iss. 4, pp. 147-156, 2015.
    [Bibtex]
    @article{7349126,
    author = {Gomez, J. and Caputo, B. and Cazorla, M. and Christensen, H. and Fornoni, M. and Garcia-Varea, I. and Pronobis, A.},
    title = {Where Are We After Five Editions?: {Robot Vision Challenge}, a Competition that Evaluates Solutions for the Visual Place Classification Problem},
    journal = {IEEE Robotics \& Automation Magazine},
    year = {2015},
    volume = {22},
    number = {4},
    pages = {147--156},
    doi = {10.1109/MRA.2015.2460931},
    issn = {1070-9932},
    keywords = {Benchmark testing;Object recognition;Proposals;Rob}
    }
  • A. A. Revett, A. Psarrou, J. Garcia-Rodriguez, S. Orts-Escolano, J. Azorin-Lopez, and Kenneth, “3D Reconstruction of Medical Images from Slices Automatically Landmarked with Growing Neural Models,” Neurocomputing, vol. 150, Part, pp. 16-25, 2015.
    [Bibtex]
    @article{Angelopouloul2015,
    author = {Angelopoulou, Anastassia and Psarrou, Alexandra and Garcia-Rodriguez, Jose and Orts-Escolano, Sergio and Azorin-Lopez, Jorge and Revett, Kenneth},
    title = {{3D} Reconstruction of Medical Images from Slices Automatically Landmarked with Growing Neural Models},
    journal = {Neurocomputing},
    volume = {150, Part},
    pages = {16--25},
    year = {2015},
    abstract = {In this study, we utilise a novel approach to segment out the ventricular system in a series of high resolution T1-weighted {MR} images. We present a brain ventricles fast reconstruction method. The method is based on the processing of brain sections and establishing a fixed number of landmarks onto those sections to reconstruct the ventricles 3D surface. Automated landmark extraction is accomplished through the use of the self-organising network, the growing neural gas (GNG), which is able to topographically map the low dimensionality of the network to the high dimensionality of the contour manifold without requiring a priori knowledge of the input space structure. Moreover, our {GNG} landmark method is tolerant to noise and eliminates outliers. Our method accelerates the classical surface reconstruction and filtering processes. The proposed method offers higher accuracy compared to methods with similar efficiency as Voxel Grid.}
    }
  • [DOI] B. J. Boom, S. Orts-Escolano, X. X. Ning, S. McDonagh, P. Sandilands, and R. B. Fisher, “Interactive light source position estimation for augmented reality with an rgb-d camera,” Computer animation and virtual worlds, p. n/a–n/a, 2015.
    [Bibtex]
    @article{Boom2016,
    author = {Boom, Bastiaan J. and Orts-Escolano, Sergio and Ning, Xin X. and McDonagh, Steven and Sandilands, Peter and Fisher, Robert B.},
    title = {Interactive light source position estimation for augmented reality with an {RGB-D} camera},
    journal = {Computer Animation and Virtual Worlds},
    year = {2015},
    note = {cav.1686},
    doi = {10.1002/cav.1686},
    issn = {1546-427X},
    keywords = {light source estimation, augmented reality, GPU implementation, RGB-D camera},
    url = {http://dx.doi.org/10.1002/cav.1686}
    }
  • [DOI] M. Cazorla and D. Viejo, “JavaVis: An integrated computer vision library for teaching computer vision,” Computer applications in engineering education, vol. 23, iss. 2, pp. 258-267, 2015.
    [Bibtex]
    @article{CAE:CAE21594,
    author = {Cazorla, Miguel and Viejo, Diego},
    title = {{JavaVis}: An integrated computer vision library for teaching computer vision},
    journal = {Computer Applications in Engineering Education},
    volume = {23},
    number = {2},
    pages = {258--267},
    year = {2015},
    doi = {10.1002/cae.21594},
    issn = {1099-0542},
    keywords = {3D data,Java GUI,computer vision,image processing teaching,open source},
    abstract = {In this article, we present a new framework oriented to teach Computer Vision related subjects called JavaVis. It is a computer vision library divided in three main areas: 2D package is featured for classical computer vision processing; 3D package, which includes a complete 3D geometric toolset, is used for 3D vision computing; Desktop package comprises a tool for graphic designing and testing of new algorithms. JavaVis is designed to be easy to use, both for launching and testing existing algorithms and for developing new ones.}
    }
  • M. Cazorla and D. Viejo, “Experiences Using an Open Source Software Library to Teach Computer Vision Subjects,” Journal of technology and ſcience education, vol. 4, iss. 3, pp. 214-227, 2015.
    [Bibtex]
    @article{cazorla2015,
    author = {Cazorla, Miguel and Viejo, Diego},
    title = {Experiences Using an Open Source Software Library to Teach Computer Vision Subjects},
    journal = {Journal of Technology and Science Education},
    volume = {4},
    number = {3},
    pages = {214--227},
    year = {2015},
    issn = {2014-5349},
    keywords = {Computer vision teaching,Open source,engineering},
    abstract = {Machine vision is an important subject in computer science and engineering degrees. For laboratory experimentation, it is desirable to have a complete and easy-to-use tool. In this work we present a Java library, oriented to teaching computer vision. We have designed and built the library from the scratch with enfasis on readability and understanding rather than on efficiency. However, the library can also be used for research purposes.
    JavaVis is an open source Java library, oriented to the teaching of Computer Vision. It consists of a framework with several features that meet its demands. It has been designed to be easy to use: the user does not have to deal with internal structures or graphical interface, and should the student need to add a new algorithm it can be done simply enough.
    Once we sketch the library, we focus on the experience the student gets using this library in several computer vision courses. Our main goal is to find out whether the students understand what they are doing, that is, find out how much the library helps the student in grasping the basic concepts of computer vision. In the last four years we have conducted surveys to assess how much the students have improved their skills by using this library.
    }
    }
  • M. Cazorla, J. Garcia-Rodriguez, J. M. C. Plaza, I. G. Varea, V. Matellan, F. M. Rico, J. Martinez-Gomez, F. J. R. Lera, C. S. Mejias, and M. E. M. Sahuquillo, “SIRMAVED: Development of a comprehensive robotic system for monitoring and interaction for people with acquired brain damage and dependent people,” in Xvı conferencia de la asociacion espanola para la ınteligencia artificial (caepıa), 2015.
    [Bibtex]
    @inproceedings{Cazorla2015Caepia1,
    author = {Cazorla, Miguel and Garcia-Rodriguez, Jose and Plaza, Jose Maria Canas and Varea, Ismael Garcia and Matellan, Vicente and Rico, Francisco Martin and Martinez-Gomez, Jesus and Lera, Francisco Javier Rodriguez and Mejias, Cristina Suarez and Sahuquillo, Maria Encarnacion Martinez},
    title = {{SIRMAVED}: Development of a comprehensive robotic system for monitoring and interaction for people with acquired brain damage and dependent people},
    booktitle = {XVI Conferencia de la Asociacion Espanola para la Inteligencia Artificial (CAEPIA)},
    year = {2015}
    }
  • F. G. Donoso and M. Cazorla, “Recognizing Schaeffer’s Gestures for Robot Interaction,” in Actas de la Conferencia de la Asociacion Espanola para la Inteligencia Artificial (CAEPIA), 2015.
    [Bibtex]
    @inproceedings{Gomez2015,
    author = {Gomez Donoso, Francisco and Cazorla, Miguel},
    title = {Recognizing {Schaeffer's} Gestures for Robot Interaction},
    booktitle = {Actas de la Conferencia de la Asociacion Espanola para la Inteligencia Artificial (CAEPIA)},
    year = {2015},
    keywords = {3d gesture recognition,Shaeffer's gestures,human robot interaction},
    url = {http://simd.albacete.org/actascaepia15/papers/01045.pdf},
    abstract = {In this paper we present a new interaction system for Schaeffer's gesture language recognition. It uses the information provided by an RGBD camera to capture body motion and recognize gestures. Schaeffer's gestures are a reduced set of gestures designed for people with cognitive disabilities. The system is able to send alarms to an assistant or even a robot for human robot interaction.}
    }
  • J. Martinez-Gomez, M. Cazorla, I. Garcia-Varea, and C. Romero-Gonzalez, “Object categorization from RGB-D local features and Bag Of Words,” in 2nd ıberian robotics conference, 2015.
    [Bibtex]
    @inproceedings{MartinezRobot2015,
    author = {Martinez-Gomez, Jesus and Cazorla, Miguel and Garcia-Varea, Ismael and Romero-Gonzalez, Cristina},
    title = {Object categorization from {RGB-D} local features and {Bag Of Words}},
    booktitle = {2nd Iberian robotics conference},
    year = {2015}
    }
  • V. Morell, J. Martinez-Gomez, M. Cazorla, and I. Garcia-Varea, “ViDRILO: The Visual and Depth Robot Indoor Localization with Objects information dataset,” International journal of robotics research, vol. 34, iss. 14, pp. 1681-1687, 2015.
    [Bibtex]
    @article{Morell2015,
    author = {Morell, Vicente and Martinez-Gomez, Jesus and Cazorla, Miguel and Garcia-Varea, Ismael},
    title = {{ViDRILO}: The Visual and Depth Robot Indoor Localization with Objects information dataset},
    journal = {International Journal of Robotics Research},
    volume = {34},
    number = {14},
    pages = {1681--1687},
    year = {2015}
    }
  • S. Orts-Escolano, J. Garcia-Rodriguez, J. A. Serra-Perez, A. Jimeno-Morenilla, A. Garcia-Garcia, V. Morell, and M. Cazorla, “3D Model Reconstruction using Neural Gas Accelerated on GPUs,” Applied ſoft computing, vol. 32, pp. 87-100, 2015.
    [Bibtex]
    @article{Orts2015,
    author = {Orts-Escolano, S. and Garcia-Rodriguez, J. and Serra-Perez, J. A. and Jimeno-Morenilla, A. and Garcia-Garcia, A. and Morell, V. and Cazorla, Miguel},
    title = {{3D} Model Reconstruction using {Neural Gas} Accelerated on {GPUs}},
    journal = {Applied Soft Computing},
    volume = {32},
    pages = {87--100},
    year = {2015},
    issn = {1568-4946},
    abstract = {In this work, we propose the use of the Neural Gas (NG), a neural network with unsupervised competitive hebbian learning (CHL), to develop a reverse engineering process. This is a simple and accurate method to reconstruct objects from the point cloud obtained from overlapped multiple views using low cost sensors. In contrast to other methods that may need several stages that include downsampling, noise filtering and many other tasks, the NG automatically obtains the 3D model of the scanned objects. The combination of the acquired and reconstructed 3D models with virtual and augmented reality environments allows the users interaction and also permits developing a virtual design and manufacturing system.
    To demonstrate the validity of our proposal we tested our method with several models and performed a study of the neural network parameterization calculating the quality of representation and also comparing results with other neural methods like Growing Neural Gas and Kohonen maps or clasical methods like Voxel Grid. We also reconstructed models acquired by low cost sensors that can be included in virtual and augmented reality environments to redesign or manipulation purpose. Since the NG algorithm has a strong computational cost we propose its acceleration. We have redesigned and implemented the NG learning algorithm to fit it onto a Graphic Processor Unit using CUDA. A speed-up of 180x faster is obtained compared to the sequential CPU version.}
    }
  • S. Orts-Escolano, J. Garcia-Rodriguez, V. Morell, M. Cazorla, M. Saval-Calvo, and J. Azorin, “Processing Point Cloud Sequences with Growing Neural Gas,” in Neural networks (ıjcnn), the 2015 ınternational joint conference on, 2015.
    [Bibtex]
    @inproceedings{Orts2015IJCNN,
    author = {Orts-Escolano, S. and Garcia-Rodriguez, J. and Morell, V. and Cazorla, M. and Saval-Calvo, M. and Azorin, J.},
    title = {Processing Point Cloud Sequences with {Growing Neural Gas}},
    booktitle = {Neural Networks (IJCNN), The 2015 International Joint Conference on},
    year = {2015}
    }
  • M. Saval-Calvo, S. Orts-Escolano, J. Azorin-Lopez, J. Garcia-Rodriguez, A. Fuster-Guillo, V. Morell-Gimenez, and M. Cazorla, “Non-rigid point set registration using color and data downsampling,” in Neural networks (ıjcnn), the 2015 ınternational joint conference on, 2015.
    [Bibtex]
    @inproceedings{Saval2015IJCNN,
    author = {Saval-Calvo, Marcelo and Orts-Escolano, Sergio and Azorin-Lopez, Jorge and Garcia-Rodriguez, Jose and Fuster-Guillo, Andres and Morell-Gimenez, Vicente and Cazorla, Miguel},
    title = {Non-rigid point set registration using color and data downsampling},
    booktitle = {Neural Networks (IJCNN), The 2015 International Joint Conference on},
    year = {2015}
    }
  • [DOI] S. Orts-Escolano, V. Morell, J. Garcia-Rodriguez, M. Cazorla, and R. Fisher, “Real-time 3D semi-local surface patch extraction using GPGPU,” Journal of real-time ımage processing, vol. 10, iss. 4, pp. 647-666, 2015.
    [Bibtex]
    @article{Orts-Escolano2015JRTIP,
    author = {Orts-Escolano, Sergio and Morell, Vicente and Garcia-Rodriguez, Jose and Cazorla, Miguel and Fisher, Robert B.},
    title = {Real-time {3D} semi-local surface patch extraction using {GPGPU}},
    journal = {Journal of Real-Time Image Processing},
    volume = {10},
    number = {4},
    pages = {647--666},
    year = {2015},
    publisher = {Springer Berlin Heidelberg},
    doi = {10.1007/s11554-013-0385-7},
    issn = {1861-8200},
    keywords = {Real-time; GPGPU; RGB-D; 3D local shape descriptor}
    }
  • J. C. Rangel, V. Morell, M. Cazorla, S. Orts-Escolano, and J. Garcia-Rodriguez, “Using GNG on 3D Object Recognition in Noisy RGB-D data,” in Neural networks (ıjcnn), the 2015 ınternational joint conference on, 2015.
    [Bibtex]
    @inproceedings{Rangel2015IJCNN,
    author = {Rangel, J. C. and Morell, V. and Cazorla, M. and Orts-Escolano, S. and Garcia-Rodriguez, J.},
    title = {Using {GNG} on {3D} Object Recognition in Noisy {RGB-D} data},
    booktitle = {Neural Networks (IJCNN), The 2015 International Joint Conference on},
    year = {2015}
    }
  • J. C. Rangel, M. Cazorla, I. G. Varea, J. Martinez-Gomez, E. Fromont, and M. Sebban, “Computing Image Descriptors from Annotations Acquired from External Tools.” 2015.
    [Bibtex]
    @inproceedings{RangelRobot2015,
    author = {Rangel, Jose Carlos and Cazorla, Miguel and Varea, Ismael Garcia and Martinez-Gomez, Jesus and Fromont, Elisa and Sebban, Marc},
    title = {Computing Image Descriptors from Annotations Acquired from External Tools},
    booktitle = {2nd Iberian robotics conference},
    year = {2015}
    }

2014

  • M. Cazorla, P. Gil, S. Puente, J. L. Munoz, and D. Pastor, “An improvement of a SLAM RGB-D method with movement prediction derived from a study of visual features,” Advanced robotics, vol. 28, iss. 18, pp. 1231-1242, 2014.
    [Bibtex]
    @article{Cazorla2014An,
    author = {Cazorla, Miguel and Gil, Pablo and Puente, Santiago and Munoz, Jose Luis and Pastor, Daniel},
    title = {An improvement of a {SLAM} {RGB-D} method with movement prediction derived from a study of visual features},
    journal = {Advanced Robotics},
    volume = {28},
    number = {18},
    pages = {1231--1242},
    year = {2014},
    keywords = {3D data,RGB-D data,SLAM,visual features},
    abstract = {This paper presents a method for the fast calculation of a robot's egomotion using visual features. The method is part of a complete system for automatic map building and Simultaneous Location and Mapping (SLAM). The method uses optical flow to determine whether the robot has undergone a movement. If so, some visual features that do not satisfy several criteria are deleted, and then egomotion is calculated. Thus, the proposed method improves the efficiency of the whole process because not all the data is processed. We use a state-of-the-art algorithm (TORO) to rectify the map and solve the SLAM problem. Additionally, a study of different visual detectors and descriptors has been conducted to identify which of them are more suitable for the SLAM problem. Finally, a navigation method is described using the map obtained from the SLAM solution.}
    }
  • J. Garcia-Rodriguez, S. Orts-Escolano, N. Angelopoulou, A. Psarrou, and J. Azorin-Lopez, “Real time motion estimation using a neural architecture implemented on GPUs,” in Journal of real-time ımage processing, 2014.
    [Bibtex]
    @article{garcia2014z,
    author = {Garcia-Rodriguez, J. and Orts-Escolano, S. and Angelopoulou, N. and Psarrou, A. and Azorin-Lopez, J.},
    title = {Real time motion estimation using a neural architecture implemented on {GPUs}},
    journal = {Journal of Real-Time Image Processing},
    year = {2014}
    }
  • [DOI] D. Gil, J. Garcia-Rodriguez, M. Cazorla, and M. Johnsson, “SARASOM: a supervised architecture based on the recurrent associative SOM,” Neural computing and applications, pp. 1-13, 2014.
    [Bibtex]
    @article{gil2014,
    author = {Gil, David and Garcia-Rodriguez, Jose and Cazorla, Miguel and Johnsson, Magnus},
    title = {{SARASOM}: a supervised architecture based on the recurrent associative {SOM}},
    journal = {Neural Computing and Applications},
    pages = {1--13},
    year = {2014},
    publisher = {Springer London},
    doi = {10.1007/s00521-014-1785-8},
    issn = {0941-0643},
    keywords = {Recurrent associative self-organizing map; Supervi},
    abstract = {We present and evaluate a novel supervised recurrent neural network architecture, the SARASOM, based on the associative self-organizing map. The performance of the SARASOM is evaluated and compared with the Elman network as well as with a hidden Markov model (HMM) in a number of prediction tasks using sequences of letters, including some experiments with a reduced lexicon of 15 words. The results were very encouraging with the SARASOM learning better and performing with better accuracy than both the Elman network and the HMM.}
    }
  • [DOI] S. Orts-Escolano, J. Garcia-Rodriguez, V. Morell, M. Cazorla, J. Azorin, and J. M. Garcia-Chamizo, “Parallel Computational Intelligence-Based Multi-Camera Surveillance System,” Journal of ſensor and actuator networks, vol. 3, iss. 2, pp. 95-112, 2014.
    [Bibtex]
    @article{jsan3020095,
    author = {Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Morell, Vicente and Cazorla, Miguel and Azorin, Jorge and Garcia-Chamizo, Juan Manuel},
    title = {Parallel Computational Intelligence-Based Multi-Camera Surveillance System},
    journal = {Journal of Sensor and Actuator Networks},
    volume = {3},
    number = {2},
    pages = {95--112},
    year = {2014},
    doi = {10.3390/jsan3020095},
    issn = {2224-2708},
    url = {http://www.mdpi.com/2224-2708/3/2/95},
    keywords = {growing neural gas; camera networks; visual survei},
    abstract = {In this work, we present a multi-camera surveillance system based on the use of self-organizing neural networks to represent events on video. The system processes several tasks in parallel using GPUs (graphic processor units). It addresses multiple vision tasks at various levels, such as segmentation, representation or characterization, analysis and monitoring of the movement. These features allow the construction of a robust representation of the environment and interpret the behavior of mobile agents in the scene. It is also necessary to integrate the vision module into a global system that operates in a complex environment by receiving images from multiple acquisition devices at video frequency. Offering relevant information to higher level systems, monitoring and making decisions in real time, it must accomplish a set of requirements, such as: time constraints, high availability, robustness, high processing speed and re-configurability. We have built a system able to represent and analyze the motion in video acquired by a multi-camera network and to process multi-source data in parallel on a multi-GPU architecture.}
    }
  • J. Montoyo, V. Morell, M. Cazorla, J. Garcia-Rodriguez, and S. Orts-Escolano, “Registration methods for RGB-D cameras accelerated on GPUs,” in International symposium on robotics, ıſr, 2014.
    [Bibtex]
    @inproceedings{Montoyo20143Registration,
    author = {Montoyo, Javier and Morell, Vicente and Cazorla, Miguel and Garcia-Rodriguez, Jose and Orts-Escolano, Sergio},
    title = {Registration methods for {RGB-D} cameras accelerated on {GPUs}},
    booktitle = {International symposium on robotics, ISR},
    year = {2014}
    }
  • [DOI] V. Morell, S. Orts-Escolano, M. Cazorla, and J. Garcia-Rodriguez, “Geometric 3D point cloud compression,” Pattern recognition letters, vol. 50, pp. 55-62, 2014.
    [Bibtex]
    @article{Morell2014,
    author = {Morell, Vicente and Orts-Escolano, Sergio and Cazorla, Miguel and Garcia-Rodriguez, Jose},
    title = {Geometric {3D} point cloud compression},
    journal = {Pattern Recognition Letters},
    year = {2014},
    volume = {50},
    pages = {55--62},
    doi = {10.1016/j.patrec.2014.05.016},
    issn = {0167-8655},
    keywords = {3D data; Compression; Kinect},
    abstract = { The use of 3D data in mobile robotics applications provides valuable information about the robot's environment but usually the huge amount of 3D information is unmanageable by the robot storage and computing capabilities. A data compression is necessary to store and manage this information but preserving as much information as possible. In this paper, we propose a 3D lossy compression system based on plane extraction which represent the points of each scene plane as a Delaunay triangulation and a set of points/area information. The compression system can be customized to achieve different data compression or accuracy ratios. It also supports a color segmentation stage to preserve original scene color information and provides a realistic scene reconstruction. The design of the method provides a fast scene reconstruction useful for further visualization or processing tasks. }
    }
  • V. Morell, M. Cazorla, S. Orts-Escolano, and J. Garcia-Rodriguez, “3D Maps Representation using GNG,” in Neural networks (ıjcnn), the 2014 ınternational joint conference on, 2014.
    [Bibtex]
    @inproceedings{Morell20143d,
    author = {Morell, Vicente and Cazorla, Miguel and Orts-Escolano, Sergio and Garcia-Rodriguez, Jose},
    title = {{3D} Maps Representation using {GNG}},
    booktitle = {Neural Networks (IJCNN), The 2014 International Joint Conference on},
    year = {2014}
    }
  • [DOI] V. Morell-Gimenez, M. Saval-Calvo, J. Azorin-Lopez, J. Garcia-Rodriguez, M. Cazorla, S. Orts-Escolano, and A. Fuster-Guillo, “A Comparative Study of Registration Methods for RGB-D Video of Static Scenes,” Sensors, vol. 14, iss. 5, pp. 8547-8576, 2014.
    [Bibtex]
    @article{morell2014comparative,
    author = {Morell-Gimenez, Vicente and Saval-Calvo, Marcelo and Azorin-Lopez, Jorge and Garcia-Rodriguez, Jose and Cazorla, Miguel and Orts-Escolano, Sergio and Fuster-Guillo, Andres},
    title = {A Comparative Study of Registration Methods for {RGB-D} Video of Static Scenes},
    journal = {Sensors},
    volume = {14},
    number = {5},
    pages = {8547--8576},
    month = may,
    year = {2014},
    publisher = {Multidisciplinary Digital Publishing Institute},
    doi = {10.3390/s140508547},
    issn = {1424-8220},
    url = {http://www.mdpi.com/1424-8220/14/5/8547},
    keywords = {RGB-D sensor; registration; robotics mapping; obje},
    abstract = {The use of RGB-D sensors for mapping and recognition tasks in robotics or, in general, for virtual reconstruction has increased in recent years. The key aspect of these kinds of sensors is that they provide both depth and color information using the same device. In this paper, we present a comparative analysis of the most important methods used in the literature for the registration of subsequent RGB-D video frames in static scenarios. The analysis begins by explaining the characteristics of the registration problem, dividing it into two representative applications: scene modeling and object reconstruction. Then, a detailed experimentation is carried out to determine the behavior of the different methods depending on the application. For both applications, we used standard datasets and a new one built for object reconstruction.}
    }
  • S. Orts-Escolano, J. Garcia-Rodriguez, V. Morella, M. Cazorla, and J. M. Garcia-Chamizo, “3D Colour Object Reconstruction based on Growing Neural Gas,” in Neural networks (ıjcnn), the 2014 ınternational joint conference on, 2014.
    [Bibtex]
    @inproceedings{Orts20143d,
    author = {Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Morell, Vicente and Cazorla, Miguel and Garcia-Chamizo, Juan Manuel},
    title = {{3D} Colour Object Reconstruction based on {Growing Neural Gas}},
    booktitle = {Neural Networks (IJCNN), The 2014 International Joint Conference on},
    year = {2014}
    }
  • D. Viejo, J. Garcia-Rodriguez, and M. Cazorla, “Combining Visual Features and Growing Neural Gas Networks for Robotic 3D SLAM,” Information ſciences, vol. 276, pp. 174-185, 2014.
    [Bibtex]
    @article{viejo2014combining,
    author = {Viejo, Diego and Garcia-Rodriguez, Jose and Cazorla, Miguel},
    title = {Combining Visual Features and {Growing Neural Gas} Networks for Robotic {3D} {SLAM}},
    journal = {Information Sciences},
    volume = {276},
    pages = {174--185},
    year = {2014},
    keywords = {GNG; SLAM; 3D registration},
    abstract = {The use of 3D data in mobile robotics provides valuable information about the robot's environment. Traditionally, stereo cameras have been used as a low-cost 3D sensor. However, the lack of precision and texture for some surfaces suggests that the use of other 3D sensors could be more suitable. In this work, we examine the use of two sensors: an infrared SR4000 and a Kinect camera. We use a combination of 3D data obtained by these cameras, along with features obtained from 2D images acquired from these cameras, using a Growing Neural Gas (GNG) network applied to the 3D data. The goal is to obtain a robust egomotion technique. The GNG network is used to reduce the camera error. To calculate the egomotion, we test two methods for 3D registration. One is based on an iterative closest points algorithm, and the other employs random sample consensus. Finally, a simultaneous localization and mapping method is applied to the complete sequence to reduce the global error. The error from each sensor and the mapping results from the proposed method are examined.}
    }
  • [DOI] D. Viejo and M. Cazorla, “A robust and fast method for 6DoF motion estimation from generalized 3D data,” Autonomous robots, 2014.
    [Bibtex]
    @article{Viejo2014raey,
    author = {Viejo, Diego and Cazorla, Miguel},
    title = {A robust and fast method for {6DoF} motion estimation from generalized {3D} data},
    journal = {Autonomous Robots},
    year = {2014},
    publisher = {Springer US},
    doi = {10.1007/s10514-013-9354-z},
    issn = {0929-5593},
    keywords = {6DoF pose registration; 3D mapping; Mobile robots;}
    }

2013

  • [DOI] S. Orts-Escolano, V. Morell, J. Garcia-Rodriguez, and M. Cazorla, “Point cloud data filtering and downsampling using growing neural gas,” in Neural networks (ıjcnn), the 2013 ınternational joint conference on, 2013, pp. 1-8.
    [Bibtex]
    Reviewer note: auto-exported IEEE record; the citation key looks like the publisher's article number -- kept unchanged so existing citations keep working.
    @inproceedings{6706719,
    abstract = {3D sensors provide valuable information for mobile robotic tasks like scene classification or object recognition, but these sensors often produce noisy data that makes impossible applying classical keypoint detection and feature extraction techniques. Therefore, noise removal and downsampling have become essential steps in 3D data processing. In this work, we propose the use of a 3D filtering and downsampling technique based on a Growing Neural Gas (GNG) network. GNG method is able to deal with outliers presents in the input data. These features allows to represent 3D spaces, obtaining an induced Delaunay Triangulation of the input space. Experiments show how GNG method yields better input space adaptation to noisy data than other filtering and downsampling methods like Voxel Grid. It is also demonstrated how the state-of-the-art keypoint detectors improve their performance using filtered data with GNG network. Descriptors extracted on improved keypoints perform better matching in robotics applications as 3D scene registration.},
    author = {Orts-Escolano, S and Morell, V and Garcia-Rodriguez, J and Cazorla, M},
    booktitle = {Neural Networks (IJCNN), The 2013 International Joint Conference on},
    doi = {10.1109/IJCNN.2013.6706719},
    issn = {2161-4393},
    keywords = {feature extraction;image classification;image registration},
    pages = {1--8},
    title = {{Point cloud data filtering and downsampling using growing neural gas}},
    year = {2013}
    }
  • J. Navarrete, D. Viejo, and M. Cazorla, “Portable 3D laser-camera calibration system with color fusion for SLAM,” International journal of automation and smart technology, vol. 3, iss. 1, 2013.
    [Bibtex]
    NOTE(review): no page range recorded for this journal article -- confirm against the published issue. Stray leading space trimmed from the abstract value.
    @article{AUSMT163,
    abstract = {Nowadays, the use of RGB-D sensors have focused a lot of research in computer vision and robotics. These kinds of sensors, like Kinect, allow to obtain 3D data together with color information. However, their working range is limited to less than 10 meters, making them useless in some robotics applications, like outdoor mapping. In these environments, 3D lasers, working in ranges of 20-80 meters, are better. But 3D lasers do not usually provide color information. A simple 2D camera can be used to provide color information to the point cloud, but a calibration process between camera and laser must be done. In this paper we present a portable calibration system to calibrate any traditional camera with a 3D laser in order to assign color information to the 3D points obtained. Thus, we can use laser precision and simultaneously make use of color information. Unlike other techniques that make use of a three-dimensional body of known dimensions in the calibration process, this system is highly portable because it makes use of small catadioptrics that can be placed in a simple manner in the environment. We use our calibration system in a 3D mapping system, including Simultaneous Location and Mapping (SLAM), in order to get a 3D colored map which can be used in different tasks. We show that an additional problem arises: 2D cameras information is different when lighting conditions change. So when we merge 3D point clouds from two different views, several points in a given neighborhood could have different color information. A new method for color fusion is presented, obtaining correct colored maps. The system will be tested by applying it to 3D reconstruction.},
    author = {Navarrete, Javier and Viejo, Diego and Cazorla, Miguel},
    issn = {2223-9766},
    journal = {International Journal of Automation and Smart Technology},
    keywords = {2D-3D calibration; RGB-D information; color fusion},
    number = {1},
    title = {{Portable 3D laser-camera calibration system with color fusion for SLAM}},
    url = {http://www.ausmt.org/index.php/AUSMT/article/view/163},
    volume = {3},
    year = {2013}
    }
  • B. Boom, S. Orts-Escolano, X. Ning, S. McDonagh, P. Sandilands, and R. Fisher, “Point light source estimation based on scenes recorded by a rgb-d camera,” in Proceedings of the british machine vision conference, 2013.
    [Bibtex]
    Review: "editors" is not a recognised BibTeX field (it is silently ignored, losing the editor list); renamed to "editor". Trailing spaces trimmed inside the author and title values.
    @InProceedings{Boom2013,
    author = {Bas Boom and Sergio Orts-Escolano and Xi Ning and Steven McDonagh and Peter Sandilands and Robert Fisher},
    title = {Point Light Source Estimation based on Scenes Recorded by a RGB-D camera},
    booktitle = {Proceedings of the British Machine Vision Conference},
    year = {2013},
    publisher = {BMVA Press},
    editor = {Burghardt, Tilo and Damen, Dima and Mayol-Cuevas, Walterio and Mirmehdi, Majid}
    }
  • [DOI] B. Caputo, H. Muller, B. Thomee, M. Villegas, R. Paredes, D. Zellhofer, H. Goeau, A. Joly, P. Bonnet, J. M. Gomez, I. G. Varea, and M. Cazorla, “ImageCLEF 2013: The Vision, the Data and the Open Challenges,” in Information access evaluation. multilinguality, multimodality, and visualization, P. Forner, H. Muller, R. Paredes, P. Rosso, and B. Stein, Eds., Springer Berlin Heidelberg, 2013, vol. 8138, pp. 250-268.
    [Bibtex]
    Reviewer note: the keywords value appears truncated by the exporter ("...Artificial In"); confirm the full keyword list against the publisher record. Kept byte-identical otherwise.
    @incollection{Caputo:2013aa,
    abstract = {This paper presents an overview of the ImageCLEF 2013 lab. Since its first edition in 2003, ImageCLEF has become one of the key initiatives promoting the benchmark evaluation of algorithms for the cross-language annotation and retrieval of images in various domains, such as public and personal images, to data acquired by mobile robot platforms and botanic collections. Over the years, by providing new data collections and challenging tasks to the community of interest, the ImageCLEF lab has achieved an unique position in the multi lingual image annotation and retrieval research landscape. The 2013 edition consisted of three tasks: the photo annotation and retrieval task, the plant identification task and the robot vision task. Furthermore, the medical annotation task, that traditionally has been under the ImageCLEF umbrella and that this year celebrates its tenth anniversary, has been organized in conjunction with AMIA for the first time. The paper describes the tasks and the 2013 competition, giving an unifying perspective of the present activities of the lab while discussion the future challenges and opportunities.},
    author = {Barbara Caputo and Henning Muller and Bart Thomee and Mauricio Villegas and Roberto Paredes and David Zellhofer and Herve Goeau and Alexis Joly and Pierre Bonnet and Jesus Martinez Gomez and Ismael Garcia Varea and Miguel Cazorla},
    booktitle = {Information Access Evaluation. Multilinguality, Multimodality, and Visualization},
    doi = {10.1007/978-3-642-40802-1_26},
    editor = {Forner, Pamela and Muller, Henning and Paredes, Roberto and Rosso, Paolo and Stein, Benno},
    isbn = {978-3-642-40801-4},
    keywords = {Language Translation and Linguistics Artificial In},
    pages = {250--268},
    publisher = {Springer Berlin Heidelberg},
    series = {Lecture Notes in Computer Science},
    title = {{ImageCLEF 2013: The Vision, the Data and the Open Challenges}},
    url = {http://dx.doi.org/10.1007/978-3-642-40802-1{\_}26},
    volume = {8138},
    year = {2013}
    }
  • I. Garcia-Varea, M. Cazorla, J. Martinez-Gomez, and B. Caputo, “Overview of the ImageCLEF 2013 Robot Vision Task,” in Working notes, clef 2013, 2013.
    [Bibtex]
    Review: the EPFL record id was stored in "number" (the journal-issue field); moved to "note" so styles do not print it as an issue number.
    @inproceedings{garcia2013overview,
    author = {Garcia-Varea, Ismael and Cazorla, Miguel and Martinez-Gomez, Jesus and Caputo, Barbara},
    booktitle = {Working Notes, CLEF 2013},
    note = {EPFL-CONF-192517},
    title = {{Overview of the ImageCLEF 2013 Robot Vision Task}},
    year = {2013}
    }
  • [DOI] J. Garcia-Rodriguez, M. Cazorla, S. Orts-Escolano, and V. Morell, “Improving 3D Keypoint Detection from Noisy Data Using Growing Neural Gas,” in Advances in computational intelligence, I. Rojas, G. Joya, and J. Cabestany, Eds., Springer Berlin Heidelberg, 2013, vol. 7903, pp. 480-487.
    [Bibtex]
    Review: first author's given name was mangled by the export ("Jos?$\backslash$copyright"); restored to the unaccented "Jose" used for the same author throughout this file.
    @incollection{Garcia-Rodriguez:2013aa,
    abstract = {3D sensors provides valuable information for mobile robotic tasks like scene classification or object recognition, but these sensors often produce noisy data that makes impossible applying classical keypoint detection and feature extraction techniques. Therefore, noise removal and downsampling have become essential steps in 3D data processing. In this work, we propose the use of a 3D filtering and down-sampling technique based on a Growing Neural Gas (GNG) network. GNG method is able to deal with outliers presents in the input data. These features allows to represent 3D spaces, obtaining an induced Delaunay Triangulation of the input space. Experiments show how the state-of-the-art keypoint detectors improve their performance using GNG output representation as input data. Descriptors extracted on improved keypoints perform better matching in robotics applications as 3D scene registration.},
    author = {Garcia-Rodriguez, Jose and Cazorla, Miguel and Orts-Escolano, Sergio and Morell, Vicente},
    booktitle = {Advances in Computational Intelligence},
    doi = {10.1007/978-3-642-38682-4_51},
    editor = {Rojas, Ignacio and Joya, Gonzalo and Cabestany, Joan},
    isbn = {978-3-642-38681-7},
    keywords = {GNG; Noisy Point Cloud; Visual Features; Keypoint},
    pages = {480--487},
    publisher = {Springer Berlin Heidelberg},
    series = {Lecture Notes in Computer Science},
    title = {{Improving 3D Keypoint Detection from Noisy Data Using Growing Neural Gas}},
    url = {http://dx.doi.org/10.1007/978-3-642-38682-4{\_}51},
    volume = {7903},
    year = {2013}
    }
  • [DOI] A. Jimeno-Morenilla, J. Garcia-Rodriguez, S. Orts-Escolano, and M. Davia-Aracil, “3d-based reconstruction using growing neural gas landmark: application to rapid prototyping in shoe last manufacturing,” The international journal of advanced manufacturing technology, vol. 69, iss. 1, pp. 657-668, 2013.
    [Bibtex]
    Reviewer note: complete Springer record (doi, issn, volume/number/pages all present); kept byte-identical.
    @Article{Jimeno-Morenilla2013,
    author = {Jimeno-Morenilla, Antonio and Garcia-Rodriguez, Jose and Orts-Escolano, Sergio and Davia-Aracil, Miguel},
    title = {3D-based reconstruction using growing neural gas landmark: application to rapid prototyping in shoe last manufacturing},
    journal = {The International Journal of Advanced Manufacturing Technology},
    year = {2013},
    volume = {69},
    number = {1},
    pages = {657--668},
    abstract = {Customizing shoe manufacturing is one of the great challenges in the footwear industry. It is a production model change where design adopts not only the main role, but also the main bottleneck. It is therefore necessary to accelerate this process by improving the accuracy of current methods. Rapid prototyping techniques are based on the reuse of manufactured footwear lasts so that they can be modified with CAD systems leading rapidly to new shoe models. In this work, we present a shoe last fast reconstruction method that fits current design and manufacturing processes. The method is based on the scanning of shoe last obtaining sections and establishing a fixed number of landmarks onto those sections to reconstruct the shoe last 3D surface. Automated landmark extraction is accomplished through the use of the self-organizing network, the growing neural gas (GNG), which is able to topographically map the low dimensionality of the network to the high dimensionality of the contour manifold without requiring a priori knowledge of the input space structure. Moreover, our GNG landmark method is tolerant to noise and eliminates outliers. Our method accelerates up to 12 times the surface reconstruction and filtering processes used by the current shoe last design software. The proposed method offers higher accuracy compared with methods with similar efficiency as voxel grid.},
    doi = {10.1007/s00170-013-5061-3},
    issn = {1433-3015},
    url = {http://dx.doi.org/10.1007/s00170-013-5061-3}
    }
  • A. Romero and M. Cazorla, “Learning Multi-class Topological Mapping using Visual Information.,” in Visapp (2), 2013, pp. 316-321.
    [Bibtex]
    @inproceedings{Romero2013Learning,
    author    = {Romero, Anna and Cazorla, Miguel},
    title     = {{Learning Multi-class Topological Mapping using Visual Information.}},
    booktitle = {VISAPP (2)},
    year      = {2013},
    pages     = {316--321}
    }

2012

  • [DOI] B. Bonev, M. Cazorla, F. Martin, and V. Matellan, “Portable autonomous walk calibration for 4-legged robots,” Applied intelligence, vol. 36, iss. 1, pp. 136-147, 2012.
    [Bibtex]
    Reviewer note: the keywords value appears truncated by the exporter ("...Aut"); confirm the full keyword list against the publisher record. Kept byte-identical otherwise.
    @article{Bonev:2012aa,
    abstract = {In the present paper we describe an efficient and portable optimization method for calibrating the walk parameters of a quadruped robot, and its contribution for the robot control and localization. The locomotion of a legged robot presents not only the problem of maximizing the speed, but also the problem of obtaining a precise speed response, and achieving an acceptable odometry information. In this study we use a simulated annealing algorithm for calibrating different parametric sets for different speed ranges, with the goal of avoiding discontinuities. The results are applied to the robot AIBO in the RoboCup domain. Moreover, we outline the relevance of calibration to the control, showing the improvement obtained in odometry and, as a consequence, in robot localization.},
    author = {Bonev, Boyan and Cazorla, Miguel and Martin, Francisco and Matellan, Vicente},
    doi = {10.1007/s10489-010-0249-9},
    issn = {0924-669X},
    journal = {Applied Intelligence},
    keywords = {Legged locomotion; Walk parameters estimation; Aut},
    number = {1},
    pages = {136--147},
    publisher = {Springer US},
    title = {{Portable autonomous walk calibration for 4-legged robots}},
    url = {http://dx.doi.org/10.1007/s10489-010-0249-9},
    volume = {36},
    year = {2012}
    }
  • V. Morell, M. Cazorla, D. Viejo, S. Orts-Escolano, and J. Garcia-Rodriguez, “A study of registration techniques for 6DoF SLAM,” in Ccia, 2012, pp. 143-150.
    [Bibtex]
    Review: this entry repeats the key Morell2012A -- an identical record (with the correct single-hyphen "Orts-Escolano") appears again below, and a repeated key is an error for BibTeX/Biber. This copy is disabled by removing the leading at-sign; text outside an entry is ignored, so the data is preserved for reference.
    inproceedings{Morell2012A,
    author = {Morell, Vicente and Cazorla, Miguel and Viejo, Diego and Orts--Escolano, Sergio and Garcia-Rodriguez, Jose},
    booktitle = {CCIA},
    editor = {Riano, David and Onaindia, Eva and Cazorla, Miguel},
    isbn = {978-1-61499-138-0},
    pages = {143--150},
    publisher = {IOS Press},
    series = {Frontiers in Artificial Intelligence and Applications},
    title = {{A study of registration techniques for 6DoF SLAM}},
    volume = {248},
    year = {2012}
    }
  • V. Morell, M. Cazorla, D. Viejo, S. Orts-Escolano, and J. Garcia-Rodriguez, “A study of registration techniques for 6DoF SLAM,” in Ccia, 2012, pp. 143-150.
    [Bibtex]
    @inproceedings{Morell2012A,
    author    = {Morell, Vicente and Cazorla, Miguel and Viejo, Diego and Orts-Escolano, Sergio and Garcia-Rodriguez, Jose},
    title     = {{A study of registration techniques for 6DoF SLAM}},
    booktitle = {CCIA},
    editor    = {Riano, David and Onaindia, Eva and Cazorla, Miguel},
    series    = {Frontiers in Artificial Intelligence and Applications},
    volume    = {248},
    pages     = {143--150},
    publisher = {IOS Press},
    isbn      = {978-1-61499-138-0},
    year      = {2012}
    }
  • J. Munoz, D. Pastor, P. Gil Vazquez, S. Puente Mendez, and M. Cazorla, “A study of 2D features for 3D visual SLAM,” in 43rd international symposium on robotics, 2012.
    [Bibtex]
    Review: ordinal fixed in the booktitle ("43th" -> "43rd").
    @inproceedings{munoz2012study,
    author = {Jose Munoz and Daniel Pastor and Pablo {Gil Vazquez} and Santiago {Puente Mendez} and Miguel Cazorla},
    booktitle = {43rd International Symposium on Robotics},
    title = {{A study of 2D features for 3D visual SLAM}},
    year = {2012}
    }
  • J. Munoz, D. Pastor, P. Gil, S. T. Puente Mendez, and M. Cazorla, “Using a RGB-D camera for 6DoF SLAM.,” in Ccia, 2012, pp. 143-150.
    [Bibtex]
    Review: the fourth author's surname was mis-parsed ("Mendez, Santiago T Puente"); restored the compound surname "Puente Mendez" used for the same author in munoz2012study. Dropped the exporter artifact keyword "dblp".
    @inproceedings{MunozPGMC12,
    author = {Munoz, Jose and Pastor, Daniel and Gil, Pablo and {Puente Mendez}, Santiago T. and Cazorla, Miguel},
    booktitle = {CCIA},
    editor = {Riano, David and Onaindia, Eva and Cazorla, Miguel},
    isbn = {978-1-61499-138-0},
    pages = {143--150},
    publisher = {IOS Press},
    series = {Frontiers in Artificial Intelligence and Applications},
    title = {{Using a RGB-D camera for 6DoF SLAM.}},
    url = {http://dblp.uni-trier.de/db/conf/ccia/ccia2012.html{\#}MunozPGMC12},
    volume = {248},
    year = {2012}
    }
  • J. Navarrete-Sanchez, D. Viejo, and M. Cazorla, “Portable 3D laser-camera calibration system with color fusion for SLAM,” in International symposium on robotics, isr, 2012.
    [Bibtex]
    Review: capitalised the proper-noun conference name in booktitle.
    @inproceedings{Navarrete2012Portable,
    author = {Navarrete-Sanchez, Javier and Viejo, Diego and Cazorla, Miguel},
    booktitle = {International Symposium on Robotics, ISR},
    title = {{Portable 3D laser-camera calibration system with color fusion for SLAM}},
    year = {2012}
    }
  • [DOI] S. Orts-Escolano, J. Garcia-Rodriguez, D. Viejo, M. Cazorla, and V. Morell, “GPGPU implementation of growing neural gas: Application to 3D scene reconstruction,” Journal of parallel and distributed computing, vol. 72, iss. 10, pp. 1361-1372, 2012.
    [Bibtex]
    Review: DOI stored bare (resolver prefix removed). NOTE(review): the speed-up figure in the abstract is garbled by the export (reads "6 ?o"); confirm against the paper before correcting.
    @Article{Orts20121361,
    author = {Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Viejo, Diego and Cazorla, Miguel and Morell, Vicente},
    title = {{GPGPU implementation of growing neural gas: Application to 3D scene reconstruction}},
    journal = {Journal of Parallel and Distributed Computing},
    year = {2012},
    volume = {72},
    number = {10},
    pages = {1361--1372},
    abstract = {Self-organising neural models have the ability to provide a good representation of the input space. In particular the Growing Neural Gas (GNG) is a suitable model because of its flexibility, rapid adaptation and excellent quality of representation. However, this type of learning is time-consuming, especially for high-dimensional input data. Since real applications often work under time constraints, it is necessary to adapt the learning process in order to complete it in a predefined time. This paper proposes a Graphics Processing Unit (GPU) parallel implementation of the {\{}GNG{\}} with Compute Unified Device Architecture (CUDA). In contrast to existing algorithms, the proposed {\{}GPU{\}} implementation allows the acceleration of the learning process keeping a good quality of representation. Comparative experiments using iterative, parallel and hybrid implementations are carried out to demonstrate the effectiveness of {\{}CUDA{\}} implementation. The results show that {\{}GNG{\}} learning with the proposed implementation achieves a speed-up of 6 ?{\{}o{\}} compared with the single-threaded {\{}CPU{\}} implementation. {\{}GPU{\}} implementation has also been applied to a real application with time constraints: acceleration of 3D scene reconstruction for egomotion, in order to validate the proposal.},
    doi = {10.1016/j.jpdc.2012.05.008},
    issn = {0743-7315},
    keywords = {Egomotion},
    url = {http://www.sciencedirect.com/science/article/pii/S0743731512001268}
    }
  • S. Orts-Escolano, J. Garcia-Rodriguez, D. Viejo, M. Cazorla, V. Morell, and J. Serra, “6DoF pose estimation using Growing Neural Gas Network,” in Proceedings of 5th international conference on cognitive systems, 2012.
    [Bibtex]
    Review: capitalised "Proceedings" in the booktitle and fixed the doubled hyphen in "Orts--Escolano" (the surname is hyphenated, not a dash; matches the spelling used elsewhere in this file).
    @inproceedings{Orts2012b,
    author = {Orts-Escolano, Sergio and Garcia-Rodriguez, Jose and Viejo, Diego and Cazorla, Miguel and Morell, Vicente and Serra, Jose},
    booktitle = {Proceedings of 5th International Conference on Cognitive Systems},
    title = {{6DoF pose estimation using Growing Neural Gas Network}},
    year = {2012}
    }
  • A. Romero and M. Cazorla, “Finding nodes into a topological map using visual features,” in International symposium on robotics, isr, 2012.
    [Bibtex]
    Review: capitalised the proper-noun conference name in booktitle.
    @inproceedings{Romero2012Finding,
    author = {Romero, Anna and Cazorla, Miguel},
    booktitle = {International Symposium on Robotics, ISR},
    title = {{Finding nodes into a topological map using visual features}},
    year = {2012}
    }
  • A. Romero and M. Cazorla, “Learning Multi-class Topological Mapping using Visual Information,” in Ccia, 2012, pp. 143-150.
    [Bibtex]
    @inproceedings{Romero2012Learning,
    author    = {Romero, Anna and Cazorla, Miguel},
    title     = {{Learning Multi-class Topological Mapping using Visual Information}},
    booktitle = {CCIA},
    editor    = {Riano, David and Onaindia, Eva and Cazorla, Miguel},
    series    = {Frontiers in Artificial Intelligence and Applications},
    volume    = {248},
    pages     = {143--150},
    publisher = {IOS Press},
    isbn      = {978-1-61499-138-0},
    year      = {2012}
    }
  • A. Romero and M. Cazorla, “Topological visual mapping in robotics,” in Proceedings of the 5th international conference on spatial cognition, 2012.
    [Bibtex]
    @inproceedings{Romero2012Topological,
    author    = {Romero, Anna and Cazorla, Miguel},
    title     = {{Topological visual mapping in robotics}},
    booktitle = {Proceedings of the 5th International Conference on Spatial Cognition},
    year      = {2012}
    }
  • A. Romero and M. Cazorla, “Topological visual mapping in robotics,” Cognitive processing, vol. 3, pp. 305–308, 2012.
    [Bibtex]
    Review: "305--308" is a page range, not an issue number; moved from "number" to "pages". NOTE(review): volume "3" looks low for Cognitive Processing in 2012 -- confirm against the published issue.
    @Article{Romero2012Cognitive,
    author = {Romero, Anna and Cazorla, Miguel},
    title = {Topological visual mapping in robotics},
    journal = {Cognitive Processing},
    year = {2012},
    volume = {3},
    pages = {305--308}
    }
  • [DOI] J. Salinas, M. de la Iglesia-Vaya, L. Bonmati, R. Valenzuela, and M. Cazorla, “R & D Cloud CEIB: Management System and Knowledge Extraction for Bioimaging in the Cloud,” in Distributed computing and artificial intelligence, S. Omatu, J. F. De Paz Santana, S. R. Gonzalez, J. M. Molina, A. M. Bernardos, and J. C. M. Rodriguez, Eds., Springer Berlin Heidelberg, 2012, vol. 151, pp. 331-338.
    [Bibtex]
    Reviewer note: the abstract spans two paragraphs exactly as exported; given names are fused by the exporter ("JoseMaria", "LuisMarti") -- confirm against the publisher record before separating them. Kept byte-identical.
    @incollection{Salinas:2012aa,
    abstract = {The management system and knowledge extraction of bioimaging in the cloud (R {\&} D Cloud CEIB) which is proposed in this article will use the services offered by the centralization of bioimaging through Valencian Biobank Medical Imaging (GIMC in Spanish) as a basis for managing and extracting knowledge from a bioimaging bank, providing that knowledge as services with high added value and expertise to the Electronic Patient History System (HSE), thus bringing the results of R {\&} D to the patient, improving the quality of the information contained therein. R {\&} D Cloud CEIB has four general modules: Search engine (SE), manager of clinical trials (GEBID), anonymizer (ANON) and motor knowledge (BIKE). The BIKE is the central module and through its sub modules analyses and generates knowledge to provide to the HSE through services. The technology used in R {\&} D Cloud CEIB is completely based on Open Source.
    Within the BIKE, we focus on the development of the classifier module (BIKEClassifier), which aims to establish a method for the extraction of biomarkers for bioimaging and subsequent analysis to obtain a classification in bioimaging available pools following GIMC diagnostic experience.},
    author = {Salinas, JoseMaria and de la Iglesia-Vaya, Maria and Bonmati, LuisMarti and Valenzuela, Rosa and Cazorla, Miguel},
    booktitle = {Distributed Computing and Artificial Intelligence},
    doi = {10.1007/978-3-642-28765-7_39},
    editor = {Omatu, Sigeru and {De Paz Santana}, Juan F and Gonzalez, Sara Rodriguez and Molina, Jose M and Bernardos, Ana M and Rodriguez, Juan M Corchado},
    isbn = {978-3-642-28764-0},
    pages = {331--338},
    publisher = {Springer Berlin Heidelberg},
    series = {Advances in Intelligent and Soft Computing},
    title = {{R {\&} D Cloud CEIB: Management System and Knowledge Extraction for Bioimaging in the Cloud}},
    url = {http://dx.doi.org/10.1007/978-3-642-28765-7{\_}39},
    volume = {151},
    year = {2012}
    }
  • J. M. Salinas, M. de la Iglesia-Vaya, and M. Cazorla, “R & D Cloud CEIB,” in Proceedings of the ieee international conference on biomedical engineering and biotechnology, 2012.
    [Bibtex]
    Review: second author's name was mis-parsed ("{la Iglesia Vaya}, Maria De"); restored to the form used for the same author in Salinas:2012aa.
    @inproceedings{Salinas2012c,
    author = {Salinas, Jose Maria and de la Iglesia-Vaya, Maria and Cazorla, Miguel},
    booktitle = {Proceedings of the IEEE International Conference on Biomedical Engineering and Biotechnology},
    title = {{R {\&} D Cloud CEIB}},
    year = {2012}
    }
  • [DOI] D. Viejo, J. Garcia, M. Cazorla, D. Gil, and M. Johnsson, “Using GNG to improve 3D feature extraction. Application to 6DoF egomotion,” Neural networks, vol. 32, pp. 138-146, 2012.
    [Bibtex]
    Review: DOI stored bare (resolver prefix removed); dropped the meaningless exporter artifact "number = 0".
    @article{Viejo2012138,
    abstract = {Several recent works deal with 3D data in mobile robotic problems, e.g. mapping or egomotion. Data comes from any kind of sensor such as stereo vision systems, time of flight cameras or 3D lasers, providing a huge amount of unorganized 3D data. In this paper, we describe an efficient method to build complete 3D models from a Growing Neural Gas (GNG). The {\{}GNG{\}} is applied to the 3D raw data and it reduces both the subjacent error and the number of points, keeping the topology of the 3D data. The {\{}GNG{\}} output is then used in a 3D feature extraction method. We have performed a deep study in which we quantitatively show that the use of {\{}GNG{\}} improves the 3D feature extraction method. We also show that our method can be applied to any kind of 3D data. The 3D features obtained are used as input in an Iterative Closest Point (ICP)-like method to compute the 6DoF movement performed by a mobile robot. A comparison with standard {\{}ICP{\}} is performed, showing that the use of {\{}GNG{\}} improves the results. Final results of 3D mapping from the egomotion calculated are also shown.},
    annote = {Selected Papers from {\{}IJCNN{\}} 2011},
    author = {Viejo, Diego and Garcia, Jose and Cazorla, Miguel and Gil, David and Johnsson, Magnus},
    doi = {10.1016/j.neunet.2012.02.014},
    issn = {0893-6080},
    journal = {Neural Networks},
    keywords = {6DoF registration},
    pages = {138--146},
    title = {{Using GNG to improve 3D feature extraction. Application to 6DoF egomotion}},
    url = {http://www.sciencedirect.com/science/article/pii/S0893608012000433},
    volume = {32},
    year = {2012}
    }
  • [DOI] D. Viejo, J. Garcia-Rodriguez, and M. Cazorla, “A study of a soft computing based method for 3D scenario reconstruction,” Applied soft computing, vol. 12, iss. 10, pp. 3158-3164, 2012.
    [Bibtex]
    Review: DOI stored bare (resolver prefix removed).
    @article{Viejo20123158,
    abstract = {Several recent works deal with 3D data in mobile robotic problems, e.g., mapping. Data comes from any kind of sensor (time of flight, Kinect or 3D lasers) that provide a huge amount of unorganized 3D data. In this paper we detail an efficient approach to build complete 3D models using a soft computing method, the Growing Neural Gas (GNG). As neural models deal easily with noise, imprecision, uncertainty or partial data, {\{}GNG{\}} provides better results than other approaches. The {\{}GNG{\}} obtained is then applied to a sequence. We present a comprehensive study on {\{}GNG{\}} parameters to ensure the best result at the lowest time cost. From this {\{}GNG{\}} structure, we propose to calculate planar patches and thus obtaining a fast method to compute the movement performed by a mobile robot by means of a 3D models registration algorithm. Final results of 3D mapping are also shown.},
    author = {Viejo, Diego and Garcia-Rodriguez, Jose and Cazorla, Miguel},
    doi = {10.1016/j.asoc.2012.05.025},
    issn = {1568-4946},
    journal = {Applied Soft Computing},
    keywords = {3D feature extraction},
    number = {10},
    pages = {3158--3164},
    title = {{A study of a soft computing based method for 3D scenario reconstruction}},
    url = {http://www.sciencedirect.com/science/article/pii/S1568494612002803},
    volume = {12},
    year = {2012}
    }
  • D. Viejo and M. Cazorla, “A framework for managing heterogenous sensor data in a single map,” in Ieee intelligent vehicles symposium, 2012.
    [Bibtex]
    @inproceedings{Viejo20123A,
    author    = {Viejo, Diego and Cazorla, Miguel},
    title     = {{A framework for managing heterogenous sensor data in a single map}},
    booktitle = {IEEE Intelligent Vehicles Symposium},
    year      = {2012}
    }

2011

  • M. A. Cazorla, V. Matellan Olivera, and others, “Special issue about advances in Physical Agents,” 2011.
    [Bibtex]
    NOTE(review): no journal recorded for this article entry; the publisher "Red de Agentes Fisicos" suggests Journal of Physical Agents -- confirm before adding. Fixed the et-al token ("Others" must be the literal lowercase "others") and capitalised the given name.
    @article{cazorla2011special,
    author = {Cazorla, Miguel Angel and {Matellan Olivera}, Vicente and others},
    publisher = {Red de Agentes Fisicos},
    title = {{Special issue about advances in Physical Agents}},
    year = {2011}
    }
  • M. Cazorla and A. Romero, “VIDEO LECTURES++: Combining information and interaction in an open source framework,” in Inted2011 proceedings, 2011, pp. 4034-4040.
    [Bibtex]
    Review: expanded abbreviated given names ("Cazorla, M", "Romero, A") to the full forms used for the same authors elsewhere in this file.
    @inproceedings{cazorla2011video,
    author = {Cazorla, Miguel and Romero, Anna},
    booktitle = {INTED2011 Proceedings},
    pages = {4034--4040},
    publisher = {IATED},
    title = {{VIDEO LECTURES++: Combining information and interaction in an open source framework}},
    year = {2011}
    }
  • D. Gil, J. Garcia, M. Cazorla, and M. Johnsson, “Predictions tasks with words and sequences: Comparing a novel recurrent architecture with the Elman network,” in Neural networks (ijcnn), the 2011 international joint conference on, 2011, pp. 1207-1213.
    [Bibtex]
    Review: expanded "Garcia, J" to the full name "Garcia, Jose" used for the same author in the companion entries (viejo2011using, viejo20116dof).
    @inproceedings{gil2011predictions,
    author = {Gil, David and Garcia, Jose and Cazorla, Miguel and Johnsson, Magnus},
    booktitle = {Neural Networks (IJCNN), The 2011 International Joint Conference on},
    organization = {IEEE},
    pages = {1207--1213},
    title = {{Predictions tasks with words and sequences: Comparing a novel recurrent architecture with the Elman network}},
    year = {2011}
    }
  • D. Viejo, J. Garcia, and M. Cazorla, “6DoF egomotion computing using 3D GNG-based reconstruction,” in Advances in computational intelligence, Springer Berlin Heidelberg, 2011, pp. 50-57.
    [Bibtex]
    @incollection{viejo20116dof,
    author    = {Viejo, Diego and Garcia, Jose and Cazorla, Miguel},
    title     = {{6DoF egomotion computing using 3D GNG-based reconstruction}},
    booktitle = {Advances in Computational Intelligence},
    publisher = {Springer Berlin Heidelberg},
    pages     = {50--57},
    year      = {2011}
    }
  • D. Viejo, J. Garcia, M. Cazorla, D. Gil, and M. Johnsson, “Using 3D GNG-based reconstruction for 6DoF egomotion,” in Neural networks (ijcnn), the 2011 international joint conference on, 2011, pp. 1042-1048.
    [Bibtex]
    Review: restored acronym capitalisation (3D, GNG, 6DoF) lost in the exported title; the double-braced title prints exactly as written, and the sibling entry viejo20116dof uses the capitalised forms.
    @inproceedings{viejo2011using,
    author = {Viejo, Diego and Garcia, Jose and Cazorla, Miguel and Gil, David and Johnsson, Magnus},
    booktitle = {Neural Networks (IJCNN), The 2011 International Joint Conference on},
    organization = {IEEE},
    pages = {1042--1048},
    title = {{Using 3D GNG-based reconstruction for 6DoF egomotion}},
    year = {2011}
    }
  • D. Viejo, J. Garcia, and M. Cazorla, “Visual features extraction based egomotion calculation from a infrared time-of-flight camera,” in Advances in computational intelligence, Springer Berlin Heidelberg, 2011, pp. 9-16.
    [Bibtex]
    @incollection{viejo2011visual,
    author    = {Viejo, Diego and Garcia, Jose and Cazorla, Miguel},
    title     = {{Visual features extraction based egomotion calculation from a infrared time-of-flight camera}},
    booktitle = {Advances in Computational Intelligence},
    publisher = {Springer Berlin Heidelberg},
    pages     = {9--16},
    year      = {2011}
    }

2010

  • M. Cazorla, D. Viejo, A. Hernandez, J. Nieto, and E. Nebot, “Large Scale Egomotion and Error Analysis with Visual Features,” Journal of physical agents, vol. 4, pp. 19-24, 2010.
    [Bibtex]
    Review: same paper also appears below as CazorlaLargeScale2010 (duplicate under a different key). Key kept so existing citations still resolve; record completed with the issue number and DOI from that entry. Consider consolidating the two keys when citing documents can be updated.
    @article{Cazorla10,
    author = {Cazorla, Miguel and Viejo, Diego and Hernandez, Andres and Nieto, Juan and Nebot, Eduardo},
    journal = {Journal of Physical Agents},
    volume = {4},
    number = {1},
    pages = {19--24},
    doi = {10.14198/JoPha.2010.4.1.04},
    title = {{Large Scale Egomotion and Error Analysis with Visual Features}},
    year = {2010}
    }
  • [DOI] M. Cazorla, D. V. Hernando, A. H. Gutierrez, J. Nieto, and E. Nebot, “Large scale egomotion and error analysis with visual features,” Journal of physical agents, vol. 4, iss. 1, pp. 19-24, 2010.
    [Bibtex]
    @article{CazorlaLargeScale2010,
    author = {Miguel Cazorla and Diego Viejo Hernando and Andres Hernandez Gutierrez and Juan Nieto and Eduardo Nebot},
    title = {Large scale egomotion and error analysis with visual features},
    journal = {Journal of Physical Agents},
    year = {2010},
    volume = {4},
    number = {1},
    pages = {19--24},
    doi = {10.14198/JoPha.2010.4.1.04},
    url = {http://www.jopha.net/article/view/2010-v4-n1-large-scale-egomotion-and-error-analysis-with-visual-features},
    keywords = {Computer vision; Mobile robotics},
    abstract = {Several works deal with 3D data in SLAM problem but many of them are focused on short scale maps. In this paper, we propose a method that can be used for computing the 6DoF trajectory performed by a robot from the stereo images captured during a large scale trajectory. The method transforms robust 2D features extracted from the reference stereo images to the 3D space. These 3D features are then used for obtaining the correct robot movement. Both Sift and Surf methods for feature extraction have been used. Also, a comparison between our method and the results of the ICP algorithm have been performed. We have also made a study about errors in stereo cameras.}
    }
  • M. Cazorla and D. Viejo, “EXPERIENCES USING AN OPEN SOURCE SOFTWARE LIBRARY TO TEACH A COMPUTER VISION SUBJECT,” in Inted2010 proceedings, 2010, pp. 4514-4522.
    [Bibtex]
    @inproceedings{cazorla2010experiences,
    author = {Cazorla, Miguel and Viejo, Diego},
    booktitle = {INTED2010 Proceedings},
    pages = {4514--4522},
    publisher = {IATED},
    title = {Experiences Using an Open Source Software Library to Teach a Computer Vision Subject},
    year = {2010}
    }
  • M. Cazorla and A. Romero, “A NEW FRAMEWORK IN VIDEO LECTURES: ADDING INTERACTION AND ADDITIONAL INFORMATION,” ICERI2010 proceedings, pp. 4593-4598, 2010.
    [Bibtex]
    @inproceedings{cazorla2010new,
    author = {Cazorla, Miguel and Romero, Anna},
    booktitle = {ICERI2010 Proceedings},
    pages = {4593--4598},
    publisher = {IATED},
    title = {A New Framework in Video Lectures: Adding Interaction and Additional Information},
    year = {2010}
    }
  • M. Cazorla, D. Viejo, and C. Pomares, “Study of the SR4000 camera,” in Workshop of physical agents, 2010.
    [Bibtex]
    @inproceedings{cazorla2010study,
    author = {Cazorla, Miguel and Viejo, Diego and Pomares, Cristina},
    booktitle = {Workshop of Physical Agents},
    organization = {Red de Agentes Fisicos},
    title = {Study of the {SR4000} camera},
    year = {2010}
    }
  • M. Cazorla and B. Bonev, “Large Scale Environment Partitioning in Mobile Robotics Recognition Tasks,” Journal of physical agents, vol. 4, iss. 2, 2010.
    [Bibtex]
    @article{JoPhA71,
    abstract = {In this paper we present a scalable machine learning approach to mobile robots visual localization. The applicability of machine learning approaches is constrained by the complexity and size of the problem's domain. Thus, dividing the problem becomes necessary and two essential questions arise: which partition set is optimal for the problem and how to integrate the separate results into a single solution. The novelty of this work is the use of Information Theory for partitioning high-dimensional data. In the presented experiments the domain of the problem is a large sequence of omnidirectional images, each one of them providing a high number of features. A robot which follows the same trajectory has to answer which is the most similar image from the sequence. The sequence is divided so that each partition is suitable for building a simple classifier. The partitions are established on the basis of the information divergence peaks among the images. Measuring the divergence has usually been considered unfeasible in high-dimensional data spaces. We overcome this problem by estimating the Jensen-Renyi divergence with an entropy approximation based on entropic spanning graphs. Finally, the responses of the different classifiers provide a multimodal hypothesis for each incoming image. As the robot is moving, a particle filter is used for attaining the convergence to a unimodal hypothesis.},
    author = {Cazorla, Miguel and Bonev, Boyan},
    issn = {1888-0258},
    journal = {Journal of Physical Agents},
    keywords = {Jensen-Renyi divergence, Visual localization, classifier, entropy, particle filter},
    number = {2},
    title = {Large Scale Environment Partitioning in Mobile Robotics Recognition Tasks},
    url = {http://www.jopha.net/index.php/jopha/article/view/71},
    volume = {4},
    year = {2010}
    }
  • A. Romero and M. Cazorla, “An Improvement of Topological Mapping Using a Graph-Matching Based Method with Omnidirectional Images,” in CCIA, 2010, pp. 311-320.
    [Bibtex]
    @inproceedings{romero2010improvement,
    author = {Romero, Anna and Cazorla, Miguel},
    booktitle = {CCIA},
    pages = {311--320},
    title = {An Improvement of Topological Mapping Using a Graph-Matching Based Method with Omnidirectional Images},
    year = {2010}
    }
  • A. Romero and M. Cazorla, “Testing image segmentation for topological SLAM with omnidirectional images,” in Advances in artificial intelligence, Springer Berlin Heidelberg, 2010, pp. 266-277.
    [Bibtex]
    @incollection{romero2010testing,
    author = {Romero, Anna and Cazorla, Miguel},
    booktitle = {Advances in Artificial Intelligence},
    pages = {266--277},
    publisher = {Springer Berlin Heidelberg},
    title = {Testing image segmentation for topological {SLAM} with omnidirectional images},
    year = {2010}
    }
  • A. Romero and M. Cazorla, “Topological slam using omnidirectional images: Merging feature detectors and graph-matching,” in Advanced concepts for intelligent vision systems, Springer Berlin Heidelberg, 2010, pp. 464-475.
    [Bibtex]
    @incollection{romero2010topological,
    author = {Romero, Anna and Cazorla, Miguel},
    booktitle = {Advanced Concepts for Intelligent Vision Systems},
    pages = {464--475},
    publisher = {Springer Berlin Heidelberg},
    title = {Topological {SLAM} using omnidirectional images: Merging feature detectors and graph-matching},
    year = {2010}
    }
  • [DOI] A. M. {Romero Cortijo}, M. angel Cazorla, and Others, “Topological SLAM using a graph-matching based method on omnidirectional images,” in Advanced concepts for intelligent vision systems – 12th international conference, 2010.
    [Bibtex]
    @inproceedings{romero2010topological1,
    author = {{Romero Cortijo}, Anna Maria and Cazorla, Miguel Angel and others},
    booktitle = {Advanced Concepts for Intelligent Vision Systems - 12th International Conference},
    doi = {10.1007/978-3-642-17688-3_43},
    title = {Topological {SLAM} using a graph-matching based method on omnidirectional images},
    year = {2010}
    }

2009

  • [DOI] M. A. Lozano, F. Escolano, B. Bonev, P. Suau, W. Aguilar, J. M. Saez, and M. A. Cazorla, “Region and Constellations Based Categorization of Images with Unsupervised Graph Learning,” Image vision comput., vol. 27, iss. 7, pp. 960-978, 2009.
    [Bibtex]
    @article{Lozano:2009:RCB:1534927.1534960,
    address = {Newton, MA, USA},
    author = {Lozano, M A and Escolano, F and Bonev, B and Suau, P and Aguilar, W and Saez, J M and Cazorla, M A},
    doi = {10.1016/j.imavis.2008.09.011},
    issn = {0262-8856},
    journal = {Image and Vision Computing},
    keywords = {Clustering of graphs, EM algorithms, Image categorization},
    number = {7},
    pages = {960--978},
    publisher = {Butterworth-Heinemann},
    title = {Region and Constellations Based Categorization of Images with Unsupervised Graph Learning},
    volume = {27},
    year = {2009}
    }
  • J. M. C. Plaza, M. Cazorla, and V. Matellan, “Uso de Simuladores en Docencia de Robotica Movil,” IEEE-RITA, vol. 4, iss. 4, pp. 269-278, 2009.
    [Bibtex]
    @article{Plaza2009Uso,
    author = {Ca{\~n}as Plaza, Jos{\'e} Mar{\'\i}a and Cazorla, Miguel and Matellan, Vicente},
    journal = {IEEE-RITA},
    number = {4},
    pages = {269--278},
    title = {Uso de Simuladores en Docencia de Robotica Movil},
    volume = {4},
    year = {2009}
    }
  • A. M. Romero Cortijo, M. angel Cazorla, and Others, “Comparativa de detectores de caracteristicas visuales y su aplicacion al SLAM.” 2009.
    [Bibtex]
    @inproceedings{romero2009comparativa,
    author = {Romero Cortijo, Anna Maria and Cazorla, Miguel Angel and others},
    title = {Comparativa de detectores de caracteristicas visuales y su aplicacion al {SLAM}},
    year = {2009}
    }

2008

  • [DOI] B. Bonev, F. Escolano, and M. Cazorla, “Feature selection, mutual information, and the classification of high-dimensional patterns,” Pattern analysis and applications, vol. 11, iss. 3-4, pp. 309-319, 2008.
    [Bibtex]
    @article{Bonev:2008aa,
    abstract = {We propose a novel feature selection filter for supervised learning, which relies on the efficient estimation of the mutual information between a high-dimensional set of features and the classes. We bypass the estimation of the probability density function with the aid of the entropic-graphs approximation of R{\'e}nyi entropy, and the subsequent approximation of the Shannon entropy. Thus, the complexity does not depend on the number of dimensions but on the number of patterns/samples, and the curse of dimensionality is circumvented. We show that it is then possible to outperform algorithms which individually rank features, as well as a greedy algorithm based on the maximal relevance and minimal redundancy criterion. We successfully test our method both in the contexts of image classification and microarray data classification. For most of the tested data sets, we obtain better classification results than those reported in the literature.},
    author = {Bonev, Boyan and Escolano, Francisco and Cazorla, Miguel},
    doi = {10.1007/s10044-008-0107-0},
    issn = {1433-7541},
    journal = {Pattern Analysis and Applications},
    keywords = {Filter feature selection; Mutual information; Entropy},
    number = {3-4},
    pages = {309--319},
    publisher = {Springer-Verlag},
    title = {Feature selection, mutual information, and the classification of high-dimensional patterns},
    volume = {11},
    year = {2008}
    }
  • C. Pomares and D. Gallardo, “Discretization of the State Space with a Stochastic Version of the Value Iteration Algorithm,” in IEEE/RSJ international conference on intelligent robots and systems, 2008.
    [Bibtex]
    @inproceedings{pomares2008discretization,
    author = {Pomares, Cristina and Gallardo, Domingo},
    booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems},
    title = {Discretization of the State Space with a Stochastic Version of the Value Iteration Algorithm},
    year = {2008}
    }
  • D. Viejo and M. Cazorla, “3D Model Based Map Building,” in International symposium on robotics, ISR 2008, 2008.
    [Bibtex]
    @inproceedings{Viejo2008,
    author = {Viejo, Diego and Cazorla, Miguel},
    booktitle = {International Symposium on Robotics, ISR 2008},
    title = {{3D} Model Based Map Building},
    year = {2008}
    }
  • D. {Viejo Hernando}, M. angel Cazorla, and Others, “3D Feature Extraction and Modelling for SLAM.” 2008.
    [Bibtex]
    @inproceedings{viejo20083d1,
    author = {{Viejo Hernando}, Diego and Cazorla, Miguel Angel and others},
    title = {{3D} Feature Extraction and Modelling for {SLAM}},
    year = {2008}
    }

2007

  • B. Bonev, F. Escolano, M. A. Lozano, P. Suau, M. Cazorla, and W. Aguilar, “Constellations and the unsupervised learning of graphs,” in Graph-based representations in pattern recognition, Springer Berlin Heidelberg, 2007, pp. 340-350.
    [Bibtex]
    @incollection{bonev2007constellations,
    author = {Bonev, Boyan and Escolano, Francisco and Lozano, Miguel A and Suau, Pablo and Cazorla, Miguel and Aguilar, Wendy},
    booktitle = {Graph-Based Representations in Pattern Recognition},
    pages = {340--350},
    publisher = {Springer Berlin Heidelberg},
    title = {Constellations and the unsupervised learning of graphs},
    year = {2007}
    }
  • B. Bonev, F. Escolano, and M. Cazorla, “A novel information theory method for filter feature selection,” in MICAI 2007: advances in artificial intelligence, Springer Berlin Heidelberg, 2007, pp. 431-440.
    [Bibtex]
    @incollection{bonev2007novel,
    author = {Bonev, Boyan and Escolano, Francisco and Cazorla, Miguel},
    booktitle = {{MICAI} 2007: Advances in Artificial Intelligence},
    pages = {431--440},
    publisher = {Springer Berlin Heidelberg},
    title = {A novel information theory method for filter feature selection},
    year = {2007}
    }
  • B. Bonev, M. Cazorla, and F. Escolano Ruiz, “Robot navigation behaviors based on omnidirectional vision and information theory,” Journal of physical agents, vol. 1, iss. 1, 2007.
    [Bibtex]
    @article{bonev2007robot,
    author = {Bonev, Boyan and Cazorla, Miguel and Escolano Ruiz, Francisco},
    journal = {Journal of Physical Agents},
    publisher = {Red de Agentes Fisicos},
    title = {Robot navigation behaviors based on omnidirectional vision and information theory},
    volume = {1},
    number = {1},
    year = {2007}
    }
  • F. Escolano, B. Bonev, P. Suau, W. Aguilar, Y. Frauel, J. M. Saez, and M. Cazorla, “Contextual visual localization: cascaded submap classification, optimized saliency detection, and fast view matching,” in Intelligent robots and systems, 2007. IROS 2007. IEEE/RSJ international conference on, 2007, pp. 1715-1722.
    [Bibtex]
    @inproceedings{escolano2007contextual,
    author = {Escolano, Francisco and Bonev, Boyan and Suau, Pablo and Aguilar, Wendy and Frauel, Yann and Saez, Juan Manuel and Cazorla, Miguel},
    booktitle = {Intelligent Robots and Systems, 2007. IROS 2007. IEEE/RSJ International Conference on},
    organization = {IEEE},
    pages = {1715--1722},
    title = {Contextual visual localization: cascaded submap classification, optimized saliency detection, and fast view matching},
    year = {2007}
    }
  • C. Pomares and D. Gallardo, “Discretizacion del espacio de estados mediante un algoritmo estocastico de iteracion de valores,” in Conferencia de la asociación española para la inteligencia artificial (CAEPIA), 2007.
    [Bibtex]
    @inproceedings{pomares2007discretizacion,
    author = {Pomares, C and Gallardo, D},
    booktitle = {Conferencia de la Asociaci{\'o}n Espa{\~n}ola para la Inteligencia Artificial (CAEPIA)},
    title = {Discretizacion del espacio de estados mediante un algoritmo estocastico de iteracion de valores},
    year = {2007}
    }
  • [DOI] D. Viejo and M. Cazorla, “3D plane-based egomotion for SLAM on semi-structured environment,” in Intelligent robots and systems, 2007. IROS 2007. IEEE/RSJ international conference on, 2007, pp. 2761-2766.
    [Bibtex]
    @inproceedings{Viejo2007,
    author = {Viejo, Diego and Cazorla, Miguel},
    booktitle = {Intelligent Robots and Systems, 2007. IROS 2007. IEEE/RSJ International Conference on},
    doi = {10.1109/IROS.2007.4399138},
    pages = {2761--2766},
    title = {{3D} plane-based egomotion for {SLAM} on semi-structured environment},
    year = {2007}
    }
  • D. Viejo and M. Cazorla, “Pose Registration Model Improvement: Crease Detection.” 2007.
    [Bibtex]
    @inproceedings{viejo2007pose,
    abstract = {Several works deal with 3D data in SLAM problem. Data come from a 3D laser sweeping unit or a stereo camera, both providing a huge amount of data. In this paper, we detail an efficient method to find out creases from 3D raw data. This information can be used together with planar patches extracted from 3D raw data in order to build a complete 3D model of the scene. Some promising results are shown for both outdoor and indoor environments.},
    author = {Viejo, Diego and Cazorla, Miguel},
    title = {Pose Registration Model Improvement: Crease Detection},
    year = {2007}
    }

2006

  • B. Bonev and M. Cazorla, “Towards autonomous adaptation in visual tasks.,” in Workshop de agentes fisicos, 2006, pp. 59-66.
    [Bibtex]
    @inproceedings{bonev2006towards,
    author = {Bonev, Boyan and Cazorla, Miguel},
    booktitle = {Workshop de Agentes Fisicos},
    pages = {59--66},
    title = {Towards autonomous adaptation in visual tasks},
    year = {2006}
    }
  • B. Bonev, M. Cazorla, and H. Martinez, “Walk calibration in a four-legged robot,” in Climbing and walking robots, Springer Berlin Heidelberg, 2006, pp. 493-500.
    [Bibtex]
    @incollection{bonev2006walk,
    author = {Bonev, Boyan and Cazorla, Miguel and Martinez, Humberto},
    booktitle = {Climbing and Walking Robots},
    pages = {493--500},
    publisher = {Springer Berlin Heidelberg},
    title = {Walk calibration in a four-legged robot},
    year = {2006}
    }
  • D. Herrero-Perez, F. Bas-Esparza, H. Martinez-Barbera, F. Martin, C. E. Aguero, V. M. Gomez, V. Matellan, and M. Cazorla, “Team Chaos 2006,” in Robotics symposium, 2006. LARS’06. IEEE 3rd latin american, 2006, pp. 208-213.
    [Bibtex]
    @inproceedings{herrero2006team,
    author = {Herrero-Perez, D and Bas-Esparza, F and Martinez-Barbera, H and Martin, F and Aguero, C E and Gomez, V M and Matellan, V and Cazorla, M},
    booktitle = {Robotics Symposium, 2006. LARS'06. IEEE 3rd Latin American},
    organization = {IEEE},
    pages = {208--213},
    title = {Team Chaos 2006},
    year = {2006}
    }
  • J. M. {Perez Torres}, D. {Viejo Hernando}, P. {Suau Perez}, M. angel {Lozano Ortega}, O. {Colomina Pardo}, M. angel Cazorla, F. {Escolano Ruiz}, and Others, “Una concepcion moderna de Tecnicas de Inteligencia Artificial en la Universidad de Alicante.” 2006.
    [Bibtex]
    @inproceedings{perez2006concepcion,
    author = {{Perez Torres}, Jose Manuel and {Viejo Hernando}, Diego and {Suau Perez}, Pablo and {Lozano Ortega}, Miguel Angel and {Colomina Pardo}, Otto and Cazorla, Miguel Angel and {Escolano Ruiz}, Francisco and others},
    publisher = {Thomson-Paraninfo},
    title = {Una concepcion moderna de Tecnicas de Inteligencia Artificial en la Universidad de Alicante},
    year = {2006}
    }
  • C. Pomares and D. Gallardo, “Uso de Apache Forrest y CVS para la actualizacion y generacion de material docente de una asignatura,” in Jornadas de ensenanza universitaria de la informatica (JENUI), 2006.
    [Bibtex]
    @inproceedings{pomares2006uso,
    author = {Pomares, C and Gallardo, D},
    booktitle = {Jornadas de Ensenanza Universitaria de la Informatica (JENUI)},
    title = {Uso de {Apache Forrest} y {CVS} para la actualizacion y generacion de material docente de una asignatura},
    year = {2006}
    }
  • D. Viejo and M. Cazorla, “Extraction and error modeling of 3D data: application to SLAM.,” in Workshop de agentes fisicos, 2006, pp. 153-158.
    [Bibtex]
    @inproceedings{viejo2006extraction,
    author = {Viejo, Diego and Cazorla, Miguel},
    booktitle = {Workshop de Agentes Fisicos},
    pages = {153--158},
    title = {Extraction and error modeling of {3D} data: application to {SLAM}},
    year = {2006}
    }
  • D. Viejo and M. Cazorla, “Plane extraction and error modeling of 3d data,” in International symposium on robotics and automation, 2006.
    [Bibtex]
    @inproceedings{viejo2006plane,
    author = {Viejo, Diego and Cazorla, Miguel},
    booktitle = {International Symposium on Robotics and Automation},
    title = {Plane extraction and error modeling of {3D} data},
    year = {2006}
    }

2005

  • B. Bonev, M. Cazorla, and H. Martinez-Barbera, “Parameters optimization for quadruped walking,” in Proc. of the VI workshop de agentes fisicos, granada, Spain, 2005.
    [Bibtex]
    @inproceedings{bonev2005parameters,
    author = {Bonev, Boyan and Cazorla, Miguel and Martinez-Barbera, H},
    booktitle = {Proc. of the VI Workshop de agentes fisicos, Granada, Spain},
    title = {Parameters optimization for quadruped walking},
    year = {2005}
    }
  • M. Cazorla and F. Escolano, “Feature Extraction and Grouping for Robot Vision Tasks,” in Cutting edge robotics, I-Tech, 2005, p. 91.
    [Bibtex]
    @incollection{cazorla2005feature,
    author = {Cazorla, Miguel and Escolano, Francisco},
    booktitle = {Cutting Edge Robotics},
    pages = {91},
    publisher = {I-Tech},
    title = {Feature Extraction and Grouping for Robot Vision Tasks},
    year = {2005}
    }
  • H. Martinez, V. Matellan, M. A. Cazorla, A. Saffiotti, D. Herrero, F. Martín, B. Bonev, and K. LeBlanc, “Team Chaos 2005.” 2005.
    [Bibtex]
    @inproceedings{martinezteam,
    author = {Martinez, H and Matellan, V and Cazorla, M A and Saffiotti, A and Herrero, D and Mart{\'\i}n, F and Bonev, B and LeBlanc, K},
    title = {Team Chaos 2005},
    year = {2005}
    }
  • D. Viejo, J. M. Saez, M. A. Cazorla, and F. Escolano, “Active stereo based compact mapping,” in Intelligent robots and systems, 2005.(IROS 2005). 2005 IEEE/RSJ international conference on, 2005, pp. 529-534.
    [Bibtex]
    @inproceedings{viejo2005active,
    author = {Viejo, Diego and Saez, Juan Manuel and Cazorla, Miguel Angel and Escolano, Francisco},
    booktitle = {Intelligent Robots and Systems, 2005.(IROS 2005). 2005 IEEE/RSJ International Conference on},
    organization = {IEEE},
    pages = {529--534},
    title = {Active stereo based compact mapping},
    year = {2005}
    }

2004

  • D. Viejo and M. A. Cazorla, “Construccion de mapas 3D y extraccion de primitivas geometricas del entorno,” in Proc of 5th workshop de agentes fisicos, 2004.
    [Bibtex]
    @inproceedings{viejo2004construccion,
    author = {Viejo, Diego and Cazorla, M A},
    booktitle = {Proc of 5th Workshop de Agentes Fisicos},
    title = {Construccion de mapas {3D} y extraccion de primitivas geometricas del entorno},
    year = {2004}
    }
  • D. Viejo and M. Cazorla, “Unconstrained 3D-mesh generation applied to map building,” in Progress in pattern recognition, image analysis and applications, Springer Berlin Heidelberg, 2004, pp. 241-248.
    [Bibtex]
    @incollection{viejo2004unconstrained,
    abstract = {3D map building is a complex robotics task which needs mathematical robust models. From a 3D point cloud, we can use the normal vectors to these points to do feature extraction. In this paper, we will present a robust method for normal estimation and unconstrained 3D-mesh generation from a not-uniformly distributed point cloud.},
    author = {Viejo, Diego and Cazorla, Miguel},
    booktitle = {Progress in Pattern Recognition, Image Analysis and Applications},
    pages = {241--248},
    publisher = {Springer Berlin Heidelberg},
    title = {Unconstrained {3D}-mesh generation applied to map building},
    year = {2004}
    }

2003

  • [DOI] M. Cazorla and F. Escolano, “Two Bayesian methods for junction classification,” Image processing, IEEE transactions on, vol. 12, iss. 3, pp. 317-327, 2003.
    [Bibtex]
    @article{1197837,
    abstract = {We propose two Bayesian methods for junction classification which evolve from the Kona method: a region-based method and an edge-based method. Our region-based method computes a one-dimensional (1-D) profile where wedges are mapped to intervals with homogeneous intensity. These intervals are found through a growing-and-merging algorithm driven by a greedy rule. On the other hand, our edge-based method computes a different profile which maps wedge limits to peaks of contrast, and these peaks are found through thresholding followed by nonmaximum suppression. Experimental results show that both methods are more robust and efficient than the Kona method, and also that the edge-based method outperforms the region-based one.},
    author = {Cazorla, M and Escolano, F},
    doi = {10.1109/TIP.2002.806242},
    issn = {1057-7149},
    journal = {Image Processing, IEEE Transactions on},
    keywords = {Bayes methods;edge detection;image classification;},
    number = {3},
    pages = {317--327},
    title = {Two {Bayesian} methods for junction classification},
    volume = {12},
    year = {2003}
    }

2002

  • [DOI] M. Cazorla, F. Escolano, D. Gallardo, and R. Rizo, “Junction detection and grouping with probabilistic edge models and Bayesian A*,” Pattern recognition, vol. 35, iss. 9, pp. 1869-1881, 2002.
    [Bibtex]
    @article{Cazorla20021869,
    abstract = {In this paper, we propose and integrate two Bayesian methods, one of them for junction detection, and the other one for junction grouping. Our junction detection method relies on a probabilistic edge model and a log-likelihood test. Our junction grouping method relies on finding connecting paths between pairs of junctions. Path searching is performed by applying a Bayesian A* algorithm. Such algorithm uses both an intensity and geometric model for defining the rewards of a partial path and prunes those paths with low rewards. We have extended such a pruning with an additional rule which favors the stability of longer paths against shorter ones. We have tested experimentally the efficiency and robustness of the methods in an indoor image sequence.},
    author = {Cazorla, M and Escolano, F and Gallardo, D and Rizo, R},
    doi = {10.1016/S0031-3203(01)00150-9},
    issn = {0031-3203},
    journal = {Pattern Recognition},
    keywords = {Bayesian inference},
    number = {9},
    pages = {1869--1881},
    title = {Junction detection and grouping with probabilistic edge models and {Bayesian} {A*}},
    url = {http://www.sciencedirect.com/science/article/pii/S0031320301001509},
    volume = {35},
    year = {2002}
    }

2001

  • M. angel Cazorla, O. Colomina Pardo, P. Compan Rosique, F. Escolano Ruiz, J. L. Zamora, and Others, “JavaVis: Una libreria para vision artificial en Java.” 2001.
    [Bibtex]
    @inproceedings{cazorla2001javavis,
    author = {Cazorla, Miguel Angel and Colomina Pardo, Otto and Compan Rosique, Patricia and Escolano Ruiz, Francisco and Zamora, Jose Luis and others},
    publisher = {Universitat de les Illes Balears. Servei de Publicacions i Intercanvi Cientific},
    title = {{JavaVis}: Una libreria para vision artificial en {Java}},
    year = {2001}
    }

1999

  • M. angel Cazorla, F. Escolano Ruiz, D. Gallardo Lopez, O. Colomina Pardo, and Others, “A competition-based deformable template for junction extraction.” 1999.
    [Bibtex]
    @inproceedings{cazorla1999competition,
    author = {Cazorla, Miguel Angel and Escolano Ruiz, Francisco and Gallardo Lopez, Domingo and Colomina Pardo, Otto and others},
    title = {A competition-based deformable template for junction extraction},
    year = {1999}
    }
  • M. Cazorla, F. Escolano, D. Gallardo, and R. Rizo, “Bayesian Models for Finding and Grouping Junctions,” in Proc of the emmcvpr99, 1999.
    [Bibtex]
    @inproceedings{Cazorla99a,
    author = {Cazorla, M and Escolano, F and Gallardo, D and Rizo, R},
    booktitle = {Proc of the {EMMCVPR99}},
    series = {Lecture Notes in Computer Science},
    publisher = {Springer},
    title = {Bayesian Models for Finding and Grouping Junctions},
    year = {1999}
    }

1998

  • F. Escolano, M. Cazorla, D. Gallardo, F. Llorens, R. Satorre, and R. Rizo, “A combined probabilistic framework for learning gestures and actions,” in Tasks and methods in applied artificial intelligence, Springer Berlin Heidelberg, 1998, pp. 658-667.
    [Bibtex]
    @incollection{escolano1998combined,
    author = {Escolano, Francisco and Cazorla, Miguel and Gallardo, Domingo and Llorens, Faraon and Satorre, Rosana and Rizo, Ramon},
    booktitle = {Tasks and Methods in Applied Artificial Intelligence},
    pages = {658--667},
    publisher = {Springer Berlin Heidelberg},
    title = {A combined probabilistic framework for learning gestures and actions},
    year = {1998}
    }
  • D. Gallardo, F. Escolano, R. Rizo, O. Colomina, and M. Cazorla, “Estimacion bayesiana de caracteristicas en robots moviles mediante muestreo de la densidad a posteriori,” in Actas del primer congrés català d'intelligència artificial, 1998.
    [Bibtex]
    @inproceedings{gallardo1998estimacion,
    author = {Gallardo, Domingo and Escolano, Francisco and Rizo, Ramon and Colomina, Otto and Cazorla, M},
    booktitle = {Actas del Primer Congr{\'e}s Catal{\`a} d'Intellig{\`e}ncia Artificial},
    title = {Estimacion bayesiana de caracteristicas en robots moviles mediante muestreo de la densidad a posteriori},
    year = {1998}
    }

1997

  • F. Escolano, M. Cazorla, D. Gallardo, and R. Rizo, “Deformable templates for tracking and analysis of intravascular ultrasound sequences,” in Energy minimization methods in computer vision and pattern recognition, Springer Berlin Heidelberg, 1997, pp. 521-534.
    [Bibtex]
    @incollection{escolano1997deformable,
    author = {Escolano, Francisco and Cazorla, Miguel and Gallardo, Domingo and Rizo, Ramon},
    booktitle = {Energy Minimization Methods in Computer Vision and Pattern Recognition},
    pages = {521--534},
    publisher = {Springer Berlin Heidelberg},
    title = {Deformable templates for tracking and analysis of intravascular ultrasound sequences},
    year = {1997}
    }
  • F. Escolano Ruiz, M. Cazorla, and Others, “Estimacion del movimiento coherente: computacion evolutiva como alternativa al annealing determinista.” 1997.
    [Bibtex]
    @inproceedings{escolano1997estimacion,
    author = {Escolano Ruiz, Francisco and Cazorla, Miguel and others},
    title = {Estimacion del movimiento coherente: computacion evolutiva como alternativa al annealing determinista},
    year = {1997}
    }
  • F. Escolano Ruiz, M. angel Cazorla, D. {Gallardo Lopez}, F. Llorens Largo, R. Satorre Cuerda, R. Rizo Aldeguer, and Others, “Plantillas deformables espacio-temporales para el tracking y reconocimiento gestual.” 1997.
    [Bibtex]
    @inproceedings{escolano1997plantillas,
    author = {Escolano Ruiz, Francisco and Cazorla, Miguel Angel and {Gallardo Lopez}, Domingo and Llorens Largo, Faraon and Satorre Cuerda, Rosana and Rizo Aldeguer, Ramon and others},
    title = {Plantillas deformables espacio-temporales para el tracking y reconocimiento gestual},
    year = {1997}
    }
  • I. Sabuco, F. {Escolano Ruiz}, M. angel Cazorla, D. {Gallardo Lopez}, R. {Rizo Aldeguer}, and Others, “Snakes based tracking and texture analysis of microscopic images.” 1997.
    [Bibtex]
    @inproceedings{sabuco1997snakes,
    author = {Sabuco, Isabel and {Escolano Ruiz}, Francisco and Cazorla, Miguel Angel and {Gallardo Lopez}, Domingo and {Rizo Aldeguer}, Ramon and others},
    title = {{Snakes} based tracking and texture analysis of microscopic images},
    year = {1997}
    }

1995

  • M. Cazorla, P. Caceres, F. Escolano, D. Gallardo, and R. Rizo, “Deteccion automatica con Snakes y Representacion 3D sobre imagenes cerebrales,” in VI CAEPIA, 1995, pp. 331-340.
    [Bibtex]
    @inproceedings{cazorla1995deteccion,
    author = {Cazorla, M and Caceres, Pedro and Escolano, Francisco and Gallardo, Domingo and Rizo, Ramon},
    booktitle = {VI CAEPIA},
    pages = {331--340},
    title = {Deteccion automatica con {Snakes} y Representacion {3D} sobre imagenes cerebrales},
    year = {1995}
    }