/src/morse/sensors/semantic_camera.py

https://github.com/davidhodo/morse
import logging; logger = logging.getLogger("morse." + __name__)

from morse.core import blenderapi
import morse.sensors.camera
import morse.helpers.colors
from morse.helpers import passive_objects
from morse.helpers.components import add_data, add_property


class SemanticCamera(morse.sensors.camera.Camera):
    """
    This sensor emulates a high-level camera that outputs the names of
    the objects that are located within the field of view of the camera.

    The sensor first determines which objects are to be tracked (objects
    marked with a **Logic Property** called ``Object``, cf. the
    documentation on :doc:`passive objects <../others/passive_objects>`
    for more on that). If the ``Label`` property is defined, it is used
    as the exported name; otherwise the Blender object name is used. If
    the ``Type`` property is set, it is exported as well.
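
    Such objects are typically declared in a builder script. A minimal
    sketch (the asset file, object name and property values are
    illustrative placeholders, not part of this module)::

        from morse.builder import *

        table = PassiveObject('props/objects.blend', 'SmallTable')
        table.translate(x=3.5, y=-3.0)
        # 'Object' enables tracking; 'Label' and 'Type' are exported
        table.properties(Object=True, Label="TABLE", Type="table")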

    Then a test is made to identify which of these objects are inside of
    the view frustum of the camera. Finally, a single visibility test is
    performed by casting a ray from the center of the camera to the
    center of each object. If anything other than the tested object is
    hit first by the ray, the object is considered to be occluded by
    something else, even if only its center is being blocked.

    This last check is somewhat costly and can be deactivated by setting
    the sensor property ``noocclusion`` to ``True``.
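
    For instance, occlusion checking could be disabled from a builder
    script. A sketch, assuming a standard builder setup (the robot,
    environment and component choices are illustrative)::

        from morse.builder import *

        robot = ATRV()
        camera = SemanticCamera()
        camera.properties(noocclusion=True)
        robot.append(camera)

        env = Environment('empty')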

    The cameras make use of Blender's **bge.texture** module, which
    requires a graphics card capable of GLSL shading. Also, the 3D view
    window in Blender must be set to draw **Textured** objects.
    """

    _name = "Semantic camera"
    _short_desc = "A smart camera allowing to retrieve objects in its " \
                  "field of view"

    add_data('visible_objects', [], 'list<objects>',
             "A list containing the different objects visible by the camera. \
              Each object is represented by a dictionary composed of: \n\
                 - **name** (String): the name of the object \n\
                 - **type** (String): the type of the object \n\
                 - **position** (vec3<float>): the position of the \
                   object, in meters, in the Blender frame \n\
                 - **orientation** (quaternion): the orientation of the \
                   object, in the Blender frame")
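
    # Illustrative shape of a single 'visible_objects' entry, as built in
    # default_action() below (names and values are placeholders; 'position'
    # and 'orientation' are mathutils Vector and Quaternion instances):
    #
    #   {'name': 'TABLE', 'description': '', 'type': 'table',
    #    'position': Vector((3.5, -3.0, 0.0)),
    #    'orientation': Quaternion((1.0, 0.0, 0.0, 0.0))}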

    add_property('noocclusion', False, 'noocclusion', 'bool', 'Do not check for'
                 ' objects possibly hiding each other (faster but less '
                 'realistic behaviour)')

    def __init__(self, obj, parent=None):
        """ Constructor method.

        Receives the reference to the Blender object.
        The second parameter should be the name of the object's parent.
        """
        logger.info('%s initialization' % obj.name)
        # Call the constructor of the parent class
        super().__init__(obj, parent)

        # Locate the Blender camera object associated with this sensor.
        # Initialize to None first, so that the error check below cannot
        # raise AttributeError when no camera child is found.
        self.blender_cam = None
        main_obj = self.bge_object
        for obj in main_obj.children:
            if hasattr(obj, 'lens'):
                self.blender_cam = obj
                logger.info("Camera object: {0}".format(self.blender_cam))
                break
        if not self.blender_cam:
            logger.error("no camera object associated to the semantic camera. "
                         "The semantic camera requires a standard Blender "
                         "camera in its children.")

        # trackedObjects is a dictionary with the tracked objects
        # (i.e. meshes with the 'Object' logic property set) as keys
        # and the bounding boxes of these objects as values.
        if 'trackedObjects' not in blenderapi.persistantstorage():
            logger.info('Initialization of tracked objects:')
            blenderapi.persistantstorage().trackedObjects = \
                dict.fromkeys(passive_objects.active_objects())

            # Store the bounding box of the marked objects
            for obj in blenderapi.persistantstorage().trackedObjects.keys():
                # bound_box returns the bounding box in local space
                # instead of world space.
                blenderapi.persistantstorage().trackedObjects[obj] = \
                    blenderapi.objectdata(obj.name).bound_box

                details = passive_objects.details(obj)
                logger.info('    - {%s} (type:%s)' %
                            (details['label'], details['type']))

        if self.noocclusion:
            logger.info("Semantic camera running in 'no occlusion' mode (fast mode).")
        logger.info("Component initialized, runs at %.2f Hz", self.frequency)

    def default_action(self):
        """ Do the actual semantic 'grab'.

        Iterate over all the tracked objects, and check if they are
        visible for the robot. Visible objects must have a bounding box
        and be active in the physics simulation (have the 'Actor'
        checkbox selected).
        """
        # Call the action of the parent class
        super().default_action()

        # Rebuild the list of visible objects at each simulation step
        self.local_data['visible_objects'] = []
        for obj in blenderapi.persistantstorage().trackedObjects.keys():
            if self._check_visible(obj):
                # Create a dictionary containing the object name, type,
                # description, position and orientation
                obj_dict = {'name': passive_objects.label(obj),
                            'description': obj.get('Description', ''),
                            'type': obj.get('Type', ''),
                            'position': obj.worldPosition,
                            'orientation': obj.worldOrientation.to_quaternion()}
                self.local_data['visible_objects'].append(obj_dict)

        logger.debug("Visible objects: %s" % self.local_data['visible_objects'])
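
    # A client can read this sensor's output through a middleware such as the
    # socket interface, e.g. with pymorse. A minimal sketch (it assumes a
    # running simulation exposing a robot named 'robot' carrying this sensor
    # as 'camera'; both names are placeholders):
    #
    #   import pymorse
    #
    #   with pymorse.Morse() as simu:
    #       for entry in simu.robot.camera.get()['visible_objects']:
    #           print(entry['name'], entry['position'])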

    def _check_visible(self, obj):
        """ Check if an object lies inside of the camera frustum.

        The behaviour of this method is impacted by the sensor property
        'noocclusion': if True, it only checks that the object is in the
        frustum, and does not check that it is actually visible (i.e.,
        not hidden away by another object).
        """
        # trackedObjects was filled at initialization
        # with the objects' bounding boxes
        bb = blenderapi.persistantstorage().trackedObjects[obj]
        pos = obj.position
        # Translate the local-space bounding box to the current object
        # position. Note that only the translation is applied: the current
        # rotation and scale of the object are not taken into account.
        bbox = [[bb_corner[i] + pos[i] for i in range(3)] for bb_corner in bb]

        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("\n--- NEW TEST ---")
            logger.debug("OBJECT '{0}' AT {1}".format(obj, pos))
            logger.debug("CAMERA '{0}' AT {1}".format(
                self.blender_cam, self.blender_cam.position))
            logger.debug("BBOX: >{0}<".format(bbox))
            logger.debug("BBOX: {0}".format(bb))

        # Check if the translated bounding box is in the view frustum
        if self.blender_cam.boxInsideFrustum(bbox) != self.blender_cam.OUTSIDE:
            if not self.noocclusion:
                # Check that there are no other objects between the camera
                # and the selected object.
                # NOTE: This is a very simple test. Hiding only the 'center'
                # of an object will make it invisible, even if the rest is
                # still seen from the camera.
                closest_obj = self.bge_object.rayCastTo(obj)
                if closest_obj in [obj] + list(obj.children):
                    return True
            else:
                return True

        return False
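
# For reference, a minimal standalone sketch of the BGE frustum test used in
# _check_visible(), runnable from a Python controller inside a running game
# engine scene (the 1 m cube is illustrative):
#
#   from bge import logic
#
#   cam = logic.getCurrentScene().active_camera
#   # the eight corners of a 1 m cube centred at the world origin
#   box = [[x, y, z] for x in (-0.5, 0.5)
#                    for y in (-0.5, 0.5)
#                    for z in (-0.5, 0.5)]
#   if cam.boxInsideFrustum(box) != cam.OUTSIDE:
#       print("The cube is at least partially inside the view frustum")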