Hi guys!
I use a RealSense D435 with my Jetson TX2. The depth stream works perfectly at first, but it always stops/freezes after some time. The RGB stream has no problem.
My code runs at around 13 fps while the pipeline is set to 60 fps. I thought this mismatch was the problem, so I tried setting the pipeline to 6 fps. My code did then run at 6 fps, but the same problem still occurs, only later (after 5 to 10 minutes instead of 1 to 2 minutes).
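In case it helps, here is the kind of watchdog I was considering, assuming the freeze shows up as a wait_for_frames timeout (pyrealsense2 raises a RuntimeError in that case). The 5000 ms timeout and the restart-once strategy are just my guesses, not a known fix:

import pyrealsense2 as rs

def grab_with_restart(pipeline, config, timeout_ms=5000):
    # Try to grab a frameset; if the depth stream is frozen,
    # wait_for_frames raises a RuntimeError after timeout_ms
    try:
        return pipeline.wait_for_frames(timeout_ms)
    except RuntimeError:
        # Restart the pipeline once and retry
        print("wait_for_frames timed out, restarting the pipeline...")
        pipeline.stop()
        pipeline.start(config)
        return pipeline.wait_for_frames(timeout_ms)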
Whoever solves this is brilliant! I'm lost…
Here is my code:
# Library imports
import pyrealsense2 as rs  # for the RealSense camera
import numpy as np
import cv2 as cv
import time  # needed to calculate FPS
class Camera:
    '''
    Class that contains basic camera management functions without using threading
    '''

    def __init__(self, src=0):
        # Create a pipeline
        self.pipeline = rs.pipeline()
        # Create a config and configure the pipeline to stream
        # different resolutions of color and depth streams
        self.config = rs.config()
        self.config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16,
                                  60)  # other possible values for the fps are 6, 15, ...
        self.config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8,
                                  60)  # other possible values for the fps are 6, 15, ...
        # Start streaming
        self.profile = self.pipeline.start(self.config)
        # Get the depth sensor's depth scale:
        self.depth_sensor = self.profile.get_device().first_depth_sensor()
        self.depth_scale = self.depth_sensor.get_depth_scale()
        print("Depth Scale is: ", self.depth_scale)
        # Create an align object
        # rs.align allows us to perform alignment of depth frames to other frames
        # "align_to" is the stream type to which we plan to align depth frames
        self.align_to = rs.stream.color
        self.align = rs.align(self.align_to)
    def grab(self):
        # Get a frameset of color and depth
        frames = self.pipeline.wait_for_frames()
        # frames.get_depth_frame() is a 640x360 depth image
        # Align the depth frame to the color frame
        aligned_frames = self.align.process(frames)
        # Get the aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame()  # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()
        # Create the array that will contain the frame.
        # Channels will be: BGRD
        image = np.ones((480, 640, 4))
        image[:, :, :3] = np.asanyarray(color_frame.get_data())  # store the BGR image in the array
        image[:, :, 3] = np.asanyarray(
            aligned_depth_frame.get_data()) * self.depth_scale  # store the depth (in meters) in the fourth channel
        return image
    def read(self):  # used for an easy conversion from the CameraThreading class to this one
        return self.grab()

    def readRGB(self, image):  # isolate the BGR channels and put them in the right format for OpenCV
        return image[:, :, :3].astype(np.uint8)

    def showDepthGreen(self, image):
        # Put the depth (scaled to be visible) in the green channel of a BGR image
        depth = np.ones((480, 640, 3))
        depth[:, :, 1] = image[:, :, 3] * 100
        return depth.astype(np.uint8)

    def stop(self):  # close the pipeline
        self.pipeline.stop()
# If the file is run directly, test the frame grabbing and measure performance
if __name__ == "__main__":
    camera = Camera(0)
    time.sleep(1)
    while cv.waitKey(1) & 0xFF != ord('q'):  # loop as long as q is not pressed
        start = time.time()  # start the timer
        image = camera.grab()  # grab an image
        cv.imshow("frames", camera.readRGB(image))  # show the RGB image
        cv.imshow("depth", camera.showDepthGreen(image))  # show the depth image
        # Report performance:
        print("Took: " + str(time.time() - start) + " s")
        print('FPS: ' + str(1 / (time.time() - start)))
    # Once q is pressed, close the camera and destroy the windows:
    camera.stop()
    cv.destroyAllWindows()
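One more thing I noticed while comparing with Intel's align-depth2color.py example: I never check that the aligned frames are valid before reading them. I don't know if this is related to the freeze, but a checked variant of grab() would look like this (grab_checked is just my name for it, and retrying in a loop is my own guess):

    def grab_checked(self):
        # Same as grab(), but skips framesets where the depth or color
        # frame is missing, as done in Intel's align-depth2color.py
        while True:
            frames = self.pipeline.wait_for_frames()
            aligned_frames = self.align.process(frames)
            aligned_depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()
            if aligned_depth_frame and color_frame:
                break
        image = np.ones((480, 640, 4))
        image[:, :, :3] = np.asanyarray(color_frame.get_data())
        image[:, :, 3] = np.asanyarray(aligned_depth_frame.get_data()) * self.depth_scale
        return image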
Thank you!!
Quentin