Update to socket-based solution

This commit is contained in:
Andrew Johnson
2025-11-09 13:22:41 +01:00
parent 73454a03c8
commit fb44f33bea
4 changed files with 70 additions and 12 deletions

1
.gitignore vendored
View File

@@ -87,3 +87,4 @@ cython_debug/
.devcontainer/devcontainer.json
output/
artifacts/

View File

@@ -20,12 +20,16 @@ tello.streamoff()
# Prepare directory to save the captured picture.
script_dir = os.path.dirname(__file__)
# Resolve artifacts/images relative to this script so the example works
# regardless of the current working directory.
artifact_folder_path = os.path.join(script_dir, "../artifacts/images")
os.makedirs(artifact_folder_path, exist_ok=True)
print("[Example] Saving captured picture to:", artifact_folder_path)
# Save the frame
save_path = os.path.join(artifact_folder_path, "picture.png")
cv2.imwrite(save_path, np.array(frame_read.frame))
# Land
tello.land()

View File

@@ -244,13 +244,25 @@ class CommandServer:
conn.send(state.encode())
elif data == "get_latest_frame":
# Save the frame to disk first
frame_path = os.path.join(self._recording_folder, "latest_frame.png")
# Send frame data directly over TCP instead of using filesystem
if self._ursina_adapter.latest_frame is not None:
cv2.imwrite(frame_path, self._ursina_adapter.latest_frame)
conn.send(frame_path.encode())
# Encode frame as PNG in memory
success, buffer = cv2.imencode('.png', self._ursina_adapter.latest_frame)
if success:
# Send frame size first (4 bytes)
frame_data = buffer.tobytes()
frame_size = len(frame_data)
conn.send(frame_size.to_bytes(4, byteorder='big'))
# Then send the actual frame data
conn.send(frame_data)
print(f"[Frame Transfer] Sent {frame_size} bytes over TCP")
else:
# Send 0 size to indicate no frame
conn.send((0).to_bytes(4, byteorder='big'))
else:
conn.send(b"N/A")
# Send 0 size to indicate no frame available
conn.send((0).to_bytes(4, byteorder='big'))
elif data == "capture_frame":
self._ursina_adapter.capture_frame()
elif data.startswith("set_speed"):

View File

@@ -69,12 +69,53 @@ class TelloSimClient:
print(f"[Error] Unable to connect to the simulation at {self.host}:{self.port}")
def get_frame_read(self) -> BackgroundFrameRead:
    """Get the latest frame directly from the simulator over TCP.

    Protocol: send ``b'get_latest_frame'``; the server replies with a
    4-byte big-endian length prefix followed by that many bytes of
    PNG-encoded image data. A length of 0 means no frame is available.

    Returns:
        BackgroundFrameRead wrapping the decoded image in BGR format
        (OpenCV's native order — convert to RGB for display if needed),
        or a black 360x640 placeholder frame on any failure.
    """
    def _fallback():
        # Black placeholder so callers always receive a valid frame object.
        return BackgroundFrameRead(frame=np.zeros([360, 640, 3], dtype=np.uint8))

    def _recv_exact(sock, count):
        # TCP is a byte stream: recv() may return fewer bytes than
        # requested, so loop until `count` bytes arrive or the peer closes.
        buf = b''
        while len(buf) < count:
            chunk = sock.recv(min(4096, count - len(buf)))
            if not chunk:
                break
            buf += chunk
        return buf

    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.connect((self.host, self.port))
            # sendall() guarantees the whole command is transmitted;
            # plain send() may write only part of the buffer.
            s.sendall(b'get_latest_frame')
            # Receive exactly the 4-byte size prefix.
            size_data = _recv_exact(s, 4)
            if len(size_data) != 4:
                print("[Error] Failed to receive frame size")
                return _fallback()
            frame_size = int.from_bytes(size_data, byteorder='big')
            if frame_size == 0:
                # Server signals "no frame available" with a zero length.
                print("[Debug] No frame available from simulator")
                return _fallback()
            # Receive the PNG payload.
            frame_data = _recv_exact(s, frame_size)
            if len(frame_data) == frame_size:
                nparr = np.frombuffer(frame_data, np.uint8)
                image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
                if image is not None:
                    # Return frame in BGR format (OpenCV's native format).
                    return BackgroundFrameRead(frame=image)
            print("[Error] Failed to decode frame data")
            return _fallback()
    except ConnectionRefusedError:
        print(f"[Error] Unable to connect to the simulation at {self.host}:{self.port}")
        return _fallback()
    except Exception as e:
        print(f"[Error] Failed to get frame: {e}")
        return _fallback()
def _request_data(self, command):
try: