
Creation of a web interface and setup of the hotspot #26

Merged
merged 29 commits into from
Nov 7, 2023
Commits (29)
4d1c598
chore - add flask in requirements
EtienneSchmitz Oct 18, 2023
dfffc42
feat - add web structure flask app
EtienneSchmitz Oct 18, 2023
85a97c8
feat - Add index.html work
EtienneSchmitz Oct 22, 2023
88c1d11
feat - Update logo
EtienneSchmitz Oct 22, 2023
c955b5c
feat - add all routes
EtienneSchmitz Oct 28, 2023
d9dacb8
feat - index.html use base template
EtienneSchmitz Oct 28, 2023
b6618c4
feat - add tailwind
EtienneSchmitz Oct 28, 2023
f7de8b9
fix - resolve some errors with tailwind integration
EtienneSchmitz Oct 28, 2023
8905a62
feat - continue to rework the index page
EtienneSchmitz Oct 28, 2023
7a57bcb
style - update index style
EtienneSchmitz Oct 28, 2023
6367b7f
style - upgrade style for all page and index page
EtienneSchmitz Oct 28, 2023
950dc9f
feat - add settings page
EtienneSchmitz Oct 28, 2023
dca1235
feat - add webrtc
EtienneSchmitz Nov 3, 2023
e413ccb
fix - continue to work on webrtc
EtienneSchmitz Nov 3, 2023
9b9c385
feat: web_rtc works now
EtienneSchmitz Nov 3, 2023
e870ddd
fix - correct photographer
EtienneSchmitz Nov 5, 2023
0c8aa08
feat(threading): Implement threading for object access and detection
EtienneSchmitz Nov 5, 2023
f06117b
fix - fix some errors and clean print
EtienneSchmitz Nov 5, 2023
74ef530
feat - add update button to settings
EtienneSchmitz Nov 5, 2023
e109c8d
feat - begin api, add update
EtienneSchmitz Nov 5, 2023
f510995
feat - add finish settings
EtienneSchmitz Nov 5, 2023
9b9c423
feat - add program deposit
EtienneSchmitz Nov 6, 2023
7af6aa3
feat - continue to work on deposit program
EtienneSchmitz Nov 6, 2023
1d58407
Add delete function
EtienneSchmitz Nov 6, 2023
ebadd26
chore - remove print
EtienneSchmitz Nov 6, 2023
41cd724
feat - remove urgency button
EtienneSchmitz Nov 6, 2023
043c8c6
chore - green button for update
EtienneSchmitz Nov 6, 2023
7876ce9
fix - Reduced Capture Delay and Introduced Multi-Threading
EtienneSchmitz Nov 7, 2023
35a837b
feat - finish webcam
EtienneSchmitz Nov 7, 2023
4 changes: 4 additions & 0 deletions .gitignore
@@ -55,9 +55,13 @@ dkms.conf
*.pyc
api/python/rosa.egg-info

# NodeJS
node_modules/

# Build folder
build/
dist/

# Code editor
.vscode/
.idea/
27 changes: 0 additions & 27 deletions .travis.yml

This file was deleted.

6 changes: 5 additions & 1 deletion rpi/requirements.txt
@@ -7,4 +7,8 @@ tensorflow<2
matplotlib
protobuf==3.20
h5py==2.10.0
websockets
websockets
aiortc
aiohttp
aiohttp_cors
av
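The new dependencies back the WebRTC stream added below: aiortc provides the peer connections, aiohttp and aiohttp_cors the signaling endpoint, and av (PyAV) the video frames. A minimal sketch of the ndarray-to-VideoFrame conversion those tracks rely on, useful as a quick sanity check that av is installed (the black test image is made up for illustration):

```python
# Wrap an OpenCV-style BGR ndarray in an av.VideoFrame, the same conversion
# the new WebRTC tracks perform in recv(). The test image is arbitrary.
import numpy as np
import av

bgr = np.zeros((480, 640, 3), dtype=np.uint8)
frame = av.VideoFrame.from_ndarray(bgr, format="bgr24")
print(frame.width, frame.height)  # 640 480
```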
3 changes: 2 additions & 1 deletion rpi/sockets/camera.py
@@ -19,7 +19,8 @@ async def send_to_all_clients(self):
if len(self.connected) == 0:
continue

img = self.camera.grab_frame_loop()
_, img = self.camera.grab_frame_loop()

if img is None:
continue

170 changes: 170 additions & 0 deletions rpi/sockets/webrtc.py
@@ -0,0 +1,170 @@
import cv2
import asyncio
import logging
import json
import aiohttp_cors
import av

from aiohttp import web
from aiortc import VideoStreamTrack, RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.media import MediaBlackhole, MediaPlayer
from vision.camera import Camera

class VideoCameraPI(VideoStreamTrack):
def __init__(self, frame_interval=16):
super().__init__()
self.direction = 'sendonly'
self.camera = Camera()
self.logger = logging.getLogger(__name__)
self.frame_interval = frame_interval

async def recv(self):
while True:
frame = self.camera.grab_frame()
if frame is None:
continue

# Convert the OpenCV frame (a NumPy array) to an aiortc VideoFrame
video_frame = av.VideoFrame.from_ndarray(frame, format="bgr24")

pts, time_base = await self.next_timestamp()
video_frame.pts = pts
video_frame.time_base = time_base

await asyncio.sleep(self.frame_interval / 1000)

return video_frame

class DetectionVideoCameraPI(VideoStreamTrack):
def __init__(self, frame_interval=16):
super().__init__()
self.direction = 'sendonly'
self.camera = Camera()
self.logger = logging.getLogger(__name__)
self.frame_interval = frame_interval

async def recv(self):
while True:
frame = self.camera.grab_detected_frame()
if frame is None:
continue

# Convert the OpenCV frame (a NumPy array) to an aiortc VideoFrame
video_frame = av.VideoFrame.from_ndarray(frame, format="bgr24")

# Update the timestamp of the video frame
pts, time_base = await self.next_timestamp()
video_frame.pts = pts
video_frame.time_base = time_base

# Sleep for the frame interval
await asyncio.sleep(self.frame_interval / 1000)

return video_frame

class WebRTC:
def __init__(self):
self.rtc_peer_connections = set()
self.logger = logging.getLogger(__name__)

async def offer_camera(self, request):
params = await request.json()
offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])

pc = RTCPeerConnection()
self.rtc_peer_connections.add(pc)

video_pi = VideoCameraPI()
pc.addTrack(video_pi)

@pc.on("connectionstatechange")
async def on_connectionstatechange():
self.logger.info("Connection state is %s", pc.connectionState)
if pc.connectionState == "failed":
await pc.close()
self.rtc_peer_connections.discard(pc)

# Handle offer
await pc.setRemoteDescription(offer)

# Send answer
answer = await pc.createAnswer()
await pc.setLocalDescription(answer)

return web.Response(
content_type="application/json",
text=json.dumps(
{"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
),
)

async def offer_detection(self, request):
params = await request.json()
offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])

pc = RTCPeerConnection()
self.rtc_peer_connections.add(pc)

video_detection = DetectionVideoCameraPI() # Use DetectionVideoCameraPI for detection
pc.addTrack(video_detection)

@pc.on("connectionstatechange")
async def on_connectionstatechange():
self.logger.info("Connection state is %s", pc.connectionState)
if pc.connectionState == "failed":
await pc.close()
self.rtc_peer_connections.discard(pc)

# Handle offer
await pc.setRemoteDescription(offer)

# Send answer
answer = await pc.createAnswer()
await pc.setLocalDescription(answer)

return web.Response(
content_type="application/json",
text=json.dumps(
{"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
),
)

async def run_server(self):
# Create HTTP Application
self.app = web.Application()

# Configure default CORS settings.
cors = aiohttp_cors.setup(self.app, defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_headers="*",
)
})

# Define the /offer_camera route for camera streaming
resource_camera = cors.add(self.app.router.add_resource("/offer_camera"))
cors.add(resource_camera.add_route("POST", self.offer_camera))

# Define the /offer_detection route for detection streaming
resource_detection = cors.add(self.app.router.add_resource("/offer_detection"))
cors.add(resource_detection.add_route("POST", self.offer_detection))

self.logger.info("Raspberry Pi WebRTC server listening on port 8080...")

self.runner = web.AppRunner(self.app)
await self.runner.setup()

self.site = web.TCPSite(self.runner, '0.0.0.0', 8080)
await self.site.start()

def run(self):
self.loop = asyncio.get_event_loop()
server_coroutine = self.run_server()
server_task = asyncio.ensure_future(server_coroutine)
self.loop.run_until_complete(server_task)

async def close(self):
coros = [pc.close() for pc in self.rtc_peer_connections]
await asyncio.gather(*coros)
self.rtc_peer_connections.clear()
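The two /offer_* routes implement a plain HTTP signaling exchange: the client POSTs its SDP offer as JSON and gets the SDP answer back. A minimal client-side sketch of that exchange using aiortc and aiohttp, assuming the Pi is reachable at raspberrypi.local:8080 (the hostname is an assumption, not part of this PR):

```python
# Hypothetical client: perform the offer/answer exchange against /offer_camera
# and log the incoming video track. Mirrors what the browser page does.
import asyncio
import aiohttp
from aiortc import RTCPeerConnection, RTCSessionDescription

SERVER = "http://raspberrypi.local:8080"  # assumed address of the Pi

async def receive_camera():
    pc = RTCPeerConnection()
    pc.addTransceiver("video", direction="recvonly")  # we only receive video

    @pc.on("track")
    def on_track(track):
        print("Receiving track:", track.kind)

    # Create the local offer and send it to the signaling endpoint
    await pc.setLocalDescription(await pc.createOffer())
    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"{SERVER}/offer_camera",
            json={"sdp": pc.localDescription.sdp, "type": pc.localDescription.type},
        ) as resp:
            answer = await resp.json()

    # Apply the server's answer to complete the handshake
    await pc.setRemoteDescription(
        RTCSessionDescription(sdp=answer["sdp"], type=answer["type"])
    )
    await asyncio.sleep(10)  # keep the connection open briefly
    await pc.close()

asyncio.run(receive_camera())
```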
5 changes: 5 additions & 0 deletions rpi/tasks/api.py
@@ -5,6 +5,7 @@

from sockets.input_output import InputOuputServer
from sockets.camera import CameraServer
from sockets.webrtc import WebRTC
import asyncio
import threading

@@ -27,7 +28,11 @@ def start_asyncio_loop(self):

self.camera_server = CameraServer()
self.camera_server.run()

self.webrtc_server = WebRTC()
self.webrtc_server.run()


self.loop.run_forever()

def run(self):
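For reference, the server start-up above follows a background-event-loop pattern: both async servers are scheduled on one asyncio loop that lives in its own daemon thread, so the synchronous robot code keeps running beside them. A simplified, hypothetical sketch of that pattern (fake_server stands in for CameraServer / WebRTC):

```python
# Run several long-lived asyncio servers on a dedicated event loop in a
# daemon thread; the main thread stays free for synchronous work.
import asyncio
import threading
import time

async def fake_server(name):
    while True:
        await asyncio.sleep(1)
        print(name, "still serving")

def start_asyncio_loop():
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.create_task(fake_server("camera"))
    loop.create_task(fake_server("webrtc"))
    loop.run_forever()

threading.Thread(target=start_asyncio_loop, daemon=True).start()
time.sleep(3)  # stand-in for the rest of the robot's main loop
```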
11 changes: 3 additions & 8 deletions rpi/tasks/thymio/photographer.py
@@ -5,9 +5,6 @@
from vision.camera import Camera
from tasks.base import Task

from vision import detect_objects


class Photographer(Task):
def __init__(self, controller: ThymioController):
self.controller = controller
Expand All @@ -16,14 +13,12 @@ def __init__(self, controller: ThymioController):
self.controller.set_led("top", [0,32,0])

def run(self):
self.logger.info("PHOTOGRAPHER")
img = self.cam.grab_frame_loop()
self.logger.debug("PHOTOGRAPHER")
found_obj = self.cam.grab_detected_data()

if img is None:
if found_obj is None:
return

found_obj = detect_objects(img, render=True)

if found_obj:
obj = found_obj[0]
label = obj.label
4 changes: 2 additions & 2 deletions rpi/vision/__init__.py
@@ -1,2 +1,2 @@
from .line_tracking import get_line_center # noqa F401
from .object_detector import detect_objects # noqa F401
# from .line_tracking import get_line_center # noqa F401
# from .object_detector import detect_objects # noqa F401
89 changes: 84 additions & 5 deletions rpi/vision/camera.py
@@ -1,20 +1,99 @@
import cv2 as cv
import json
import os
import threading
import time
import numpy as np

def visual_object_to_dict(vo):
return {
'label': vo.label,
'center': tuple(float(c) for c in vo.center),
'box': [float(b) for b in vo.box.tolist()],
'confidence': float(vo.confidence)
}

class Camera:
_instance = None

def __new__(cls):
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance.init_camera()
cls._instance._init_camera()
return cls._instance

def init_camera(self):
def _init_camera(self):
self.cap = cv.VideoCapture(0)
self.last_frame = None
self.last_detected_frame = None
self.last_found_obj = np.empty((0, 0))

self.image_dir = "/tmp/rosa/images"
os.makedirs(self.image_dir, exist_ok=True)

self.capture_thread = threading.Thread(target=self._capture_frames)
self.capture_thread.daemon = True # Set the thread as a daemon to exit when the main program exits
self.capture_thread.start()

self.detect_thread = threading.Thread(target=self._detect_objects_continuously)
self.detect_thread.daemon = True
self.detect_thread.start()

def _capture_frames(self):
while True:
ret, frame = self.cap.read()
if ret:
original_img_path = os.path.join(self.image_dir, 'camera.jpg')
cv.imwrite(original_img_path, frame)
self.last_frame = frame

time.sleep(0.010)

def _detect_objects_continuously(self):
from .object_detector import detect_objects

while True:
frame = self.last_frame # Capture the last available frame
if frame is not None:
self.last_detected_frame = frame # Assuming detect_objects does not modify the frame
self.last_found_obj = detect_objects(frame, render=True)

# Save the detected image and data
detected_img_path = os.path.join(self.image_dir, 'detected_img.jpg')
detected_data_path = os.path.join(self.image_dir, 'detected_data.json')

cv.imwrite(detected_img_path, frame)
with open(detected_data_path, 'w') as f:
json_data = [visual_object_to_dict(vo) for vo in self.last_found_obj]
json.dump(json_data, f)

def grab_frame(self):
"""
Returns the last frame captured.
:return: frame: ndarray
"""
return self.last_frame

def grab_detected_data_and_frame(self):
"""
Returns the last detected data and processed frame.
:return: last_found_obj: dict, detected_frame: ndarray
"""
return self.last_found_obj, self.last_detected_frame

def grab_detected_data(self):
"""
Returns the last detected data.
:return: last_found_obj: dict
"""
return self.last_found_obj

def grab_frame_loop(self):
_, img = self.cap.read()
return img
def grab_detected_frame(self):
"""
Returns the last detected frame.
:return: detected_frame: ndarray
"""
return self.last_detected_frame

def __del__(self):
self.cap.release()
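A hypothetical usage sketch of the reworked Camera (assuming the rpi package is on the path and a camera is attached): because the class is a singleton with background capture and detection threads, every consumer shares one device and simply reads the most recent cached frame and detections without blocking.

```python
from vision.camera import Camera
import time

cam_a = Camera()
cam_b = Camera()
assert cam_a is cam_b                   # __new__ returns the one shared instance

time.sleep(1.0)                         # give the background threads time to run
frame = cam_a.grab_frame()              # latest raw frame, or None if none yet
objects = cam_a.grab_detected_data()    # latest detection results
if frame is not None:
    print("frame shape:", frame.shape, "objects found:", len(objects))
```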