Jon Taylor committed
Commit 6e1bb73 • 1 Parent(s): 8354b1a
fixed SD image size
- app/bot.py +7 -5
- app/pipeline.py +2 -2
app/bot.py
CHANGED
@@ -50,6 +50,9 @@ class DailyVision(EventHandler):
 
         self.logger.info(f"Expiration timer set to: {self.__expiration}")
 
+        # Setup camera
+        self.setup_camera()
+
     def run(self, meeting_url, token):
         # Join
         self.logger.info(f"Connecting to room {meeting_url} as {self.__bot_name}")
@@ -81,12 +84,12 @@ class DailyVision(EventHandler):
         # Say hello
         self.wave()
 
-    def setup_camera(self…
+    def setup_camera(self):
         if not self.__camera:
             self.__camera = Daily.create_camera_device("camera",
                                                        width = 640,
                                                        height = 480,
-                                                       color_format="…
+                                                       color_format="RGB")
             self.__client.update_inputs({
                 "camera": {
                     "isEnabled": True,
@@ -114,7 +117,7 @@ class DailyVision(EventHandler):
             video_frame = self.__queue.get(timeout=5)
 
             if video_frame:
-                image = Image.frombytes("…
+                image = Image.frombytes("RGB", (video_frame.width, video_frame.height), video_frame.buffer)
                 result_image = self.__pipeline.predict(params, image)
                 self.__camera.write_frame(result_image.tobytes())
                 #pil = Image.fromarray(result.render()[0], mode="RGB").tobytes()
@@ -123,9 +126,8 @@ class DailyVision(EventHandler):
 
     def on_video_frame(self, participant_id, video_frame):
         # Process ~15 frames per second (considering incoming frames at 30fps).
-        if time.time() - self.__time > …
+        if time.time() - self.__time > 2: #0.05:
             self.__time = time.time()
-            self.setup_camera(video_frame)
             self.__queue.put(video_frame)
 
     def wave(self, emoji="👋"):
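The net effect in bot.py is that the virtual camera is created once at startup at a fixed 640x480 RGB resolution rather than lazily from the first incoming frame, and each queued frame is converted to a PIL image, run through the Stable Diffusion pipeline, and written back to that camera. A minimal sketch of this frame path, assuming the daily-python calls already used above (Daily.create_camera_device, write_frame) and the predict() signature from pipeline.py; the standalone functions below are illustrative, not the Space's actual structure:

from PIL import Image
from daily import Daily  # assumption: the same daily-python package bot.py imports


def make_camera():
    # Same call as setup_camera(): a virtual RGB camera at the fixed SD size.
    return Daily.create_camera_device("camera",
                                      width=640,
                                      height=480,
                                      color_format="RGB")


def process_frame(camera, pipeline, params, video_frame):
    # Wrap the raw RGB buffer delivered by Daily in a PIL image,
    image = Image.frombytes("RGB",
                            (video_frame.width, video_frame.height),
                            video_frame.buffer)
    # run it through the SD pipeline (predict() returns a PIL.Image),
    result_image = pipeline.predict(params, image)
    # and push the raw RGB bytes back out through the virtual camera. The result
    # must be 640x480, or the byte count will not match the camera's configured size.
    camera.write_frame(result_image.tobytes())

The 2-second threshold in on_video_frame (with the original 0.05 kept as a comment) throttles how often frames are queued, so the pipeline is not fed frames faster than it can process them.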
app/pipeline.py
CHANGED
@@ -181,8 +181,8 @@ class Pipeline:
 
         self.pipe(
             prompt="warmup",
-            image=[Image.new("…
-            control_image=[Image.new("…
+            image=[Image.new("RGB", (640, 480))],
+            control_image=[Image.new("RGB", (640, 480))],
         )
 
     def predict(self, params: "Pipeline.InputParams", image) -> Image.Image:
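The pipeline.py change makes the warmup images match the camera's 640x480 size, so the first real frame runs at a resolution the pipeline has already seen. A minimal sketch of that warmup pattern, assuming self.pipe is a diffusers ControlNet image-to-image pipeline (consistent with the prompt/image/control_image call above); the model IDs and step count below are placeholders, not necessarily what this Space uses:

import torch
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline

# Placeholder checkpoints; the Space's actual models may differ.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny",
                                             torch_dtype=torch.float16)
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")

# One pass at the exact resolution the bot streams (640x480), so the first real
# frame does not pay first-call allocation cost at a new image size.
blank = Image.new("RGB", (640, 480))
pipe(prompt="warmup", image=[blank], control_image=[blank], num_inference_steps=1)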