Shengyi Qian committed
Commit b61616c
1 Parent(s): 5b046eb

add chinese support

Files changed (1)
  1. app.py +32 -4
app.py CHANGED
@@ -215,17 +215,40 @@ examples = [
 ]
 
 title = 'Understanding 3D Object Interaction from a Single Image'
-description = """
+authors = """
 <p style='text-align: center'> <a href='https://jasonqsy.github.io/3DOI/' target='_blank'>Project Page</a> | <a href='https://arxiv.org/abs/2305.09664' target='_blank'>Paper</a> | <a href='https://github.com/JasonQSY/3DOI' target='_blank'>Code</a></p>
+"""
+description = """
 Gradio demo for Understanding 3D Object Interaction from a Single Image. \n
 You may click on of the examples or upload your own image. \n
 After having the image, you can click on the image to create a single query point. You can then hit Run.\n
 Our approach can predict 3D object interaction from a single image, including Movable (one hand or two hands), Rigid, Articulation type and axis, Action, Bounding box, Mask, Affordance and Depth.
-""" # noqa
+"""
+
+def change_language(lang_select, description_controller, run_button):
+    description_cn = """
+    要运行demo,首先点击右边的示例图片或者上传自己的图片。在有了图片以后,点击图片上的点来创建query point,然后点击 Run。
+    """
+    if lang_select == "简体中文":
+        description_controller = description_cn
+        run_button = '运行'
+    else:
+        description_controller = description
+        run_button = 'Run'
+
+    return description_controller, run_button
+
 
 with gr.Blocks().queue() as demo:
     gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>" + title + "</h1>")
-    gr.Markdown(description)
+    gr.Markdown(authors)
+    # gr.Markdown("<p style='text-align: center'>ICCV 2023</p>")
+
+
+    lang_select = gr.Dropdown(["简体中文", "English"], label='Language / 语言')
+
+    description_controller = gr.Markdown(description)
+
 
     with gr.Row():
         with gr.Column(scale=1):
@@ -259,8 +282,13 @@ with gr.Blocks().queue() as demo:
         with gr.Column(scale=1):
             pass
 
-    output_components = [query_image, pred_properties, pred_localization, pred_affordance, pred_depth]
+    lang_select.change(
+        change_language,
+        inputs=[lang_select, description_controller, run_button],
+        outputs=[description_controller, run_button]
+    )
 
+    output_components = [query_image, pred_properties, pred_localization, pred_affordance, pred_depth]
     run_button.click(fn=run_model, inputs=[input_image], outputs=output_components)
 
 
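For reference, the Chinese description added in this commit roughly reads: "To run the demo, first click one of the example images on the right or upload your own image. Once an image is loaded, click a point on it to create a query point, then click Run."

Below is a minimal, self-contained sketch of the dropdown-driven language-switch pattern this commit uses, assuming a Gradio 3.x-style API. The component names (lang_select, run_button) mirror the commit, but the English text, the default dropdown value, and everything else in the app (the model, examples, and output components) are omitted or illustrative, not taken from the repository.

import gradio as gr

DESCRIPTION_EN = "Click an example or upload an image, click a query point, then hit Run."  # illustrative text, not from the repo
DESCRIPTION_CN = "要运行demo,首先点击右边的示例图片或者上传自己的图片。在有了图片以后,点击图片上的点来创建query point,然后点击 Run。"

def change_language(lang):
    # Return new values for the description Markdown block and the Run button label.
    if lang == "简体中文":
        return DESCRIPTION_CN, "运行"
    return DESCRIPTION_EN, "Run"

with gr.Blocks() as demo:
    lang_select = gr.Dropdown(["简体中文", "English"], value="English", label="Language / 语言")
    description_md = gr.Markdown(DESCRIPTION_EN)
    run_button = gr.Button("Run")
    # Swap the description text and the button label whenever the dropdown selection changes.
    lang_select.change(change_language, inputs=[lang_select], outputs=[description_md, run_button])

demo.launch()

Note that the commit also passes description_controller and run_button as inputs to the change event, so Gradio hands their current values to the callback as well; since only the dropdown value is needed to pick the language, the sketch above passes only lang_select.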