Celeste-cj committed on
Commit 55b92ea
1 Parent(s): c75d8ab
Files changed (5)
  1. .gitignore +3 -0
  2. app.py +42 -0
  3. examples/agra.jpeg +0 -0
  4. examples/human.jpeg +0 -0
  5. examples/images.jpeg +0 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
+ .vscode
+ flagged
+ gradio_cached_examples
app.py ADDED
@@ -0,0 +1,42 @@
+ import os
+ from PIL import Image
+
+ import gradio as gr
+ import numpy as np
+ import torch
+ from transformers import AutoModelForDepthEstimation, DPTImageProcessor
+
+
+ processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
+ model = AutoModelForDepthEstimation.from_pretrained("Intel/dpt-large")
+
+
+ def main(image, input_size=384):
+     # prepare the image for the model (the slider may pass a float, so cast)
+     inputs = processor(images=image, return_tensors="pt", do_resize=True,
+                        size=(int(input_size), int(input_size)), keep_aspect_ratio=True)
+
+     # run inference without tracking gradients
+     with torch.no_grad():
+         outputs = model(**inputs)
+         predicted_depth = outputs.predicted_depth
+
+     # interpolate the depth map back to the original image size
+     prediction = torch.nn.functional.interpolate(predicted_depth.unsqueeze(1),
+                                                  size=image.shape[:2], mode="bicubic").squeeze()
+
+     # normalize to 0-255 and convert to a grayscale PIL image
+     output = prediction.cpu().numpy()
+     formatted = (output * 255 / output.max()).astype("uint8")
+     return Image.fromarray(formatted)
+
+
+ title = "Demo: monocular depth estimation with DPT"
+ description = "This demo uses <a href='https://huggingface.co/Intel/dpt-large' target='_blank'>DPT</a> to estimate depth from a monocular image."
+ examples = [[f"examples/{f}"] for f in os.listdir("examples") if f[0] != "."]
+
+ demo = gr.Interface(fn=main, inputs=[gr.Image(label="Input Image"),
+                                      gr.Slider(128, 512, value=384, label="Input Size")],
+                     outputs="image", title=title, description=description,
+                     examples=examples, cache_examples=True)
+ demo.launch(debug=True, share=True)
examples/agra.jpeg ADDED
examples/human.jpeg ADDED
examples/images.jpeg ADDED
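
As a quick sanity check outside the Gradio UI, the following is a minimal standalone sketch of the same DPT pipeline that app.py wires up. It assumes the dependencies above are installed; examples/agra.jpeg is one of the images added in this commit, and depth_agra.png is an arbitrary output filename.

import numpy as np
import torch
from PIL import Image
from transformers import AutoModelForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
model = AutoModelForDepthEstimation.from_pretrained("Intel/dpt-large")

# gr.Image hands main() an HxWx3 uint8 numpy array; mimic that here
image = np.array(Image.open("examples/agra.jpeg").convert("RGB"))
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth  # (1, H', W') relative inverse depth

# resize to the original resolution and normalize to 0-255
depth = torch.nn.functional.interpolate(
    predicted_depth.unsqueeze(1), size=image.shape[:2], mode="bicubic").squeeze()
out = (depth * 255 / depth.max()).cpu().numpy().astype("uint8")
Image.fromarray(out).save("depth_agra.png")  # brighter pixels = predicted closer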