fabiencasenave committed
Commit • 2e0c5aa
Parent(s): b451065

Update app.py

app.py CHANGED
@@ -1,25 +1,28 @@
 import gradio as gr
-import pickle
+# import pickle
 # from datasets import load_from_disk
 from plaid.containers.sample import Sample
 # import pyvista as pv
 
+import numpy as np
 import pyrender
 import trimesh
-import matplotlib
+import matplotlib as mpl
+import matplotlib.cm as cm
 
 import os
 # switch to "osmesa" or "egl" before loading pyrender
 os.environ["PYOPENGL_PLATFORM"] = "egl"
 
-
+
+os.system("wget https://zenodo.org/records/10124594/files/Tensile2d.tar.gz")
+os.system("tar -xvf Tensile2d.tar.gz")
 
 # FOLDER = "plot"
 
 # dataset = load_from_disk("Rotor37")
 
-field_names_train = ["
-field_names_test = []
+field_names_train = ["sig11", "sig22", "sig12", "U1", "U2", "evrcum"]
 
 
 
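Note: the two os.system calls above fetch and unpack the Tensile2d archive on every startup. A minimal pure-Python sketch of the same fetch-and-unpack step, guarded so it only runs when the Tensile2d folder is missing (DATA_URL and ARCHIVE are illustrative names, not part of the commit):

import os
import tarfile
import urllib.request

DATA_URL = "https://zenodo.org/records/10124594/files/Tensile2d.tar.gz"
ARCHIVE = "Tensile2d.tar.gz"

# Download and extract only when the dataset folder is absent, so app
# restarts do not re-fetch the archive.
if not os.path.isdir("Tensile2d"):
    urllib.request.urlretrieve(DATA_URL, ARCHIVE)
    with tarfile.open(ARCHIVE, "r:gz") as tar:
        tar.extractall()

Both urllib.request and tarfile are standard-library modules, so this adds no dependency beyond what the app already uses.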
@@ -29,45 +32,51 @@ field_names_test = []
 
 def sample_info(sample_id_str, fieldn):
 
-
-
+    plaid_sample = Sample.load_from_dir(f"Tensile2d/dataset/samples/sample_"+str(sample_id_str).zfill(9))
+    nodes = plaid_sample.get_nodes()
+    field = plaid_sample.get_field(fieldn)
+    if nodes.shape[1] == 2:
+        nodes__ = np.zeros((nodes.shape[0],nodes.shape[1]+1))
+        nodes__[:,:-1] = nodes
+        nodes = nodes__
+
+
+    triangles = plaid_sample.get_elements()['TRI_3']
+
+    # generate colormap
+    norm = mpl.colors.Normalize(vmin=np.min(field), vmax=np.max(field))
+    cmap = cm.coolwarm
+
+    m = cm.ScalarMappable(norm=norm, cmap=cmap)
+    vertex_colors = m.to_rgba(field)[:,:3]
 
-    str__ = f"loading sample {sample_id_str}"
-
-
     # generate mesh
-
-
-    mesh = pyrender.Mesh.from_trimesh(
-
+    trimesh = Trimesh(vertices = nodes, faces = triangles)
+    trimesh.visual.vertex_colors = vertex_colors
+    mesh = pyrender.Mesh.from_trimesh(trimesh, smooth=False)
+
     # compose scene
     scene = pyrender.Scene(ambient_light=[.1, .1, .3], bg_color=[0, 0, 0])
     camera = pyrender.PerspectiveCamera( yfov=np.pi / 3.0)
-    light = pyrender.DirectionalLight(color=[1,1,1], intensity=
-
+    light = pyrender.DirectionalLight(color=[1,1,1], intensity=1000.)
+
     scene.add(mesh, pose= np.eye(4))
     scene.add(light, pose= np.eye(4))
-
-    c =
+
+    c = 3**-0.5
     scene.add(camera, pose=[[ 1, 0, 0, 0],
                             [ 0, c, -c, -2],
-                            [ 0, c, c, 2],
+                            [ 0, c, c, 1.2],
                             [ 0, 0, 0, 1]])
-
+
     # render scene
-    r = pyrender.OffscreenRenderer(
+    r = pyrender.OffscreenRenderer(1024, 1024)
     color, _ = r.render(scene)
-    # color = np.random.rand(512, 512)
-
-    plt.figure(figsize=(8,8))
-    plt.imshow(color)
 
-    plt.savefig("test.png")
-
 
+    str__ = f"loading sample {sample_id_str}"
 
-    return str__,
-    # return str__, str__
+    return str__, color
 
 
 if __name__ == "__main__":
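Note: the added line trimesh = Trimesh(vertices = nodes, faces = triangles) references a name Trimesh that is never imported (only the trimesh module is), and the assignment rebinds that module name. A short sketch of how these mesh-building lines would usually be written, with toy arrays standing in for the PLAID sample's nodes, TRI_3 connectivity and vertex colors:

import numpy as np
import pyrender
import trimesh

# Toy stand-ins for the data computed earlier in sample_info.
nodes = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
triangles = np.array([[0, 1, 2]])
vertex_colors = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])

# Qualify Trimesh through the imported module and use a variable name that
# does not shadow that module.
tri_mesh = trimesh.Trimesh(vertices=nodes, faces=triangles, process=False)
tri_mesh.visual.vertex_colors = vertex_colors
mesh = pyrender.Mesh.from_trimesh(tri_mesh, smooth=False)

process=False keeps trimesh from merging or reordering vertices, so the per-vertex colors stay aligned with the nodes array.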
@@ -78,10 +87,8 @@ if __name__ == "__main__":
 
 
     output1 = gr.Text(label="Training sample info")
-    # output2 = gr.Text(label="Training sample visualization")
     output2 = gr.Image(label="Training sample visualization")
 
-    # d1.input(update_second, d1, d2)
 
     d1.input(sample_info, [d1, d2], [output1, output2])
     d2.input(sample_info, [d1, d2], [output1, output2])
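Note: the inputs d1 and d2 are defined outside the hunks shown in this diff. A speculative sketch of how the surrounding Blocks wiring could look, assuming d1 is a sample-id textbox and d2 is a field-name dropdown (both assumptions, not shown here), with sample_info and field_names_train as defined earlier in app.py:

import gradio as gr

with gr.Blocks() as demo:
    # Hypothetical input components; the diff only shows their event wiring.
    d1 = gr.Textbox(label="Sample id")
    d2 = gr.Dropdown(choices=field_names_train, label="Field name")
    output1 = gr.Text(label="Training sample info")
    output2 = gr.Image(label="Training sample visualization")

    # Re-render whenever either input changes, as in the committed code.
    d1.input(sample_info, [d1, d2], [output1, output2])
    d2.input(sample_info, [d1, d2], [output1, output2])

demo.launch()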