import streamlit as st
import tensorflow as tf
import numpy as np

# NeRF helpers (pose_spherical, get_rays, render_flat_rays, render_rgb_depth)
# are provided by the Space's own modules.
from transformations import *
from rendering import *
# Set the random seed to obtain reproducible results.
tf.random.set_seed(42)

# Global rendering settings.
AUTO = tf.data.AUTOTUNE
BATCH_SIZE = 1
NUM_SAMPLES = 32  # number of sample points per ray
POS_ENCODE_DIMS = 16  # number of frequencies in the positional encoding
EPOCHS = 30
H = 25  # rendered image height in pixels
W = 25  # rendered image width in pixels
focal = 0.6911112070083618  # focal length of the virtual camera
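
# POS_ENCODE_DIMS controls the sinusoidal positional encoding applied to each
# 3D point before it is fed to the network. The helper that does this is
# assumed to live in the imported modules and to follow the Keras NeRF
# example; the sketch below is illustrative only and is never called here.
def _encode_position_sketch(x):
    # x: (..., 3) coordinates -> (..., 3 + 2 * 3 * POS_ENCODE_DIMS) features.
    positions = [x]
    for i in range(POS_ENCODE_DIMS):
        for fn in [tf.sin, tf.cos]:
            positions.append(fn(2.0 ** i * x))
    return tf.concat(positions, axis=-1)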
def show_rendered_image(r, theta, phi):
    # Get the camera-to-world matrix for the given spherical coordinates.
    c2w = pose_spherical(theta, phi, r)

    # Shoot one ray per pixel and flatten the sample points for the model.
    ray_oris, ray_dirs = get_rays(H, W, focal, c2w)
    rays_flat, t_vals = render_flat_rays(
        ray_oris, ray_dirs, near=2.0, far=6.0, num_samples=NUM_SAMPLES, rand=False
    )

    # Query the pre-trained NeRF and composite the colour image and depth map.
    rgb, depth = render_rgb_depth(
        nerf_loaded, rays_flat[None, ...], t_vals[None, ...], rand=False, train=False
    )
    return rgb[0], depth[0]
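
# `render_flat_rays` is assumed to march along each ray as in the Keras NeRF
# example: sample NUM_SAMPLES depths between `near` and `far` and turn them
# into 3D query points via origin + t * direction. A minimal sketch of that
# step (illustrative only, not used by the app):
def _ray_points_sketch(ray_origins, ray_directions, near, far, num_samples):
    t_vals = tf.linspace(near, far, num_samples)  # (num_samples,)
    # Broadcast to (H, W, num_samples, 3) query points along every ray.
    points = (
        ray_origins[..., None, :]
        + ray_directions[..., None, :] * t_vals[..., None]
    )
    return points, t_vals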
# app.py text matter starts here
st.title("3D volumetric rendering with NeRF")
st.markdown("Authors: [Aritra Roy Gosthipathy](https://twitter.com/ariG23498) and [Ritwik Raha](https://twitter.com/ritwik_raha)")
st.markdown("## Description")
st.markdown("[NeRF](https://arxiv.org/abs/2003.08934) proposes an ingenious way to synthesize novel views of a scene by modelling the volumetric scene function through a neural network.")
st.markdown("## Interactive Demo")
# Load the pre-trained NeRF model from the Hugging Face Hub.
from huggingface_hub import from_pretrained_keras

nerf_loaded = from_pretrained_keras("Alesteba/NeRF_ficus")

# Alternative: download the weights once and load them from disk.
# from huggingface_hub import snapshot_download
# snapshot_download(repo_id="Alesteba/your-model-name", local_dir="./nerf")
# nerf_loaded = tf.keras.models.load_model("nerf", compile=False)
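
# The checkpoint loaded above is assumed to be the small MLP from the Keras
# NeRF example: it maps positionally encoded sample points to an RGB colour
# plus a volume density per point. The builder below is a hypothetical sketch
# of that architecture and is never called by the app.
def _nerf_mlp_sketch(num_layers=8, num_pos=H * W * NUM_SAMPLES):
    inputs = tf.keras.Input(shape=(num_pos, 2 * 3 * POS_ENCODE_DIMS + 3))
    x = inputs
    for i in range(num_layers):
        x = tf.keras.layers.Dense(units=64, activation="relu")(x)
        if i % 4 == 0 and i > 0:
            # Skip connection: re-inject the encoded inputs midway through.
            x = tf.keras.layers.concatenate([x, inputs], axis=-1)
    # Four outputs per sample point: RGB plus volume density (sigma).
    outputs = tf.keras.layers.Dense(units=4)(x)
    return tf.keras.Model(inputs=inputs, outputs=outputs)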
# Camera pose: theta comes from the slider; r and phi are kept fixed.
r = 4.0
theta = st.slider("Enter a value for Θ:", min_value=0.0, max_value=360.0)
phi = -30.0

color, depth = show_rendered_image(r, theta, phi)
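
# Inside `render_rgb_depth`, the colour image and depth map are assumed to be
# composited along each ray as in the Keras NeRF example: per-sample densities
# become alpha values, and the accumulated transmittance weights each sample's
# colour and depth. An illustrative sketch (not used by the app):
def _composite_sketch(rgb, sigma, t_vals):
    # rgb: (..., NUM_SAMPLES, 3) colours, sigma: (..., NUM_SAMPLES) non-negative
    # densities, t_vals: (NUM_SAMPLES,) sample depths along the ray.
    delta = t_vals[1:] - t_vals[:-1]
    delta = tf.concat([delta, tf.constant([1e10])], axis=-1)
    alpha = 1.0 - tf.exp(-sigma * delta)
    # Transmittance: how much light survives up to each sample point.
    transmittance = tf.math.cumprod(1.0 - alpha + 1e-10, axis=-1, exclusive=True)
    weights = alpha * transmittance
    rendered_rgb = tf.reduce_sum(weights[..., None] * rgb, axis=-2)
    rendered_depth = tf.reduce_sum(weights * t_vals, axis=-1)
    return rendered_rgb, rendered_depth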
# Display the rendered colour image and depth map side by side.
col1, col2 = st.columns(2)

with col1:
    color = tf.keras.utils.array_to_img(color)
    st.image(color, caption="Color Image", clamp=True, width=300)

with col2:
    # The depth map is a single channel, so add a channel axis for display.
    depth = tf.keras.utils.array_to_img(depth[..., None])
    st.image(depth, caption="Depth Map", clamp=True, width=300)
st.markdown("## Tutorials") | |
st.markdown("- [Keras](https://keras.io/examples/vision/nerf/)") | |
st.markdown("- [PyImageSearch NeRF 1](https://www.pyimagesearch.com/2021/11/10/computer-graphics-and-deep-learning-with-nerf-using-tensorflow-and-keras-part-1/)") | |
st.markdown("- [PyImageSearch NeRF 2](https://www.pyimagesearch.com/2021/11/17/computer-graphics-and-deep-learning-with-nerf-using-tensorflow-and-keras-part-2/)") | |
st.markdown("- [PyImageSearch NeRF 3](https://www.pyimagesearch.com/2021/11/24/computer-graphics-and-deep-learning-with-nerf-using-tensorflow-and-keras-part-3/)") | |
st.markdown("## Credits") | |
st.markdown("- [PyImageSearch](https://www.pyimagesearch.com/)") | |
st.markdown("- [JarvisLabs.ai GPU credits](https://jarvislabs.ai/)") |