"""Streamlit demo for NeRF: 3D volumetric rendering with NeRF.

Run with:
    streamlit run app.py
"""

import streamlit as st
import tensorflow as tf
import numpy as np

from huggingface_hub import from_pretrained_keras

# H, W, focal, and NUM_SAMPLES come from config; the camera and rendering
# helpers (pose_spherical, get_rays, render_flat_rays, render_rgb_depth)
# come from the accompanying transformations and rendering modules.
from config import *
from transformations import *
from rendering import *

# Set the random seed to obtain reproducible results.
tf.random.set_seed(42)

# Download the trained NeRF model from the Hugging Face Hub.
nerf_loaded = from_pretrained_keras("Alesteba/NeRF_ficus")


def show_rendered_image(r, theta, phi):
    """Render the color image and depth map for a given camera pose."""
    # Get the camera-to-world matrix for the spherical coordinates.
    c2w = pose_spherical(theta, phi, r)

    # Shoot rays through every pixel and sample points along each ray.
    ray_oris, ray_dirs = get_rays(H, W, focal, c2w)
    rays_flat, t_vals = render_flat_rays(
        ray_oris, ray_dirs, near=2.0, far=6.0, num_samples=NUM_SAMPLES, rand=False
    )

    # Query the trained model for the RGB image and the depth map.
    rgb, depth = render_rgb_depth(
        nerf_loaded, rays_flat[None, ...], t_vals[None, ...], rand=False, train=False
    )
    return rgb[0], depth[0]


# App text matter starts here.
st.title('NeRF: 3D volumetric rendering with NeRF')
st.markdown("Authors: [Aritra Roy Gosthipathy](https://twitter.com/ariG23498) and [Ritwik Raha](https://twitter.com/ritwik_raha)")

st.markdown("## Description")
st.markdown("[NeRF](https://arxiv.org/abs/2003.08934) proposes an ingenious way to synthesize novel views of a scene by modelling the volumetric scene function through a neural network.")

st.markdown("## Interactive Demo")

# Set the camera pose: r and phi are fixed, theta is user-controlled.
r = 4.0
theta = st.slider("Enter a value for Θ:", min_value=0.0, max_value=360.0)
phi = -30.0

color, depth = show_rendered_image(r, theta, phi)

col1, col2 = st.columns(2)
with col1:
    color = tf.keras.utils.array_to_img(color)
    st.image(color, caption="Color Image", clamp=True, width=300)
with col2:
    # The depth map is single-channel, so add a trailing channel axis.
    depth = tf.keras.utils.array_to_img(depth[..., None])
    st.image(depth, caption="Depth Map", clamp=True, width=300)
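
# Note: Streamlit re-executes this script from the top on every widget
# interaction, so the model load above runs on each rerun. A minimal sketch
# of caching the load instead, assuming a Streamlit version that provides
# st.cache_resource (1.18+); this is not part of the original app:
#
# @st.cache_resource
# def load_nerf():
#     # Loaded once per process; subsequent reruns reuse the same object.
#     return from_pretrained_keras("Alesteba/NeRF_ficus")
#
# nerf_loaded = load_nerf()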