import os
import pickle

import gradio as gr
import matplotlib as mpl
import matplotlib.cm as cm
import numpy as np
from datasets import load_dataset
from plaid.containers.sample import Sample
from trimesh import Trimesh

# switch to "osmesa" or "egl" before importing pyrender
os.environ["PYOPENGL_PLATFORM"] = "egl"
import pyrender  # noqa: E402

hf_dataset = load_dataset("PLAID-datasets/AirfRANS_remeshed", split="all_samples")

nb_samples = 1000
field_names_train = ["Ux", "Uy", "p", "nut", "implicit_distance"]

_HEADER_ = '''

Visualization demo of AirfRANS_remeshed dataset

'''


def round_num(num) -> str:
    """Format a number with 3 significant digits."""
    return '%s' % float('%.3g' % num)


def sample_info(sample_id_str, fieldn):
    """Return a text summary and a rendered image of the selected sample and field."""
    # retrieve the pickled PLAID sample from the Hugging Face dataset
    sample_ = hf_dataset[int(sample_id_str)]["sample"]
    plaid_sample = Sample.model_validate(pickle.loads(sample_))

    nodes = plaid_sample.get_nodes()
    field = plaid_sample.get_field(fieldn)

    # pad 2D coordinates with a zero z-component so the mesh can be rendered in 3D
    if nodes.shape[1] == 2:
        nodes__ = np.zeros((nodes.shape[0], nodes.shape[1] + 1))
        nodes__[:, :-1] = nodes
        nodes = nodes__

    triangles = plaid_sample.get_elements()['TRI_3']

    # generate colormap
    if np.linalg.norm(field) > 0:
        norm = mpl.colors.Normalize(vmin=np.min(field), vmax=np.max(field))
        cmap = cm.seismic  # alternative: cm.coolwarm
        m = cm.ScalarMappable(norm=norm, cmap=cmap)
        vertex_colors = m.to_rgba(field)[:, :3]
    else:
        # constant color when the field is identically zero
        vertex_colors = np.zeros((field.shape[0], 3))
        vertex_colors[:, 0] = 0.2298057
        vertex_colors[:, 1] = 0.01555616
        vertex_colors[:, 2] = 0.15023281

    # generate mesh
    trimesh = Trimesh(vertices=nodes, faces=triangles)
    trimesh.visual.vertex_colors = vertex_colors
    mesh = pyrender.Mesh.from_trimesh(trimesh, smooth=False)

    # compose scene
    scene = pyrender.Scene(ambient_light=[.1, .1, .3], bg_color=[0, 0, 0])
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0)
    light = pyrender.DirectionalLight(color=[1, 1, 1], intensity=1000.)

    scene.add(mesh, pose=np.eye(4))
    scene.add(light, pose=np.eye(4))
    scene.add(camera, pose=[[1, 0, 0, 1],
                            [0, 1, 0, 0],
                            [0, 0, 1, 6],
                            [0, 0, 0, 1]])

    # render scene
    r = pyrender.OffscreenRenderer(1024, 1024)
    color, _ = r.render(scene)
    r.delete()  # release the offscreen GL context

    # build the textual summary
    str__ = f"Training sample {sample_id_str}\n"
    str__ += str(plaid_sample) + "\n"

    if len(hf_dataset.description['in_scalars_names']) > 0:
        str__ += "\ninput scalars:\n"
        for sname in hf_dataset.description['in_scalars_names']:
            str__ += f"- {sname}: {round_num(plaid_sample.get_scalar(sname))}\n"

    if len(hf_dataset.description['out_scalars_names']) > 0:
        str__ += "\noutput scalars:\n"
        for sname in hf_dataset.description['out_scalars_names']:
            str__ += f"- {sname}: {round_num(plaid_sample.get_scalar(sname))}\n"

    str__ += f"\n\nMesh number of nodes: {nodes.shape[0]}\n"

    if len(hf_dataset.description['in_fields_names']) > 0:
        str__ += "\ninput fields:\n"
        for fname in hf_dataset.description['in_fields_names']:
            str__ += f"- {fname}\n"

    if len(hf_dataset.description['out_fields_names']) > 0:
        str__ += "\noutput fields:\n"
        for fname in hf_dataset.description['out_fields_names']:
            str__ += f"- {fname}\n"

    return str__, color


if __name__ == "__main__":
    with gr.Blocks() as demo:
        gr.Markdown(_HEADER_)
        with gr.Row(variant="panel"):
            with gr.Column():
                d1 = gr.Slider(0, nb_samples - 1, value=0, label="Training sample id",
                               info="Choose between 0 and " + str(nb_samples - 1))
                output1 = gr.Text(label="Training sample info")
            with gr.Column():
                d2 = gr.Dropdown(field_names_train, value=field_names_train[0], label="Field name")
                output2 = gr.Image(label="Training sample visualization")

        # refresh both outputs whenever the sample id or the field name changes
        d1.input(sample_info, [d1, d2], [output1, output2])
        d2.input(sample_info, [d1, d2], [output1, output2])

    demo.launch()