import gradio as gr
from transformers import AutoModel, AutoProcessor
from PIL import Image
import torch
import requests
from io import BytesIO

# Load model and processor from Hugging Face
# NOTE: this assumes the checkpoint is packaged for transformers' Auto classes;
# if loading fails here, the OpenLRM repository's own inference code may be required.
model = None
processor = None
try:
    model = AutoModel.from_pretrained("zxhezexin/openlrm-mix-large-1.1")
    processor = AutoProcessor.from_pretrained("zxhezexin/openlrm-mix-large-1.1")
except Exception as e:
    print(f"Error loading model or processor: {e}")

# Example image URL (replace this with a suitable example)
example_image_url = "https://huggingface.co/datasets/nateraw/image-folder/resolve/main/example_1.png"

# Download the example image and save it locally so its file path can be used as a Gradio example
def load_example_image(path="example.png"):
    try:
        response = requests.get(example_image_url, timeout=10)
        response.raise_for_status()
        image = Image.open(BytesIO(response.content))
        image.save(path)
        return path
    except Exception as e:
        print(f"Error loading example image: {e}")
        return None

# Generate 3D output from a 2D image
def image_to_3d(image):
    if model is None or processor is None:
        return "Model or processor failed to load; check the startup logs."
    try:
        # Preprocess the input image
        inputs = processor(images=image, return_tensors="pt")
        # Run inference
        with torch.no_grad():
            outputs = model(**inputs)
        # Placeholder return; replace this with actual 3D visualization logic
        return "3D model generated from input image!"
    except Exception as e:
        return f"Error during inference: {e}"

# Load the example image for the Gradio interface
example_image_path = load_example_image()

# Gradio interface setup
interface = gr.Interface(
    fn=image_to_3d,
    inputs=gr.Image(type="pil", label="Upload an Image or use Example"),
    outputs="text",  # Placeholder output (replace with 3D rendering if needed)
    title="OpenLRM Mix-Large 1.1 - Image to 3D",
    description="Upload an image to generate a 3D model using OpenLRM Mix-Large 1.1.",
    examples=[[example_image_path]] if example_image_path else None,  # Include the example image if it downloaded
)

# Launch the Gradio interface
interface.launch()
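
# --- Optional: returning a viewable 3D asset instead of placeholder text ---
# A minimal sketch (not OpenLRM's actual export pipeline) of how the placeholder
# string returned by image_to_3d() could be replaced with a mesh file that Gradio's
# gr.Model3D component can render in the browser. The _write_placeholder_mesh()
# helper below is hypothetical and only writes a hard-coded triangle; a real app
# would export the model's outputs to .obj/.glb instead. To wire it in, move this
# code above the gr.Interface(...) call, pass fn=image_to_3d_mesh, and set
# outputs=gr.Model3D(label="Generated 3D Model").

def _write_placeholder_mesh(path="placeholder.obj"):
    # Hard-coded single-triangle Wavefront OBJ file, standing in for a real mesh export.
    with open(path, "w") as f:
        f.write("v 0 0 0\nv 1 0 0\nv 0 1 0\nf 1 2 3\n")
    return path

def image_to_3d_mesh(image):
    # Same preprocessing and inference as image_to_3d(), but returns a mesh file path
    # (which gr.Model3D accepts) instead of a status string.
    if model is None or processor is None:
        raise gr.Error("Model or processor failed to load.")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)  # real code would convert `outputs` into the mesh file
    return _write_placeholder_mesh()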