import os

import streamlit as st
from groq import Groq

client = Groq(
    api_key=os.environ.get("GROQ_API_KEY"),
)

query = st.text_input("Enter your query")

chat_completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": query,
        }
    ],
    model="mixtral-8x7b-32768",
)

# Only one choice is returned by default, so index 0 holds the full response.
print(chat_completion.choices[0].message.content)
print(chat_completion)  # Full response object, useful for debugging

# # Text to 3D
# import streamlit as st
# import torch
# from diffusers import ShapEPipeline
# from diffusers.utils import export_to_gif

# # Model loading (ideally done once at the start for efficiency)
# ckpt_id = "openai/shap-e"

# @st.cache_resource  # Caches the model for faster subsequent runs
# def load_model():
#     return ShapEPipeline.from_pretrained(ckpt_id).to("cuda")

# pipe = load_model()

# # App Title
# st.title("Shark 3D Image Generator")

# # User Inputs
# prompt = st.text_input("Enter your prompt:", "a shark")
# guidance_scale = st.slider("Guidance Scale", 0.0, 20.0, 15.0, step=0.5)

# # Generate and Display Images
# if st.button("Generate"):
#     with st.spinner("Generating images..."):
#         images = pipe(
#             prompt,
#             guidance_scale=guidance_scale,
#             num_inference_steps=64,
#             size=256,
#         ).images
#     gif_path = export_to_gif(images, "shark_3d.gif")
#     st.image(images[0])  # Display the first image
#     st.success("GIF saved as shark_3d.gif")
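
# --- Streamlit-native output (a minimal sketch, not part of the original script) ---
# The active code above prints the completion to the console and calls the API even
# when the text box is empty. When run via `streamlit run`, a common pattern is to
# gate the call on a non-empty query and render the answer with st.write(). The
# gating and spinner text below are assumptions; `client`, `query`, and the model
# name are reused from the code above.
#
# if query:
#     with st.spinner("Querying Groq..."):
#         response = client.chat.completions.create(
#             messages=[{"role": "user", "content": query}],
#             model="mixtral-8x7b-32768",
#         )
#     st.write(response.choices[0].message.content)  # Show the answer in the app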