import gradio as gr
import requests
import json
import os

# Retrieve the API key from environment variables
API_KEY = os.getenv('API_KEY')

# Hugging Face Inference API endpoint for the CodeLlama-7b model
API_URL = "https://api-inference.huggingface.co/models/meta-llama/CodeLlama-7b-hf"
headers = {"Authorization": f"Bearer {API_KEY}"}
def generate_response(prompt):
    """Send the prompt to the Inference API and return the parsed response."""
    data = {
        "inputs": prompt
    }
    response = requests.post(API_URL, headers=headers, json=data)
    if response.status_code == 200:
        return response.json()
    else:
        return f"Error: {response.status_code}\n{response.text}"
def main(prompt):
    response = generate_response(prompt)
    # Errors come back as plain strings; successful responses are JSON-serializable
    if isinstance(response, str):
        return response
    else:
        return json.dumps(response, indent=2)
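
# An optional post-processing sketch (assumption, not part of the original Space):
# the text-generation endpoint usually returns a list like
# [{"generated_text": "..."}], so the raw JSON dump in main() could be replaced
# with a helper along these lines to show only the model's text.
def extract_generated_text(response):
    if isinstance(response, list) and response and "generated_text" in response[0]:
        return response[0]["generated_text"]
    return json.dumps(response, indent=2)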
iface = gr.Interface(
    fn=main,
    inputs=gr.Textbox(lines=4, placeholder="Enter your prompt here..."),
    outputs=gr.Textbox(lines=10),
    title="Code Assistant",
    description="Enter your prompt and get responses from the model."
)
if __name__ == "__main__":
    iface.launch()
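
# A usage sketch (assumed, not part of the original Space): with the app running
# locally on Gradio's default port, it can be queried programmatically via
# gradio_client. The endpoint name "/predict" is Gradio's default for a single
# Interface and is an assumption here.
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860")
#   result = client.predict("Write a Python function that reverses a string.",
#                           api_name="/predict")
#   print(result)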