Spaces:
No application file
te
- app.py +23 -0
- requirements.txt +2 -0
app.py
ADDED
@@ -0,0 +1,23 @@
+import gradio as gr
+from transformers import pipeline, set_seed
+import os
+
+
+
+# Your Hugging Face API token, read from the Space secret "LLama2Secret"
+api_key = os.environ.get("LLama2Secret")
+
+
+# Initialize the pipeline with the LLaMA 2-7B chat model
+model_name = "meta-llama/Llama-2-7b-chat-hf"  # transformers-format checkpoint
+# model_name = "meta-llama/Llama-2-7b-chat"  # original Meta-format repo
+chat_model = pipeline("text-generation", model=model_name, use_auth_token=api_key)
+
+def ask_llama2(question):
+    set_seed(42)  # fixed seed for reproducible sampling
+    responses = chat_model(question, max_length=50)
+    return responses[0]['generated_text']
+
+iface = gr.Interface(fn=ask_llama2, inputs="text", outputs="text", title="Chat with LLaMA 2")
+
+iface.launch()
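One behaviour worth noting: a text-generation pipeline returns the prompt followed by the completion, so ask_llama2 echoes the question back at the start of its answer. A small variant that drops the echo (a sketch, not part of this commit; return_full_text is a standard text-generation pipeline argument):

```python
def ask_llama2_no_echo(question):
    # return_full_text=False asks the pipeline to return only the newly
    # generated tokens instead of prompt + completion.
    responses = chat_model(question, max_length=50, return_full_text=False)
    return responses[0]['generated_text']
```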
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+transformers
+torch
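requirements.txt lists only transformers and torch; gradio itself is preinstalled on Gradio-SDK Spaces, so it does not need to be pinned here (it would for a local run). Once the Space is up, the interface can also be queried programmatically. A minimal sketch using the gradio_client package, with a placeholder URL and the default /predict endpoint that gr.Interface exposes:

```python
from gradio_client import Client

# The URL is a placeholder; substitute the Space's actual *.hf.space address.
client = Client("https://<user>-<space>.hf.space")
answer = client.predict("What is the capital of France?", api_name="/predict")
print(answer)
```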