Spaces: Running on Zero
respond_to_question_with_llama
app.py CHANGED
@@ -38,6 +38,22 @@ def respond_to_question(transcript, question):
 
     return response
 
+@spaces.GPU
+def respond_to_question_llama(transcript, question):
+    from huggingface_hub import InferenceClient
+
+    client = InferenceClient(
+        "meta-llama/Meta-Llama-3.1-8B-Instruct",
+        token=os.environ["HUGGINGFACEHUB_API_KEY"],
+    )
+
+    response = client.chat_completion(
+        messages=[{"role": "user", "content": f"Transcript: {transcript}\n\nUser: {question}"}],
+        max_tokens=500,
+    ).choices[0].message.content
+
+    return response
+
 @spaces.GPU
 def audio_transcribe(inputs):
     if inputs is None:
@@ -63,7 +79,7 @@ with gr.Blocks() as transcriberUI:
 
     def ask_question_callback(transcription,question):
         if ask_question:
-            response = 
+            response = respond_to_question_llama(transcription, question)
             response_output.value = response
         else:
             response_output.value = "No question asked"
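For context, the call pattern added above can be tried outside the Space with a short standalone script. This is a minimal sketch, assuming huggingface_hub is installed, HUGGINGFACEHUB_API_KEY holds a token with access to meta-llama/Meta-Llama-3.1-8B-Instruct, and the transcript and question below are placeholder values rather than anything from app.py.

# Standalone sketch of the chat_completion call added in this commit.
# Assumes: huggingface_hub is installed and HUGGINGFACEHUB_API_KEY is set;
# the transcript/question values are placeholders, not Space data.
import os

from huggingface_hub import InferenceClient

client = InferenceClient(
    "meta-llama/Meta-Llama-3.1-8B-Instruct",
    token=os.environ["HUGGINGFACEHUB_API_KEY"],
)

transcript = "Speaker 1: The demo is rescheduled to Friday at 10am."
question = "When is the demo?"

# Single-turn chat completion; the transcript is inlined into the user message,
# mirroring the prompt format used in respond_to_question_llama.
response = client.chat_completion(
    messages=[{"role": "user", "content": f"Transcript: {transcript}\n\nUser: {question}"}],
    max_tokens=500,
).choices[0].message.content

print(response)

On the Space itself, the token is read from os.environ, so HUGGINGFACEHUB_API_KEY needs to be available as an environment variable (for example, as a Space secret) for the new function to work.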