Spaces: VictorDanielG (Running)

Commit b92e7d5 · Parent(s): 17eaf89 · committed by VictorDanielG

Update app.py
app.py CHANGED
@@ -1,80 +1,50 @@
 import gradio as gr
-import openai
-import pinecone
-import torch
-import langchain as lc
-from langchain.embeddings import HuggingFaceEmbeddings
-from langchain.vectorstores import Pinecone
-from langchain.chains import ConversationalRetrievalChain
-from langchain.agents import initialize_agent, load_tools
-from google.generativeai import genai
-
-# Configure
-
-llama_model = AutoModelForCausalLM.from_pretrained(llama_model_id, torch_dtype=torch.bfloat16)
-
-# Embeddings for vector storage
-embedding_model = HuggingFaceEmbeddings("sentence-transformers/all-MiniLM-L6-v2")
-
-# Pinecone vector database setup
-pinecone_index = pinecone.Index("your-index-name")
-vector_db = Pinecone(index=pinecone_index, embedding_function=embedding_model.embed_query, text_key="text")
-
-# Define RAG chain using LangChain's ConversationalRetrievalChain
-rag_chain = ConversationalRetrievalChain.from_llm(
-    llm=llama_model,
-    retriever=vector_db.as_retriever(),
-    memory=lc.memory.ConversationBufferMemory()
-)
-
-# Function for Transcription, RAG, and Multilingual Response
-def process_input(input_text=None, input_audio=None):
-    # Step 1: Process Audio Input (if any) with Whisper
-    if input_audio:
-        transcription = whisper_model(input_audio)["text"]
-        input_text = transcription  # Use transcription as input for LLaMA 3.2 processing
-
-    # Step 3: Pass text to LLaMA 3.2 for primary processing
-    prompt = f"""
-    From the following user story, extract the unarticulated needs and wants.
-    User story: {input_text}
-
-    Needs are unspoken requirements or desires, while wants are explicit wishes.
+import google.generativeai as genai
+
+# Configure Google Gemini API
+genai.configure(api_key="AIzaSyDBGF5y7WqDB0SO7-UO6yjshiEZN3Hpt3g")  # Replace with your actual API key
+
+# Function to get a response from the Google Gemini model
+def get_gemini_response(input_text):
+    # Check if the input is detailed enough
+    if len(input_text.split()) < 10:
+        return "Please provide a more detailed user story to help generate relevant needs and wants."
+
+    # Concise prompt to limit output to essential insights
+    input_prompt = f"""
+    Based on the user story "{input_text}", briefly extract any unarticulated needs and wants.
+
+    Only provide essential needs and wants directly relevant to the given story. Do not speculate or over-extrapolate.
+
+    Needs and Wants:
     """
-    inputs = tokenizer(prompt, return_tensors="pt")
-    llama_output = llama_model.generate(**inputs, max_new_tokens=100)
-    llama_response = tokenizer.decode(llama_output[0], skip_special_tokens=True)
-
-        outputs=output_text
-)
-
-# Launch the Gradio interface
+
+    # Generate the content based on text input
+    model = genai.GenerativeModel('gemini-1.5-flash')
+    response = model.generate_content([input_text, input_prompt])
+    return response.text
+
+# Gradio interface function
+def extract_needs_and_wants(user_story):
+    try:
+        return get_gemini_response(user_story)
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+# Create the Gradio interface
+import gradio as gr
+
+interface = gr.Interface(
+    fn=extract_needs_and_wants,
+    inputs="text",
+    outputs="text",
+    title="Unarticulated Needs & Wants Extractor",
+    description="**Author:** VictorDaniel\n\nEnter a detailed user story to extract the unarticulated needs and wants.",
+    examples=[["The user often speaks about wanting to improve their health but is hesitant to join a gym."]]
+)
+
 interface.launch()
+
+
+# Launch the Gradio app
+interface.launch()
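For reference, the removed RAG version survives only partially above (the Whisper and tokenizer setup lines did not come through). A minimal sketch of how that pipeline would typically be wired with the legacy LangChain APIs the old file imports; the model id, Pinecone credentials, and the HuggingFacePipeline wrapper are assumptions, not recovered from the commit:

# Sketch only: legacy LangChain + Pinecone wiring similar to the removed app.py.
import pinecone
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.llms import HuggingFacePipeline
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Pinecone
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain

llama_model_id = "meta-llama/Llama-3.2-1B-Instruct"  # placeholder model id
tokenizer = AutoTokenizer.from_pretrained(llama_model_id)
llama_model = AutoModelForCausalLM.from_pretrained(llama_model_id, torch_dtype=torch.bfloat16)
# ConversationalRetrievalChain expects a LangChain LLM, so wrap the raw model in a pipeline.
llm = HuggingFacePipeline(
    pipeline=pipeline("text-generation", model=llama_model, tokenizer=tokenizer, max_new_tokens=100)
)

# Legacy pinecone-client setup; key and environment are placeholders.
pinecone.init(api_key="YOUR_PINECONE_KEY", environment="YOUR_PINECONE_ENV")
pinecone_index = pinecone.Index("your-index-name")
embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vector_db = Pinecone(index=pinecone_index, embedding_function=embedding_model.embed_query, text_key="text")

# RAG chain with conversational memory; the Whisper transcription step from the old file is omitted here.
rag_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=vector_db.as_retriever(),
    memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True),
)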
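The committed app.py hardcodes the Gemini API key in source and repeats both `import gradio as gr` and `interface.launch()`. A minimal equivalent sketch that reads the key from an environment variable instead; the variable name GOOGLE_API_KEY (e.g. set as a Space secret) is an assumption, not part of the commit:

# Sketch only: same app, with the API key taken from the environment.
import os

import google.generativeai as genai
import gradio as gr

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])  # assumed env var / Space secret

def extract_needs_and_wants(user_story: str) -> str:
    # Require a reasonably detailed story before calling the model.
    if len(user_story.split()) < 10:
        return "Please provide a more detailed user story to help generate relevant needs and wants."
    prompt = (
        f'Based on the user story "{user_story}", briefly extract any unarticulated needs and wants. '
        "Only provide essential needs and wants directly relevant to the given story."
    )
    try:
        model = genai.GenerativeModel("gemini-1.5-flash")
        response = model.generate_content(prompt)
        return response.text
    except Exception as e:
        return f"Error: {e}"

interface = gr.Interface(
    fn=extract_needs_and_wants,
    inputs="text",
    outputs="text",
    title="Unarticulated Needs & Wants Extractor",
)

interface.launch()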