# NOTE(review): removed non-Python extraction residue (a file-size banner,
# commit-hash lines, and a duplicated line-number gutter) that preceded the
# code and made this file unparseable as Python.

import os

import google.generativeai as genai
import gradio as gr

# Configure Google Gemini API.
# SECURITY FIX: an API key was previously hard-coded here and committed to
# source control. Read it from the environment instead; set GOOGLE_API_KEY
# before launching the app (and rotate the leaked key).
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY", ""))

# Function to get a response from the Google Gemini model
def get_gemini_response(input_text, context):
    """Query the Gemini model and return ``(reply_text, new_context)``.

    Args:
        input_text: The user's story (first turn) or a follow-up question.
        context: Accumulated conversation context; falsy on the first turn.

    Returns:
        Tuple of the model's reply text and the updated context string.
    """
    # Ask for a richer story on the very first turn only; follow-ups with
    # existing context may legitimately be short questions.
    if len(input_text.split()) < 10 and not context:
        return "Please provide a more detailed user story to help generate relevant needs and wants.", context

    if not context:  # Initial request to extract needs and wants
        input_prompt = f"""
    Based on the user story "{input_text}", briefly extract any unarticulated needs and wants.
    
    Only provide essential needs and wants directly relevant to the given story. Do not speculate or over-extrapolate.
    
    Needs and Wants:
    """
    else:  # Follow-up question based on existing needs and wants
        input_prompt = f"""
        Context: {context}
        Question: {input_text}
        Answer:
        """

    # Generate the content based on text input.
    model = genai.GenerativeModel('gemini-1.5-flash')
    # BUG FIX: input_text is already interpolated into input_prompt, so the
    # original call generate_content([input_text, input_prompt]) sent the
    # user's story to the model twice. Send only the assembled prompt.
    response = model.generate_content(input_prompt)
    # Grow the context so follow-up questions can see earlier answers.
    new_context = response.text if not context else context + " " + response.text
    return response.text, new_context

# Gradio interface function with state
def extract_needs_and_wants(user_story, context=''):
    """Gradio callback: forward the story or follow-up question to Gemini.

    On any failure the error text is surfaced to the UI and the stored
    context is returned unchanged.
    """
    try:
        reply, updated_context = get_gemini_response(user_story, context)
    except Exception as e:
        return f"Error: {str(e)}", context
    return reply, updated_context

# Create the Gradio interface with state.
# The paired gr.State input/output threads the conversation context
# between calls without displaying it in the UI.
interface = gr.Interface(
    fn=extract_needs_and_wants,
    inputs=[
        gr.Textbox(label="Enter your story or follow-up question"),
        gr.State(),
    ],
    outputs=[
        gr.Textbox(label="Extracted Information"),
        gr.State(),
    ],
    title="Unarticulated Needs & Wants Extractor",
    description=(
        "**Author:** VictorDaniel\n\n"
        "Enter a detailed user story to extract the unarticulated needs "
        "and wants or ask follow-up questions."
    ),
    examples=[["The user often speaks about wanting to improve their health but is hesitant to join a gym"]],
)

# share=True publishes a temporary public URL for the demo.
interface.launch(share=True)