Commit · 22fac7a
Parent(s): fd02d2d
last ig
app.py CHANGED
@@ -1,80 +1,116 @@
 import streamlit as st
-from openai import OpenAI
 import os
+from openai import OpenAI
 
-#
+# Set up NVIDIA API client
 client = OpenAI(
-    base_url
+    base_url="https://integrate.api.nvidia.com/v1",
     api_key=os.environ.get("NVIDIA_API_KEY")
 )
-
+# Streamlit UI
+st.title("AI-Powered Text Generation App")
+st.write("Interact with an AI model to generate text based on your inputs.")
+
+# Response specification features
+st.markdown("## 🛠️ Response Specification Features")
+st.markdown("*The expanders below are parameters that you can adjust to customize the AI response.*")
+
+with st.expander("🎨 *Temperature (Creativity Control)*"):
+    st.write("""
+    This parameter controls the *creativity* of the AI's responses:
+    - *0.0*: Always the same response (deterministic).
+    - *0.1 - 0.3*: Mostly factual and repetitive.
+    - *0.4 - 0.7*: Balanced between coherence and creativity.
+    - *0.8 - 1.0*: Highly creative but less predictable.
+    """)
+
+with st.expander("*Max Tokens (Response Length)*"):
+    st.write("Defines the maximum number of words/subwords in the response.")
+
+with st.expander("🎯 *Top-p (Nucleus Sampling)*"):
+    st.write("""
+    Controls word diversity by sampling from top-probability tokens:
+    - **High top_p + Low temperature** → More factual, structured responses.
+    - **High top_p + High temperature** → More diverse, unexpected responses.
+    """)
+
+with st.expander("*Number of Responses*"):
+    st.write("Specifies how many response variations the AI should generate.")
+
+with st.expander("✅ *Fact-Checking*"):
+    st.write("""
+    - If *enabled*, AI prioritizes factual accuracy.
+    - If *disabled*, AI prioritizes creativity.
+    """)
+
+st.markdown("""
+### *Summary*
+- temperature → Adjusts *creativity vs accuracy*.
+- max_tokens → Defines *response length*.
+- top_p → Fine-tunes *word diversity*.
+- fact_check → Ensures *factual correctness* (but may reduce fluency).
+- num_responses → Generates *different variations* of the same prompt.
+""")
+st.title("Jephone AI app")
+# Function to query the AI model (based on your friend's code)
+def query_ai_model(prompt, model="meta/llama-3.1-405b-instruct", temperature=0.7, max_tokens=512, top_p=0.9, fact_check=False, num_responses=1):
+    responses = []
+
     try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-        responses = [choice.message.content for choice in completion.choices]
-        return responses
+        if fact_check:
+            prompt = "Ensure factual accuracy. " + prompt
+
+        for _ in range(num_responses):  # Response loop for multiple responses
+            completion = client.chat.completions.create(
+                model=model,
+                messages=[{"role": "user", "content": prompt}],
+                temperature=temperature,
+                top_p=top_p,
+                max_tokens=max_tokens
+            )
+            response = completion.choices[0].message.content
+            responses.append(response)
 
     except Exception as e:
-        st.error(f"
+        st.error(f"An error occurred: {str(e)}")
+
+    return responses  # Return a list of responses
 
-# Streamlit
-st.
-st.write("Interact with an AI model to generate customized text.")
+# Input Fields for Streamlit UI
+user_input = st.text_area("Your Prompt:", placeholder="Type something...")
 
-#
-
+# Dropdown Menus
+output_format = st.selectbox("Select Output Format:", ["Story", "Poem", "Article", "Code"])
+tone_style = st.selectbox("Select Tone/Style:", ["Formal", "Informal", "Humorous", "Technical"])
 
-#
-
-
-    ["Story", "Poem", "Article", "Code"],
-    index=0
-)
+# Sliders
+creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, value=0.7, step=0.1)
+max_length = st.slider("Max Length (tokens):", min_value=100, max_value=1024, value=512, step=50)
 
-#
-
-
-
-
-)
+# Numeric Inputs
+num_responses = st.number_input("Number of Responses:", min_value=1, max_value=5, value=1, step=1)
+
+# Checkboxes
+enable_creativity = st.checkbox("Enable Creative Mode", value=True)
+fact_checking = st.checkbox("Enable Fact-Checking")
+
+# Button to generate response
+if st.button("Generate Answer"):
+    if user_input.strip():
+        with st.spinner("Generating response..."):
+            full_prompt = f"Format: {output_format}\nTone: {tone_style}\nPrompt: {user_input}"
+            ai_responses = query_ai_model(
+                full_prompt,
+                temperature=creativity_level if enable_creativity else 0.2,
+                max_tokens=max_length,
+                top_p=0.9 if enable_creativity else 0.7,
+                fact_check=fact_checking,
+                num_responses=num_responses
+            )
 
-
-
-
-
-# Numeric input for number of responses
-num_responses = st.number_input("Number of responses:", min_value=1, max_value=5, value=1, step=1)
-
-# Checkboxes for additional features
-creative_mode = st.checkbox("Enable creative mode")
-fact_checking = st.checkbox("Enable fact-checking")
-
-# Button to trigger AI query
-if st.button("Generate Text"):
-    if user_prompt.strip():
-        st.subheader("Generated Text:")
-        responses = query_ai(user_prompt, output_format, tone, temperature, max_length, num_responses)
-
-        for idx, response in enumerate(responses):
-            st.text_area(f"Response {idx + 1}", value=response, height=200)
+        st.success("AI Responses:")
+        for i, response in enumerate(ai_responses, 1):
+            st.markdown(f"### Response {i}")
+            st.write(response)
     else:
-        st.warning("Please enter a prompt before clicking the button.")
-
-# Feedback mechanism
-st.write("### Feedback")
-feedback = st.radio("Did you like the generated text?", ("Yes", "No"))
-if feedback == "Yes":
-    st.success("Thank you for your feedback!")
-elif feedback == "No":
-    st.warning("We appreciate your feedback and will work to improve.")
+        st.warning("Please enter a prompt before clicking the button.")
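For context, the pattern this commit settles on is the standard OpenAI Python client pointed at NVIDIA's OpenAI-compatible endpoint. Below is a minimal sketch of that call outside Streamlit; the prompt text and the 128-token cap are illustrative, and it assumes NVIDIA_API_KEY is set in the environment and that meta/llama-3.1-405b-instruct is available on the endpoint:

import os
from openai import OpenAI

# Standard OpenAI client redirected to NVIDIA's OpenAI-compatible API
# (same base_url and env var as in app.py; assumes NVIDIA_API_KEY is set).
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY"),
)

# One chat completion, mirroring the knobs query_ai_model exposes.
completion = client.chat.completions.create(
    model="meta/llama-3.1-405b-instruct",
    messages=[{"role": "user", "content": "Write a two-line poem about the sea."}],  # illustrative prompt
    temperature=0.7,  # creativity vs. determinism
    top_p=0.9,        # nucleus-sampling cutoff
    max_tokens=128,   # illustrative cap on response length
)
print(completion.choices[0].message.content)

The removed version read every choice from a single completion (responses = [choice.message.content for choice in completion.choices]), which depends on the backend honoring a multi-choice request; looping num_responses times, as the new query_ai_model does, trades extra round trips for not relying on that.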