Added `Token limit` & `Stop sequence`
README.md CHANGED

```diff
@@ -1,5 +1,5 @@
 ---
-title:
+title: Gemini Playground
 emoji: 💬
 colorFrom: pink
 colorTo: purple
```
app.py CHANGED

```diff
@@ -19,11 +19,19 @@ DUPLICATE = """
 print("google-generativeai:", genai.__version__)
 
 
+def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
+    if not stop_sequences:
+        return None
+    return [sequence.strip() for sequence in stop_sequences.split(",")]
+
+
 def predict(
     google_key: str,
     text_prompt: str,
     image_prompt: Optional[Image.Image],
     temperature: float,
+    max_output_tokens: int,
+    stop_sequences: str,
     chatbot: List[Tuple[str, str]]
 ) -> Tuple[str, List[Tuple[str, str]]]:
     if not google_key:
@@ -32,7 +40,10 @@ def predict(
         "Please follow the instructions in the README to set it up.")
 
     genai.configure(api_key=google_key)
-    generation_config = genai.types.GenerationConfig(
+    generation_config = genai.types.GenerationConfig(
+        temperature=temperature,
+        max_output_tokens=max_output_tokens,
+        stop_sequences=preprocess_stop_sequences(stop_sequences=stop_sequences))
 
     if image_prompt is None:
         model = genai.GenerativeModel('gemini-pro')
```
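For reference, here is a minimal standalone sketch of what the new helper and generation config do. It assumes only that the `google-generativeai` package is installed; the sample stop-sequence string and parameter values are illustrative and not taken from the app.

```python
from typing import List, Optional

import google.generativeai as genai


def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
    # Same logic as the helper added above: an empty string disables stop
    # sequences, otherwise the comma-separated entries are split and stripped.
    if not stop_sequences:
        return None
    return [sequence.strip() for sequence in stop_sequences.split(",")]


print(preprocess_stop_sequences(""))           # -> None
print(preprocess_stop_sequences("STOP, END"))  # -> ['STOP', 'END']

# Illustrative values standing in for the Gradio inputs to predict().
generation_config = genai.types.GenerationConfig(
    temperature=0.4,
    max_output_tokens=1024,
    stop_sequences=preprocess_stop_sequences("STOP, END"))
```

The remaining hunks add the matching controls to the Gradio UI.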
```diff
@@ -71,16 +82,42 @@ run_button_component = gr.Button()
 temperature_component = gr.Slider(
     minimum=0,
     maximum=1.0,
-    value=0.
+    value=0.4,
     step=0.05,
     label="Temperature",
-    info=
+    info=(
+        "Temperature controls the degree of randomness in token selection. Lower "
+        "temperatures are good for prompts that expect a true or correct response, "
+        "while higher temperatures can lead to more diverse or unexpected results. "
+    ))
+max_output_tokens_component = gr.Slider(
+    minimum=1,
+    maximum=2048,
+    value=1024,
+    step=1,
+    label="Token limit",
+    info=(
+        "Token limit determines the maximum amount of text output from one prompt. A "
+        "token is approximately four characters. The default value is 2048."
+    ))
+stop_sequences_component = gr.Textbox(
+    label="Add stop sequence",
+    value="",
+    type="text",
+    placeholder="STOP, END",
+    info=(
+        "A stop sequence is a series of characters (including spaces) that stops "
+        "response generation if the model encounters it. The sequence is not included "
+        "as part of the response. You can add up to five stop sequences."
+    ))
 
 inputs = [
     google_key_component,
     text_prompt_component,
     image_prompt_component,
     temperature_component,
+    max_output_tokens_component,
+    stop_sequences_component,
     chatbot_component
 ]
 
```
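As an aside, the app builds its components at module level and attaches them to the layout later with `.render()`. The sketch below shows that pattern in a self-contained form; `token_limit_component`, `echo`, and the other names here are hypothetical stand-ins for the real components and `predict`. The final hunk then renders the two new components inside the existing `Parameters` accordion.

```python
import gradio as gr

# Components created up front (outside the layout), mirroring the app's style.
token_limit_component = gr.Slider(
    minimum=1, maximum=2048, value=1024, step=1, label="Token limit")
stop_sequences_component = gr.Textbox(
    label="Add stop sequence", placeholder="STOP, END")
output_component = gr.Textbox(label="Echo")
run_button_component = gr.Button("Run")


def echo(max_output_tokens: int, stop_sequences: str) -> str:
    # Toy callback standing in for predict(); it just reports what it received.
    return f"max_output_tokens={max_output_tokens}, stop_sequences={stop_sequences!r}"


with gr.Blocks() as demo:
    with gr.Accordion("Parameters", open=False):
        token_limit_component.render()
        stop_sequences_component.render()
    output_component.render()
    run_button_component.render()
    run_button_component.click(
        fn=echo,
        inputs=[token_limit_component, stop_sequences_component],
        outputs=output_component)

if __name__ == "__main__":
    demo.launch()
```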
```diff
@@ -96,6 +133,8 @@ with gr.Blocks() as demo:
     run_button_component.render()
     with gr.Accordion("Parameters", open=False):
         temperature_component.render()
+        max_output_tokens_component.render()
+        stop_sequences_component.render()
 
     run_button_component.click(
         fn=predict,
```