Increase input size, use Llama 2, and add SDXL image
- app.py +13 -1
- global_config.py +4 -4
- strings.json +1 -1
app.py
CHANGED
@@ -1,3 +1,4 @@
+import base64
 import os
 import pathlib
 import json5
@@ -281,7 +282,7 @@ def show_bonus_stuff(ppt_headers: List[str]):
     :param ppt_headers: A list of the slide headings.
     """
 
-    # Use the presentation title and the
+    # Use the presentation title and the slide headers to find relevant info online
     logging.info('Calling Metaphor search...')
     ppt_text = ' '.join(ppt_headers)
     search_results = get_web_search_results_wrapper(ppt_text)
@@ -294,6 +295,17 @@ def show_bonus_stuff(ppt_headers: List[str]):
     st.markdown('\n\n'.join(md_text_items))
 
     # Avoid image generation. It costs time and an API call, so just limit to the text generation.
+    with st.expander('AI-generated image on the presentation topic'):
+        logging.info('Calling SDXL for image generation...')
+        # img_empty.write('')
+        # img_text.write(APP_TEXT['image_info'])
+        image = get_ai_image_wrapper(ppt_text)
+
+        if len(image) > 0:
+            image = base64.b64decode(image)
+            st.image(image, caption=ppt_text)
+            st.info('Tip: Right-click on the image to save it.', icon="💡️")
+            logging.info('Image added')
 
 
 def main():
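The new block in show_bonus_stuff() boils down to: take the base64 string that get_ai_image_wrapper() is assumed to return, decode it, and render it inside an expander. A minimal standalone sketch of that flow (the function name and parameters below are illustrative, not part of this commit):

```python
import base64
import logging

import streamlit as st


def show_generated_image(prompt: str, b64_image: str) -> None:
    """Render a base64-encoded image, mirroring the new branch in the diff."""
    with st.expander('AI-generated image on the presentation topic'):
        if len(b64_image) == 0:
            # Nothing came back from the image wrapper; leave the expander empty
            return

        # The wrapper is assumed to return the image bytes as a base64 string,
        # so decode before handing the raw bytes to st.image()
        image_bytes = base64.b64decode(b64_image)
        st.image(image_bytes, caption=prompt)
        st.info('Tip: Right-click on the image to save it.', icon='💡️')
        logging.info('Image added')
```

Checking for an empty string before decoding mirrors the len(image) > 0 guard in the diff, so a failed or skipped generation simply leaves the expander empty.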
global_config.py
CHANGED
@@ -9,9 +9,9 @@ load_dotenv()
 @dataclass(frozen=True)
 class GlobalConfig:
     CLARIFAI_PAT = os.environ.get('CLARIFAI_PAT', '')
-    CLARIFAI_USER_ID = '
-    CLARIFAI_APP_ID = '
-    CLARIFAI_MODEL_ID = 'llama2-13b-
+    CLARIFAI_USER_ID = 'clarifai'
+    CLARIFAI_APP_ID = 'ml'
+    CLARIFAI_MODEL_ID = 'llama2-13b-alternative-4k'
 
     CLARIFAI_USER_ID_GPT = 'openai'
     CLARIFAI_APP_ID_GPT = 'chat-completion'
@@ -25,7 +25,7 @@ class GlobalConfig:
     # LLM_MODEL_TEMPERATURE: float = 0.5
     LLM_MODEL_MIN_OUTPUT_LENGTH: int = 50
     LLM_MODEL_MAX_OUTPUT_LENGTH: int = 2000
-    LLM_MODEL_MAX_INPUT_LENGTH: int =
+    LLM_MODEL_MAX_INPUT_LENGTH: int = 1500
 
     METAPHOR_API_KEY = os.environ.get('METAPHOR_API_KEY', '')
 
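With LLM_MODEL_MAX_INPUT_LENGTH raised to 1500, the app presumably clips the user's topic description to that budget before building the Llama 2 prompt. How the limit is enforced is not shown in this diff; the helper below is only an assumed sketch built on the new config values:

```python
from global_config import GlobalConfig


def clip_topic(topic: str) -> str:
    """
    Trim the user-supplied topic to the configured input budget.

    Hypothetical helper: the commit only raises the limit; enforcement is assumed.
    """
    return topic.strip()[:GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH]


# The same config object now points at the 4k-context Llama 2 model
print(GlobalConfig.CLARIFAI_MODEL_ID)  # llama2-13b-alternative-4k
```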
strings.json
CHANGED
@@ -14,7 +14,7 @@
       "Since you have come this far, we have unlocked some more good stuff for you!"
     ],
     "input_labels": [
-      "**Describe the topic of the presentation using 10 to
+      "**Describe the topic of the presentation using 10 to 1500 characters. Avoid mentioning the count of slides.**"
     ],
     "button_labels": [
       "Generate contents",
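The updated label advertises the 10-to-1500-character range to the user. One way to keep the input widget in sync with the config is sketched below; how app.py actually loads and wires up strings.json is not shown in this diff, so treat the file path and loading code as assumptions:

```python
import json5
import streamlit as st

from global_config import GlobalConfig

# Path and loading code are assumptions; only the label text comes from this diff
with open('strings.json', 'r', encoding='utf-8') as in_file:
    APP_TEXT = json5.load(in_file)

# Cap the widget at the same 1500-character budget as LLM_MODEL_MAX_INPUT_LENGTH
topic = st.text_area(
    APP_TEXT['input_labels'][0],
    max_chars=GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH,
)
```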