barunsaha committed
Commit 9d8752c · unverified · 2 Parent(s): 6269917 813ce6e

Merge pull request #51 from barun-saha/visual

app.py CHANGED
@@ -20,7 +20,6 @@ from langchain_core.prompts import ChatPromptTemplate
 sys.path.append('..')
 sys.path.append('../..')
 
-import helpers.icons_embeddings as ice
 from global_config import GlobalConfig
 from helpers import llm_helper, pptx_helper, text_helper
 
@@ -56,25 +55,16 @@ def _get_prompt_template(is_refinement: bool) -> str:
 
 
 @st.cache_resource
-def _get_llm():
+def _get_llm(repo_id: str, max_new_tokens: int):
     """
     Get an LLM instance.
 
+    :param repo_id: The model name.
+    :param max_new_tokens: The max new tokens to generate.
     :return: The LLM.
     """
 
-    return llm_helper.get_hf_endpoint()
-
-
-@st.cache_data
-def _get_icons_list() -> List[str]:
-    """
-    Get a list of available icons names without the dir name and file extension.
-
-    :return: A llist of the icons.
-    """
-
-    return ice.get_icons_list()
+    return llm_helper.get_hf_endpoint(repo_id, max_new_tokens)
 
 
 APP_TEXT = _load_strings()
@@ -89,12 +79,19 @@ logger = logging.getLogger(__name__)
 
 texts = list(GlobalConfig.PPTX_TEMPLATE_FILES.keys())
 captions = [GlobalConfig.PPTX_TEMPLATE_FILES[x]['caption'] for x in texts]
-pptx_template = st.sidebar.radio(
-    'Select a presentation template:',
-    texts,
-    captions=captions,
-    horizontal=True
-)
+
+with st.sidebar:
+    pptx_template = st.sidebar.radio(
+        'Select a presentation template:',
+        texts,
+        captions=captions,
+        horizontal=True
+    )
+    st.divider()
+    llm_to_use = st.sidebar.selectbox(
+        'Select an LLM to use:',
+        [f'{k} ({v["description"]})' for k, v in GlobalConfig.HF_MODELS.items()]
+    ).split(' ')[0]
 
 
 def build_ui():
@@ -185,14 +182,12 @@ def set_up_chat_ui():
             **{
                 'instructions': list_of_msgs,
                 'previous_content': _get_last_response(),
-                'icons_list': '\n'.join(_get_icons_list())
             }
         )
     else:
         formatted_template = prompt_template.format(
             **{
                 'question': prompt,
-                'icons_list': '\n'.join(_get_icons_list())
             }
         )
 
@@ -200,12 +195,15 @@ def set_up_chat_ui():
     response = ''
 
     try:
-        for chunk in _get_llm().stream(formatted_template):
+        for chunk in _get_llm(
+            repo_id=llm_to_use,
+            max_new_tokens=GlobalConfig.HF_MODELS[llm_to_use]['max_new_tokens']
+        ).stream(formatted_template):
             response += chunk
 
             # Update the progress bar
             progress_percentage = min(
-                len(response) / GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH, 0.95
+                len(response) / GlobalConfig.HF_MODELS[llm_to_use]['max_new_tokens'], 0.95
            )
             progress_bar.progress(
                 progress_percentage,
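
Note: a side effect of parameterising _get_llm() is that st.cache_resource keys its
cache on the argument values, so each (repo_id, max_new_tokens) pair gets its own
cached endpoint, and switching models in the sidebar does not evict the previously
created one. A minimal sketch of the pattern (make_client is a hypothetical
stand-in, not this app's code):

    import streamlit as st


    @st.cache_resource
    def make_client(repo_id: str, max_new_tokens: int) -> dict:
        # Runs once per distinct (repo_id, max_new_tokens) tuple; later calls
        # with the same arguments return the same cached object.
        return {'repo_id': repo_id, 'max_new_tokens': max_new_tokens}


    a = make_client('mistralai/Mistral-Nemo-Instruct-2407', 12228)
    b = make_client('mistralai/Mistral-7B-Instruct-v0.2', 8192)
    c = make_client('mistralai/Mistral-Nemo-Instruct-2407', 12228)
    assert a is c  # identical arguments hit the same cache entry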
global_config.py CHANGED
@@ -17,10 +17,18 @@ class GlobalConfig:
     A data class holding the configurations.
     """
 
-    HF_LLM_MODEL_NAME = 'mistralai/Mistral-Nemo-Instruct-2407'
+    HF_MODELS = {
+        'mistralai/Mistral-Nemo-Instruct-2407': {
+            'description': 'longer response',
+            'max_new_tokens': 12228
+        },
+        'mistralai/Mistral-7B-Instruct-v0.2': {
+            'description': 'faster, shorter',
+            'max_new_tokens': 8192
+        },
+    }
     LLM_MODEL_TEMPERATURE = 0.2
     LLM_MODEL_MIN_OUTPUT_LENGTH = 100
-    LLM_MODEL_MAX_OUTPUT_LENGTH = 4 * 4096  # tokens
     LLM_MODEL_MAX_INPUT_LENGTH = 400  # characters
 
     HUGGINGFACEHUB_API_TOKEN = os.environ.get('HUGGINGFACEHUB_API_TOKEN', '')
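
Note: app.py recovers the repo id from the selectbox label with .split(' ')[0],
which works because HF repo ids contain no spaces. A standalone sketch of the
label round-trip (a plain dict standing in for GlobalConfig.HF_MODELS):

    HF_MODELS = {
        'mistralai/Mistral-Nemo-Instruct-2407': {'description': 'longer response', 'max_new_tokens': 12228},
        'mistralai/Mistral-7B-Instruct-v0.2': {'description': 'faster, shorter', 'max_new_tokens': 8192},
    }

    # Labels shown in the sidebar, e.g. 'mistralai/... (longer response)'
    labels = [f'{k} ({v["description"]})' for k, v in HF_MODELS.items()]

    for label in labels:
        repo_id = label.split(' ')[0]  # recover the dict key
        assert repo_id in HF_MODELS
        print(repo_id, HF_MODELS[repo_id]['max_new_tokens'])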
helpers/llm_helper.py CHANGED
@@ -9,7 +9,6 @@ from langchain_core.language_models import LLM
 from global_config import GlobalConfig
 
 
-HF_API_URL = f"https://api-inference.huggingface.co/models/{GlobalConfig.HF_LLM_MODEL_NAME}"
 HF_API_HEADERS = {"Authorization": f"Bearer {GlobalConfig.HUGGINGFACEHUB_API_TOKEN}"}
 REQUEST_TIMEOUT = 35
 
@@ -28,18 +27,20 @@ http_session.mount('https://', adapter)
 http_session.mount('http://', adapter)
 
 
-def get_hf_endpoint() -> LLM:
+def get_hf_endpoint(repo_id: str, max_new_tokens: int) -> LLM:
     """
     Get an LLM via the HuggingFaceEndpoint of LangChain.
 
-    :return: The LLM.
+    :param repo_id: The model name.
+    :param max_new_tokens: The max new tokens to generate.
+    :return: The HF LLM inference endpoint.
     """
 
-    logger.debug('Getting LLM via HF endpoint')
+    logger.debug('Getting LLM via HF endpoint: %s', repo_id)
 
     return HuggingFaceEndpoint(
-        repo_id=GlobalConfig.HF_LLM_MODEL_NAME,
-        max_new_tokens=GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
+        repo_id=repo_id,
+        max_new_tokens=max_new_tokens,
         top_k=40,
         top_p=0.95,
         temperature=GlobalConfig.LLM_MODEL_TEMPERATURE,
@@ -51,69 +52,69 @@ def get_hf_endpoint() -> LLM:
     )
 
 
-def hf_api_query(payload: dict) -> dict:
-    """
-    Invoke HF inference end-point API.
-
-    :param payload: The prompt for the LLM and related parameters.
-    :return: The output from the LLM.
-    """
-
-    try:
-        response = http_session.post(
-            HF_API_URL,
-            headers=HF_API_HEADERS,
-            json=payload,
-            timeout=REQUEST_TIMEOUT
-        )
-        result = response.json()
-    except requests.exceptions.Timeout as te:
-        logger.error('*** Error: hf_api_query timeout! %s', str(te))
-        result = []
-
-    return result
-
-
-def generate_slides_content(topic: str) -> str:
-    """
-    Generate the outline/contents of slides for a presentation on a given topic.
-
-    :param topic: Topic on which slides are to be generated.
-    :return: The content in JSON format.
-    """
-
-    with open(GlobalConfig.SLIDES_TEMPLATE_FILE, 'r', encoding='utf-8') as in_file:
-        template_txt = in_file.read().strip()
-        template_txt = template_txt.replace('<REPLACE_PLACEHOLDER>', topic)
-
-    output = hf_api_query({
-        'inputs': template_txt,
-        'parameters': {
-            'temperature': GlobalConfig.LLM_MODEL_TEMPERATURE,
-            'min_length': GlobalConfig.LLM_MODEL_MIN_OUTPUT_LENGTH,
-            'max_length': GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
-            'max_new_tokens': GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
-            'num_return_sequences': 1,
-            'return_full_text': False,
-            # "repetition_penalty": 0.0001
-        },
-        'options': {
-            'wait_for_model': True,
-            'use_cache': True
-        }
-    })
-
-    output = output[0]['generated_text'].strip()
-    # output = output[len(template_txt):]
-
-    json_end_idx = output.rfind('```')
-    if json_end_idx != -1:
-        # logging.debug(f'{json_end_idx=}')
-        output = output[:json_end_idx]
-
-    logger.debug('generate_slides_content: output: %s', output)
-
-    return output
+# def hf_api_query(payload: dict) -> dict:
+#     """
+#     Invoke HF inference end-point API.
+#
+#     :param payload: The prompt for the LLM and related parameters.
+#     :return: The output from the LLM.
+#     """
+#
+#     try:
+#         response = http_session.post(
+#             HF_API_URL,
+#             headers=HF_API_HEADERS,
+#             json=payload,
+#             timeout=REQUEST_TIMEOUT
+#         )
+#         result = response.json()
+#     except requests.exceptions.Timeout as te:
+#         logger.error('*** Error: hf_api_query timeout! %s', str(te))
+#         result = []
+#
+#     return result
 
 
+# def generate_slides_content(topic: str) -> str:
+#     """
+#     Generate the outline/contents of slides for a presentation on a given topic.
+#
+#     :param topic: Topic on which slides are to be generated.
+#     :return: The content in JSON format.
+#     """
+#
+#     with open(GlobalConfig.SLIDES_TEMPLATE_FILE, 'r', encoding='utf-8') as in_file:
+#         template_txt = in_file.read().strip()
+#         template_txt = template_txt.replace('<REPLACE_PLACEHOLDER>', topic)
+#
+#     output = hf_api_query({
+#         'inputs': template_txt,
+#         'parameters': {
+#             'temperature': GlobalConfig.LLM_MODEL_TEMPERATURE,
+#             'min_length': GlobalConfig.LLM_MODEL_MIN_OUTPUT_LENGTH,
+#             'max_length': GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
+#             'max_new_tokens': GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
+#             'num_return_sequences': 1,
+#             'return_full_text': False,
+#             # "repetition_penalty": 0.0001
+#         },
+#         'options': {
+#             'wait_for_model': True,
+#             'use_cache': True
+#         }
+#     })
+#
+#     output = output[0]['generated_text'].strip()
+#     # output = output[len(template_txt):]
+#
+#     json_end_idx = output.rfind('```')
+#     if json_end_idx != -1:
+#         # logging.debug(f'{json_end_idx=}')
+#         output = output[:json_end_idx]
+#
+#     logger.debug('generate_slides_content: output: %s', output)
+#
+#     return output
 
 
 if __name__ == '__main__':
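
Note: with the refactored signature, callers choose the model and token budget per
call. A minimal usage sketch, assuming HUGGINGFACEHUB_API_TOKEN is set in the
environment (the prompt string is illustrative only):

    from global_config import GlobalConfig
    from helpers import llm_helper

    repo_id = 'mistralai/Mistral-7B-Instruct-v0.2'
    llm = llm_helper.get_hf_endpoint(
        repo_id=repo_id,
        max_new_tokens=GlobalConfig.HF_MODELS[repo_id]['max_new_tokens']
    )

    # LangChain LLMs expose stream(), a generator of text chunks.
    for chunk in llm.stream('List three tips for making better slide decks.'):
        print(chunk, end='', flush=True)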
langchain_templates/chat_prompts/initial_template_v4_two_cols_img.txt CHANGED
@@ -13,12 +13,8 @@ In addition, for each slide, add image keywords based on the content of the resp
 These keywords will be later used to search for images from the Web relevant to the slide content.
 
 In addition, create one slide containing 4 TO 6 icons (pictograms) illustrating some key ideas/aspects/concepts relevant to the topic.
-In this slide, each line of text will begin with the name of a relevant icon enclosed between [[ and ]].
-Select appropriate and exact icon names from the <ICONS> section provided below.
-
-<ICONS>
-{icons_list}
-</ICONS>
+In this slide, each line of text will begin with the name of a relevant icon enclosed between [[ and ]], e.g., [[machine-learning]] and [[fairness]].
+Insert icons only in this slide.
 
 The content of each slide should be VERBOSE, DESCRIPTIVE, and very DETAILED. Each bullet point should be detailed and explanatory, not just short phrases.
 
langchain_templates/chat_prompts/refinement_template_v4_two_cols_img.txt CHANGED
@@ -14,12 +14,8 @@ In addition, for each slide, add image keywords based on the content of the resp
 These keywords will be later used to search for images from the Web relevant to the slide content.
 
 In addition, create one slide containing 4 TO 6 icons (pictograms) illustrating some key ideas/aspects/concepts relevant to the topic.
-In this slide, each line of text will begin with the name of a relevant icon enclosed between [[ and ]].
-Select appropriate and exact icon names from the <ICONS> section provided below.
-
-<ICONS>
-{icons_list}
-</ICONS>
+In this slide, each line of text will begin with the name of a relevant icon enclosed between [[ and ]], e.g., [[machine-learning]] and [[fairness]].
+Insert icons only in this slide.
 
 The content of each slide should be VERBOSE, DESCRIPTIVE, and very DETAILED. Each bullet point should be detailed and explanatory, not just short phrases.
 
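Note: downstream code must split the icon name from the bullet text; a regex
sketch of parsing the [[icon-name]] convention these templates describe (not the
actual pptx_helper implementation):

    import re

    # A leading [[icon-name]] marker, then the bullet text.
    ICON_LINE = re.compile(r'^\[\[([\w-]+)\]\]\s*(.*)$')

    match = ICON_LINE.match('[[machine-learning]] Models learn patterns from data.')
    if match:
        icon_name, text = match.groups()
        print(icon_name)  # machine-learning
        print(text)       # Models learn patterns from data.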
legacy_app.py DELETED
@@ -1,294 +0,0 @@
-import pathlib
-import logging
-import tempfile
-from typing import List, Tuple
-
-import json5
-import metaphor_python as metaphor
-import streamlit as st
-
-from helpers import llm_helper, pptx_helper
-from global_config import GlobalConfig
-
-
-APP_TEXT = json5.loads(open(GlobalConfig.APP_STRINGS_FILE, 'r', encoding='utf-8').read())
-GB_CONVERTER = 2 ** 30
-
-
-logger = logging.getLogger(__name__)
-
-
-@st.cache_data
-def get_contents_wrapper(text: str) -> str:
-    """
-    Fetch and cache the slide deck contents on a topic by calling an external API.
-
-    :param text: The presentation topic.
-    :return: The slide deck contents or outline in JSON format.
-    """
-
-    logger.info('LLM call because of cache miss...')
-    return llm_helper.generate_slides_content(text).strip()
-
-
-@st.cache_resource
-def get_metaphor_client_wrapper() -> metaphor.Metaphor:
-    """
-    Create a Metaphor client for semantic Web search.
-
-    :return: Metaphor instance.
-    """
-
-    return metaphor.Metaphor(api_key=GlobalConfig.METAPHOR_API_KEY)
-
-
-@st.cache_data
-def get_web_search_results_wrapper(text: str) -> List[Tuple[str, str]]:
-    """
-    Fetch and cache the Web search results on a given topic.
-
-    :param text: The topic.
-    :return: A list of (title, link) tuples.
-    """
-
-    results = []
-    search_results = get_metaphor_client_wrapper().search(
-        text,
-        use_autoprompt=True,
-        num_results=5
-    )
-
-    for a_result in search_results.results:
-        results.append((a_result.title, a_result.url))
-
-    return results
-
-
-def build_ui():
-    """
-    Display the input elements for content generation. Only covers the first step.
-    """
-
-    # get_disk_used_percentage()
-
-    st.title(APP_TEXT['app_name'])
-    st.subheader(APP_TEXT['caption'])
-    st.markdown(
-        'Powered by'
-        ' [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2).'
-    )
-    st.markdown(
-        '*If the JSON is generated or parsed incorrectly, try again later by making minor changes'
-        ' to the input text.*'
-    )
-
-    with st.form('my_form'):
-        # Topic input
-        try:
-            with open(GlobalConfig.PRELOAD_DATA_FILE, 'r', encoding='utf-8') as in_file:
-                preload_data = json5.loads(in_file.read())
-        except (FileExistsError, FileNotFoundError):
-            preload_data = {'topic': '', 'audience': ''}
-
-        topic = st.text_area(
-            APP_TEXT['input_labels'][0],
-            value=preload_data['topic']
-        )
-
-        texts = list(GlobalConfig.PPTX_TEMPLATE_FILES.keys())
-        captions = [GlobalConfig.PPTX_TEMPLATE_FILES[x]['caption'] for x in texts]
-
-        pptx_template = st.radio(
-            'Select a presentation template:',
-            texts,
-            captions=captions,
-            horizontal=True
-        )
-
-        st.divider()
-        submit = st.form_submit_button('Generate slide deck')
-
-        if submit:
-            # st.write(f'Clicked {time.time()}')
-            st.session_state.submitted = True
-
-    # https://github.com/streamlit/streamlit/issues/3832#issuecomment-1138994421
-    if 'submitted' in st.session_state:
-        progress_text = 'Generating the slides...give it a moment'
-        progress_bar = st.progress(0, text=progress_text)
-
-        topic_txt = topic.strip()
-        generate_presentation(topic_txt, pptx_template, progress_bar)
-
-    st.divider()
-    st.text(APP_TEXT['tos'])
-    st.text(APP_TEXT['tos2'])
-
-    st.markdown(
-        '![Visitors]'
-        '(https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fbarunsaha%2Fslide-deck-ai&countColor=%23263759)'
-    )
-
-
-def generate_presentation(topic: str, pptx_template: str, progress_bar):
-    """
-    Process the inputs to generate the slides.
-
-    :param topic: The presentation topic based on which contents are to be generated.
-    :param pptx_template: The PowerPoint template name to be used.
-    :param progress_bar: Progress bar from the page.
-    """
-
-    topic_length = len(topic)
-    logger.debug('Input length:: topic: %s', topic_length)
-
-    if topic_length >= 10:
-        logger.debug('Topic: %s', topic)
-        target_length = min(topic_length, GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH)
-
-        try:
-            # Step 1: Generate the contents in JSON format using an LLM
-            json_str = process_slides_contents(topic[:target_length], progress_bar)
-            logger.debug('Truncated topic: %s', topic[:target_length])
-            logger.debug('Length of JSON: %d', len(json_str))
-
-            # Step 2: Generate the slide deck based on the template specified
-            if len(json_str) > 0:
-                st.info(
-                    'Tip: The generated content doesn\'t look so great?'
-                    ' Need alternatives? Just change your description text and try again.',
-                    icon="💡️"
-                )
-            else:
-                st.error(
-                    'Unfortunately, JSON generation failed, so the next steps would lead'
-                    ' to nowhere. Try again or come back later.'
-                )
-                return
-
-            all_headers = generate_slide_deck(json_str, pptx_template, progress_bar)
-
-            # Step 3: Bonus stuff: Web references and AI art
-            show_bonus_stuff(all_headers)
-
-        except ValueError as ve:
-            st.error(f'Unfortunately, an error occurred: {ve}! '
-                     f'Please change the text, try again later, or report it, sharing your inputs.')
-
-    else:
-        st.error('Not enough information provided! Please be little more descriptive :)')
-
-
-def process_slides_contents(text: str, progress_bar: st.progress) -> str:
-    """
-    Convert given text into structured data and display. Update the UI.
-
-    :param text: The topic description for the presentation.
-    :param progress_bar: Progress bar for this step.
-    :return: The contents as a JSON-formatted string.
-    """
-
-    json_str = ''
-
-    try:
-        logger.info('Calling LLM for content generation on the topic: %s', text)
-        json_str = get_contents_wrapper(text)
-    except Exception as ex:
-        st.error(
-            f'An exception occurred while trying to convert to JSON. It could be because of heavy'
-            f' traffic or something else. Try doing it again or try again later.'
-            f'\nError message: {ex}'
-        )
-
-    progress_bar.progress(50, text='Contents generated')
-
-    with st.expander('The generated contents (in JSON format)'):
-        st.code(json_str, language='json')
-
-    return json_str
-
-
-def generate_slide_deck(json_str: str, pptx_template: str, progress_bar) -> List:
-    """
-    Create a slide deck.
-
-    :param json_str: The contents in JSON format.
-    :param pptx_template: The PPTX template name.
-    :param progress_bar: Progress bar.
-    :return: A list of all slide headers and the title.
-    """
-
-    progress_text = 'Creating the slide deck...give it a moment'
-    progress_bar.progress(75, text=progress_text)
-
-    # # Get a unique name for the file to save -- use the session ID
-    # ctx = st_sr.get_script_run_ctx()
-    # session_id = ctx.session_id
-    # timestamp = time.time()
-    # output_file_name = f'{session_id}_{timestamp}.pptx'
-
-    temp = tempfile.NamedTemporaryFile(delete=False, suffix='.pptx')
-    path = pathlib.Path(temp.name)
-
-    logger.info('Creating PPTX file...')
-    all_headers = pptx_helper.generate_powerpoint_presentation(
-        json_str,
-        slides_template=pptx_template,
-        output_file_path=path
-    )
-    progress_bar.progress(100, text='Done!')
-
-    with open(path, 'rb') as f:
-        st.download_button('Download PPTX file', f, file_name='Presentation.pptx')
-
-    if temp:
-        temp.close()
-
-    return all_headers
-
-
-def show_bonus_stuff(ppt_headers: List[str]):
-    """
-    Show bonus stuff for the presentation.
-
-    :param ppt_headers: A list of the slide headings.
-    """
-
-    # Use the presentation title and the slide headers to find relevant info online
-    logger.info('Calling Metaphor search...')
-    ppt_text = ' '.join(ppt_headers)
-    search_results = get_web_search_results_wrapper(ppt_text)
-    md_text_items = []
-
-    for (title, link) in search_results:
-        md_text_items.append(f'[{title}]({link})')
-
-    with st.expander('Related Web references'):
-        st.markdown('\n\n'.join(md_text_items))
-
-    logger.info('Done!')
-
-    # # Avoid image generation. It costs time and an API call, so just limit to the text generation.
-    # with st.expander('AI-generated image on the presentation topic'):
-    #     logger.info('Calling SDXL for image generation...')
-    #     # img_empty.write('')
-    #     # img_text.write(APP_TEXT['image_info'])
-    #     image = get_ai_image_wrapper(ppt_text)
-    #
-    #     if len(image) > 0:
-    #         image = base64.b64decode(image)
-    #         st.image(image, caption=ppt_text)
-    #         st.info('Tip: Right-click on the image to save it.', icon="💡️")
-    #         logger.info('Image added')
-
-
-def main():
-    """
-    Trigger application run.
-    """
-
-    build_ui()
-
-
-if __name__ == '__main__':
-    main()