UMESH266 committed
Commit d359a51
1 Parent(s): d6f7b0e

App updated
app.py CHANGED
@@ -1,7 +1,9 @@
# Main driver code of chatbot
+ from cgitb import text
import streamlit as st
from streamlit_option_menu import option_menu
from src.components.avatarsys import AvatarSystem
+ from src.exception.exception import customexception
from src.utils.accessory import play_speech, listen, save_output, load_output
import speech_recognition as sr

@@ -41,10 +43,33 @@ def response(input_text):

    return ans, response_sentiment

+ # Voice to text conversion
+ r = sr.Recognizer()
+
+ def record_voice():
+     try:
+         with sr.Microphone() as source:
+             st.write("Speak: ") # print("Say something!")
+             st.write("Please wait, response under process...")
+             audio = r.listen(source)
+             # r.adjust_for_ambient_noise(source, duration=0.2)
+             text = r.recognize_google(audio)
+             user_input = text + '?'
+
+             return user_input
+
+     except sr.RequestError as e:
+         st.write("Could not request results: {0}".format(e))
+
+     except sr.UnknownValueError:
+         st.write("Unknown error occurred.")
+
+
if mode == "Text" and st.session_state.HF_TOKEN == '':
    if 'chathist' not in st.session_state:
        st.session_state.chathist = dict()
-
+
    # Form requires unique key
    with st.form(key=f'Chat form', clear_on_submit=True):
        user_input = st.text_input("You: ", value="", placeholder="Ask anything or Type 'Exit'")
@@ -64,7 +89,44 @@ if mode == "Text" and st.session_state.HF_TOKEN == '':
        # Chat history
        st.session_state.chathist = chat_history(user_input, ans, senti)

-     # Chat history display
+     # # Chat history display
+     # if st.button("View chat")
+     # st.markdown("### Chat History: ")
+     # with st.container(border=True):
+     # for key in st.session_state.chathist.keys():
+     # user_col1, user_col2, user_col3 = st.columns(3, vertical_alignment="center")
+     # user = user_col3.container(border=True)
+     # user.write(key)
+     # bot_col1, bot_col2, bot_col3 = st.columns([4, 1, 1], vertical_alignment='center')
+     # bot = bot_col1.container(border=True)
+     # bot.write(st.session_state.chathist[key][0])
+
+ elif mode == "Voice" and st.session_state.HF_TOKEN == '':
+     if 'chathist' not in st.session_state:
+         st.session_state.chathist = dict()
+
+     if st.button("speak"):
+         user_input = record_voice()
+
+     if user_input != "":
+
+         # Exiting the chat
+         if 'exit' in user_input:
+             st.write(salutation)
+             st.stop()
+
+         # Getting response and sentiment of response
+         ans, senti = response(user_input)
+
+         st.write(f"You: {user_input}")
+         st.write(f"Bot: {ans}")
+         play_speech(ans)
+
+         # Chat history
+         st.session_state.chathist = chat_history(user_input, ans, senti)
+
+ # Chat history display
+ if st.button("View Chat history"):
    st.markdown("### Chat History: ")
    with st.container(border=True):
        for key in st.session_state.chathist.keys():
@@ -74,32 +136,6 @@ if mode == "Text" and st.session_state.HF_TOKEN == '':
            bot_col1, bot_col2, bot_col3 = st.columns([4, 1, 1], vertical_alignment='center')
            bot = bot_col1.container(border=True)
            bot.write(st.session_state.chathist[key][0])
-
- elif mode == "Voice" and st.session_state.HF_TOKEN != '':
-     # Voice to text conversion
-     r = sr.Recognizer()
-     while 1:
-         with sr.Microphone() as source:
-             st.write("Speak: ") # print("Say something!")
-             st.write("Please wait, response under process...")
-             audio = r.listen(source)
-             r.adjust_for_ambient_noise(source, duration=0.2)
-             text = r.recognize_google(audio)
-             user_input = text + '?'
-             if text == '':
-                 user_input='exit?'
-                 st.write("Start again please. Failed to recognise the voice.")
-
-         # Exiting the chat
-         if 'exit' in user_input:
-             st.write(salutation) # print("Pleasure meeting you. Have a nice day!")
-             play_speech(salutation)
-             st.stop()
-             break
-
-         #Getting response and sentiment of response
-         response(user_input)
-

# if mode == "Doc-Bot" and st.session_state.HF_TOKEN != '':
#     st.write("Doc-Bot implementation")
artifacts/Audio.mp3 DELETED
Binary file (29.4 kB)
 
artifacts/Docmate.ipynb CHANGED
@@ -825,59 +825,527 @@
   "metadata": {},
   "outputs": [
    {
-    "name": "stderr",
     "output_type": "stream",
     "text": [
-     [removed: tqdm "IProgress not found" warning and three torchvision UserWarnings (failed image extension load, beta torchvision.datapoints / transforms.v2 namespaces) emitted from c:\Users\umesh\anaconda3\envs\DeepLearning]
     ]
-    },
    {
-    "ename": "ImportError",
-    "evalue": "Loading an AWQ quantized model requires auto-awq library (`pip install autoawq`)",
-    "output_type": "error",
-    "traceback": [
-     [removed: ANSI-escaped traceback through transformers pipeline(), infer_framework_load_model, from_pretrained and AwqQuantizer.validate_environment (which also checks for accelerate), ending in the ImportError above]
     ]
    }
   ],
   "source": [
-   "# Use a pipeline as a high-level helper\n",
-   "from transformers import pipeline\n",
    "\n",
-   "pipe = pipeline(\"text-generation\", model=\"TheBloke/meditron-7B-chat-AWQ\")"
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "^C\n",
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
-   "pip install autoawq"
   ]
  }
 ],
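For reference, the removed output above came from a cell that tried the Hugging Face pipeline call shown in its deleted source and failed only because auto-awq was not installed. A minimal sketch of that load, assuming autoawq (and accelerate, which the same environment check also requires) are installed and enough GPU memory is available; the prompt is purely illustrative:

# Sketch of the deleted cell's intent; requires `pip install autoawq accelerate` per the ImportError above.
from transformers import pipeline

# Load the AWQ-quantized Meditron chat model referenced in the removed cell.
pipe = pipeline("text-generation", model="TheBloke/meditron-7B-chat-AWQ")

# Illustrative prompt only — not taken from the notebook.
print(pipe("What are common symptoms of dehydration?", max_new_tokens=64)[0]["generated_text"])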
 
   "metadata": {},
   "outputs": [
    {
+    "name": "stdout",
     "output_type": "stream",
     "text": [
+     "Collecting streamlit-bokeh-events\n",
+     " Downloading streamlit_bokeh_events-0.1.2-py3-none-any.whl.metadata (407 bytes)\n",
+     "Requirement already satisfied: bokeh>=2.0.0 in c:\\users\\umesh\\anaconda3\\envs\\deeplearning\\lib\\site-packages (from streamlit-bokeh-events) (3.6.0)\n",
+     "Requirement already satisfied: streamlit>=0.63 in c:\\users\\umesh\\anaconda3\\envs\\deeplearning\\lib\\site-packages (from streamlit-bokeh-events) (1.39.0)\n",
+     [added: further "Requirement already satisfied" lines for the transitive dependencies of bokeh and streamlit (Jinja2, numpy, pandas, altair, rich, gitpython, jsonschema, etc.), condensed here]
+     "Downloading streamlit_bokeh_events-0.1.2-py3-none-any.whl (2.0 MB)\n",
+     [added: repeated pip download-progress lines for the 2.0 MB wheel, climbing from 0.0/2.0 MB at roughly 65-125 kB/s, condensed here]
1239
+ " ------------------------------------- -- 1.8/2.0 MB 109.2 kB/s eta 0:00:02\n",
1240
+ " ------------------------------------- -- 1.8/2.0 MB 109.2 kB/s eta 0:00:02\n",
1241
+ " -------------------------------------- - 1.9/2.0 MB 109.8 kB/s eta 0:00:01\n",
1242
+ " -------------------------------------- - 1.9/2.0 MB 109.8 kB/s eta 0:00:01\n",
1243
+ " -------------------------------------- - 1.9/2.0 MB 110.2 kB/s eta 0:00:01\n",
1244
+ " -------------------------------------- - 1.9/2.0 MB 110.2 kB/s eta 0:00:01\n",
1245
+ " -------------------------------------- - 1.9/2.0 MB 110.2 kB/s eta 0:00:01\n",
1246
+ " -------------------------------------- - 1.9/2.0 MB 110.1 kB/s eta 0:00:01\n",
1247
+ " -------------------------------------- - 1.9/2.0 MB 110.1 kB/s eta 0:00:01\n",
1248
+ " -------------------------------------- - 1.9/2.0 MB 110.1 kB/s eta 0:00:01\n",
1249
+ " -------------------------------------- - 1.9/2.0 MB 110.1 kB/s eta 0:00:01\n",
1250
+ " -------------------------------------- - 1.9/2.0 MB 110.1 kB/s eta 0:00:01\n",
1251
+ " -------------------------------------- - 1.9/2.0 MB 110.1 kB/s eta 0:00:01\n",
1252
+ " --------------------------------------- 1.9/2.0 MB 109.4 kB/s eta 0:00:01\n",
1253
+ " --------------------------------------- 1.9/2.0 MB 109.4 kB/s eta 0:00:01\n",
1254
+ " --------------------------------------- 1.9/2.0 MB 109.4 kB/s eta 0:00:01\n",
1255
+ " --------------------------------------- 1.9/2.0 MB 109.1 kB/s eta 0:00:01\n",
1256
+ " --------------------------------------- 1.9/2.0 MB 109.1 kB/s eta 0:00:01\n",
1257
+ " --------------------------------------- 1.9/2.0 MB 109.1 kB/s eta 0:00:01\n",
1258
+ " --------------------------------------- 1.9/2.0 MB 109.1 kB/s eta 0:00:01\n",
1259
+ " --------------------------------------- 2.0/2.0 MB 109.7 kB/s eta 0:00:01\n",
1260
+ " ---------------------------------------- 2.0/2.0 MB 109.4 kB/s eta 0:00:00\n",
1261
+ "Installing collected packages: streamlit-bokeh-events\n",
1262
+ "Successfully installed streamlit-bokeh-events-0.1.2\n",
1263
+ "Note: you may need to restart the kernel to use updated packages.\n"
1264
  ]
1265
+ }
1266
+ ],
1267
+ "source": [
1268
+ "pip install streamlit-bokeh-events"
1269
+ ]
1270
+ },
1271
+ {
1272
+ "cell_type": "code",
1273
+ "execution_count": 1,
1274
+ "metadata": {},
1275
+ "outputs": [
1276
  {
1277
+ "name": "stderr",
1278
+ "output_type": "stream",
1279
+ "text": [
1280
+ "2024-10-30 10:52:39.204 WARNING streamlit.runtime.scriptrunner_utils.script_run_context: Thread 'MainThread': missing ScriptRunContext! This warning can be ignored when running in bare mode.\n",
1281
+ "2024-10-30 10:52:39.276 WARNING streamlit.runtime.scriptrunner_utils.script_run_context: Thread 'MainThread': missing ScriptRunContext! This warning can be ignored when running in bare mode.\n",
1282
+ "2024-10-30 10:52:39.277 WARNING streamlit.runtime.scriptrunner_utils.script_run_context: Thread 'MainThread': missing ScriptRunContext! This warning can be ignored when running in bare mode.\n",
1283
+ "2024-10-30 10:52:39.278 WARNING streamlit.runtime.scriptrunner_utils.script_run_context: Thread 'MainThread': missing ScriptRunContext! This warning can be ignored when running in bare mode.\n",
1284
+ "2024-10-30 10:52:39.371 \n",
1285
+ " \u001b[33m\u001b[1mWarning:\u001b[0m to view this Streamlit app on a browser, run it with the following\n",
1286
+ " command:\n",
1287
+ "\n",
1288
+ " streamlit run c:\\Users\\umesh\\anaconda3\\envs\\DeepLearning\\lib\\site-packages\\ipykernel_launcher.py [ARGUMENTS]\n",
1289
+ "2024-10-30 10:52:39.372 Thread 'MainThread': missing ScriptRunContext! This warning can be ignored when running in bare mode.\n"
1290
  ]
1291
  }
1292
  ],
1293
  "source": [
1294
+ "import streamlit as st\n",
1295
+ "from bokeh.models.widgets import Button\n",
1296
+ "from bokeh.models import CustomJS\n",
1297
+ "from streamlit_bokeh_events import streamlit_bokeh_events\n",
1298
+ "\n",
1299
+ "stt_button = Button(label=\"Speak\", width=100)\n",
1300
+ "\n",
1301
+ "stt_button.js_on_event(\"button_click\", CustomJS(code=\"\"\"\n",
1302
+ " var recognition = new webkitSpeechRecognition();\n",
1303
+ " recognition.continuous = true;\n",
1304
+ " recognition.interimResults = true;\n",
1305
+ " \n",
1306
+ " recognition.onresult = function (e) {\n",
1307
+ " var value = \"\";\n",
1308
+ " for (var i = e.resultIndex; i < e.results.length; ++i) {\n",
1309
+ " if (e.results[i].isFinal) {\n",
1310
+ " value += e.results[i][0].transcript;\n",
1311
+ " }\n",
1312
+ " }\n",
1313
+ " if ( value != \"\") {\n",
1314
+ " document.dispatchEvent(new CustomEvent(\"GET_TEXT\", {detail: value}));\n",
1315
+ " }\n",
1316
+ " }\n",
1317
+ " recognition.start();\n",
1318
+ " \"\"\"))\n",
1319
  "\n",
1320
+ "result = streamlit_bokeh_events(\n",
1321
+ " stt_button,\n",
1322
+ " events=\"GET_TEXT\",\n",
1323
+ " key=\"listen\",\n",
1324
+ " refresh_on_update=False,\n",
1325
+ " override_height=75,\n",
1326
+ " debounce_time=0)\n",
1327
+ "\n",
1328
+ "if result:\n",
1329
+ " if \"GET_TEXT\" in result:\n",
1330
+ " st.write(result.get(\"GET_TEXT\"))"
1331
  ]
1332
  },
1333
  {
1334
  "cell_type": "code",
1335
+ "execution_count": 1,
1336
  "metadata": {},
1337
  "outputs": [
1338
  {
1339
  "name": "stdout",
1340
  "output_type": "stream",
1341
  "text": [
1342
+ "Requirement already satisfied: pyzmq in c:\\users\\umesh\\anaconda3\\envs\\deeplearning\\lib\\site-packages (23.2.0)\n",
1343
  "Note: you may need to restart the kernel to use updated packages.\n"
1344
  ]
1345
  }
1346
  ],
1347
  "source": [
1348
+ "pip install pyzmq"
1349
  ]
1350
  }
1351
  ],
artifacts/response.txt CHANGED
@@ -1,3 +1,3 @@
1
 
2
 
3
- I'm doing well, thank you! How about you
 
1
 
2
 
3
+ Pythagoras was an ancient Greek philosopher and mathematician who is best known for the Pythagorean theorem, which states that in a right-angled triangle, the square of the length of the hypotenuse (the side opposite the right angle) is equal to the sum of the squares of the lengths of the other two sides. He is also credited with the discovery of the five regular solids, and he founded the Pythagorean Brotherhood, a religious and philosophical community that had a significant influence on Western thought. Pythagoras lived in the 6th century BC and is considered one of the most important figures in the history of mathematics and philosophy.
logs/10_30_2024_09_32_06.log CHANGED
@@ -0,0 +1,97 @@
1
+ [2024-10-30 09:32:08,069] 28 root - INFO - LLM model for text generation created.
2
+ [2024-10-30 09:32:08,070] 29 root - INFO - LLM model for medical text generation created.
3
+ [2024-10-30 09:32:17,268] 23 root - INFO - Avatar system initiated.
4
+ [2024-10-30 09:38:40,395] 28 root - INFO - LLM model for text generation created.
5
+ [2024-10-30 09:38:40,399] 30 root - INFO - LLM model for medical text generation created.
6
+ [2024-10-30 09:38:42,252] 23 root - INFO - Avatar system initiated.
7
+ [2024-10-30 09:39:23,130] 33 root - INFO - Text response generated.
8
+ [2024-10-30 09:42:06,980] 28 root - INFO - LLM model for text generation created.
9
+ [2024-10-30 09:42:06,984] 30 root - INFO - LLM model for medical text generation created.
10
+ [2024-10-30 09:42:09,191] 23 root - INFO - Avatar system initiated.
11
+ [2024-10-30 09:42:24,758] 33 root - INFO - Text response generated.
12
+ [2024-10-30 09:45:30,730] 33 root - INFO - Text response generated.
13
+ [2024-10-30 09:51:32,564] 28 root - INFO - LLM model for text generation created.
14
+ [2024-10-30 09:51:32,568] 30 root - INFO - LLM model for medical text generation created.
15
+ [2024-10-30 09:51:44,217] 23 root - INFO - Avatar system initiated.
16
+ [2024-10-30 09:52:10,172] 33 root - INFO - Text response generated.
17
+ [2024-10-30 09:56:50,731] 31 root - INFO - Text response generated.
18
+ [2024-10-30 09:57:01,301] 32 root - INFO - Text response generated.
19
+ [2024-10-30 09:57:02,340] 30 root - INFO - Sentiment of response generated.
20
+ [2024-10-30 09:57:02,344] 36 root - INFO - Response sentiment received.
21
+ [2024-10-30 09:57:10,189] 19 root - INFO - Response text convertion to audio done.
22
+ [2024-10-30 09:57:10,189] 40 root - INFO - Generated response saved as audio mp3 format.
23
+ [2024-10-30 09:57:10,189] 67 root - INFO - Generated response stored in response.txt file in artifacts folder.
24
+ [2024-10-30 09:57:10,190] 73 root - INFO - Stored text loaded.
25
+ [2024-10-30 10:11:10,193] 28 root - INFO - LLM model for text generation created.
26
+ [2024-10-30 10:11:10,197] 30 root - INFO - LLM model for medical text generation created.
27
+ [2024-10-30 10:11:50,869] 23 root - INFO - Avatar system initiated.
28
+ [2024-10-30 10:12:42,676] 33 root - INFO - Text response generated.
29
+ [2024-10-30 10:13:10,566] 29 root - INFO - Docbot Text response generated.
30
+ [2024-10-30 10:13:11,035] 30 root - INFO - Sentiment of response generated.
31
+ [2024-10-30 10:13:11,035] 36 root - INFO - Response sentiment received.
32
+ [2024-10-30 10:15:41,599] 19 root - INFO - Response text convertion to audio done.
33
+ [2024-10-30 10:15:41,599] 40 root - INFO - Generated response saved as audio mp3 format.
34
+ [2024-10-30 10:15:41,606] 67 root - INFO - Generated response stored in response.txt file in artifacts folder.
35
+ [2024-10-30 10:15:41,606] 73 root - INFO - Stored text loaded.
36
+ [2024-10-30 10:22:00,106] 28 root - INFO - LLM model for text generation created.
37
+ [2024-10-30 10:22:00,106] 30 root - INFO - LLM model for medical text generation created.
38
+ [2024-10-30 10:22:02,829] 22 root - INFO - Avatar system initiated.
39
+ [2024-10-30 10:22:33,338] 28 root - INFO - LLM model for text generation created.
40
+ [2024-10-30 10:22:33,338] 30 root - INFO - LLM model for medical text generation created.
41
+ [2024-10-30 10:22:36,576] 22 root - INFO - Avatar system initiated.
42
+ [2024-10-30 10:22:58,937] 28 root - INFO - LLM model for text generation created.
43
+ [2024-10-30 10:22:58,937] 30 root - INFO - LLM model for medical text generation created.
44
+ [2024-10-30 10:23:02,005] 22 root - INFO - Avatar system initiated.
45
+ [2024-10-30 10:24:18,130] 28 root - INFO - LLM model for text generation created.
46
+ [2024-10-30 10:24:18,130] 30 root - INFO - LLM model for medical text generation created.
47
+ [2024-10-30 10:24:20,276] 22 root - INFO - Avatar system initiated.
48
+ [2024-10-30 10:24:33,787] 28 root - INFO - LLM model for text generation created.
49
+ [2024-10-30 10:24:33,788] 30 root - INFO - LLM model for medical text generation created.
50
+ [2024-10-30 10:24:37,352] 22 root - INFO - Avatar system initiated.
51
+ [2024-10-30 10:25:15,562] 28 root - INFO - LLM model for text generation created.
52
+ [2024-10-30 10:25:15,566] 30 root - INFO - LLM model for medical text generation created.
53
+ [2024-10-30 10:25:18,220] 22 root - INFO - Avatar system initiated.
54
+ [2024-10-30 10:25:51,124] 28 root - INFO - LLM model for text generation created.
55
+ [2024-10-30 10:25:51,124] 30 root - INFO - LLM model for medical text generation created.
56
+ [2024-10-30 10:25:53,372] 22 root - INFO - Avatar system initiated.
57
+ [2024-10-30 10:27:31,296] 28 root - INFO - LLM model for text generation created.
58
+ [2024-10-30 10:27:31,297] 30 root - INFO - LLM model for medical text generation created.
59
+ [2024-10-30 10:27:33,655] 22 root - INFO - Avatar system initiated.
60
+ [2024-10-30 10:27:41,868] 31 root - INFO - Text response generated.
61
+ [2024-10-30 10:27:43,255] 28 root - INFO - Text response generated.
62
+ [2024-10-30 10:27:43,536] 30 root - INFO - Sentiment of response generated.
63
+ [2024-10-30 10:27:43,553] 32 root - INFO - Response sentiment received.
64
+ [2024-10-30 10:27:44,542] 19 root - INFO - Response text convertion to audio done.
65
+ [2024-10-30 10:27:44,542] 36 root - INFO - Generated response saved as audio mp3 format.
66
+ [2024-10-30 10:27:44,542] 67 root - INFO - Generated response stored in response.txt file in artifacts folder.
67
+ [2024-10-30 10:27:44,550] 73 root - INFO - Stored text loaded.
68
+ [2024-10-30 10:31:37,573] 28 root - INFO - LLM model for text generation created.
69
+ [2024-10-30 10:31:37,574] 30 root - INFO - LLM model for medical text generation created.
70
+ [2024-10-30 10:31:40,587] 22 root - INFO - Avatar system initiated.
71
+ [2024-10-30 10:31:45,825] 31 root - INFO - Text response generated.
72
+ [2024-10-30 10:31:47,369] 28 root - INFO - Text response generated.
73
+ [2024-10-30 10:31:47,521] 30 root - INFO - Sentiment of response generated.
74
+ [2024-10-30 10:31:47,522] 32 root - INFO - Response sentiment received.
75
+ [2024-10-30 10:31:48,241] 19 root - INFO - Response text convertion to audio done.
76
+ [2024-10-30 10:31:48,241] 36 root - INFO - Generated response saved as audio mp3 format.
77
+ [2024-10-30 10:31:48,243] 67 root - INFO - Generated response stored in response.txt file in artifacts folder.
78
+ [2024-10-30 10:31:48,244] 73 root - INFO - Stored text loaded.
79
+ [2024-10-30 10:31:52,912] 31 root - INFO - Text response generated.
80
+ [2024-10-30 10:31:53,222] 28 root - INFO - Text response generated.
81
+ [2024-10-30 10:31:53,273] 30 root - INFO - Sentiment of response generated.
82
+ [2024-10-30 10:31:53,274] 32 root - INFO - Response sentiment received.
83
+ [2024-10-30 10:31:53,780] 19 root - INFO - Response text convertion to audio done.
84
+ [2024-10-30 10:31:53,780] 36 root - INFO - Generated response saved as audio mp3 format.
85
+ [2024-10-30 10:31:53,780] 67 root - INFO - Generated response stored in response.txt file in artifacts folder.
86
+ [2024-10-30 10:31:53,782] 73 root - INFO - Stored text loaded.
87
+ [2024-10-30 10:31:58,394] 19 root - INFO - Response text convertion to audio done.
88
+ [2024-10-30 10:31:58,606] 23 root - INFO - Generated audio file loaded
89
+ [2024-10-30 10:32:01,688] 32 root - INFO - Created audio file removed for entry of new file.
90
+ [2024-10-30 10:37:51,073] 31 root - INFO - Text response generated.
91
+ [2024-10-30 10:37:59,434] 28 root - INFO - Text response generated.
92
+ [2024-10-30 10:38:00,049] 30 root - INFO - Sentiment of response generated.
93
+ [2024-10-30 10:38:00,050] 32 root - INFO - Response sentiment received.
94
+ [2024-10-30 10:38:44,565] 19 root - INFO - Response text convertion to audio done.
95
+ [2024-10-30 10:38:44,566] 36 root - INFO - Generated response saved as audio mp3 format.
96
+ [2024-10-30 10:38:44,567] 67 root - INFO - Generated response stored in response.txt file in artifacts folder.
97
+ [2024-10-30 10:38:44,569] 73 root - INFO - Stored text loaded.
logs/10_30_2024_10_55_55.log ADDED
File without changes
src/components/__pycache__/textprocess.cpython-311.pyc CHANGED
Binary files a/src/components/__pycache__/textprocess.cpython-311.pyc and b/src/components/__pycache__/textprocess.cpython-311.pyc differ
 
src/components/avatarsys.py CHANGED
@@ -4,6 +4,7 @@ from src.components.textprocess import TextProcessor
4
  from src.components.docchat import DocChatProcessor
5
  from src.components.emotionanalyz import EmotionAnalyzer
6
  from src.components.voicesynth import VoiceSynthesizer
 
7
 
8
  class AvatarConfig:
9
  def __init__(self):
@@ -22,20 +23,23 @@ class AvatarSystem:
22
  logging.info("Avatar system initiated.")
23
 
24
  def process_input(self, user_input):
25
- # Generate response
26
-
27
- response = self.text_processor.generate_response(user_input)
28
- logging.info("Text response generated.")
29
 
30
- # Analyze emotion
31
- emotion = self.emotion_analyzer.analyze_emotion(response)
32
- logging.info("Response sentiment received.")
33
-
34
- # Synthesize voice and saves as mp3 file
35
- self.voice_synthesiser.synthesize_speech(response)
36
- logging.info("Generated response saved as audio mp3 format.")
 
 
 
 
 
37
 
38
- return {
39
- 'response_text': response,
40
- 'emotion': emotion
41
- }
 
4
  from src.components.docchat import DocChatProcessor
5
  from src.components.emotionanalyz import EmotionAnalyzer
6
  from src.components.voicesynth import VoiceSynthesizer
7
+ import sys
8
 
9
  class AvatarConfig:
10
  def __init__(self):
 
23
  logging.info("Avatar system initiated.")
24
 
25
  def process_input(self, user_input):
26
+ try:
27
+ # Generate response
28
+ response = self.text_processor.generate_response(user_input)
29
+ logging.info("Text response generated.")
30
 
31
+ # Analyze emotion
32
+ emotion = self.emotion_analyzer.analyze_emotion(response)
33
+ logging.info("Response sentiment received.")
34
+
35
+ # Synthesize the voice response and save it as an mp3 file
36
+ self.voice_synthesiser.synthesize_speech(response)
37
+ logging.info("Generated response saved as audio mp3 format.")
38
+
39
+ return {
40
+ 'response_text': response,
41
+ 'emotion': emotion
42
+ }
43
 
44
+ except Exception as e:
45
+ raise customexception(e,sys)
 
 
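For context, a minimal sketch of how the dictionary returned by process_input above might be consumed. This is hypothetical: the constructor arguments for AvatarSystem are not shown in this hunk, so a no-argument constructor is assumed.

# Hypothetical usage sketch; assumes AvatarSystem() takes no constructor arguments.
from src.components.avatarsys import AvatarSystem

avatar = AvatarSystem()
result = avatar.process_input("Who was Pythagoras?")

print(result['response_text'])  # generated answer text
print(result['emotion'])        # 'negative', 'neutral', or 'positive'
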
src/components/docchat.py CHANGED
@@ -1,9 +1,7 @@
1
  # Text generation
2
  from src.logger.logger import logging
3
  from src.exception.exception import customexception
4
- # import os
5
- # from dotenv import load_dotenv
6
- # load_dotenv()
7
  from langchain_huggingface import HuggingFaceEndpoint
8
 
9
  # Text generation model
@@ -30,5 +28,8 @@ class DocChatProcessor:
30
  logging.info("LLM model for medical text generation created.")
31
 
32
  def generate_response(self, input_text):
33
- logging.info("Text response generated.")
34
- return self.llm.invoke(input_text)
 
 
 
 
1
  # Text generation
2
  from src.logger.logger import logging
3
  from src.exception.exception import customexception
4
+ import sys
 
 
5
  from langchain_huggingface import HuggingFaceEndpoint
6
 
7
  # Text generation model
 
28
  logging.info("LLM model for medical text generation created.")
29
 
30
  def generate_response(self, input_text):
31
+ try:
32
+ logging.info("Text response generated.")
33
+ return self.llm.invoke(input_text)
34
+ except Exception as e:
35
+ raise customexception(e,sys)
src/components/emotionanalyz.py CHANGED
@@ -4,6 +4,7 @@ from src.logger.logger import logging
4
  from transformers import AutoTokenizer
5
  from transformers import AutoModelForSequenceClassification
6
  from scipy.special import softmax
 
7
 
8
  # Pretrained model
9
  MODEL = f"cardiffnlp/twitter-roberta-base-sentiment"
@@ -17,16 +18,19 @@ class EmotionAnalyzer:
17
 
18
  # Roberta model
19
  def analyze_emotion(self, text):
20
- encoded_text = tokenizer(text, return_tensors='pt')
21
- output = self.emotion_classifier(**encoded_text)
22
- scores = output[0][0].detach().numpy()
23
- scores = softmax(scores)
24
- scores_dict = {
25
- 'negative': scores[0],
26
- 'neutral': scores[1],
27
- 'positive': scores[2],
28
- }
29
- self.emotion = max(scores_dict)
30
- logging.info("Sentiment of response generated.")
 
31
 
32
- return self.emotion
 
 
 
4
  from transformers import AutoTokenizer
5
  from transformers import AutoModelForSequenceClassification
6
  from scipy.special import softmax
7
+ import sys
8
 
9
  # Pretrained model
10
  MODEL = f"cardiffnlp/twitter-roberta-base-sentiment"
 
18
 
19
  # Roberta model
20
  def analyze_emotion(self, text):
21
+ try:
22
+ encoded_text = tokenizer(text, return_tensors='pt')
23
+ output = self.emotion_classifier(**encoded_text)
24
+ scores = output[0][0].detach().numpy()
25
+ scores = softmax(scores)
26
+ scores_dict = {
27
+ 'negative': scores[0],
28
+ 'neutral': scores[1],
29
+ 'positive': scores[2],
30
+ }
31
+ self.emotion = max(scores_dict, key=scores_dict.get)
32
+ logging.info("Sentiment of response generated.")
33
 
34
+ return self.emotion
35
+ except Exception as e:
36
+ raise customexception(e,sys)
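A note on the label selection in analyze_emotion above: max() over a plain dict compares the key strings themselves, not the values, so the dominant sentiment should be picked by score with key=scores_dict.get. A minimal standalone illustration (the scores below are hypothetical):

# Hypothetical softmax scores, for illustration only
scores_dict = {'negative': 0.70, 'neutral': 0.20, 'positive': 0.10}

print(max(scores_dict))                       # 'positive' -- compares key strings, not scores
print(max(scores_dict, key=scores_dict.get))  # 'negative' -- the label with the highest score
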
src/components/textprocess.py CHANGED
@@ -1,9 +1,7 @@
1
  # Text generation
2
  from src.logger.logger import logging
3
  from src.exception.exception import customexception
4
- # import os
5
- # from dotenv import load_dotenv
6
- # load_dotenv()
7
  from langchain_huggingface import HuggingFaceEndpoint
8
 
9
  # Text generation model
@@ -22,11 +20,14 @@ class TextProcessor:
22
  repetition_penalty=1.03,
23
  streaming=False,
24
  # huggingfacehub_api_token= hf_token,
25
- stop_sequences=['?', '</s>', '.\n\n']
26
  )
27
 
28
  logging.info("LLM model for text generation created.")
29
 
30
  def generate_response(self, input_text):
31
- logging.info("Text response generated.")
32
- return self.llm.invoke(input_text)
 
 
 
 
1
  # Text generation
2
  from src.logger.logger import logging
3
  from src.exception.exception import customexception
4
+ import sys
 
 
5
  from langchain_huggingface import HuggingFaceEndpoint
6
 
7
  # Text generation model
 
20
  repetition_penalty=1.03,
21
  streaming=False,
22
  # huggingfacehub_api_token= hf_token,
23
+ # stop_sequences=['?', '</s>', '.\n\n']
24
  )
25
 
26
  logging.info("LLM model for text generation created.")
27
 
28
  def generate_response(self, input_text):
29
+ try:
30
+ logging.info("Text response generated.")
31
+ return self.llm.invoke(input_text)
32
+ except Exception as e:
33
+ raise customexception(e,sys)
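For reference, a minimal sketch of constructing and calling a HuggingFaceEndpoint like the one configured above. The repo_id and parameter values here are hypothetical placeholders, since the real ones sit above this hunk and are not shown.

# Sketch only: repo_id and parameter values are hypothetical placeholders.
from langchain_huggingface import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",  # hypothetical model id
    max_new_tokens=256,
    temperature=0.7,
    repetition_penalty=1.03,
    streaming=False,
)

print(llm.invoke("Who was Pythagoras?"))
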
src/components/voicesynth.py CHANGED
@@ -2,18 +2,23 @@
2
  from src.logger.logger import logging
3
  from src.exception.exception import customexception
4
  from gtts import gTTS
 
5
 
6
  class VoiceSynthesizer:
7
  def __init__(self):
8
  pass
9
 
10
- def synthesize_speech(self, text):
11
- # Text to English
12
- language = 'en'
 
13
 
14
- # Conversion engine
15
- converter = gTTS(text=text, lang=language, slow=False)
16
 
17
- # Saving the converted audio in a mp3 file named
18
- converter.save("artifacts/Audio.mp3")
19
- logging.info("Response text convertion to audio done.")
 
 
 
 
2
  from src.logger.logger import logging
3
  from src.exception.exception import customexception
4
  from gtts import gTTS
5
+ import sys
6
 
7
  class VoiceSynthesizer:
8
  def __init__(self):
9
  pass
10
 
11
+ def synthesize_speech(self, text):
12
+ try:
13
+ # Language of the synthesized speech
14
+ language = 'en'
15
 
16
+ # Conversion engine
17
+ converter = gTTS(text=text, lang=language, slow=False)
18
 
19
+ # Save the converted audio as an mp3 file (artifacts/Audio.mp3)
20
+ converter.save("artifacts/Audio.mp3")
21
+ logging.info("Response text convertion to audio done.")
22
+
23
+ except Exception as e:
24
+ raise customexception(e,sys)
src/utils/accessory.py CHANGED
@@ -1,4 +1,5 @@
1
  import os
 
2
  from pathlib import Path
3
  import speech_recognition as sr
4
  import pyttsx3
@@ -11,34 +12,41 @@ from src.exception.exception import customexception
11
  r = sr.Recognizer()
12
 
13
  def play_speech(text):
14
- # Voice syntesizer initiation
15
- converter = VoiceSynthesizer()
16
- converter.synthesize_speech(text)
 
 
 
 
17
 
18
- # Initialize the mixer module
19
- pygame.mixer.init()
 
20
 
21
- # Load the mp3 file
22
- pygame.mixer.music.load("artifacts/Audio.mp3")
23
- logging.info("Generated audio file loaded")
 
 
 
 
 
24
 
25
- # Play the loaded mp3 file
26
- pygame.mixer.music.play()
27
- pygame.mixer.music.get_endevent()
28
- while pygame.mixer.music.get_busy():
29
- continue
30
- pygame.mixer.music.load("artifacts/Audio_copy.mp3")
31
- os.remove("artifacts/Audio.mp3")
32
- logging.info("Created audio file removed for entry of new file.")
33
 
34
  # Function to convert text to
35
  # speech
36
  def SpeakText(command):
37
- # Initialize the engine
38
- engine = pyttsx3.init()
39
- engine.say(command)
40
- engine.runAndWait()
41
-
 
 
 
42
  def listen():
43
  # obtain audio from the microphone
44
  r = sr.Recognizer()
@@ -54,22 +62,30 @@ def listen():
54
  return text
55
 
56
  except sr.UnknownValueError:
57
- print("Google Speech Recognition could not understand audio")
58
  except sr.RequestError as e:
59
- print("Could not request results from Google Speech Recognition service; {0}".format(e))
 
 
60
 
61
  def save_output(output_data, output_dir="artifacts"):
62
- Path(output_dir).mkdir(exist_ok=True)
63
-
64
- # Save text response
65
- with open(f"{output_dir}/response.txt", "w") as f:
66
- f.write(output_data['response_text'])
67
- logging.info("Generated response stored in response.txt file in artifacts folder.")
 
 
 
68
 
69
- def load_output(output_dir="artifacts"):
70
- # Load text response
71
- with open(f"{output_dir}/response.txt", "r") as f:
72
- answer = f.read()
73
- logging.info("Stored text loaded.")
 
74
 
75
- return answer
 
 
 
1
  import os
2
+ import sys
3
  from pathlib import Path
4
  import speech_recognition as sr
5
  import pyttsx3
 
12
  r = sr.Recognizer()
13
 
14
  def play_speech(text):
15
+ try:
16
+ # Voice synthesizer initialization
17
+ converter = VoiceSynthesizer()
18
+ converter.synthesize_speech(text)
19
+
20
+ # Initialize the mixer module
21
+ pygame.mixer.init()
22
 
23
+ # Load the mp3 file
24
+ pygame.mixer.music.load("artifacts/Audio.mp3")
25
+ logging.info("Generated audio file loaded")
26
 
27
+ # Play the loaded mp3 file
28
+ pygame.mixer.music.play()
29
+ pygame.mixer.music.get_endevent()
30
+ while pygame.mixer.music.get_busy():
31
+ continue
32
+ pygame.mixer.music.load("artifacts/Audio_copy.mp3")
33
+ os.remove("artifacts/Audio.mp3")
34
+ logging.info("Created audio file removed for entry of new file.")
35
 
36
+ except Exception as e:
37
+ raise customexception(e,sys)
 
 
 
 
 
 
38
 
39
  # Function to convert text to
40
  # speech
41
  def SpeakText(command):
42
+ try:
43
+ # Initialize the engine
44
+ engine = pyttsx3.init()
45
+ engine.say(command)
46
+ engine.runAndWait()
47
+ except Exception as e:
48
+ raise customexception(e,sys)
49
+
50
  def listen():
51
  # obtain audio from the microphone
52
  r = sr.Recognizer()
 
62
  return text
63
 
64
  except sr.UnknownValueError:
65
+ raise customexception("Google Speech Recognition could not understand audio", sys)
66
  except sr.RequestError as e:
67
+ raise customexception("Could not request results from Google Speech Recognition service; {0}".format(e), sys)
68
+ except Exception as e:
69
+ raise customexception(e, sys)
70
 
71
  def save_output(output_data, output_dir="artifacts"):
72
+ try:
73
+ Path(output_dir).mkdir(exist_ok=True)
74
+
75
+ # Save text response
76
+ with open(f"{output_dir}/response.txt", "w") as f:
77
+ f.write(output_data['response_text'])
78
+ logging.info("Generated response stored in response.txt file in artifacts folder.")
79
+ except Exception as e:
80
+ raise customexception(e,sys)
81
 
82
+ def load_output(output_dir="artifacts"):
83
+ try:
84
+ # Load text response
85
+ with open(f"{output_dir}/response.txt", "r") as f:
86
+ answer = f.read()
87
+ logging.info("Stored text loaded.")
88
 
89
+ return answer
90
+ except Exception as e:
91
+ raise customexception(e,sys)
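
A closing note on play_speech above: the while pygame.mixer.music.get_busy(): continue loop busy-waits on a CPU core for the whole playback, and the extra load of "artifacts/Audio_copy.mp3" appears to exist only so pygame releases the first file before os.remove. A gentler sketch, assuming pygame 2.0+ (which provides mixer.music.unload):

# Sketch, assuming pygame 2.0+ for pygame.mixer.music.unload()
import os
import time
import pygame

def play_and_cleanup(path="artifacts/Audio.mp3"):
    pygame.mixer.init()
    pygame.mixer.music.load(path)
    pygame.mixer.music.play()
    while pygame.mixer.music.get_busy():
        time.sleep(0.1)              # yield the CPU instead of spinning
    pygame.mixer.music.unload()      # release the file handle
    os.remove(path)                  # now safe to delete the generated audio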