UMESH266 committed on
Commit
d6f7b0e
·
1 Parent(s): 694ec5c

Docchat removed

Browse files
app.py CHANGED
@@ -9,20 +9,20 @@ import speech_recognition as sr
9
  salutation = "Pleasure meeting you. Have a nice day!"
10
 
11
  # Page title
12
- st.title("Hi, Chatmate here!")
13
- st.markdown("<h3 style='text-align: center;'>Hello, I am your chatbot assistant.</h1>", unsafe_allow_html=True)
14
 
15
- mode = option_menu("Choose mode of interaction", ["Doc-Bot", "Text", "Voice"],
16
- icons=['heart-pulse','chat-text', 'mic'],
17
  menu_icon="cast", default_index=0, orientation="horizontal")
18
 
19
  if "HF_TOKEN" not in st.session_state:
20
  st.session_state.HF_TOKEN = ''
21
- st.write("Add your Huggingface Access Token to use the chatbot.")
22
- st.session_state.HF_TOKEN = st.text_input("Your Access Token: ")
23
 
24
  # Chatbot configuration initiation
25
- if st.session_state.HF_TOKEN != '':
26
  avatar = AvatarSystem(st.session_state.HF_TOKEN)
27
 
28
  def chat_history(input, response, sentiment):
@@ -31,9 +31,9 @@ def chat_history(input, response, sentiment):
31
  st.session_state.history[input] = [response, sentiment]
32
  return st.session_state.history
33
 
34
- def response(input_text, docbot):
35
  # Getting response and sentiment of response
36
- output = avatar.process_input(input_text, docbot)
37
  # Save output response in txt
38
  save_output(output)
39
  response_sentiment = output['emotion']
@@ -41,44 +41,7 @@ def response(input_text, docbot):
41
 
42
  return ans, response_sentiment
43
 
44
- if mode == "Doc-Bot" and st.session_state.HF_TOKEN != '':
45
- st.write("Doc-Bot implementation")
46
-
47
- if 'doc_chat_hist' not in st.session_state:
48
- st.session_state.doc_chat_hist = dict()
49
-
50
- # Form requires unique key
51
- with st.form(key=f'Chat form', clear_on_submit=True):
52
- user_input = st.text_input("You: ", value="", placeholder="Ask anything or Type 'Exit'")
53
- col1, col2, col3, col4, col5, col6 = st.columns(6)
54
- save = col6.form_submit_button("Click here")
55
-
56
- if save and user_input != "":
57
- user_input = user_input.lower() + '?'
58
-
59
- # Exiting the chat
60
- if 'exit' in user_input:
61
- st.write(salutation)
62
- play_speech(salutation)
63
-
64
- # Getting response and sentiment of response
65
- ans, senti = response(user_input, docbot=True)
66
-
67
- # Chat history
68
- st.session_state.doc_chat_hist = chat_history(user_input, ans, senti)
69
-
70
- # Chat history display
71
- st.markdown("### Chat History: ")
72
- with st.container(border=True):
73
- for key in st.session_state.doc_chat_hist.keys():
74
- user_col1, user_col2, user_col3 = st.columns(3, vertical_alignment="center")
75
- user = user_col3.container(border=True)
76
- user.write(key)
77
- bot_col1, bot_col2, bot_col3 = st.columns([4, 1, 1], vertical_alignment='center')
78
- bot = bot_col1.container(border=True)
79
- bot.write(st.session_state.doc_chat_hist[key][0])
80
-
81
- elif mode == "Text" and st.session_state.HF_TOKEN != '':
82
  if 'chathist' not in st.session_state:
83
  st.session_state.chathist = dict()
84
 
@@ -96,7 +59,7 @@ elif mode == "Text" and st.session_state.HF_TOKEN != '':
96
  st.write(salutation)
97
 
98
  # Getting response and sentiment of response
99
- ans, senti = response(user_input, docbot=False)
100
 
101
  # Chat history
102
  st.session_state.chathist = chat_history(user_input, ans, senti)
@@ -135,4 +98,42 @@ elif mode == "Voice" and st.session_state.HF_TOKEN != '':
135
  break
136
 
137
  #Getting response and sentiment of response
138
- response(user_input, docbot=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  salutation = "Pleasure meeting you. Have a nice day!"
10
 
11
  # Page title
12
+ st.markdown("<h1 style='text-align: center;'>Hi, Dr Bot Junior here.</h1>", unsafe_allow_html=True)
13
+ st.markdown("<h3 style='text-align: center;'>I am your chatbot assistant.</h3>", unsafe_allow_html=True)
14
 
15
+ mode = option_menu("Choose mode of interaction", ["Text", "Voice"],
16
+ icons=['heart-pulse', 'mic'],
17
  menu_icon="cast", default_index=0, orientation="horizontal")
18
 
19
  if "HF_TOKEN" not in st.session_state:
20
  st.session_state.HF_TOKEN = ''
21
+ # st.write("Add your Huggingface Access Token to use the chatbot.")
22
+ # st.session_state.HF_TOKEN = st.text_input("Your Access Token: ")
23
 
24
  # Chatbot configuration initiation
25
+ if st.session_state.HF_TOKEN == '':
26
  avatar = AvatarSystem(st.session_state.HF_TOKEN)
27
 
28
  def chat_history(input, response, sentiment):
 
31
  st.session_state.history[input] = [response, sentiment]
32
  return st.session_state.history
33
 
34
+ def response(input_text):
35
  # Getting response and sentiment of response
36
+ output = avatar.process_input(input_text)
37
  # Save output response in txt
38
  save_output(output)
39
  response_sentiment = output['emotion']
 
41
 
42
  return ans, response_sentiment
43
 
44
+ if mode == "Text" and st.session_state.HF_TOKEN == '':
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  if 'chathist' not in st.session_state:
46
  st.session_state.chathist = dict()
47
 
 
59
  st.write(salutation)
60
 
61
  # Getting response and sentiment of response
62
+ ans, senti = response(user_input)
63
 
64
  # Chat history
65
  st.session_state.chathist = chat_history(user_input, ans, senti)
 
98
  break
99
 
100
  #Getting response and sentiment of response
101
+ response(user_input)
102
+
103
+
104
+ # if mode == "Doc-Bot" and st.session_state.HF_TOKEN != '':
105
+ # st.write("Doc-Bot implementation")
106
+
107
+ # if 'doc_chat_hist' not in st.session_state:
108
+ # st.session_state.doc_chat_hist = dict()
109
+
110
+ # # Form requires unique key
111
+ # with st.form(key=f'Chat form', clear_on_submit=True):
112
+ # user_input = st.text_input("You: ", value="", placeholder="Ask anything or Type 'Exit'")
113
+ # col1, col2, col3, col4, col5, col6 = st.columns(6)
114
+ # save = col6.form_submit_button("Click here")
115
+
116
+ # if save and user_input != "":
117
+ # user_input = user_input.lower() + '?'
118
+
119
+ # # Exiting the chat
120
+ # if 'exit' in user_input:
121
+ # st.write(salutation)
122
+ # play_speech(salutation)
123
+
124
+ # # Getting response and sentiment of response
125
+ # ans, senti = response(user_input, docbot=True)
126
+
127
+ # # Chat history
128
+ # st.session_state.doc_chat_hist = chat_history(user_input, ans, senti)
129
+
130
+ # # Chat history display
131
+ # st.markdown("### Chat History: ")
132
+ # with st.container(border=True):
133
+ # for key in st.session_state.doc_chat_hist.keys():
134
+ # user_col1, user_col2, user_col3 = st.columns(3, vertical_alignment="center")
135
+ # user = user_col3.container(border=True)
136
+ # user.write(key)
137
+ # bot_col1, bot_col2, bot_col3 = st.columns([4, 1, 1], vertical_alignment='center')
138
+ # bot = bot_col1.container(border=True)
139
+ # bot.write(st.session_state.doc_chat_hist[key][0])
artifacts/Audio.mp3 ADDED
Binary file (29.4 kB). View file
 
artifacts/Docmate.ipynb CHANGED
@@ -821,84 +821,69 @@
821
  },
822
  {
823
  "cell_type": "code",
824
- "execution_count": 2,
825
  "metadata": {},
826
  "outputs": [
827
  {
828
- "name": "stdout",
829
  "output_type": "stream",
830
  "text": [
831
- "Collecting streamlit-TTS\n",
832
- " Using cached streamlit_TTS-0.0.7-py3-none-any.whl.metadata (3.3 kB)\n",
833
- "Requirement already satisfied: streamlit>=0.63 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit-TTS) (1.36.0)\n",
834
- "Requirement already satisfied: pydub in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit-TTS) (0.25.1)\n",
835
- "Requirement already satisfied: gtts in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit-TTS) (2.5.3)\n",
836
- "Requirement already satisfied: altair<6,>=4.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (4.0.0)\n",
837
- "Requirement already satisfied: blinker<2,>=1.0.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (1.8.2)\n",
838
- "Requirement already satisfied: cachetools<6,>=4.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (5.4.0)\n",
839
- "Requirement already satisfied: click<9,>=7.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (8.1.7)\n",
840
- "Requirement already satisfied: numpy<3,>=1.20 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (1.23.5)\n",
841
- "Requirement already satisfied: packaging<25,>=20 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (23.2)\n",
842
- "Requirement already satisfied: pandas<3,>=1.3.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (2.2.1)\n",
843
- "Requirement already satisfied: pillow<11,>=7.1.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (10.3.0)\n",
844
- "Requirement already satisfied: protobuf<6,>=3.20 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (4.25.5)\n",
845
- "Requirement already satisfied: pyarrow>=7.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (17.0.0)\n",
846
- "Requirement already satisfied: requests<3,>=2.27 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (2.32.3)\n",
847
- "Requirement already satisfied: rich<14,>=10.14.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (13.7.1)\n",
848
- "Requirement already satisfied: tenacity<9,>=8.1.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (8.5.0)\n",
849
- "Requirement already satisfied: toml<2,>=0.10.1 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (0.10.2)\n",
850
- "Requirement already satisfied: typing-extensions<5,>=4.3.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (4.9.0)\n",
851
- "Requirement already satisfied: gitpython!=3.1.19,<4,>=3.0.7 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (3.1.43)\n",
852
- "Requirement already satisfied: pydeck<1,>=0.8.0b4 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (0.9.1)\n",
853
- "Requirement already satisfied: tornado<7,>=6.0.3 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (6.3.3)\n",
854
- "Requirement already satisfied: watchdog<5,>=2.1.5 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from streamlit>=0.63->streamlit-TTS) (4.0.1)\n",
855
- "Requirement already satisfied: entrypoints in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from altair<6,>=4.0->streamlit>=0.63->streamlit-TTS) (0.4)\n",
856
- "Requirement already satisfied: jinja2 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from altair<6,>=4.0->streamlit>=0.63->streamlit-TTS) (3.1.3)\n",
857
- "Requirement already satisfied: jsonschema in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from altair<6,>=4.0->streamlit>=0.63->streamlit-TTS) (4.19.2)\n",
858
- "Requirement already satisfied: toolz in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from altair<6,>=4.0->streamlit>=0.63->streamlit-TTS) (0.12.1)\n",
859
- "Requirement already satisfied: colorama in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from click<9,>=7.0->streamlit>=0.63->streamlit-TTS) (0.4.6)\n",
860
- "Requirement already satisfied: gitdb<5,>=4.0.1 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from gitpython!=3.1.19,<4,>=3.0.7->streamlit>=0.63->streamlit-TTS) (4.0.11)\n",
861
- "Requirement already satisfied: python-dateutil>=2.8.2 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from pandas<3,>=1.3.0->streamlit>=0.63->streamlit-TTS) (2.8.2)\n",
862
- "Requirement already satisfied: pytz>=2020.1 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from pandas<3,>=1.3.0->streamlit>=0.63->streamlit-TTS) (2024.1)\n",
863
- "Requirement already satisfied: tzdata>=2022.7 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from pandas<3,>=1.3.0->streamlit>=0.63->streamlit-TTS) (2023.3)\n",
864
- "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from requests<3,>=2.27->streamlit>=0.63->streamlit-TTS) (3.3.2)\n",
865
- "Requirement already satisfied: idna<4,>=2.5 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from requests<3,>=2.27->streamlit>=0.63->streamlit-TTS) (3.7)\n",
866
- "Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from requests<3,>=2.27->streamlit>=0.63->streamlit-TTS) (2.1.0)\n",
867
- "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from requests<3,>=2.27->streamlit>=0.63->streamlit-TTS) (2024.7.4)\n",
868
- "Requirement already satisfied: markdown-it-py>=2.2.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from rich<14,>=10.14.0->streamlit>=0.63->streamlit-TTS) (3.0.0)\n",
869
- "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from rich<14,>=10.14.0->streamlit>=0.63->streamlit-TTS) (2.15.1)\n",
870
- "Requirement already satisfied: smmap<6,>=3.0.1 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from gitdb<5,>=4.0.1->gitpython!=3.1.19,<4,>=3.0.7->streamlit>=0.63->streamlit-TTS) (5.0.1)\n",
871
- "Requirement already satisfied: MarkupSafe>=2.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from jinja2->altair<6,>=4.0->streamlit>=0.63->streamlit-TTS) (2.1.3)\n",
872
- "Requirement already satisfied: mdurl~=0.1 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from markdown-it-py>=2.2.0->rich<14,>=10.14.0->streamlit>=0.63->streamlit-TTS) (0.1.2)\n",
873
- "Requirement already satisfied: six>=1.5 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from python-dateutil>=2.8.2->pandas<3,>=1.3.0->streamlit>=0.63->streamlit-TTS) (1.16.0)\n",
874
- "Requirement already satisfied: attrs>=22.2.0 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from jsonschema->altair<6,>=4.0->streamlit>=0.63->streamlit-TTS) (23.1.0)\n",
875
- "Requirement already satisfied: jsonschema-specifications>=2023.03.6 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from jsonschema->altair<6,>=4.0->streamlit>=0.63->streamlit-TTS) (2023.7.1)\n",
876
- "Requirement already satisfied: referencing>=0.28.4 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from jsonschema->altair<6,>=4.0->streamlit>=0.63->streamlit-TTS) (0.30.2)\n",
877
- "Requirement already satisfied: rpds-py>=0.7.1 in c:\\users\\umesh\\anaconda3\\envs\\genai\\lib\\site-packages (from jsonschema->altair<6,>=4.0->streamlit>=0.63->streamlit-TTS) (0.10.6)\n",
878
- "Using cached streamlit_TTS-0.0.7-py3-none-any.whl (446 kB)\n",
879
- "Installing collected packages: streamlit-TTS\n",
880
- "Successfully installed streamlit-TTS-0.0.7\n",
881
- "Note: you may need to restart the kernel to use updated packages.\n"
882
  ]
883
  },
884
  {
885
- "name": "stderr",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
886
  "output_type": "stream",
887
  "text": [
888
- "\n",
889
- "[notice] A new release of pip is available: 24.2 -> 24.3.1\n",
890
- "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
891
  ]
892
  }
893
  ],
894
  "source": [
895
- "pip install streamlit-TTS"
896
  ]
897
  }
898
  ],
899
  "metadata": {
900
  "kernelspec": {
901
- "display_name": "GenAI",
902
  "language": "python",
903
  "name": "python3"
904
  },
@@ -912,7 +897,7 @@
912
  "name": "python",
913
  "nbconvert_exporter": "python",
914
  "pygments_lexer": "ipython3",
915
- "version": "3.11.9"
916
  }
917
  },
918
  "nbformat": 4,
 
821
  },
822
  {
823
  "cell_type": "code",
824
+ "execution_count": 1,
825
  "metadata": {},
826
  "outputs": [
827
  {
828
+ "name": "stderr",
829
  "output_type": "stream",
830
  "text": [
831
+ "c:\\Users\\umesh\\anaconda3\\envs\\DeepLearning\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
832
+ " from .autonotebook import tqdm as notebook_tqdm\n",
833
+ "c:\\Users\\umesh\\anaconda3\\envs\\DeepLearning\\lib\\site-packages\\torchvision\\io\\image.py:13: UserWarning: Failed to load image Python extension: '[WinError 127] The specified procedure could not be found'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?\n",
834
+ " warn(\n",
835
+ "c:\\Users\\umesh\\anaconda3\\envs\\DeepLearning\\lib\\site-packages\\torchvision\\datapoints\\__init__.py:12: UserWarning: The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. While we do not expect major breaking changes, some APIs may still change according to user feedback. Please submit any feedback you may have in this issue: https://github.com/pytorch/vision/issues/6753, and you can also check out https://github.com/pytorch/vision/issues/7319 to learn more about the APIs that we suspect might involve future changes. You can silence this warning by calling torchvision.disable_beta_transforms_warning().\n",
836
+ " warnings.warn(_BETA_TRANSFORMS_WARNING)\n",
837
+ "c:\\Users\\umesh\\anaconda3\\envs\\DeepLearning\\lib\\site-packages\\torchvision\\transforms\\v2\\__init__.py:54: UserWarning: The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. While we do not expect major breaking changes, some APIs may still change according to user feedback. Please submit any feedback you may have in this issue: https://github.com/pytorch/vision/issues/6753, and you can also check out https://github.com/pytorch/vision/issues/7319 to learn more about the APIs that we suspect might involve future changes. You can silence this warning by calling torchvision.disable_beta_transforms_warning().\n",
838
+ " warnings.warn(_BETA_TRANSFORMS_WARNING)\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
839
  ]
840
  },
841
  {
842
+ "ename": "ImportError",
843
+ "evalue": "Loading an AWQ quantized model requires auto-awq library (`pip install autoawq`)",
844
+ "output_type": "error",
845
+ "traceback": [
846
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
847
+ "\u001b[1;31mImportError\u001b[0m Traceback (most recent call last)",
848
+ "Cell \u001b[1;32mIn[1], line 4\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;66;03m# Use a pipeline as a high-level helper\u001b[39;00m\n\u001b[0;32m 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtransformers\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m pipeline\n\u001b[1;32m----> 4\u001b[0m pipe \u001b[38;5;241m=\u001b[39m \u001b[43mpipeline\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtext-generation\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mTheBloke/meditron-7B-chat-AWQ\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
849
+ "File \u001b[1;32mc:\\Users\\umesh\\anaconda3\\envs\\DeepLearning\\lib\\site-packages\\transformers\\pipelines\\__init__.py:926\u001b[0m, in \u001b[0;36mpipeline\u001b[1;34m(task, model, config, tokenizer, feature_extractor, image_processor, processor, framework, revision, use_fast, token, device, device_map, torch_dtype, trust_remote_code, model_kwargs, pipeline_class, **kwargs)\u001b[0m\n\u001b[0;32m 924\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(model, \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;129;01mor\u001b[39;00m framework \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 925\u001b[0m model_classes \u001b[38;5;241m=\u001b[39m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtf\u001b[39m\u001b[38;5;124m\"\u001b[39m: targeted_task[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtf\u001b[39m\u001b[38;5;124m\"\u001b[39m], \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpt\u001b[39m\u001b[38;5;124m\"\u001b[39m: targeted_task[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpt\u001b[39m\u001b[38;5;124m\"\u001b[39m]}\n\u001b[1;32m--> 926\u001b[0m framework, model \u001b[38;5;241m=\u001b[39m infer_framework_load_model(\n\u001b[0;32m 927\u001b[0m model,\n\u001b[0;32m 928\u001b[0m model_classes\u001b[38;5;241m=\u001b[39mmodel_classes,\n\u001b[0;32m 929\u001b[0m config\u001b[38;5;241m=\u001b[39mconfig,\n\u001b[0;32m 930\u001b[0m framework\u001b[38;5;241m=\u001b[39mframework,\n\u001b[0;32m 931\u001b[0m task\u001b[38;5;241m=\u001b[39mtask,\n\u001b[0;32m 932\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mhub_kwargs,\n\u001b[0;32m 933\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mmodel_kwargs,\n\u001b[0;32m 934\u001b[0m )\n\u001b[0;32m 936\u001b[0m model_config \u001b[38;5;241m=\u001b[39m model\u001b[38;5;241m.\u001b[39mconfig\n\u001b[0;32m 937\u001b[0m hub_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_commit_hash\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m 
model\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39m_commit_hash\n",
850
+ "File \u001b[1;32mc:\\Users\\umesh\\anaconda3\\envs\\DeepLearning\\lib\\site-packages\\transformers\\pipelines\\base.py:289\u001b[0m, in \u001b[0;36minfer_framework_load_model\u001b[1;34m(model, config, model_classes, task, framework, **model_kwargs)\u001b[0m\n\u001b[0;32m 283\u001b[0m logger\u001b[38;5;241m.\u001b[39mwarning(\n\u001b[0;32m 284\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mModel might be a PyTorch model (ending with `.bin`) but PyTorch is not available. \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 285\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTrying to load the model with Tensorflow.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 286\u001b[0m )\n\u001b[0;32m 288\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 289\u001b[0m model \u001b[38;5;241m=\u001b[39m model_class\u001b[38;5;241m.\u001b[39mfrom_pretrained(model, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m 290\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(model, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124meval\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m 291\u001b[0m model \u001b[38;5;241m=\u001b[39m model\u001b[38;5;241m.\u001b[39meval()\n",
851
+ "File \u001b[1;32mc:\\Users\\umesh\\anaconda3\\envs\\DeepLearning\\lib\\site-packages\\transformers\\models\\auto\\auto_factory.py:564\u001b[0m, in \u001b[0;36m_BaseAutoModelClass.from_pretrained\u001b[1;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[0;32m 562\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mtype\u001b[39m(config) \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_model_mapping\u001b[38;5;241m.\u001b[39mkeys():\n\u001b[0;32m 563\u001b[0m model_class \u001b[38;5;241m=\u001b[39m _get_model_class(config, \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_model_mapping)\n\u001b[1;32m--> 564\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m model_class\u001b[38;5;241m.\u001b[39mfrom_pretrained(\n\u001b[0;32m 565\u001b[0m pretrained_model_name_or_path, \u001b[38;5;241m*\u001b[39mmodel_args, config\u001b[38;5;241m=\u001b[39mconfig, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mhub_kwargs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs\n\u001b[0;32m 566\u001b[0m )\n\u001b[0;32m 567\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m 568\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUnrecognized configuration class \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mconfig\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m for this kind of AutoModel: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 569\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mModel type should be one of \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m, 
\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(c\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mfor\u001b[39;00m\u001b[38;5;250m \u001b[39mc\u001b[38;5;250m \u001b[39m\u001b[38;5;129;01min\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_model_mapping\u001b[38;5;241m.\u001b[39mkeys())\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 570\u001b[0m )\n",
852
+ "File \u001b[1;32mc:\\Users\\umesh\\anaconda3\\envs\\DeepLearning\\lib\\site-packages\\transformers\\modeling_utils.py:3656\u001b[0m, in \u001b[0;36mPreTrainedModel.from_pretrained\u001b[1;34m(cls, pretrained_model_name_or_path, config, cache_dir, ignore_mismatched_sizes, force_download, local_files_only, token, revision, use_safetensors, weights_only, *model_args, **kwargs)\u001b[0m\n\u001b[0;32m 3653\u001b[0m hf_quantizer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m 3655\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m hf_quantizer \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m-> 3656\u001b[0m \u001b[43mhf_quantizer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mvalidate_environment\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 3657\u001b[0m \u001b[43m \u001b[49m\u001b[43mtorch_dtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtorch_dtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfrom_tf\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfrom_tf\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfrom_flax\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfrom_flax\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice_map\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdevice_map\u001b[49m\n\u001b[0;32m 3658\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 3659\u001b[0m torch_dtype \u001b[38;5;241m=\u001b[39m hf_quantizer\u001b[38;5;241m.\u001b[39mupdate_torch_dtype(torch_dtype)\n\u001b[0;32m 3660\u001b[0m device_map \u001b[38;5;241m=\u001b[39m hf_quantizer\u001b[38;5;241m.\u001b[39mupdate_device_map(device_map)\n",
853
+ "File \u001b[1;32mc:\\Users\\umesh\\anaconda3\\envs\\DeepLearning\\lib\\site-packages\\transformers\\quantizers\\quantizer_awq.py:50\u001b[0m, in \u001b[0;36mAwqQuantizer.validate_environment\u001b[1;34m(self, device_map, **kwargs)\u001b[0m\n\u001b[0;32m 48\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mvalidate_environment\u001b[39m(\u001b[38;5;28mself\u001b[39m, device_map, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m 49\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_auto_awq_available():\n\u001b[1;32m---> 50\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mImportError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mLoading an AWQ quantized model requires auto-awq library (`pip install autoawq`)\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 52\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_accelerate_available():\n\u001b[0;32m 53\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mImportError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mLoading an AWQ quantized model requires accelerate (`pip install accelerate`)\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
854
+ "\u001b[1;31mImportError\u001b[0m: Loading an AWQ quantized model requires auto-awq library (`pip install autoawq`)"
855
+ ]
856
+ }
857
+ ],
858
+ "source": [
859
+ "# Use a pipeline as a high-level helper\n",
860
+ "from transformers import pipeline\n",
861
+ "\n",
862
+ "pipe = pipeline(\"text-generation\", model=\"TheBloke/meditron-7B-chat-AWQ\")"
863
+ ]
864
+ },
865
+ {
866
+ "cell_type": "code",
867
+ "execution_count": 2,
868
+ "metadata": {},
869
+ "outputs": [
870
+ {
871
+ "name": "stdout",
872
  "output_type": "stream",
873
  "text": [
874
+ "^C\n",
875
+ "Note: you may need to restart the kernel to use updated packages.\n"
 
876
  ]
877
  }
878
  ],
879
  "source": [
880
+ "pip install autoawq"
881
  ]
882
  }
883
  ],
884
  "metadata": {
885
  "kernelspec": {
886
+ "display_name": "DeepLearning",
887
  "language": "python",
888
  "name": "python3"
889
  },
 
897
  "name": "python",
898
  "nbconvert_exporter": "python",
899
  "pygments_lexer": "ipython3",
900
+ "version": "3.10.13"
901
  }
902
  },
903
  "nbformat": 4,
artifacts/response.txt CHANGED
@@ -1,3 +1,3 @@
1
 
2
 
3
- I am a 25 year old woman from the Netherlands. I have been working as a software developer for 4 years now, and I am currently working at a company that specializes in developing software for the healthcare industry. In my free time, I enjoy playing video games, reading books, and spending time with my friends and family
 
1
 
2
 
3
+ I'm doing well, thank you! How about you
logs/10_30_2024_09_32_06.log ADDED
File without changes
src/components/__pycache__/avatarsys.cpython-311.pyc CHANGED
Binary files a/src/components/__pycache__/avatarsys.cpython-311.pyc and b/src/components/__pycache__/avatarsys.cpython-311.pyc differ
 
src/components/__pycache__/docchat.cpython-311.pyc CHANGED
Binary files a/src/components/__pycache__/docchat.cpython-311.pyc and b/src/components/__pycache__/docchat.cpython-311.pyc differ
 
src/components/__pycache__/textprocess.cpython-311.pyc CHANGED
Binary files a/src/components/__pycache__/textprocess.cpython-311.pyc and b/src/components/__pycache__/textprocess.cpython-311.pyc differ
 
src/components/avatarsys.py CHANGED
@@ -10,7 +10,6 @@ class AvatarConfig:
10
  self.image_size = 512
11
  self.voice_sample_rate = 22050
12
  self.max_text_length = 512
13
- # self.emotion_categories = ['negative','neutral', 'positive']
14
 
15
  class AvatarSystem:
16
  def __init__(self, hf_token):
@@ -18,18 +17,15 @@ class AvatarSystem:
18
  self.text_processor = TextProcessor(hf_token)
19
  self.emotion_analyzer = EmotionAnalyzer()
20
  self.voice_synthesiser = VoiceSynthesizer()
21
- self.doc_chat_processor = DocChatProcessor(hf_token)
22
 
23
  logging.info("Avatar system initiated.")
24
 
25
- def process_input(self, user_input, docbot):
26
  # Generate response
27
- if docbot:
28
- response = self.doc_chat_processor.generate_response(user_input)
29
- logging.info("Docbot Text response generated.")
30
- else:
31
- response = self.text_processor.generate_response(user_input)
32
- logging.info("Text response generated.")
33
 
34
  # Analyze emotion
35
  emotion = self.emotion_analyzer.analyze_emotion(response)
 
10
  self.image_size = 512
11
  self.voice_sample_rate = 22050
12
  self.max_text_length = 512
 
13
 
14
  class AvatarSystem:
15
  def __init__(self, hf_token):
 
17
  self.text_processor = TextProcessor(hf_token)
18
  self.emotion_analyzer = EmotionAnalyzer()
19
  self.voice_synthesiser = VoiceSynthesizer()
20
+ # self.doc_chat_processor = DocChatProcessor(hf_token)
21
 
22
  logging.info("Avatar system initiated.")
23
 
24
+ def process_input(self, user_input):
25
  # Generate response
26
+
27
+ response = self.text_processor.generate_response(user_input)
28
+ logging.info("Text response generated.")
 
 
 
29
 
30
  # Analyze emotion
31
  emotion = self.emotion_analyzer.analyze_emotion(response)
src/components/docchat.py CHANGED
@@ -9,7 +9,8 @@ from langchain_huggingface import HuggingFaceEndpoint
9
  # Text generation model
10
  # repo_id="Laim/Llama-3.1-MedPalm2-imitate-8B-Instruct"
11
  # repo_id="Joycean0301/Llama-3.2-3B-Instruct-Medical-Conversational"
12
- repo_id = "TheBloke/medalpaca-13B-GGML"
 
13
 
14
  class DocChatProcessor:
15
  def __init__(self, hf_token):
 
9
  # Text generation model
10
  # repo_id="Laim/Llama-3.1-MedPalm2-imitate-8B-Instruct"
11
  # repo_id="Joycean0301/Llama-3.2-3B-Instruct-Medical-Conversational"
12
+ # repo_id = "TheBloke/medalpaca-13B-GGML"
13
+ repo_id="mistralai/Mistral-7B-Instruct-v0.3"
14
 
15
  class DocChatProcessor:
16
  def __init__(self, hf_token):
src/components/textprocess.py CHANGED
@@ -21,7 +21,7 @@ class TextProcessor:
21
  temperature=0.01,
22
  repetition_penalty=1.03,
23
  streaming=False,
24
- huggingfacehub_api_token= hf_token,
25
  stop_sequences=['?', '</s>', '.\n\n']
26
  )
27
 
 
21
  temperature=0.01,
22
  repetition_penalty=1.03,
23
  streaming=False,
24
+ # huggingfacehub_api_token= hf_token,
25
  stop_sequences=['?', '</s>', '.\n\n']
26
  )
27