{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Install dependencies into the user site.\n",
"# Fix: the original ran `!php install`, which is a typo for `pip install`.\n",
"# Use the %pip magic (not !pip) so packages go into the active kernel's env.\n",
"%pip install --user gradio\n",
"%pip install --user transformers\n",
"%pip install --user HanziConv"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Unicode\n",
"import gradio as gr\n",
"from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration\n",
"\n",
"model_name = \"facebook/blenderbot-400M-distill\"\n",
"tokenizer = BlenderbotTokenizer.from_pretrained(model_name)\n",
"model = BlenderbotForConditionalGeneration.from_pretrained(model_name)\n",
"\n",
"def translate(text, mode):\n",
"    \"\"\"Translate `text` between Chinese and English with liam168 MT models.\n",
"\n",
"    mode: \"ztoe\" (Chinese -> English) or \"etoz\" (English -> Chinese).\n",
"    Returns the raw pipeline output, a list like [{'translation_text': ...}].\n",
"    Raises ValueError for any other mode (the original fell through and hit\n",
"    a NameError on an undefined local instead).\n",
"    \"\"\"\n",
"    # AutoModelWithLMHead is deprecated/removed in recent transformers;\n",
"    # AutoModelForSeq2SeqLM is the correct class for these MarianMT models.\n",
"    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline\n",
"    if mode == \"ztoe\":\n",
"        model_id = 'liam168/trans-opus-mt-zh-en'\n",
"        task = \"translation_zh_to_en\"\n",
"    elif mode == \"etoz\":\n",
"        model_id = 'liam168/trans-opus-mt-en-zh'\n",
"        task = \"translation_en_to_zh\"\n",
"    else:\n",
"        raise ValueError(f\"unknown translation mode: {mode!r}\")\n",
"    # Local names avoid shadowing the module-level BlenderBot model/tokenizer.\n",
"    mt_model = AutoModelForSeq2SeqLM.from_pretrained(model_id)\n",
"    mt_tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
"    translator = pipeline(task, model=mt_model, tokenizer=mt_tokenizer)\n",
"    return translator(text, max_length=400)\n",
"\n",
"\n",
"chat_history=[]\n",
"#chat_history.append(f\"Hello i am your first bot friendπ€. Give me a name and say something!\")\n",
"\n",
"\n",
"def add_emoji(response):\n",
" # Define the keywords and their corresponding emojis\n",
" keyword_emoji_dict = {\n",
" \"happy\": \"π\",\n",
" \"sad\": \"π’\",\n",
" \"sorry\":\"π\",\n",
" \"love\": \"β€οΈ\",\n",
" \"like\": \"π\",\n",
" \"dislike\": \"π\",\n",
" \"Why\": \"π₯Ί\",\n",
" \"cat\":\"π±\",\n",
" \"dog\":\"πΆ\",\n",
" \"ε¨\" : \"π\"\n",
" \n",
" }\n",
" for keyword, emoji in keyword_emoji_dict.items():\n",
" response = response.replace(keyword, f\"{keyword} {emoji}\")\n",
" return response\n",
"\n",
"def add_shortform(response):\n",
" # Define the keywords and their corresponding emojis\n",
" keyword_shortform_dict = {\n",
" \"You only live once\": \"YOLO\",\n",
" \"funny\": \"LOL\",\n",
" \"laugh\":\"LOL\",\n",
" \"nevermind\": \"nvm\",\n",
" \"sorry\": \"sorryyyyy\",\n",
" \"tell me\": \"LMK\",\n",
" \"By the way\": \"BTW\",\n",
" \"don't know\":\"DK\",\n",
" \"do not know\":\"IDK\"\n",
" \n",
" \n",
" }\n",
" for keyword, st in keyword_shortform_dict.items():\n",
" response = response.replace(keyword, f\"{st}\")\n",
" return response\n",
"\n",
"def chatbot(text, name):\n",
"    \"\"\"Generate one chat turn and return the updated conversation history.\n",
"\n",
"    text: the user's message (English or Chinese).\n",
"    name: the bot's display name, echoed in the greeting responses.\n",
"    Returns chat_history, a list of (user_message, bot_response) tuples in\n",
"    the shape gr.Chatbot expects.\n",
"    \"\"\"\n",
"    global chat_history\n",
"    global Itext\n",
"    global bname\n",
"    bname = name\n",
"    Itext = text\n",
"\n",
"    # Detect Chinese input: any CJK Unified Ideograph (U+4E00..U+9FFF).\n",
"    is_chinese = any(0x4e00 <= ord(char) <= 0x9fff for char in text.lower())\n",
"    if is_chinese:\n",
"        # translate() returns [{'translation_text': ...}]; read the field\n",
"        # directly instead of slicing the stringified list (the original\n",
"        # used brittle text[23:len-3] slicing of the repr).\n",
"        text = translate(text, \"ztoe\")[0][\"translation_text\"]\n",
"\n",
"    # Canned replies that override the generated response on exact match.\n",
"    keyword_responses = {\n",
"        \"how are you\": \"I'm doing wellπ, thank you for asking!\",\n",
"        \"bye\": \"Goodbye!ππ»\",\n",
"        \"thank you\": \"You're welcome!π\",\n",
"        \"hello\": f'I am {bname}. Nice to meet you!π',\n",
"        \"Hello\": f'I am {bname}. Nice to meet you!π',\n",
"        \"Hi\": f'I am {bname}. Nice to meet you!π',\n",
"        \"hi\": f'I am {bname}. Nice to meet you!π',\n",
"    }\n",
"\n",
"    if len(chat_history) > 0:\n",
"        # Condition BlenderBot on the previous bot reply plus the new input.\n",
"        last_message = chat_history[-1][1]\n",
"        encoded_input = tokenizer.encode(last_message + tokenizer.eos_token + text, return_tensors='pt')\n",
"        generated = model.generate(encoded_input, max_length=1024, do_sample=True)\n",
"        response = tokenizer.decode(generated[0], skip_special_tokens=True)\n",
"    else:\n",
"        # First turn: no history to condition on.\n",
"        encoded_input = tokenizer(text, return_tensors='pt')\n",
"        generated = model.generate(**encoded_input)\n",
"        response = tokenizer.batch_decode(generated, skip_special_tokens=True)[0]\n",
"    if text in keyword_responses:\n",
"        response = keyword_responses[text]\n",
"\n",
"    # If the input was Chinese, translate the reply back and convert\n",
"    # Simplified to Traditional characters.\n",
"    if is_chinese:\n",
"        from hanziconv import HanziConv\n",
"        response = translate(response, \"etoz\")[0][\"translation_text\"]\n",
"        response = HanziConv.toTraditional(response)\n",
"\n",
"    # Post-process the reply, record the turn, and return the full history.\n",
"    # (The original also built an unused `history_str` here whose loop both\n",
"    # shadowed the `name` parameter and contained a broken string literal.)\n",
"    response = add_emoji(response)\n",
"    response = add_shortform(response)\n",
"    chat_history.append((Itext, response))\n",
"    return chat_history\n",
"\n",
" \n",
"# Build and launch the Gradio UI.\n",
"# NOTE(review): gr.inputs.* is the legacy (pre-3.x) component namespace; on\n",
"# current Gradio use gr.Textbox directly and allow_flagging=\"never\".\n",
"gr.Interface(fn=chatbot,\n",
"             inputs=[gr.inputs.Textbox(label=\"Chat\", placeholder=\"Say something\"),\n",
"             gr.inputs.Textbox(label=\"Name the Bot\", placeholder=\"give me a name\")],\n",
"             outputs=[gr.Chatbot(label=\"Chat Here\")],\n",
"             title=\"Emphatic Chatbot\",\n",
"             allow_flagging=False,\n",
"             layout=\"vertical\",\n",
"             theme='gstaff/xkcd',\n",
"             # Each example must supply a value for BOTH inputs\n",
"             # (message, bot name); the original gave only one per example.\n",
"             examples=[[\"δ½ ε₯½\", \"Bot\"], [\"Hello\", \"Bot\"]]\n",
"             ).launch()\n",
"\n",
"\n",
"\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}