|
import gradio as gr |
|
import os |
|
from langchain_community.llms import HuggingFaceEndpoint |
|
from langchain.chains import LLMChain |
|
from langchain_core.prompts import PromptTemplate |
|
from dotenv import load_dotenv |
|
|
|
# Load variables from a local .env file into the process environment
# (expects HUGGINGFACEHUB_API_TOKEN to be defined there or already set).
load_dotenv()

# Hugging Face Inference API token; None if the variable is unset.
model_Api = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Hosted model repository used for all completions in this app.
repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
|
|
|
def QueryBuilding():
    """Return the raw prompt template string used for every chat turn.

    The text embeds the user's message through the ``{query}`` placeholder
    and instructs the model to stay brief, warm, and to point the user to a
    professional when the conversation turns serious.
    """
    # Returned verbatim; PromptTemplate parsing happens in PromptEngineering().
    template_text = """You are a supportive and friendly assistant. For initial greetings like 'hello,' respond briefly and warmly. For more specific queries from the user {query}, respond in a straightforward, compassionate way. Keep it simple and concise unless the user indicates they need detailed guidance. When the user shares something concerning, gently encourage them to reach out to a professional if needed.

Answer: provide guidance and support to the user in a more detailed, simple and straightforward manner when situation gets dire direct the user to an actual professional."""
    return template_text
|
|
|
def PromptEngineering():
    """Wrap the raw template string in a LangChain ``PromptTemplate``.

    ``from_template`` infers the single input variable ("query") from the
    ``{query}`` placeholder in the template text.
    """
    template_text = QueryBuilding()
    return PromptTemplate.from_template(template_text)
|
|
|
def LLM_building():
    """Construct the Hugging Face Inference API client for the chat model.

    Returns:
        HuggingFaceEndpoint: LLM bound to the module-level ``repo_id`` and
        authenticated with the token read from HUGGINGFACEHUB_API_TOKEN.
    """
    # Fix: HuggingFaceEndpoint's documented generation-length parameter is
    # max_new_tokens, not max_length (unrecognized kwargs are shunted into
    # model_kwargs and rejected by the inference client), and its documented
    # auth field is huggingfacehub_api_token rather than token.
    llm_model = HuggingFaceEndpoint(
        repo_id=repo_id,
        max_new_tokens=128,
        huggingfacehub_api_token=model_Api,
    )
    return llm_model
|
|
|
def langchainning():
    """Assemble the prompt template and LLM into an ``LLMChain``.

    NOTE(review): LLMChain is deprecated in newer LangChain releases in
    favor of ``prompt | llm``; kept as-is because callers rely on ``.run()``.
    """
    prompt = PromptEngineering()
    llm = LLM_building()
    return LLMChain(prompt=prompt, llm=llm)
|
|
|
def user_input(user):
    """Run the user's message through the chain and return the model reply.

    Args:
        user: The raw user message; fills the template's ``{query}`` slot.
    """
    chain = langchainning()
    # .run() accepts a single positional value when the prompt has exactly
    # one input variable, and returns the output string.
    return chain.run(user)
|
|
|
def chat_interface(message, history):
    """Gradio ChatInterface callback: answer ``message``.

    ``history`` is required by the ChatInterface signature but unused —
    each turn is answered independently with no conversational memory.
    """
    return user_input(message)
|
|
|
# Gradio chat UI; chat_interface() is invoked once per submitted message.
iface = gr.ChatInterface(
    fn=chat_interface,
    title="Mind Whizz",
    description="Hello Friend, How can I help you today?",
    examples=["Can you motivate me to learn to code?", "how can i stay positive in a negative situation?"],
)

# Launch only when run as a script; share=True creates a temporary public
# Gradio link in addition to the local server.
if __name__ == "__main__":
    iface.launch(share=True)