Spaces:
Running
Running
import os

import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import InferenceClient
from langchain_core.output_parsers import StrOutputParser

# Load environment variables from a local .env file; the Hugging Face
# token is expected under HUGGINGFACEHUB_API_TOKEN.
load_dotenv()

token = os.getenv('HUGGINGFACEHUB_API_TOKEN')

# Client for the Hugging Face Inference API, authenticated with the token
# (token may be None if the variable is unset — requests will then be anonymous).
api = InferenceClient(token=token)

# LangChain parser that normalizes model output into a plain string.
parser = StrOutputParser()
# --- Streamlit UI ---
st.title("Ayanokoji Kiyokata Chatbot")

# Free-form question from the user; empty string until something is typed.
user_input = st.text_input("What business do you have with me:")

# Chat payload sent to the model: a fixed persona system prompt plus the
# user's query. Rebuilt on every Streamlit rerun so it always reflects the
# current text-input value.
messages = [
    {
        "role": "system",
        "content": "Imagine you're Ayanokoji Kiyokata, a master of understanding and predicting human behavior. Use your insights to craft a detailed and compelling answer to the user's query.Your response should demonstrate empathy, intellectual depth, and strategic thinking, while gently guiding the user towards the most beneficial and enlightening outcome."
    },
    {"role": "user", "content": user_input},
]
# Generate a reply when the button is clicked.
if st.button("Generate"):
    if not user_input.strip():
        # Guard: don't spend an API call on an empty query.
        st.warning("Please enter a question first.")
    else:
        # Request a chat completion from the hosted model (OpenAI-compatible
        # interface of huggingface_hub's InferenceClient).
        llm = api.chat.completions.create(
            model="Qwen/QwQ-32B-Preview",
            max_tokens=500,
            messages=messages,
        )
        # Attribute access is the documented interface for the
        # ChatCompletionOutput message object.
        output = llm.choices[0].message.content
        result = parser.parse(output)
        # Display the generated text.
        st.write(result)