CallmeKaito committed on
Commit
83f07ea
Β·
verified Β·
1 Parent(s): ecb863d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +82 -75
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import streamlit as st
2
- from transformers import pipeline
3
  import torch
 
4
  import random
5
 
6
  # Set page config
@@ -9,39 +9,16 @@ st.set_page_config(
9
  page_icon="🧠"
10
  )
11
 
12
- # Initialize session state for chat history if it doesn't exist
13
- if "messages" not in st.session_state:
14
- st.session_state.messages = []
15
-
16
- # Fun loading messages
17
- LOADING_MESSAGES = [
18
- "Rotting brain cells... 🧠",
19
- "Downloading TikTok wisdom... πŸ“±",
20
- "Absorbing internet culture... 🌐",
21
- "Loading peak brainrot... πŸ€ͺ",
22
- "Gathering viral knowledge... 🦠"
23
- ]
24
-
25
- def load_qa_pipeline():
26
- """Load the question-answering pipeline"""
27
- if "qa_pipeline" not in st.session_state:
28
- with st.spinner(random.choice(LOADING_MESSAGES)):
29
- st.session_state.qa_pipeline = pipeline(
30
- "question-answering",
31
- model="CallmeKaito/llama-3.1-8b-it-brainrot",
32
- device=0 if torch.cuda.is_available() else -1
33
- )
34
-
35
- def get_answer(question, context):
36
- """Get answer from the model"""
37
- try:
38
- result = st.session_state.qa_pipeline(
39
- question=question,
40
- context=context
41
- )
42
- return result['answer']
43
- except Exception as e:
44
- return f"Brain too rotted, can't compute 🀯 Error: {str(e)}"
45
 
46
  # Main UI with themed styling
47
  st.markdown("""
@@ -51,66 +28,96 @@ st.markdown("""
51
  font-weight: bold;
52
  }
53
  </style>
54
- <p class="big-font">🧠 Brainrot Chat πŸ€ͺ</p>
55
  """, unsafe_allow_html=True)
56
 
57
  st.markdown("""
58
- Welcome to the Brainrot Zone! This chatbot has been trained on peak TikTok content.
59
- Prepare for some absolutely unhinged responses!
60
 
61
- ⚠️ Remember: This is for entertainment only - don't take anything too seriously! ⚠️
62
  """)
63
 
64
- # Load the model
65
- load_qa_pipeline()
66
-
67
- # Example prompts to help users
68
- EXAMPLE_CONTEXTS = [
69
- "When the rizz is immaculate but she's literally so real for what fr fr no cap bussin",
70
- "POV: You're chronically online and everything is giving main character energy",
71
- "It's giving slay queen energy with a side of based behavior ngl",
72
- ]
 
 
73
 
74
- # Context input with example
75
- context = st.text_area(
76
- "Drop your TikTok wisdom here:",
77
- placeholder=random.choice(EXAMPLE_CONTEXTS),
78
- height=100
79
- )
80
 
81
- # Display chat history
82
  for message in st.session_state.messages:
83
  with st.chat_message(message["role"]):
84
  st.write(message["content"])
85
 
86
  # User input
87
- if question := st.chat_input("Ask something absolutely unhinged..."):
88
- # Display user message
 
89
  with st.chat_message("user"):
90
- st.write(question)
91
- st.session_state.messages.append({"role": "user", "content": question})
92
-
93
- # Generate and display assistant response
94
- if context:
95
- with st.chat_message("assistant"):
96
- answer = get_answer(question, context)
97
- st.write(f"{answer} {random.choice(['πŸ’…', '😌', 'πŸ’', 'πŸ€ͺ', '✨'])}")
98
- st.session_state.messages.append({"role": "assistant", "content": answer})
99
- else:
100
- with st.chat_message("assistant"):
101
- st.write("Bestie, I need some context to work with! Drop some TikTok wisdom above! ✨")
102
- st.session_state.messages.append(
103
- {"role": "assistant", "content": "Bestie, I need some context to work with! Drop some TikTok wisdom above! ✨"}
104
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
 
106
- # Clear chat button with themed text
107
- if st.button("Reset the Brainrot 🧠"):
108
  st.session_state.messages = []
109
  st.experimental_rerun()
110
 
111
  # Footer
112
  st.markdown("""
113
  ---
114
- *This chatbot is living its best life with maximum brainrot energy.
115
- Any weird responses are just part of the aesthetic* ✨
116
  """)
 
1
  import streamlit as st
 
2
  import torch
3
+ from transformers import AutoModelForCausalLM, AutoTokenizer
4
  import random
5
 
6
  # Set page config
 
9
  page_icon="🧠"
10
  )
11
 
12
+ @st.cache_resource
13
+ def load_model():
14
+ """Load the model and tokenizer"""
15
+ model = AutoModelForCausalLM.from_pretrained(
16
+ "CallmeKaito/llama-3.1-8b-it-brainrot",
17
+ torch_dtype=torch.float16,
18
+ device_map="auto"
19
+ )
20
+ tokenizer = AutoTokenizer.from_pretrained("CallmeKaito/llama-3.1-8b-it-brainrot")
21
+ return model, tokenizer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
  # Main UI with themed styling
24
  st.markdown("""
 
28
  font-weight: bold;
29
  }
30
  </style>
31
+ <p class="big-font">🧠 Maximum Brainrot Chat πŸ€ͺ</p>
32
  """, unsafe_allow_html=True)
33
 
34
  st.markdown("""
35
+ Welcome to the most unhinged chatbot! Trained on peak TikTok and internet brainrot.
36
+ Please keep it respectful and fun! πŸŽ‰
37
 
38
+ ⚠️ For entertainment purposes only - responses are intentionally chaotic! ⚠️
39
  """)
40
 
41
+ # Initialize session state
42
+ if "messages" not in st.session_state:
43
+ st.session_state.messages = []
44
+
45
+ # Load model and tokenizer
46
+ try:
47
+ with st.spinner("Loading maximum brainrot... 🧠"):
48
+ model, tokenizer = load_model()
49
+ except Exception as e:
50
+ st.error(f"Error loading model: {str(e)}")
51
+ st.stop()
52
 
53
+ # System instruction
54
+ instruction = """You are a chatbot trained on internet and TikTok culture.
55
+ Your responses should be entertaining and reflective of internet slang and memes,
56
+ while keeping content appropriate and avoiding harmful advice."""
 
 
57
 
58
+ # Chat interface
59
  for message in st.session_state.messages:
60
  with st.chat_message(message["role"]):
61
  st.write(message["content"])
62
 
63
  # User input
64
+ if prompt := st.chat_input("Send your most unhinged thoughts..."):
65
+ # Add user message to chat
66
+ st.session_state.messages.append({"role": "user", "content": prompt})
67
  with st.chat_message("user"):
68
+ st.write(prompt)
69
+
70
+ # Generate response
71
+ with st.chat_message("assistant"):
72
+ with st.spinner("Generating maximum brainrot... 🧠"):
73
+ try:
74
+ messages = [
75
+ {"role": "system", "content": instruction},
76
+ {"role": "user", "content": prompt}
77
+ ]
78
+
79
+ # Generate response
80
+ prompt_text = tokenizer.apply_chat_template(
81
+ messages,
82
+ tokenize=False,
83
+ add_generation_prompt=True
84
+ )
85
+ inputs = tokenizer(
86
+ prompt_text,
87
+ return_tensors='pt',
88
+ padding=True,
89
+ truncation=True
90
+ ).to("cuda")
91
+
92
+ outputs = model.generate(
93
+ **inputs,
94
+ max_new_tokens=150,
95
+ num_return_sequences=1,
96
+ temperature=0.7,
97
+ do_sample=True
98
+ )
99
+
100
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
101
+ response = response.split("assistant")[-1].strip()
102
+
103
+ # Add random internet slang emoji
104
+ #emojis = ["😳", "πŸ’€", "πŸ€ͺ", "✨", "πŸ’…", "πŸ”₯", "😌", "⭐", "🎯"]
105
+ #response = f"{response} {random.choice(emojis)}"
106
+
107
+ st.write(response)
108
+ st.session_state.messages.append({"role": "assistant", "content": response})
109
+
110
+ except Exception as e:
111
+ st.error(f"Error generating response: {str(e)}")
112
 
113
+ # Clear chat button
114
+ if st.button("Reset Brainrot 🧠"):
115
  st.session_state.messages = []
116
  st.experimental_rerun()
117
 
118
  # Footer
119
  st.markdown("""
120
  ---
121
+ *This chatbot is intentionally unhinged and chaotic for entertainment.
122
+ Responses are AI-generated and should not be taken seriously* ✨
123
  """)