import random
import pickle

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load the GPT-2 model and tokenizer and switch the model to inference mode.
model_name = 'gpt2'
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
model.eval()

# GPT-2 ships without a pad token, so reuse the end-of-sequence token for padding.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Remove stray spaces around punctuation when decoding.
tokenizer.clean_up_tokenization_spaces = True

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

# Running chat history, persisted to disk between sessions.
session_memory = []


def save_memory(memory, filename='chat_memory.pkl'):
    # Persist the chat history with pickle.
    with open(filename, 'wb') as f:
        pickle.dump(memory, f)


def load_memory(filename='chat_memory.pkl'):
    # Restore a previously saved history; start fresh if none exists yet.
    try:
        with open(filename, 'rb') as f:
            return pickle.load(f)
    except (FileNotFoundError, EOFError):
        return []


session_memory = load_memory()

def generate_response(prompt, max_length=512):
    # Tokenize the prompt; max_length bounds both the prompt and the generated
    # sequence, so a prompt that already fills it leaves no room for new tokens.
    inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=max_length)
    input_ids = inputs['input_ids'].to(device)
    attention_mask = inputs['attention_mask'].to(device)
    pad_token_id = tokenizer.pad_token_id

    with torch.no_grad():
        output = model.generate(
            input_ids,
            attention_mask=attention_mask,
            max_length=max_length,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,
            temperature=0.9,
            top_k=50,
            top_p=0.95,
            pad_token_id=pad_token_id
        )

    response = tokenizer.decode(output[0], skip_special_tokens=True)

    # Prefix everything after the first line break with the "vß Gertrude"
    # signature so replies carry the assistant's persona.
    parts = response.split("\n", 1)
    if len(parts) > 1:
        before_indent = parts[0].strip()
        after_indent = "vß Gertrude" + parts[1].strip()
        final_response = before_indent + '\n' + after_indent
    else:
        final_response = response.strip()

    return final_response

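# Illustrative only: generate_response can also be exercised directly, outside
# the Gradio UI, using the same "User: ...\nResponse:" prompt layout that
# advanced_agi_chat builds below. The prompt text here is an arbitrary example,
# and this helper is never called by the app itself.
def _demo_generate_response():
    return generate_response("User: Hello, Gertrude!\nResponse:")
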
def advanced_agi_chat(user_input):
    # Record the user's message and persist the running history to disk.
    session_memory.append({"input": user_input})
    save_memory(session_memory)

    # Build a simple chat-style prompt and let GPT-2 complete the response.
    prompt = f"User: {user_input}\nResponse:"
    response = generate_response(prompt)

    return response


def chat_interface(user_input):
    response = advanced_agi_chat(user_input)
    return response

class RNNModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNNModel, self).__init__()
        self.hidden_size = hidden_size
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden):
        out, hidden = self.rnn(x, hidden)
        # Project only the last time step's output.
        out = self.fc(out[:, -1, :])
        return out, hidden

    def init_hidden(self, batch_size):
        # nn.RNN expects the hidden state shaped (num_layers, batch, hidden_size).
        return torch.zeros(1, batch_size, self.hidden_size).to(device)

class CNNModel(nn.Module):
    def __init__(self, input_channels, output_size):
        super(CNNModel, self).__init__()
        self.conv1 = nn.Conv2d(input_channels, 16, 3)
        self.conv2 = nn.Conv2d(16, 32, 3)
        # 32 * 6 * 6 assumes roughly 32x32 inputs: two 3x3 convolutions, each
        # followed by 2x2 max pooling, leave 6x6 feature maps.
        self.fc = nn.Linear(32 * 6 * 6, output_size)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x

class NNModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(NNModel, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

class PHIModel(nn.Module):
    def __init__(self, input_size, output_size):
        super(PHIModel, self).__init__()
        # Size the hidden layer by the golden ratio (phi ≈ 1.618).
        self.phi = (1 + np.sqrt(5)) / 2
        self.fc1 = nn.Linear(input_size, int(input_size * self.phi))
        self.fc2 = nn.Linear(int(input_size * self.phi), output_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

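# Illustrative only: the RNN/CNN/NN/PHI modules above are defined but never used
# by the chat app. This uncalled sketch (with assumed, arbitrary sizes) shows
# how they could be exercised.
def _demo_auxiliary_models():
    rnn = RNNModel(input_size=8, hidden_size=16, output_size=4).to(device)
    hidden = rnn.init_hidden(batch_size=2)  # init_hidden already places the state on `device`
    rnn_out, hidden = rnn(torch.randn(2, 5, 8).to(device), hidden)  # (batch, seq, features)

    cnn = CNNModel(input_channels=3, output_size=10)
    cnn_out = cnn(torch.randn(2, 3, 32, 32))  # fc layer is sized for 32x32 inputs

    mlp_out = NNModel(input_size=8, hidden_size=16, output_size=4)(torch.randn(2, 8))
    phi_out = PHIModel(input_size=8, output_size=4)(torch.randn(2, 8))
    return rnn_out, cnn_out, mlp_out, phi_out
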
def ga_optimization(population, generations, mutation_rate):
    # Fitness of a bitstring individual is simply the number of ones it contains.
    def fitness_function(individual):
        return sum(individual)

    for gen in range(generations):
        # Keep the fitter half of the population as parents.
        population.sort(key=fitness_function, reverse=True)
        next_generation = population[:len(population) // 2]

        # Single-point crossover: pair each surviving parent with one individual
        # from the weaker half of the sorted population to produce a child.
        for i in range(len(population) // 2):
            parent1 = next_generation[i]
            parent2 = population[len(population) // 2 + i]
            crossover_point = random.randint(1, len(parent1) - 1)
            child = parent1[:crossover_point] + parent2[crossover_point:]
            next_generation.append(child)

        # Mutation: randomly flip one gene in some individuals.
        for individual in next_generation:
            if random.random() < mutation_rate:
                mutation_point = random.randint(0, len(individual) - 1)
                individual[mutation_point] = random.randint(0, 1)

        population = next_generation

    # Return the fittest individual from the final population.
    return max(population, key=fitness_function)

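# Illustrative only: ga_optimization expects a population of equal-length 0/1
# lists (fitness counts the ones) and returns the fittest individual it finds.
# The population size, bitstring length, and rates here are arbitrary assumptions.
def _demo_ga_optimization():
    population = [[random.randint(0, 1) for _ in range(10)] for _ in range(20)]
    return ga_optimization(population, generations=5, mutation_rate=0.1)
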
# Credentials tuple; note it is defined but never passed to app.launch().
auth = ("Tej", "186281mps", "ACC", "HIPE")

with gr.Blocks() as app:
    gr.Markdown("# **Autistic Assistant vß Edition 2024 Ultra: Gertrude's Autistic Experience**")

    with gr.Row():
        with gr.Column(scale=1):
            user_input = gr.Textbox(label="🎙️What will you say to Gertrude?🎙️", placeholder="⌨️Type something here...")
            submit_button = gr.Button("💬Send💬")
        with gr.Column(scale=1):
            chatbot = gr.Textbox(label="🤖Gertrude's Response:", interactive=False)

    # Inline CSS for the app's look and feel.
    gr.HTML("""
    <style>
        .gradio-container {
            background-color: #B3D9FF;
            padding: 20px;
            border-radius: 15px;
            font-family: 'Comic Sans MS';
        }
        .gradio-row {
            display: flex;
            justify-content: space-between;
        }
    </style>
    """)

    # Wire the send button to the chat handler.
    submit_button.click(chat_interface, inputs=user_input, outputs=chatbot)


app.launch()