|
|
|
import os

import joblib
import pandas as pd
import requests
import torch
from langchain_groq import ChatGroq
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
|
|
|
|
|
# --- Model artifacts --------------------------------------------------------
# All paths use forward slashes: Python accepts them on every OS (including
# Windows), whereas the original backslash-only paths broke on Linux/macOS.

# Fine-tuned GPT-2 model + tokenizer for mental-health counseling replies.
counseling_model = GPT2LMHeadModel.from_pretrained('backend/models/mental_health_model')
counselling_tokenizer = GPT2Tokenizer.from_pretrained('backend/models/mental_health_model')

# Fine-tuned GPT-2 model + tokenizer for medication-information replies.
medication_model = GPT2LMHeadModel.from_pretrained('backend/models/medication_info')
medication_tokenizer = GPT2Tokenizer.from_pretrained('backend/models/medication_info')

# Random-forest diabetes classifier and the scaler its features were fit with.
diabetes_model = joblib.load('backend/models/diabetes_model/random_forest_modelf.joblib')
diabetes_scaler = joblib.load('backend/models/diabetes_model/standard_scaler.joblib')

# KNN medication classifier plus the fitted encoders/scaler used to prepare
# its inputs and decode its outputs.
knn = joblib.load('backend/models/medication_classification_model/knn_model.pkl')
label_encoders = joblib.load('backend/models/medication_classification_model/label_encoders.pkl')
age_scaler = joblib.load('backend/models/medication_classification_model/age_scaler.pkl')
medication_encoder = joblib.load('backend/models/medication_classification_model/medication_encoder.pkl')
|
|
|
|
|
|
|
|
|
|
|
|
def classify_diabetes(glucose, bmi, age):
    """Classify a patient as diabetic or not from glucose, BMI and age.

    The three raw features are scaled with the fitted standard scaler and
    fed to the random-forest model loaded at module import.

    Returns:
        A display string of the form "<label> | <confidence>%", where the
        confidence is the model's probability (in percent, one decimal)
        for the predicted class.
    """
    features = diabetes_scaler.transform([[glucose, bmi, age]])

    label = diabetes_model.predict(features)[0]
    # predict_proba gives per-class probabilities; scale to percentages.
    class_percentages = diabetes_model.predict_proba(features)[0] * 100

    # Confidence of the class that was actually predicted.
    confidence = class_percentages[label].item()

    verdict = "Non Diabetic" if label == 0 else "Diabetic"

    return f"{verdict} | {confidence:.1f}%"
|
|
|
|
|
|
|
|
def classify_medicine(new_data):
    """Predict medication names for one or more patient records.

    Args:
        new_data: Mapping/records convertible to a DataFrame with columns
            'Gender', 'Blood Type', 'Medical Condition', 'Test Results'
            and 'Age' (raw, un-encoded values).

    Returns:
        Array of medication names decoded from the KNN model's output.
    """
    frame = pd.DataFrame(new_data)

    # Encode each categorical column with the encoder it was fitted with.
    for col in ('Gender', 'Blood Type', 'Medical Condition', 'Test Results'):
        frame[col] = label_encoders[col].transform(frame[col])

    # Age was scaled during training, so scale it identically here.
    frame['Age'] = age_scaler.transform(frame[['Age']])

    encoded_predictions = knn.predict(frame)

    # Map encoded class ids back to human-readable medication names.
    return medication_encoder.inverse_transform(encoded_predictions)
|
|
|
|
|
|
|
|
def generate_counseling_response(prompt):
    """Generate a counseling reply for *prompt* with the fine-tuned GPT-2.

    The prompt is tokenized, up to 150 tokens are generated, and the echoed
    prompt prefix (GPT-2 repeats its input) is stripped from the decoded text.
    """
    encoded_prompt = counselling_tokenizer.encode(prompt, return_tensors="pt")
    generated = counseling_model.generate(
        encoded_prompt,
        max_length=150,
        num_return_sequences=1,
        # GPT-2 has no pad token; reuse EOS to silence the generate() warning.
        pad_token_id=counselling_tokenizer.eos_token_id,
    )

    text = counselling_tokenizer.decode(generated[0], skip_special_tokens=True)

    # Drop the echoed prompt so only the model's continuation is returned.
    if text.startswith(prompt):
        text = text[len(prompt):].strip()

    return text
|
|
|
|
|
|
|
|
def generate_medication_response(prompt):
    """Generate a medication-information reply for *prompt* via GPT-2.

    Mirrors generate_counseling_response but uses the medication-info
    model/tokenizer pair loaded at module import.
    """
    encoded_prompt = medication_tokenizer.encode(prompt, return_tensors="pt")
    generated = medication_model.generate(
        encoded_prompt,
        max_length=150,
        num_return_sequences=1,
        # GPT-2 has no pad token; reuse EOS to silence the generate() warning.
        pad_token_id=medication_tokenizer.eos_token_id,
    )

    text = medication_tokenizer.decode(generated[0], skip_special_tokens=True)

    # Drop the echoed prompt so only the model's continuation is returned.
    if text.startswith(prompt):
        text = text[len(prompt):].strip()

    return text
|
|
|
|
|
|
|
|
# Groq-hosted Llama chat model used as the general-purpose LLM backend.
llm = ChatGroq(
    temperature=0,
    # SECURITY: the key was previously hard-coded (a leaked credential).
    # Read it from the environment instead; the old key must be revoked.
    groq_api_key=os.environ.get('GROQ_API_KEY', ''),
    model_name="llama-3.1-70b-versatile"
)
|
|
|
|
def get_llama_response(prompt):
    """Ask the Groq LLM for a reply and return it as formatted HTML.

    Any failure (network, auth, formatting) is caught and reported as a
    plain "Error: ..." string rather than raised to the caller.
    """
    try:
        reply = llm.invoke(prompt)
        return format_response(reply.content)
    except Exception as exc:
        return f"Error: {str(exc)}"
|
|
|
|
def format_response(response):
|
|
|
|
response = response.replace("**", "").replace("*", "").replace(" ", "\n").strip()
|
|
lines = response.split("\n")
|
|
formatted_response = ""
|
|
for line in lines:
|
|
formatted_response += f"<p>{line}</p>"
|
|
return formatted_response
|
|
|