# backend/utils.py
import os

import joblib
import pandas as pd
from langchain_groq import ChatGroq
from transformers import GPT2LMHeadModel, GPT2Tokenizer
# Load the fine-tuned counseling model and tokenizer
counseling_model = GPT2LMHeadModel.from_pretrained('backend/models/mental_health_model')
counseling_tokenizer = GPT2Tokenizer.from_pretrained('backend/models/mental_health_model')

# Load the fine-tuned medication model and tokenizer
medication_model = GPT2LMHeadModel.from_pretrained('backend/models/medication_info')
medication_tokenizer = GPT2Tokenizer.from_pretrained('backend/models/medication_info')

# Load the trained Random Forest model and its StandardScaler
diabetes_model = joblib.load('backend/models/diabetes_model/random_forest_modelf.joblib')
diabetes_scaler = joblib.load('backend/models/diabetes_model/standard_scaler.joblib')

# Load the KNN medication classifier, its label encoders, and scalers
knn = joblib.load('backend/models/medication_classification_model/knn_model.pkl')
label_encoders = joblib.load('backend/models/medication_classification_model/label_encoders.pkl')
age_scaler = joblib.load('backend/models/medication_classification_model/age_scaler.pkl')
medication_encoder = joblib.load('backend/models/medication_classification_model/medication_encoder.pkl')
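# Note: the relative 'backend/...' paths above assume the process is started
# from the repository root. A more robust (hypothetical) variant would anchor
# them to this file instead:
#
#   from pathlib import Path
#   MODELS_DIR = Path(__file__).resolve().parent / 'models'
#   diabetes_model = joblib.load(MODELS_DIR / 'diabetes_model' / 'random_forest_modelf.joblib')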
# Diabetes Classifier
def classify_diabetes(glucose, bmi, age):
    # Normalize the input features with the same scaler used at training time
    input_features = [[glucose, bmi, age]]
    input_features_norm = diabetes_scaler.transform(input_features)
    # Predict the class and the model's confidence in that class
    prediction = diabetes_model.predict(input_features_norm)[0]
    prediction_probability = diabetes_model.predict_proba(input_features_norm)[0] * 100
    confidence = prediction_probability[prediction].item()
    result = "Non Diabetic" if prediction == 0 else "Diabetic"
    # Format the output as: "Non Diabetic | 72.0%"
    return f"{result} | {confidence:.1f}%"
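# Minimal usage sketch (hypothetical values; assumes the scaler and model were
# fit on features in the order [glucose, bmi, age]):
#
#   print(classify_diabetes(glucose=148, bmi=33.6, age=50))
#   # -> e.g. "Diabetic | 74.0%"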
# Medicine Classifier
def classify_medicine(new_data):
    # Convert the input dictionary of column -> list of values to a DataFrame
    new_data_df = pd.DataFrame(new_data)
    # Encode the categorical columns with the label encoders saved at training time
    for column in ['Gender', 'Blood Type', 'Medical Condition', 'Test Results']:
        new_data_df[column] = label_encoders[column].transform(new_data_df[column])
    # Normalize the 'Age' column with the saved scaler
    new_data_df['Age'] = age_scaler.transform(new_data_df[['Age']])
    # Predict, then decode back to the original medication names
    predictions = knn.predict(new_data_df)
    predicted_medications = medication_encoder.inverse_transform(predictions)
    return predicted_medications
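# Minimal usage sketch (hypothetical values; the categorical labels and the
# column order must match what the encoders and KNN model saw at training time):
#
#   sample = {
#       'Age': [45],
#       'Gender': ['Male'],
#       'Blood Type': ['O+'],
#       'Medical Condition': ['Diabetes'],
#       'Test Results': ['Abnormal'],
#   }
#   print(classify_medicine(sample))  # -> e.g. ['Metformin']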
# Generate Counseling Response
def generate_counseling_response(prompt):
    inputs = counseling_tokenizer.encode(prompt, return_tensors="pt")
    outputs = counseling_model.generate(
        inputs,
        max_length=150,
        num_return_sequences=1,
        pad_token_id=counseling_tokenizer.eos_token_id,
    )
    # Decode the generated output
    response = counseling_tokenizer.decode(outputs[0], skip_special_tokens=True)
    # GPT-2 echoes the prompt, so strip it before returning
    if response.startswith(prompt):
        response = response[len(prompt):].strip()
    return response
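# Minimal usage sketch (free-text prompt; output depends on the fine-tuned weights):
#
#   print(generate_counseling_response("I have been feeling anxious lately."))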
# Generate Medication Response
def generate_medication_response(prompt):
    inputs = medication_tokenizer.encode(prompt, return_tensors="pt")
    outputs = medication_model.generate(
        inputs,
        max_length=150,
        num_return_sequences=1,
        pad_token_id=medication_tokenizer.eos_token_id,
    )
    # Decode the generated output
    response = medication_tokenizer.decode(outputs[0], skip_special_tokens=True)
    # GPT-2 echoes the prompt, so strip it before returning
    if response.startswith(prompt):
        response = response[len(prompt):].strip()
    return response
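# Minimal usage sketch (mirrors the counseling generator above):
#
#   print(generate_medication_response("What is metformin prescribed for?"))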
# Llama 3.1 integration for the General tab.
# The Groq API key is read from the environment rather than hardcoded in source.
llm = ChatGroq(
    temperature=0,
    groq_api_key=os.environ["GROQ_API_KEY"],
    model_name="llama-3.1-70b-versatile",
)
def get_llama_response(prompt):
    try:
        response = llm.invoke(prompt)
        return format_response(response.content)
    except Exception as e:
        return f"Error: {str(e)}"
def format_response(response):
    # Strip Markdown emphasis markers, then wrap each non-empty line in a
    # paragraph tag so the response renders as simple HTML
    response = response.replace("**", "").replace("*", "").strip()
    formatted_response = ""
    for line in response.split("\n"):
        if line.strip():
            formatted_response += f"<p>{line.strip()}</p>"
    return formatted_response
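# A guarded smoke test (hypothetical inputs; the model artifacts under
# backend/models/ must exist, so this is a sketch rather than a real test suite):
if __name__ == "__main__":
    print(classify_diabetes(glucose=148, bmi=33.6, age=50))
    print(format_response("**Tip 1** Drink water.\nTip 2: Sleep well."))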