hamaadayubkhan committed
Commit e74639d · verified
1 Parent(s): e251a18

Delete app.py

Files changed (1)
  1. app.py +0 -120
app.py DELETED
@@ -1,120 +0,0 @@
- import gradio as gr
- from huggingface_hub import InferenceClient
- from sentence_transformers import SentenceTransformer
- import faiss
- import numpy as np
- import pdfplumber
-
- # Initialize the InferenceClient
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
- # Function to extract text from PDFs
- def extract_text_from_pdf(pdf_path):
-     text = ""
-     with pdfplumber.open(pdf_path) as pdf:
-         for page in pdf.pages:
-             page_text = page.extract_text()
-             if page_text:
-                 text += page_text
-     return text
-
- # Load and preprocess book PDFs
- pdf_files = ["Diagnostic and statistical manual of mental disorders _ DSM-5 ( PDFDrive.com ).pdf"]
- all_texts = [extract_text_from_pdf(pdf) for pdf in pdf_files]
-
- # Split text into chunks
- def chunk_text(text, chunk_size=300):
-     sentences = text.split('. ')
-     chunks, current_chunk = [], ""
-     for sentence in sentences:
-         if len(current_chunk) + len(sentence) <= chunk_size:
-             current_chunk += sentence + ". "
-         else:
-             chunks.append(current_chunk.strip())
-             current_chunk = sentence + ". "
-     if current_chunk:
-         chunks.append(current_chunk.strip())
-     return chunks
-
- # Prepare embeddings for each book
- model = SentenceTransformer("all-MiniLM-L6-v2")
- index = faiss.IndexFlatL2(model.get_sentence_embedding_dimension())
- chunked_texts = [chunk_text(text) for text in all_texts]
- all_chunks = [chunk for chunks in chunked_texts for chunk in chunks]
- embeddings = model.encode(all_chunks, convert_to_tensor=True).detach().cpu().numpy()
- index.add(embeddings)
-
- # Function to generate a response grounded in the retrieved chunks
- def respond(message, history, system_message, max_tokens, temperature, top_p):
-     # Step 1: Retrieve the most relevant chunks for the user message
-     query_embedding = model.encode([message], convert_to_tensor=True).detach().cpu().numpy()
-     k = 5
-     _, indices = index.search(query_embedding, k)
-     relevant_chunks = " ".join([all_chunks[idx] for idx in indices[0]])
-
-     # Step 2: Build the user prompt around the retrieved context
-     # (the system message is sent separately as the system role below)
-     prompt = f"User Query: {message}\n\nRelevant Information: {relevant_chunks}"
-     response = ""
-
-     # Step 3: Stream the model response token by token
-     for chunk in client.chat_completion(
-         [{"role": "system", "content": system_message}, {"role": "user", "content": prompt}],
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = chunk.choices[0].delta.content
-         if token:
-             response += token
-             yield response
-
- # HTML for navigation bar
- navbar_html = """
- <div style="background-color: #f1f1f1; padding: 10px; text-align: center;">
-     <a href="#about" style="margin-right: 20px; text-decoration: none; font-weight: bold;">About Us</a>
-     <a href="#team" style="text-decoration: none; font-weight: bold;">Team</a>
- </div>
- """
-
- # About Us section
- about_us = """
- # About Us
- *Personal Psychologist AI* is an AI-driven mental health assistant designed to provide users with mental health support and guidance. The application uses natural language processing (NLP) techniques to analyze user input and provide insightful, empathetic, and actionable responses. The assistant grounds its answers in mental health reference material, including the DSM-5, to offer guidance based on text input.
-
- ### Key Features:
- - **AI-Powered Mental Health Support**: Uses an instruction-tuned large language model (Zephyr-7B-beta) served through the Hugging Face Inference API.
- - **DSM-5 Integration**: The app uses the DSM-5 (Diagnostic and Statistical Manual of Mental Disorders) to guide responses related to various mental health conditions.
- - **Personalized Recommendations**: Based on user input, the AI provides personalized advice, coping strategies, and insights.
- - **Fast Information Retrieval**: Sentence embeddings (Sentence-BERT) and FAISS (Facebook AI Similarity Search) are used to retrieve relevant passages from mental health references.
- - **User Interaction**: Users can input their mental health concerns or queries, and the AI generates helpful and empathetic responses.
-
- ### Project Team:
- - **Hamaad Ayub Khan**: hakgs1234@gmail.com | [LinkedIn](https://linkedin.com/in/hamaadayubkhan)
- - **Muhammad Zeeshan Burki**: zeshanburki42@gmail.com | [LinkedIn](https://linkedin.com/in/muhammad-zeeshan-burki)
- - **Izhan Sajid**: izhansajid847@gmail.com | [LinkedIn](https://linkedin.com/in/izhan-sajid-70a6bb302)
- - **Izza Shahzad**: izzashahzad2003@gmail.com | [LinkedIn](https://linkedin.com/in/izza-shahzad-b43273267)
- - **Salar Tariq**: msalartariq05@gmail.com | [LinkedIn](https://linkedin.com/in/msalaartariq)
- """
-
- # Gradio ChatInterface with additional inputs
- demo = gr.ChatInterface(
-     respond,
-     title="Personal Psychologist",
-     description="AI-driven mental health assistant based on DSM-5.",
-     additional_inputs=[
-         gr.Textbox(value="You are a helpful and empathetic mental health assistant.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
-     ],
- )
-
- # Combine Navbar and About Us section with the interface
- with demo:
-     gr.HTML(navbar_html)
-     gr.Markdown(about_us)
-
- # Launch the Gradio interface
- if __name__ == "__main__":
-     demo.launch()