import os
import logging

import openai
import pandas as pd
import gradio as gr
from fpdf import FPDF
from llama_index import Document
from llama_index.llms import OpenAI
from llama_index.embeddings import OpenAIEmbedding, HuggingFaceEmbedding

from RAG_utils import PDFProcessor_Unstructured, PDFQueryEngine, HybridRetriever, MixtralLLM, KeywordSearch, base_utils, ConfigManager
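
# RAG_utils is a project-local module; it provides the PDF-processing, retrieval,
# query-engine, keyword-search and configuration helpers used throughout this script.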


# Configure basic logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# Create a logger object
logger = logging.getLogger(__name__)

os.environ["TOKENIZERS_PARALLELISM"] = "false"

config_manager = ConfigManager()
config_manager.load_config("model", "model_config.json")

# API keys are read from the environment (previously loaded from Config/api_config.json)
openai.api_key = os.environ["OPENAI_API_KEY"]
hf_token = os.environ["HF_TOKEN"]
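
# Illustrative shape of model_config.json (keys inferred from the reads below;
# the values shown are placeholders, not the project's actual settings):
# {
#   "pdf_processing": {...},
#   "allowed_extensions": ["pdf"],
#   "embeddings": "openai",
#   "embeddings_model": "...",
#   "model_temp": 0.0,
#   "max_tokens": 512,
#   "context_window": 32768,
#   "GPT_PROMPT_PATH": "...", "MISTRAL_PROMPT_PATH": "...", "INFO_PROMPT_PATH": "...",
#   "peer_review_journals_path": "...", "eq_network_journals_path": "...",
#   "queries": [...], "criteria": [...],
#   "author_query": "...", "journal_query": "..."
# }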

# PDF rendering and chunking parameters
pdf_processing_config = config_manager.get_config_value("model", "pdf_processing")


ALLOWED_EXTENSIONS = config_manager.get_config_value("model", "allowed_extensions")
embed = config_manager.get_config_value("model", "embeddings")
embed_model_name = config_manager.get_config_value("model", "embeddings_model")


# llm_model is selected in the Gradio UI below rather than read from config
model_temperature = config_manager.get_config_value("model", "model_temp")
output_token_size = config_manager.get_config_value("model", "max_tokens")
model_context_window = config_manager.get_config_value("model", "context_window")

gpt_prompt_path = config_manager.get_config_value("model", "GPT_PROMPT_PATH")
mistral_prompt_path = config_manager.get_config_value("model", "MISTRAL_PROMPT_PATH")
info_prompt_path = config_manager.get_config_value("model", "INFO_PROMPT_PATH")

peer_review_journals_path = config_manager.get_config_value("model", "peer_review_journals_path")
eq_network_journals_path = config_manager.get_config_value("model", "eq_network_journals_path")

queries = config_manager.get_config_value("model", "queries")
criteria = config_manager.get_config_value("model", "criteria")
num_criteria = len(queries)

author_query = config_manager.get_config_value("model", "author_query")
journal_query = config_manager.get_config_value("model", "journal_query")


# Log the current working directory (relative font and prompt paths resolve against it)
logger.info(f"The current working directory is: {os.getcwd()}")

# Helper function to check if the file extension is allowed
def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
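
# Example: allowed_file("paper_42.pdf") -> True when "pdf" is in ALLOWED_EXTENSIONS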

def generate_score_bar(score, num_criteria):
    # Scale the raw score (out of num_criteria) to a 100-point scale
    score_out_of_100 = round((score / num_criteria) * 100)

    # Determine the color and label; these thresholds assume a nine-criteria rubric
    if score == 9:
        color = "#4CAF50"  # green
        text = "Very good"
    elif score in [7, 8]:
        color = "#FFEB3B"  # yellow
        text = "Good"
    elif score in [5, 6]:
        color = "#FF9800"  # orange
        text = "Ok"
    elif score in [3, 4]:
        color = "#F44336"  # red
        text = "Bad"
    else:  # score < 3
        color = "#800000"  # maroon
        text = "Very bad"

    # Create the HTML for the score bar
    score_bar_html = f"""
        <div style="background-color: #ddd; border-radius: 10px; position: relative; height: 20px; width: 100%;">
            <div style="background-color: {color}; height: 100%; border-radius: 10px; width: {score_out_of_100}%;"></div>
        </div>
        <p style="color: {color};">{text}</p>  <!-- Display the text -->
    """
    return score_bar_html
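
# Example: with a nine-criteria rubric, generate_score_bar(7, 9) returns a yellow
# bar filled to 78% labelled "Good"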

class PDF(FPDF):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Load the DejaVu font files
        self.add_font('DejaVu', '', 'DejaVu_Sans/DejaVuSansCondensed.ttf', uni=True)
        self.add_font('DejaVu', 'B', 'DejaVu_Sans/DejaVuSansCondensed-Bold.ttf', uni=True)
        self.add_font('DejaVu', 'I', 'DejaVu_Sans/DejaVuSansCondensed-Oblique.ttf', uni=True)

    def header(self):
        self.set_font('DejaVu', 'B', 12)
        self.cell(0, 10, 'Paper Analysis Report', 0, 1, 'C')

    def footer(self):
        self.set_y(-15)
        self.set_font('DejaVu', 'I', 8)
        self.cell(0, 10, f'Page {self.page_no()}', 0, 0, 'C')
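
# Note: the DejaVu .ttf files above are resolved relative to the working directory,
# so the DejaVu_Sans/ folder must be shipped alongside this script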


def create_pdf_report(title, author_info, score, criteria, reasoning_list, output_path):
    pdf = PDF()
    pdf.add_page()

    # Set margins
    pdf.set_left_margin(10)
    pdf.set_right_margin(10)

    # Title
    pdf.set_font("DejaVu", 'B', 14)
    pdf.cell(0, 10, "Title:", 0, 1)
    pdf.set_font("DejaVu", '', 12)
    pdf.multi_cell(0, 10, title)

    # Author Information
    pdf.set_font("DejaVu", 'B', 14)
    pdf.cell(0, 10, "Author Information:", 0, 1)
    pdf.set_font("DejaVu", '', 12)
    pdf.multi_cell(0, 10, author_info)

    # Score
    pdf.set_font("DejaVu", 'B', 14)
    pdf.cell(0, 10, "Score:", 0, 1)
    pdf.set_font("DejaVu", '', 12)
    pdf.multi_cell(0, 10, score)

    # Reasoning - each criterion heading in bold green, followed by its reasoning
    for heading, reasoning in zip(criteria, reasoning_list):
        pdf.set_font("DejaVu", 'B', 14)
        pdf.set_text_color(0, 128, 0)  # Green
        pdf.multi_cell(0, 10, heading)
        pdf.set_text_color(0, 0, 0)  # Reset to black
        pdf.set_font("DejaVu", '', 12)
        pdf.multi_cell(0, 10, reasoning)

    # Save the PDF to the specified output path
    pdf.output(output_path)

    return output_path  # Return the path to the generated report
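
# Minimal usage sketch (illustrative values; the score is the pre-formatted string
# produced in process_pdf below):
# create_pdf_report(
#     title="Example paper",
#     author_info="J. Doe et al.",
#     score="78/100",
#     criteria=["Criterion 1", "..."],
#     reasoning_list=["Reasoning 1", "..."],
#     output_path="/tmp/example_report.pdf",
# )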

    
def process_pdf(uploaded_files, llm_model, n_criteria=num_criteria):
    # Initialize aggregation variables
    final_score = 0
    final_reasoning = []
    final_score_bar_html = ""
    final_author_info_html = ""
    final_title_info_html = ""
    output_files = []
    for uploaded_file in uploaded_files:
        # Process the PDF file; the id is taken from names like "<prefix>_<id>.pdf"
        file_name_without_extension = os.path.splitext(os.path.basename(uploaded_file))[0]
        id_number = file_name_without_extension.split('_')[1]
        
        pdf_processor = PDFProcessor_Unstructured(pdf_processing_config)
        merged_chunks, tables, title = pdf_processor.process_pdf_file(uploaded_file)
        documents = [Document(text=t) for t in merged_chunks]

        # Prompts and Queries
        utils = base_utils()
    
        info_prompt = utils.read_from_file(info_prompt_path)

        # LLM Model choice
        try:
            if llm_model == "Model 1":
                llm = OpenAI(model="gpt-4-1106-preview", temperature=model_temperature, max_tokens=output_token_size)
                general_prompt = utils.read_from_file(gpt_prompt_path)

            elif llm_model == "Model 2":
                if any(param is None for param in [model_context_window, output_token_size, model_temperature, hf_token]):
                    raise ValueError("All parameters are required for Mistral LLM.")

               
                llm = MixtralLLM(context_window=model_context_window, num_output=output_token_size,
                           temperature=model_temperature, model_name="mistralai/Mixtral-8x7B-Instruct-v0.1", api_key=hf_token)
                general_prompt = utils.read_from_file(mistral_prompt_path)
            else:
                raise ValueError(f"Unsupported language model: {llm_model}")

        except Exception as e:
            logger.error(f"Error initializing language model '{llm_model}': {e}", exc_info=True)
            raise  # Or handle the exception as needed

        # Embedding model choice for RAG
        try:
            if embed == "openai":
                embed_model = OpenAIEmbedding(model="text-embedding-3-large")

            elif embed == "huggingface":
                # Use the specified model name
                embed_model = HuggingFaceEmbedding(embed_model_name)
        
            else:
                raise ValueError(f"Unsupported embedding model: {embed}")

        except Exception as e:
            logger.error(f"Error initializing embedding model: {e}", exc_info=True)
            raise
    
        peer_review_journals = utils.read_from_file(peer_review_journals_path)
        eq_network_journals = utils.read_from_file(eq_network_journals_path)

        peer_review_journals_list = peer_review_journals.split('\n')
        eq_network_journals_list = eq_network_journals.split('\n')

    
        modified_journal_query = "Is the given research paper published in any of the following journals: " + ", ".join(peer_review_journals_list) + "?"

        info_llm = OpenAI(model="gpt-4-1106-preview", temperature=model_temperature, max_tokens=100)
        pdf_info_query = PDFQueryEngine(documents, info_llm, embed_model, info_prompt)
        info_query_engine = pdf_info_query.setup_query_engine()
        journal_result = info_query_engine.query(modified_journal_query).response
        author_result = info_query_engine.query(author_query).response
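
        # The info engine above extracts metadata (journal, authors); the engine
        # below scores the paper against the evaluation criteria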
    
    
        pdf_criteria_query = PDFQueryEngine(documents, llm, embed_model, general_prompt)

        # Keyword checks: journal-name matching and prior-registration detection
        nlp_methods = KeywordSearch(merged_chunks)
        eq_journal_result = nlp_methods.find_journal_name(journal_result, eq_network_journals_list)
        peer_journal_result = nlp_methods.find_journal_name(journal_result, peer_review_journals_list)
        registration_result = nlp_methods.check_registration()
        
        # Evaluate the paper against the criteria with the selected LLM
        total_score, criteria_met, score_percentage, score_list, reasoning = pdf_criteria_query.evaluate_with_llm(registration_result, peer_journal_result, eq_journal_result, queries)
        
        
        try:
            # Define the path to your CSV file
            csv_file_path = '/home/user/app/storing_output.csv'
            logger.info("CSV file path: %s", csv_file_path)

            # Create a dictionary for the new row
            new_row = {
                'Id': id_number,
                'Title': title,
                'Author': author_result
            }
            new_row.update({f'score_cr_{i}': s for i, s in enumerate(score_list, 1)})
            new_row.update({f'reasoning_cr_{i}': r for i, r in enumerate(reasoning, 1)})

            # Convert new_row dictionary to a DataFrame for easy appending
            new_row_df = pd.DataFrame([new_row])
            logger.info("New row DataFrame:\n%s", new_row_df)
        
            # Check if the CSV file exists
            if os.path.exists(csv_file_path):
                # Load the existing data
                logger.info("CSV file exists. Loading existing data.")
                df = pd.read_csv(csv_file_path)
            else:
                # Or create a new DataFrame if the file does not exist
                logger.info("CSV file does not exist. Creating a new DataFrame.")
                columns = ['Id', 'Title', 'Author'] + [f'score_cr_{i}' for i in range(1, n_criteria + 1)] + [f'reasoning_cr_{i}' for i in range(1, n_criteria + 1)]
                df = pd.DataFrame(columns=columns)

            # Append the new data using pd.concat
            df = pd.concat([df, new_row_df], ignore_index=True)
        
            # Save the updated DataFrame back to CSV
            df.to_csv(csv_file_path, index=False)

            logger.info(f"Updated data saved to {csv_file_path}.")
        
        except Exception as e:
            logger.error(f"Failed to update the CSV log: {e}", exc_info=True)

        # Generate the score bar HTML
        score_bar_html = generate_score_bar(total_score, n_criteria)
        scaled_total_score = str(round((total_score / n_criteria) * 100)) + "/100"

        output_dir = "/tmp"
        # Use the bare file name here; joining an absolute path would discard output_dir
        output_path = os.path.join(output_dir, f"{file_name_without_extension}_report.pdf")
        
        create_pdf_report(title, author_result, scaled_total_score, criteria, reasoning, output_path)
        output_files.append(output_path)

    # Construct the status message once, after the loop (defined even for empty input)
    processing_message = f"Processing complete. {len(uploaded_files)} reports generated. Please download your reports below."
    return processing_message, output_files
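
# Illustrative call (assumes a file name that carries an id after the first underscore):
# message, reports = process_pdf(["/tmp/paper_42.pdf"], "Model 1")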
        


with gr.Blocks(theme=gr.themes.Glass(
    text_size="sm", 
    font=[gr.themes.GoogleFont("Inconsolata"), "Arial", "sans-serif"], 
    primary_hue="neutral", 
    secondary_hue="gray")) as demo:

    gr.Markdown("## Med Library")
    with gr.Row():
        file_upload = gr.File(label="Choose papers", file_types=['.pdf'], file_count="multiple")
        
    with gr.Row():
        model_choice = gr.Dropdown(["Model 1", "Model 2"], label="Choose a model", value="Model 1")
        submit_button = gr.Button("Evaluate")
    
    processing_message_output = gr.Textbox(label="Processing Status", interactive=False)
    report_download_links = gr.File(label="Download Reports", type="filepath", file_count="multiple")

    submit_button.click(
        fn=process_pdf,
        inputs=[file_upload, model_choice],
        outputs=[processing_message_output, report_download_links]
    )
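
    # process_pdf returns (status message, list of report paths), matching the two
    # output components wired above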


# Launch the app
demo.launch(share=True, server_name="0.0.0.0", server_port=7860)
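
# Note: server_name="0.0.0.0" with port 7860 is the usual setup for running inside
# a container (e.g. Hugging Face Spaces); share=True additionally creates a
# temporary public Gradio link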

# Main route for file upload and display results
# @app.route('/', methods=['GET', 'POST'])
# def upload_and_display_results():
#     total_score = 0
#     score_percentage = 0
#     reasoning = []
#     criteria_met = 0

#     if request.method == 'POST':
#         # Check if the post request has the file part
#         if 'file' not in request.files:
#             flash('No file part')
#             return redirect(request.url)
#         file = request.files['file']
#         # If user does not select file, browser also submits an empty part without filename
#         if file.filename == '':
#             flash('No selected file')
#             return redirect(request.url)
#         if file and allowed_file(file.filename):
#             try:
#                 # Process the PDF file
#                 pdf_processor = PDFProcessor_Unstructured(pdf_processing_config)
#                 merged_chunks, tables = pdf_processor.process_pdf_file(file)
#                 documents = [Document(text=t) for t in merged_chunks]

#                 # LLM Model choice
#                 try:
#                     if llm_model == "gpt-4" or llm_model == "gpt-3.5-turbo":
#                         llm = OpenAI(model=llm_model, temperature=model_temperature, max_tokens=output_token_size)

#                     elif llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
#                         if any(param is None for param in [model_context_window, output_token_size, model_temperature, hf_token]):
#                             raise ValueError("All parameters are required for Mistral LLM.")

#                         llm = MixtralLLM(context_window=model_context_window, num_output=output_token_size,
#                             temperature=model_temperature, model_name=llm_model, api_key=hf_token)
#                     else:
#                         raise ValueError(f"Unsupported language model: {llm_model}")

#                 except Exception as e:
#                     logger.error(f"Error initializing language model '{llm_model}': {e}", exc_info=True)
#                     raise  # Or handle the exception as needed

#                 # Embedding model choice for RAG
#                 try:
#                     if embed == "openai":
#                         embed_model = OpenAIEmbedding()

#                     elif embed == "huggingface":
#                         if embed_model_name is None:
#                             # Set to default model if name not provided
#                             embed_model_name = "BAAI/bge-small-en-v1.5"
#                             embed_model = HuggingFaceEmbedding(embed_model_name)
#                         else:
#                             # Use the specified model name
#                             embed_model = HuggingFaceEmbedding(embed_model_name)
#                     else:
#                         raise ValueError(f"Unsupported embedding model: {embed_model}")


#                 except Exception as e:
#                     logger.error(f"Error initializing embedding model: {e}", exc_info=True)
#                     raise



#                 # Prompts and Queries
#                 utils = base_utils()
#                 general_prompt = utils.read_from_file(general_prompt_path)
#                 info_prompt = utils.read_from_file(info_prompt_path)

#                 peer_review_journals = utils.read_from_file(peer_review_journals_path)
#                 eq_network_journals = utils.read_from_file(eq_network_journals_path)

#                 peer_review_journals_list = peer_review_journals.split('\n')
#                 eq_network_journals_list = eq_network_journals.split('\n')


#                 modified_journal_query = "Is the given research paper published in any of the following journals: " + ", ".join(peer_review_journals_list) + "?"

#                 pdf_info_query = PDFQueryEngine(documents, llm, embed_model, (info_prompt))
#                 info_query_engine = pdf_info_query.setup_query_engine()
#                 journal_result = info_query_engine.query(modified_journal_query).response


#                 pdf_criteria_query = PDFQueryEngine(documents, llm, embed_model, (general_prompt))

#                 # Check for prior registration
#                 nlp_methods = KeywordSearch(merged_chunks)
#                 eq_journal_result = nlp_methods.find_journal_name(journal_result, eq_network_journals_list)
#                 peer_journal_result = nlp_methods.find_journal_name(journal_result, peer_review_journals_list)
#                 registration_result = nlp_methods.check_registration()


#                 # Evaluate with OpenAI model
#                 total_score, criteria_met, score_percentage, reasoning = pdf_criteria_query.evaluate_with_llm(registration_result, peer_journal_result, eq_journal_result, queries)


#             except Exception as e:
#                 logging.exception("An error occurred while processing the file.")
#                 # Consider adding a user-friendly message or redirect
#                 flash('An error occurred while processing the file.')
#                 return redirect(request.url)

#     return render_template('index.html',
#                        total_score = total_score,
#                        score_percentage = score_percentage,
#                        criteria_met = criteria_met,
#                        reasoning = reasoning)


# if __name__ == '__main__':
#     app.run(debug=True)