Spaces:
Sleeping
Sleeping
File size: 3,481 Bytes
415cfe5 dd56e2a 4473d1a c73cad2 415cfe5 4473d1a 415cfe5 92d67dd 93a5e08 92d67dd 93a5e08 c49c76a eee0b5e 2e46dd2 1fe91ac 1e8e9b1 415cfe5 93a5e08 1e8e9b1 415cfe5 1e8e9b1 415cfe5 703c4df 10f43f0 2e46dd2 b6b79c3 aa1786a d572e23 415cfe5 21d8d6a 415cfe5 50629ff 415cfe5 07fa4ec 2e46dd2 51b662a 415cfe5 516d157 03110c9 4a8ac98 77c2ee2 703c4df eee0b5e 2e46dd2 10f43f0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 |
import PyPDF2
import nltk
import random
import streamlit as st
from openai import OpenAI
from dotenv import load_dotenv
import os
# Download the NLTK sentence-tokenizer models (no-op if already present).
nltk.download('punkt')
# Load variables from a local .env file into the process environment.
load_dotenv()
# Read the OpenAI API key from the environment; None if the variable is unset.
openai_api_key = os.getenv('OPENAI_API_KEY')
def extract_text_from_pdf(pdf_file):
    """Extract and concatenate the text of every page of a PDF.

    Args:
        pdf_file: A path or binary file-like object accepted by
            ``PyPDF2.PdfReader`` (e.g. the UploadedFile from
            ``st.file_uploader``).

    Returns:
        str: The text of all pages joined together; an empty string when
        the PDF has no extractable text.
    """
    pdf_reader = PyPDF2.PdfReader(pdf_file)
    # extract_text() may return None for pages without a text layer;
    # "or ''" prevents a TypeError during concatenation. join() also avoids
    # the quadratic cost of repeated string +=.
    return "".join(page.extract_text() or "" for page in pdf_reader.pages)
def generate_mcqs_on_topic(text, topic, num_mcqs=5):
    """Build up to *num_mcqs* multiple-choice questions about *topic*.

    Splits *text* into sentences, samples a random subset, and asks
    ChatGPT to turn each sampled sentence into a question.

    Args:
        text: Source document text to draw sentences from.
        topic: Topic the quiz should focus on.
        num_mcqs: Maximum number of questions to generate.

    Returns:
        list: One generated-question dict per sampled sentence.
    """
    # Split the source text into sentences.
    all_sentences = nltk.sent_tokenize(text)
    # Never request more sentences than actually exist.
    sample_size = min(num_mcqs, len(all_sentences))
    chosen_sentences = random.sample(all_sentences, sample_size)
    # One interactive ChatGPT question per sampled sentence.
    return [
        generate_question_with_chatgpt(sentence, topic)
        for sentence in chosen_sentences
    ]
def generate_question_with_chatgpt(context, topic):
    """Ask ChatGPT to produce a quiz question about *topic* for *context*.

    Args:
        context: Sentence from the source document to base the question on.
        topic: Topic the quiz should focus on.

    Returns:
        dict: Keys ``'content'`` (question text), ``'options'`` (list) and
        ``'correct_answer'``. The chat-completions API returns only free
        text, so ``'options'`` and ``'correct_answer'`` keep their default
        placeholder values.
    """
    # Pass the key loaded from .env explicitly instead of relying on the
    # client discovering it implicitly.
    client = OpenAI(api_key=openai_api_key)
    # Fallback returned when the API call fails or yields no choices.
    generated_question = {
        'content': "Unable to generate a question..",
        'options': [],  # assuming options is a list
        'correct_answer': "Unknown"
    }
    try:
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            max_tokens=1024,
            temperature=0.7,
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": f"What is the question on {topic} for the following? {context}"},
            ]
        )
    except Exception as exc:
        # Network/auth failures: log and return the fallback question so the
        # UI keeps working instead of crashing the Streamlit script.
        print("OpenAI API call failed:", exc)
        return generated_question
    # BUG FIX: the previous code called response.json(), which in the openai
    # v1 client returns a JSON *string* — "'choices' in result" was a
    # substring test and result["choices"] raised TypeError. Use the typed
    # response object directly instead.
    if response.choices:
        generated_question['content'] = response.choices[0].message.content
    else:
        print("Unexpected API response format.")
    return generated_question
def main():
    """Render the Streamlit UI and drive quiz generation."""
    # Application heading.
    st.header("🤖CB Quiz Generator🧠", divider='rainbow')
    st.subheader("☕CoffeeBeans☕")
    # Collect the user's inputs.
    uploaded_pdf = st.file_uploader("Upload PDF Document:", type=["pdf"])
    question_count = st.number_input("Enter Number of MCQs to Generate:", min_value=1, step=1, value=5)
    quiz_topic = st.text_input("Enter the Topic in which the quiz has to be generated")
    # Do nothing until the user clicks the button.
    if not st.button("Generate Quiz"):
        return
    # A PDF is required before we can generate anything.
    if not uploaded_pdf:
        st.error("Please upload a PDF document.")
        return
    document_text = extract_text_from_pdf(uploaded_pdf)
    questions = generate_mcqs_on_topic(document_text, quiz_topic, question_count)
    # Display the generated questions.
    st.success(f"Generated {question_count} Questions:")
    for index, question in enumerate(questions, start=1):
        st.write(f"\nQuestion {index}: {question['content']}")
        st.write(f"Options: {', '.join(question['options'])}")
        st.write(f"Correct Answer: {question['correct_answer']}")
# Run the Streamlit app only when executed directly, not when imported.
if __name__ == "__main__":
    main()
|