Create social exam
Browse files- social exam +94 -0
social exam
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
|
3 |
+
import torch
|
4 |
+
from PIL import Image
|
5 |
+
import numpy as np
|
6 |
+
|
# Initialize the handwriting-OCR processor and model.
# NOTE(review): this runs at import time and downloads weights on first use —
# confirm that import-time loading is intended for the deployment target.
processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')
model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten')

# Answer key and explanation database, keyed by question number as a string
# (matches the Dropdown choices "1"/"2"/"3" below). Each entry holds:
#   "answer"      — the expected answer text compared against the OCR output
#   "explanation" — feedback text shown to the student in the result report
# NOTE(review): the Korean literals below appear mojibake'd in this view
# (UTF-8 bytes rendered as EUC-KR); they are preserved byte-for-byte here —
# confirm the file's actual text encoding before editing any of them.
answer_key = {
    "1": {
        "answer": "๋ฏผ์ฃผ์ฃผ์",
        "explanation": "๋ฏผ์ฃผ์ฃผ์๋ ๊ตญ๋ฏผ์ด ์ฃผ์ธ์ด ๋์ด ๋๋ผ์ ์ค์ํ ์ผ์ ๊ฒฐ์ ํ๋ ์ ๋์๋๋ค. ์ฐ๋ฆฌ๋๋ผ๋ ๋ฏผ์ฃผ์ฃผ์ ๊ตญ๊ฐ๋ก, ๊ตญ๋ฏผ๋ค์ด ํฌํ๋ฅผ ํตํด ๋ํ์๋ฅผ ์ ์ถํ๊ณ ์ค์ํ ๊ฒฐ์ ์ ์ฐธ์ฌํฉ๋๋ค."
    },
    "2": {
        "answer": "์ผ๊ถ๋ถ๋ฆฝ",
        "explanation": "์ผ๊ถ๋ถ๋ฆฝ์ ์๋ฒ๋ถ, ํ์ ๋ถ, ์ฌ๋ฒ๋ถ๋ก ๊ถ๋ ฅ์ ๋๋์ด ์๋ก ๊ฒฌ์ ์ ๊ท ํ์ ์ด๋ฃจ๊ฒ ํ๋ ์ ๋์๋๋ค. ์ด๋ฅผ ํตํด ํ ์ชฝ์ ๊ถ๋ ฅ์ด ์ง์ค๋๋ ๊ฒ์ ๋ง์ ์ ์์ต๋๋ค."
    },
    "3": {
        "answer": "์ง๋ฐฉ์์น์ ๋",
        "explanation": "์ง๋ฐฉ์์น์ ๋๋ ์ง์ญ์ ์ผ์ ๊ทธ ์ง์ญ ์ฃผ๋ฏผ๋ค์ด ์ง์ ๊ฒฐ์ ํ๊ณ ์ฒ๋ฆฌํ๋ ์ ๋์๋๋ค. ์ฃผ๋ฏผ๋ค์ด ์ง์ ์ง๋ฐฉ์์น๋จ์ฒด์ฅ๊ณผ ์ง๋ฐฉ์ํ ์์์ ์ ์ถํฉ๋๋ค."
    }
}
def preprocess_image(image):
    """Normalize the input to a PIL image.

    A numpy array (the format delivered by the Gradio image widget) is
    converted via ``Image.fromarray``; any other value — including an
    already-constructed PIL image — is passed through untouched.
    """
    return Image.fromarray(image) if isinstance(image, np.ndarray) else image
def recognize_text(image):
    """Run TrOCR on a handwriting image and return the decoded text.

    The input is normalized to PIL, encoded to pixel tensors by the
    processor, decoded by the seq2seq model under ``torch.no_grad()``
    (inference only), and the first decoded string is returned.
    """
    encoded = processor(preprocess_image(image), return_tensors="pt")
    with torch.no_grad():
        token_ids = model.generate(encoded.pixel_values)
    decoded = processor.batch_decode(token_ids, skip_special_tokens=True)
    return decoded[0]
def grade_answer(question_number, student_answer):
    """Grade a student's answer against the answer key.

    Args:
        question_number: key into ``answer_key`` ("1", "2", "3").
        student_answer: raw text recognized from the answer sheet.

    Returns:
        dict with the verdict, the correct answer, and the explanation.
        The keys are the Korean labels that ``process_submission`` reads.

    Raises:
        KeyError: if ``question_number`` is not present in ``answer_key``
        (the UI dropdown restricts input to valid keys).
    """
    entry = answer_key[question_number]
    correct_answer = entry["answer"]
    explanation = entry["explanation"]

    # FIX: the original stripped only ASCII spaces. OCR output routinely
    # contains newlines/tabs/non-breaking spaces, so normalize away ALL
    # whitespace (str.split handles every whitespace kind) and casefold
    # for a robust case-insensitive comparison.
    def _normalize(text):
        return "".join(text.split()).casefold()

    is_correct = _normalize(student_answer) == _normalize(correct_answer)

    return {
        "์ ๋ต ์ฌ๋ถ": "์ ๋ต" if is_correct else "์ค๋ต",
        "์ ๋ต": correct_answer,
        "ํด์ค": explanation
    }
def process_submission(image, question_number):
    """Full pipeline: OCR the uploaded answer image, grade it, format feedback.

    Args:
        image: numpy array from the Gradio image widget, or None when the
            user submitted without uploading.
        question_number: selected question id ("1", "2", "3"), or empty/None.

    Returns:
        A human-readable result string, or a prompt asking for missing input.
    """
    # BUG FIX: the original used `if not image`, which raises
    # "ValueError: the truth value of an array ... is ambiguous" for every
    # real upload, because Gradio delivers the image as a numpy array.
    # Test explicitly against None instead.
    if image is None or not question_number:
        return "์ด๋ฏธ์ง์ ๋ฌธ์ ๋ฒํธ๋ฅผ ๋ชจ๋ ์๋ ฅํด์ฃผ์ธ์."

    # Handwriting recognition
    recognized_text = recognize_text(image)

    # Grading and explanation lookup
    result = grade_answer(question_number, recognized_text)

    # Format the report shown in the output textbox.
    # NOTE(review): Korean text below appears mojibake'd in this view and is
    # preserved byte-for-byte — confirm the file encoding before editing.
    output = f"""
์ธ์๋ ๋ต์: {recognized_text}
์ฑ์ ๊ฒฐ๊ณผ: {result['์ ๋ต ์ฌ๋ถ']}
์ ๋ต: {result['์ ๋ต']}

[ํด์ค]
{result['ํด์ค']}
"""
    return output
# Build the Gradio UI: an image upload plus a question-number dropdown in,
# the formatted grading report out. `type="numpy"` means process_submission
# receives the image as a numpy array (or None when nothing was uploaded).
# NOTE(review): the Korean labels/title/description appear mojibake'd in this
# view and are preserved byte-for-byte — confirm the file's encoding first.
iface = gr.Interface(
    fn=process_submission,
    inputs=[
        gr.Image(label="๋ต์ ์ด๋ฏธ์ง๋ฅผ ์๋ก๋ํ์ธ์", type="numpy"),
        gr.Dropdown(choices=["1", "2", "3"], label="๋ฌธ์ ๋ฒํธ๋ฅผ ์ ํํ์ธ์")
    ],
    outputs=gr.Textbox(label="์ฑ์ ๊ฒฐ๊ณผ"),
    title="์ด๋ฑํ๊ต ์ฌํ ์ํ์ง ์ฑ์ ํ๋ก๊ทธ๋จ",
    description="์๊ธ์จ๋ก ์์ฑ๋ ์ฌํ ์ํ ๋ต์์ ์ฑ์ ํ๊ณ ํด์ค์ ์ ๊ณตํ๋ ํ๋ก๊ทธ๋จ์๋๋ค.",
)
# Launch the web server only when executed as a script (not on import).
if __name__ == "__main__":
    iface.launch()