tkdehf2 committed on
Commit
40c5fd4
·
verified ·
1 Parent(s): b028760

Create social exam

Browse files
Files changed (1) hide show
  1. social exam +94 -0
social exam ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import TrOCRProcessor, VisionEncoderDecoderModel
3
+ import torch
4
+ from PIL import Image
5
+ import numpy as np
6
+
7
# OCR backbone: TrOCR processor + vision-encoder/decoder model.
# NOTE(review): this checkpoint is trained on English handwriting; recognition
# quality on Korean answers should be verified against real samples.
_TROCR_CHECKPOINT = 'microsoft/trocr-base-handwritten'
processor = TrOCRProcessor.from_pretrained(_TROCR_CHECKPOINT)
model = VisionEncoderDecoderModel.from_pretrained(_TROCR_CHECKPOINT)
10
+
11
# Answer/explanation database, keyed by question number (string, matches the
# dropdown choices in the UI).
answer_key = {
    number: {"answer": answer, "explanation": explanation}
    for number, answer, explanation in (
        (
            "1",
            "๋ฏผ์ฃผ์ฃผ์˜",
            "๋ฏผ์ฃผ์ฃผ์˜๋Š” ๊ตญ๋ฏผ์ด ์ฃผ์ธ์ด ๋˜์–ด ๋‚˜๋ผ์˜ ์ค‘์š”ํ•œ ์ผ์„ ๊ฒฐ์ •ํ•˜๋Š” ์ œ๋„์ž…๋‹ˆ๋‹ค. ์šฐ๋ฆฌ๋‚˜๋ผ๋Š” ๋ฏผ์ฃผ์ฃผ์˜ ๊ตญ๊ฐ€๋กœ, ๊ตญ๋ฏผ๋“ค์ด ํˆฌํ‘œ๋ฅผ ํ†ตํ•ด ๋Œ€ํ‘œ์ž๋ฅผ ์„ ์ถœํ•˜๊ณ  ์ค‘์š”ํ•œ ๊ฒฐ์ •์— ์ฐธ์—ฌํ•ฉ๋‹ˆ๋‹ค.",
        ),
        (
            "2",
            "์‚ผ๊ถŒ๋ถ„๋ฆฝ",
            "์‚ผ๊ถŒ๋ถ„๋ฆฝ์€ ์ž…๋ฒ•๋ถ€, ํ–‰์ •๋ถ€, ์‚ฌ๋ฒ•๋ถ€๋กœ ๊ถŒ๋ ฅ์„ ๋‚˜๋ˆ„์–ด ์„œ๋กœ ๊ฒฌ์ œ์™€ ๊ท ํ˜•์„ ์ด๋ฃจ๊ฒŒ ํ•˜๋Š” ์ œ๋„์ž…๋‹ˆ๋‹ค. ์ด๋ฅผ ํ†ตํ•ด ํ•œ ์ชฝ์— ๊ถŒ๋ ฅ์ด ์ง‘์ค‘๋˜๋Š” ๊ฒƒ์„ ๋ง‰์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.",
        ),
        (
            "3",
            "์ง€๋ฐฉ์ž์น˜์ œ๋„",
            "์ง€๋ฐฉ์ž์น˜์ œ๋„๋Š” ์ง€์—ญ์˜ ์ผ์„ ๊ทธ ์ง€์—ญ ์ฃผ๋ฏผ๋“ค์ด ์ง์ ‘ ๊ฒฐ์ •ํ•˜๊ณ  ์ฒ˜๋ฆฌํ•˜๋Š” ์ œ๋„์ž…๋‹ˆ๋‹ค. ์ฃผ๋ฏผ๋“ค์ด ์ง์ ‘ ์ง€๋ฐฉ์ž์น˜๋‹จ์ฒด์žฅ๊ณผ ์ง€๋ฐฉ์˜ํšŒ ์˜์›์„ ์„ ์ถœํ•ฉ๋‹ˆ๋‹ค.",
        ),
    )
}
26
+
27
def preprocess_image(image):
    """Normalize the uploaded answer image to an RGB ``PIL.Image``.

    Gradio delivers the upload as a numpy array (``type="numpy"`` in the
    interface); a PIL image passed directly is accepted too.

    Generalization: convert to RGB mode so RGBA or grayscale uploads do not
    break the downstream TrOCR processor — a no-op for images already in RGB.
    """
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)
    return image.convert("RGB")
32
+
33
def recognize_text(image):
    """Run handwriting OCR on *image* and return the decoded text string."""
    pil_image = preprocess_image(image)
    encoded = processor(pil_image, return_tensors="pt")

    # Inference only — disable gradient tracking while generating token ids.
    with torch.no_grad():
        token_ids = model.generate(encoded.pixel_values)

    decoded = processor.batch_decode(token_ids, skip_special_tokens=True)
    return decoded[0]
43
+
44
def grade_answer(question_number, student_answer):
    """Grade *student_answer* against the key for *question_number*.

    Matching ignores all spaces and letter case. Returns a dict with the
    verdict, the correct answer, and its explanation (Korean keys preserved
    for the UI).
    """
    entry = answer_key[question_number]

    def canonical(text):
        # Spacing- and case-insensitive comparison form.
        return text.replace(" ", "").lower()

    is_correct = canonical(student_answer) == canonical(entry["answer"])

    return {
        "์ •๋‹ต ์—ฌ๋ถ€": "์ •๋‹ต" if is_correct else "์˜ค๋‹ต",
        "์ •๋‹ต": entry["answer"],
        "ํ•ด์„ค": entry["explanation"],
    }
57
+
58
def process_submission(image, question_number):
    """OCR the uploaded answer image, grade it, and return a report string.

    Parameters: *image* is a numpy array from the Gradio image component (or
    None when nothing was uploaded); *question_number* is the dropdown value.
    Returns a user-facing message (Korean, as rendered by the UI).
    """
    # BUG FIX: the original `if not image or ...` raises ValueError for a
    # multi-element numpy array ("truth value of an array is ambiguous").
    # Test for a missing/empty image and a missing question explicitly.
    image_missing = image is None or (isinstance(image, np.ndarray) and image.size == 0)
    if image_missing or not question_number:
        return "์ด๋ฏธ์ง€์™€ ๋ฌธ์ œ ๋ฒˆํ˜ธ๋ฅผ ๋ชจ๋‘ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”."

    # Handwriting recognition.
    recognized_text = recognize_text(image)

    # Grading and explanation lookup.
    result = grade_answer(question_number, recognized_text)

    # Format the report shown in the output textbox.
    output = f"""
    ์ธ์‹๋œ ๋‹ต์•ˆ: {recognized_text}
    ์ฑ„์  ๊ฒฐ๊ณผ: {result['์ •๋‹ต ์—ฌ๋ถ€']}
    ์ •๋‹ต: {result['์ •๋‹ต']}

    [ํ•ด์„ค]
    {result['ํ•ด์„ค']}
    """

    return output
80
+
81
# Build the Gradio UI: handwritten-answer upload + question selector in,
# grading report out.
_inputs = [
    gr.Image(label="๋‹ต์•ˆ ์ด๋ฏธ์ง€๋ฅผ ์—…๋กœ๋“œํ•˜์„ธ์š”", type="numpy"),
    gr.Dropdown(choices=["1", "2", "3"], label="๋ฌธ์ œ ๋ฒˆํ˜ธ๋ฅผ ์„ ํƒํ•˜์„ธ์š”"),
]

iface = gr.Interface(
    fn=process_submission,
    inputs=_inputs,
    outputs=gr.Textbox(label="์ฑ„์  ๊ฒฐ๊ณผ"),
    title="์ดˆ๋“ฑํ•™๊ต ์‚ฌํšŒ ์‹œํ—˜์ง€ ์ฑ„์  ํ”„๋กœ๊ทธ๋žจ",
    description="์†๊ธ€์”จ๋กœ ์ž‘์„ฑ๋œ ์‚ฌํšŒ ์‹œํ—˜ ๋‹ต์•ˆ์„ ์ฑ„์ ํ•˜๊ณ  ํ•ด์„ค์„ ์ œ๊ณตํ•˜๋Š” ํ”„๋กœ๊ทธ๋žจ์ž…๋‹ˆ๋‹ค.",
)

# Start the web app only when executed as a script, not on import.
if __name__ == "__main__":
    iface.launch()