import streamlit as st
import pytesseract
import torch
from PIL import Image
from transformers import AutoTokenizer, AutoModelForSequenceClassification

st.title(':blue[_SnapCode_]')
st.markdown("_Extract code blocks out of Screenshots and Images_")

with st.spinner('Loading the code vs. natural language classification model'):
    model_id = "vishnun/codenlbert-tiny"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForSequenceClassification.from_pretrained(model_id)
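    # Note: Streamlit reruns this script on every interaction; caching the
    # model (e.g. via @st.cache_resource) would avoid reloading it each time.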

st.success('Model loaded')

def classify_text(text):
    # Classify a single OCR line as code or natural language.
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits

    predicted_class_id = logits.argmax().item()
    return model.config.id2label[predicted_class_id]

uploaded_file = st.file_uploader("Upload an image to extract code from", type=['png', 'jpeg', 'jpg'])

if uploaded_file is not None:
    img = Image.open(uploaded_file)
    # Run OCR and split the result into non-empty lines.
    ocr_lines = [x for x in pytesseract.image_to_string(img).split("\n") if x != '']
    # Keep only the lines the classifier labels as code.
    code_lines = [line for line in ocr_lines if classify_text(line).upper() == 'CODE']

    st.markdown('**Uploaded Image**')
    st.image(img, caption='Uploaded Image')
    st.markdown("**Retrieved Code Block**")
    st.code("\n".join(code_lines), language="python", line_numbers=False)
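
# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py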