Update app.py
app.py CHANGED
@@ -88,12 +88,16 @@ def analyze_dna(sequence):
     # Preprocess the input sequence
     inputs = tokenizer(sequence, truncation=True, padding='max_length', max_length=512, return_tensors="pt", return_token_type_ids=False)
 
+    print("tokenization done.")
     # Get model predictions
     _, logits = model(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'])
+
+    print("Forward pass done.")
 
     # Convert logits to probabilities
     probabilities = torch.nn.functional.softmax(logits, dim=-1).squeeze().tolist()
-
+
+    print("Probabilities, done.")
     # Get the top 5 most likely classes
     top_5_indices = sorted(range(len(probabilities)), key=lambda i: probabilities[i], reverse=True)[:5]
     top_5_probs = [probabilities[i] for i in top_5_indices]
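
For reference, the softmax and top-5 steps that this commit instruments can be exercised on their own. The snippet below is a minimal, self-contained sketch, not part of the Space's code: the dummy logits tensor and the torch.topk comparison are assumptions added for illustration (the real logits come from the Space's DNA classification model, which this diff does not show). It only reproduces the softmax and sorted-indices logic from app.py and checks that torch.topk selects the same top classes.

import torch

# Dummy logits standing in for the model output; in the Space these come from
# the DNA classification model, which is not shown in this diff.
logits = torch.tensor([[1.2, -0.3, 2.5, 0.1, 0.9, 3.1, -1.0]])

# Same conversion as app.py: softmax over the class dimension, squeezed to a
# flat Python list of per-class probabilities.
probabilities = torch.nn.functional.softmax(logits, dim=-1).squeeze().tolist()

# app.py's approach: sort class indices by probability and keep the first five.
top_5_indices = sorted(range(len(probabilities)), key=lambda i: probabilities[i], reverse=True)[:5]
top_5_probs = [probabilities[i] for i in top_5_indices]

# Equivalent result with torch.topk, which works directly on the tensor.
top5 = torch.topk(torch.tensor(probabilities), k=5)
assert top5.indices.tolist() == top_5_indices

print(top_5_indices, top_5_probs)

If the logits are kept as a tensor, torch.topk avoids the round-trip through a Python list, but for this use the result is the same as the sorted-indices approach in app.py.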