import os
import gc
import random
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import torch
import tokenizers
import transformers
from transformers import AutoTokenizer, EncoderDecoderModel, AutoModelForSeq2SeqLM
import sentencepiece
from rdkit import Chem
import rdkit
import streamlit as st
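
# Streamlit app: predict the products of chemical reactions from reaction SMILES
# using the sagawa/ZINC-t5-productpredicition seq2seq model.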
st.title('predictproduct-t5')
st.markdown('#### In this space, you can predict the products of reactions from their inputs.')
st.markdown('#### The code expects input_data to be either a string or a CSV file that contains an "input" column. The string, or each value in that column, should have the format "REACTANT:{reactants of the reaction}CATALYST:{catalysts of the reaction}REAGENT:{reagents of the reaction}SOLVENT:{solvent of the reaction}".')
st.markdown('#### If there is no catalyst or reagent, fill the blank with a space. If there are multiple reactants, concatenate them with ".".')
display_text = 'input the reaction smiles (e.g. REACTANT:CNc1nc(SC)ncc1CO.O.O=[Cr](=O)([O-])O[Cr](=O)(=O)[O-].[Na+]CATALYST: REAGENT: SOLVENT:CC(=O)O)'
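
# Example of the expected input string (the same example is used as the text-area placeholder above);
# multiple reactants are joined with "." and missing roles (catalyst, reagent) are left as a single space:
#   REACTANT:CNc1nc(SC)ncc1CO.O.O=[Cr](=O)([O-])O[Cr](=O)(=O)[O-].[Na+]CATALYST: REAGENT: SOLVENT:CC(=O)O
# An uploaded CSV is expected to contain an "input" column whose rows follow the same format.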

class CFG():
    num_beams = st.number_input(label='num beams', min_value=1, max_value=10, value=5, step=1)
    num_return_sequences = num_beams
    uploaded_file = st.file_uploader("Choose a CSV file")
    input_data = st.text_area(display_text)
    model_name_or_path = 'sagawa/ZINC-t5-productpredicition'
    model = 't5'
    seed = 42
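
# Everything below runs only after the user presses the "predict" button.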
if st.button('predict'):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def seed_everything(seed=42):
        random.seed(seed)
        os.environ['PYTHONHASHSEED'] = str(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
    seed_everything(seed=CFG.seed)
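
    # Load the tokenizer and the pretrained seq2seq model chosen in CFG
    # ('t5' uses AutoModelForSeq2SeqLM; 'deberta' uses an EncoderDecoderModel)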
    tokenizer = AutoTokenizer.from_pretrained(CFG.model_name_or_path, return_tensors='pt')

    if CFG.model == 't5':
        model = AutoModelForSeq2SeqLM.from_pretrained(CFG.model_name_or_path).to(device)
    elif CFG.model == 'deberta':
        model = EncoderDecoderModel.from_pretrained(CFG.model_name_or_path).to(device)
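
    # Two input paths: an uploaded CSV with an "input" column (batch prediction),
    # or a single reaction string typed into the text area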
    if CFG.uploaded_file is not None:
        input_data = pd.read_csv(CFG.uploaded_file)
        outputs = []
        for idx, row in input_data.iterrows():
            input_compound = row['input']
            # Rough output-length bounds derived from where 'CATALYST' appears in the input string
            min_length = min(input_compound.find('CATALYST') - input_compound.find(':') - 10, 0)
            inp = tokenizer(input_compound, return_tensors='pt').to(device)
            output = model.generate(**inp, min_length=min_length, max_length=min_length + 50, num_beams=CFG.num_beams, num_return_sequences=CFG.num_return_sequences, return_dict_in_generate=True, output_scores=True)
            scores = output['sequences_scores'].tolist()
            output = [tokenizer.decode(i, skip_special_tokens=True).replace('. ', '.').rstrip('.') for i in output['sequences']]
            # Keep the highest-ranked beam that RDKit can parse as a valid molecule
            for ith, out in enumerate(output):
                mol = Chem.MolFromSmiles(out.rstrip('.'))
                if isinstance(mol, rdkit.Chem.rdchem.Mol):
                    output.append(out.rstrip('.'))
                    scores.append(scores[ith])
                    break
            if mol is None:  # no beam yielded a valid SMILES
                output.append(None)
                scores.append(None)
            output += scores
            output = [input_compound] + output
            outputs.append(output)
        output_df = pd.DataFrame(outputs, columns=['input'] + [f'{i}th' for i in range(CFG.num_beams)] + ['valid compound'] + [f'{i}th score' for i in range(CFG.num_beams)] + ['valid compound score'])
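
        # Offer the batch predictions as a downloadable CSV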
        @st.cache
        def convert_df(df):
            # IMPORTANT: Cache the conversion to prevent computation on every rerun
            return df.to_csv(index=False)

        csv = convert_df(output_df)
        st.download_button(
            label="Download data as CSV",
            data=csv,
            file_name='output.csv',
            mime='text/csv',
        )
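
    # Single text-area input: run the same generation and validity check,
    # then display the result as a table and offer it for download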
    else:
        input_compound = CFG.input_data
        # Rough output-length bounds derived from where 'CATALYST' appears in the input string
        min_length = min(input_compound.find('CATALYST') - input_compound.find(':') - 10, 0)
        inp = tokenizer(input_compound, return_tensors='pt').to(device)
        output = model.generate(**inp, min_length=min_length, max_length=min_length + 50, num_beams=CFG.num_beams, num_return_sequences=CFG.num_return_sequences, return_dict_in_generate=True, output_scores=True)
        scores = output['sequences_scores'].tolist()
        output = [tokenizer.decode(i, skip_special_tokens=True).replace('. ', '.').rstrip('.') for i in output['sequences']]
        # Keep the highest-ranked beam that RDKit can parse as a valid molecule
        for ith, out in enumerate(output):
            mol = Chem.MolFromSmiles(out.rstrip('.'))
            if isinstance(mol, rdkit.Chem.rdchem.Mol):
                output.append(out.rstrip('.'))
                scores.append(scores[ith])
                break
        if mol is None:  # no beam yielded a valid SMILES
            output.append(None)
            scores.append(None)
        output += scores
        output = [input_compound] + output
        try:
            output_df = pd.DataFrame(np.array(output).reshape(1, -1), columns=['input'] + [f'{i}th' for i in range(CFG.num_beams)] + ['valid compound'] + [f'{i}th score' for i in range(CFG.num_beams)] + ['valid compound score'])
            st.table(output_df)

            @st.cache
            def convert_df(df):
                # IMPORTANT: Cache the conversion to prevent computation on every rerun
                return df.to_csv(index=False)

            csv = convert_df(output_df)
            st.download_button(
                label="Download data as CSV",
                data=csv,
                file_name='output.csv',
                mime='text/csv',
            )
        except:
            pass