import requests
import tensorflow as tf
import pandas as pd
import numpy as np
from operator import add
from functools import reduce
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio.Seq import Seq
from keras.models import load_model
import random
import pyBigWig

# configure GPUs: enable memory growth and restrict TensorFlow to the first GPU
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, enable=True)
if len(tf.config.list_physical_devices('GPU')) > 0:
    tf.config.experimental.set_visible_devices(tf.config.list_physical_devices('GPU')[0], 'GPU')

# One-hot encoding for nucleotides
ntmap = {'A': (1, 0, 0, 0),
         'C': (0, 1, 0, 0),
         'G': (0, 0, 1, 0),
         'T': (0, 0, 0, 1)
         }

def get_seqcode(seq):
    # One-hot encode a DNA sequence into an array of shape (1, len(seq), 4)
    return np.array(reduce(add, map(lambda c: ntmap[c], seq.upper()))).reshape(
        (1, len(seq), -1))

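# --- Illustrative check (not part of the original pipeline; the 23-nt sequence
# below is a made-up example) ---
# A 20-nt protospacer plus the 3-nt NGG PAM gives a 23-nt target, so get_seqcode
# returns an array of shape (1, 23, 4), which downstream code reshapes to the
# (-1, 4, 1, 23) layout expected by the on-target model:
#   demo = get_seqcode("GACGTACGTACGTACGTACGAGG")
#   demo.shape                          # (1, 23, 4)
#   demo.reshape(-1, 4, 1, 23).shape    # (1, 4, 1, 23)
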
class DCModelOntar:
    # Thin wrapper around a pre-trained Keras on-target efficiency model
    def __init__(self, ontar_model_dir, is_reg=False):
        # is_reg is kept for API compatibility but is unused here
        self.model = load_model(ontar_model_dir)

    def ontar_predict(self, x, channel_first=True):
        if channel_first:
            # Move the channel axis last: (N, 4, 1, 23) -> (N, 1, 23, 4)
            x = x.transpose([0, 2, 3, 1])
        yp = self.model.predict(x)
        return yp.ravel()

def fetch_ensembl_transcripts(gene_symbol):
    # Look up a human gene by symbol via the Ensembl REST API and return its transcripts
    url = f"https://rest.ensembl.org/lookup/symbol/homo_sapiens/{gene_symbol}?expand=1;content-type=application/json"
    response = requests.get(url)
    if response.status_code == 200:
        gene_data = response.json()
        if 'Transcript' in gene_data:
            return gene_data['Transcript']
        else:
            print("No transcripts found for gene:", gene_symbol)
            return None
    else:
        print(f"Error fetching gene data from Ensembl: {response.text}")
        return None

def fetch_ensembl_sequence(transcript_id):
    # Fetch the DNA sequence for an Ensembl ID (transcript or exon) via the REST API
    url = f"https://rest.ensembl.org/sequence/id/{transcript_id}?content-type=application/json"
    response = requests.get(url)
    if response.status_code == 200:
        sequence_data = response.json()
        if 'seq' in sequence_data:
            return sequence_data['seq']
        else:
            print("No sequence found for transcript:", transcript_id)
            return None
    else:
        print(f"Error fetching sequence data from Ensembl: {response.text}")
        return None

def find_crispr_targets(sequence, chr, start, strand, transcript_id, exon_id, pam="NGG", target_length=20):
    # Scan a sequence for SpCas9 NGG PAM sites and collect 20-nt protospacers plus PAM
    targets = []
    len_sequence = len(sequence)
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    dnatorna = {'A': 'A', 'T': 'U', 'C': 'C', 'G': 'G'}

    if strand == -1:
        # For minus-strand exons, complement each base (the sequence is not reversed)
        sequence = ''.join([complement[base] for base in sequence])

    for i in range(len_sequence - len(pam) + 1):
        # The PAM starts at position i; only its "GG" part is matched ("N" is any base)
        if sequence[i + 1:i + 3] == pam[1:]:
            if i >= target_length:
                # 20-nt protospacer immediately upstream of the PAM, plus the 3-nt PAM itself
                target_seq = sequence[i - target_length:i + 3]
                tar_start = start + i - target_length
                tar_end = start + i + 3
                gRNA = ''.join([dnatorna[base] for base in sequence[i - target_length:i]])
                targets.append([target_seq, gRNA, chr, str(tar_start), str(tar_end), str(strand), transcript_id, exon_id])

    return targets

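# --- Illustrative example (toy sequence, not part of the original pipeline) ---
# A 20-nt poly-T stretch followed by an "AGG" PAM yields exactly one hit:
#   toy = "T" * 20 + "AGG" + "ACGT"
#   find_crispr_targets(toy, chr="1", start=100, strand=1,
#                       transcript_id="ENST_DEMO", exon_id="ENSE_DEMO")
#   -> [["TTTTTTTTTTTTTTTTTTTTAGG", "UUUUUUUUUUUUUUUUUUUU",
#        "1", "100", "123", "1", "ENST_DEMO", "ENSE_DEMO"]]
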
# Function to predict on-target efficiency and format output
def format_prediction_output(targets, model_path):
    dcModel = DCModelOntar(model_path)
    formatted_data = []

    for target in targets:
        # Encode the 23-nt target sequence (protospacer + PAM) for the model
        encoded_seq = get_seqcode(target[0]).reshape(-1, 4, 1, 23)

        # Predict on-target efficiency using the model
        prediction = dcModel.ontar_predict(encoded_seq)

        # Format output row: [chr, start, end, strand, transcript, exon, target, gRNA, score]
        gRNA = target[1]
        chr = target[2]
        start = target[3]
        end = target[4]
        strand = target[5]
        transcript_id = target[6]
        exon_id = target[7]
        formatted_data.append([chr, start, end, strand, transcript_id, exon_id, target[0], gRNA, prediction[0]])

    return formatted_data

def process_gene(gene_symbol, model_path):
    transcripts = fetch_ensembl_transcripts(gene_symbol)
    results = []
    all_exons = []  # To accumulate all exons
    all_gene_sequences = []  # To accumulate all exon sequences

    if transcripts:
        for transcript in transcripts:
            Exons = transcript['Exon']
            all_exons.extend(Exons)  # Add all exons from this transcript to the list
            transcript_id = transcript['id']

            for exon in Exons:
                exon_id = exon['id']
                gene_sequence = fetch_ensembl_sequence(exon_id)
                if gene_sequence:
                    all_gene_sequences.append(gene_sequence)  # Add this exon sequence to the list
                    start = exon['start']
                    strand = exon['strand']
                    chr = exon['seq_region_name']
                    targets = find_crispr_targets(gene_sequence, chr, start, strand, transcript_id, exon_id)
                    if targets:
                        # Predict on-target efficiency for each gRNA site
                        formatted_data = format_prediction_output(targets, model_path)
                        results.extend(formatted_data)
                else:
                    print(f"Failed to retrieve gene sequence for exon {exon_id}.")
    else:
        print("Failed to retrieve transcripts.")

    # Return the prediction rows, the exon sequences, and all exons
    return results, all_gene_sequences, all_exons

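# --- Usage sketch (the gene symbol is just an example; the model path is hypothetical) ---
# results, exon_sequences, exons = process_gene("TP53", "on_target_model.h5")
# Each row in `results` is:
#   [chr, start, end, strand, transcript_id, exon_id,
#    target sequence (20-nt protospacer + PAM), gRNA (RNA), predicted efficiency]
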
# Alternative export helpers (GenBank, BED, CSV), currently disabled:
# def create_genbank_features(data):
#     features = []
#
#     # If the input data is a DataFrame, convert it to a list of lists
#     if isinstance(data, pd.DataFrame):
#         formatted_data = data.values.tolist()
#     elif isinstance(data, list):
#         formatted_data = data
#     else:
#         raise TypeError("Data should be either a list or a pandas DataFrame.")
#
#     for row in formatted_data:
#         try:
#             start = int(row[1])
#             end = int(row[2])
#         except ValueError as e:
#             print(f"Error converting start/end to int: {row[1]}, {row[2]} - {e}")
#             continue
#
#         strand = 1 if row[3] == '+' else -1
#         location = FeatureLocation(start=start, end=end, strand=strand)
#         feature = SeqFeature(location=location, type="misc_feature", qualifiers={
#             'label': row[7],  # Use gRNA as the label
#             'note': f"Prediction: {row[8]}"  # Include the prediction score
#         })
#         features.append(feature)
#
#     return features
#
#
# def generate_genbank_file_from_df(df, gene_sequence, gene_symbol, output_path):
#     features = create_genbank_features(df)
#     record = SeqRecord(Seq(gene_sequence), id=gene_symbol, name=gene_symbol,
#                        description=f'CRISPR Cas9 predicted targets for {gene_symbol}', features=features)
#     record.annotations["molecule_type"] = "DNA"
#     SeqIO.write(record, output_path, "genbank")
#
#
# def create_bed_file_from_df(df, output_path):
#     with open(output_path, 'w') as bed_file:
#         for index, row in df.iterrows():
#             chrom = row["Chr"]
#             start = int(row["Start Pos"])  # Assuming 'Start Pos' is the column name in the df
#             end = int(row["End Pos"])  # Assuming 'End Pos' is the column name in the df
#             strand = '+' if row["Strand"] == '1' else '-'  # Assuming 'Strand' is the column name in the df
#             gRNA = row["gRNA"]
#             score = str(row["Prediction"])
#             transcript_id = row["Transcript"]  # Assuming 'Transcript' is the column name in the df
#
#             bed_file.write(f"{chrom}\t{start}\t{end}\t{gRNA}\t{score}\t{strand}\t{transcript_id}\n")
#
#
# def create_csv_from_df(df, output_path):
#     df.to_csv(output_path, index=False)

def create_bigwig(df, bigwig_path):
    # Ensure the dataframe has the required columns
    if not all(column in df.columns for column in ["Chr", "Start Pos", "End Pos", "Prediction"]):
        raise ValueError("DataFrame must contain 'Chr', 'Start Pos', 'End Pos', and 'Prediction' columns.")

    # Convert positions to integers and sort the DataFrame
    df['Start Pos'] = df['Start Pos'].astype(int)
    df['End Pos'] = df['End Pos'].astype(int)
    df = df.sort_values(by=['Chr', 'Start Pos'])

    # Prepare the BigWig header (chromosome sizes taken as the largest end position seen)
    chr_sizes = df.groupby('Chr')['End Pos'].max().to_dict()
    header = [(str(chr), size) for chr, size in chr_sizes.items()]

    # Create and write to the BigWig file
    bw = pyBigWig.open(bigwig_path, "w")
    bw.addHeader(header)

    # Group by chromosome and add entries; note that BigWig intervals must be
    # sorted and non-overlapping, so overlapping gRNA sites may need to be merged first
    for chr, group in df.groupby('Chr'):
        starts = group['Start Pos'].tolist()
        ends = group['End Pos'].tolist()
        values = group['Prediction'].astype(float).tolist()
        bw.addEntries([str(chr)] * len(starts), starts, ends=ends, values=values)

    bw.close()
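
# --- End-to-end usage sketch ---
# A minimal driver, assuming an example gene symbol ("TP53"), a hypothetical
# model file ("on_target_model.h5"), and DataFrame column names matching those
# expected by create_bigwig; adapt the names and paths to the actual setup.
if __name__ == "__main__":
    rows, exon_sequences, exons = process_gene("TP53", "on_target_model.h5")
    df = pd.DataFrame(rows, columns=["Chr", "Start Pos", "End Pos", "Strand",
                                     "Transcript", "Exon", "Target", "gRNA", "Prediction"])
    if not df.empty:
        create_bigwig(df, "crispr_targets.bw")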