parquet-converter committed on
Commit 5d37eae
1 parent: e7b3733

Update parquet files

.gitattributes ADDED
@@ -0,0 +1 @@
+ default/pdbbind_complexes-train.parquet filter=lfs diff=lfs merge=lfs -text
README.md DELETED
@@ -1,45 +0,0 @@
- ---
- tags:
- - molecules
- - chemistry
- - SMILES
- ---
-
- ## How to use the data sets
-
- This dataset contains more than 16,000 unique pairs of protein sequences and ligand SMILES, together with the
- coordinates of their complexes.
-
- SMILES are assumed to be tokenized by the regex from P. Schwaller.
-
- Every (x,y,z) ligand coordinate maps onto a SMILES token, and is *nan* if the token does not represent an atom.
-
- Every receptor coordinate maps onto the Calpha coordinate of that residue.
-
- The dataset can be used to fine-tune a language model. All data comes from PDBbind-CN.
-
- ### Use the already preprocessed data
-
- Load a train/validation split using
-
- ```
- from datasets import load_dataset
- train = load_dataset("jglaser/pdbbind_complexes", split='train[:90%]')
- validation = load_dataset("jglaser/pdbbind_complexes", split='train[90%:]')
- ```
-
- ### Pre-process yourself
-
- To manually perform the preprocessing, download the data sets from PDBbind-CN.
-
- Register for an account at <https://www.pdbbind.org.cn/>, confirm the validation
- email, then log in and download
-
- - the Index files (1)
- - the general protein-ligand complexes (2)
- - the refined protein-ligand complexes (3)
-
- Extract those files into `data/pdbbind`.
-
- Run the script `pdbbind.py` in a compute job on an MPI-enabled cluster
- (e.g., `mpirun -n 64 python pdbbind.py`).
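
For orientation, a minimal sketch of the token-to-coordinate convention the deleted README describes; the column names are taken from the `pdbbind_complexes.py` loading script further down, and the snippet assumes the dataset still loads under the same repo id:

```
from datasets import load_dataset

# load a single example; split syntax as in the README above
ds = load_dataset("jglaser/pdbbind_complexes", split="train[:1]")
ex = ds[0]

print(ex["smiles"])             # ligand SMILES
print(len(ex["ligand_xyz"]))    # one (x, y, z) triple per SMILES token
print(ex["ligand_xyz"][0])      # nan triples mark tokens that are not atoms
print(len(ex["receptor_xyz"]))  # one Calpha coordinate per residue
```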
 
data/pdbbind.parquet → default/pdbbind_complexes-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aef30f419b99fb77199c8b68e260febf303b4a50a7affcaa2de7bd538e6d8673
- size 711749176
+ oid sha256:a02d99f9fb9a81c9465ea40e1232bf857725707a688034ca3388f67259ef27b8
+ size 382706029
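
The rename above rewrites only the Git LFS pointer (new sha256 and byte size), not the parquet payload itself. A quick integrity check against the new pointer, as a sketch that assumes the parquet-converter's usual `refs/convert/parquet` revision:

```
import os
from huggingface_hub import hf_hub_download

# revision is an assumption: the branch the parquet-converter bot writes to
path = hf_hub_download(
    repo_id="jglaser/pdbbind_complexes",
    filename="default/pdbbind_complexes-train.parquet",
    repo_type="dataset",
    revision="refs/convert/parquet",
)
assert os.path.getsize(path) == 382706029  # size recorded in the new pointer
```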
pdbbind.ipynb DELETED
@@ -1,517 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "markdown",
- "id": "834aeced-c3c5-42a0-bad1-41e009dd86ee",
- "metadata": {},
- "source": [
- "### Preprocessing"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "86476f6e-802a-463b-a1b0-2ae228bb92af",
- "metadata": {},
- "outputs": [],
- "source": [
- "import pandas as pd"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "9b2be11c-f4bb-4107-af49-abd78052afcf",
- "metadata": {},
- "outputs": [],
- "source": [
- "df = pd.read_table('data/pdbbind/index/INDEX_general_PL_data.2020',skiprows=4,sep=r'\\s+',usecols=[0,4]).drop(0)\n",
- "df = df.rename(columns={'#': 'name','release': 'affinity'})\n",
- "df_refined = pd.read_table('data/pdbbind/index/INDEX_refined_data.2020',skiprows=4,sep=r'\\s+',usecols=[0,4]).drop(0)\n",
- "df_refined = df_refined.rename(columns={'#': 'name','release': 'affinity'})\n",
- "df = pd.concat([df,df_refined])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "68983ab8-bf11-4ed6-ba06-f962dbdc077e",
- "metadata": {},
- "outputs": [],
- "source": [
- "quantities = ['ki','kd','ka','k1/2','kb','ic50','ec50']"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "3acbca3c-9c0b-43a1-a45e-331bf153bcfa",
- "metadata": {},
- "outputs": [],
- "source": [
- "from pint import UnitRegistry\n",
- "ureg = UnitRegistry()\n",
- "\n",
- "def to_uM(affinity):\n",
- "    val = ureg(affinity)\n",
- "    try:\n",
- "        return val.m_as(ureg.uM)\n",
- "    except Exception:\n",
- "        pass\n",
- "\n",
- "    try:\n",
- "        return 1/val.m_as(1/ureg.uM)\n",
- "    except Exception:\n",
- "        pass"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "58e5748b-2cea-43ff-ab51-85a5021bd50b",
- "metadata": {},
- "outputs": [],
- "source": [
- "df['affinity_uM'] = df['affinity'].str.split('[=\\~><]').str[1].apply(to_uM)\n",
- "df['affinity_quantity'] = df['affinity'].str.split('[=\\~><]').str[0]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "d92f0004-68c1-4487-94b9-56b4fd598de4",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "<AxesSubplot:>"
- ]
- },
- "execution_count": 6,
- "metadata": {},
- "output_type": "execute_result"
- },
- {
- "data": {
- "image/png": "<base64 PNG omitted: histogram of affinity_quantity counts>",
- "text/plain": [
- "<Figure size 432x288 with 1 Axes>"
- ]
- },
- "metadata": {
- "needs_background": "light"
- },
- "output_type": "display_data"
- }
- ],
- "source": [
- "df['affinity_quantity'].hist()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "id": "aa358835-55f3-4551-9217-e76a15de4fe8",
- "metadata": {},
- "outputs": [],
- "source": [
- "df_filter = df[df['affinity_quantity'].str.lower().isin(quantities)]\n",
- "df_filter = df_filter.dropna()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "id": "802cb9bc-2563-4d7f-9a76-3be2d9263a36",
- "metadata": {},
- "outputs": [],
- "source": [
- "cutoffs = [5,8,11,15]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "id": "d8e71a8c-11a3-41f0-ab61-3ddc57e10961",
- "metadata": {},
- "outputs": [],
- "source": [
- "dfs_complex = {c: pd.read_parquet('data/pdbbind_complex_{}.parquet'.format(c)) for c in cutoffs}"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "id": "ed3fe035-6035-4d39-b072-d12dc0a95857",
- "metadata": {},
- "outputs": [],
- "source": [
- "import dask.array as da\n",
- "import dask.dataframe as dd\n",
- "from dask.bag import from_delayed\n",
- "from dask import delayed\n",
- "import pyarrow as pa\n",
- "import pyarrow.parquet as pq"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "id": "cd26125b-e68b-4fa3-846e-2b6e7f635fe0",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(2046, 510)\n"
- ]
- }
- ],
- "source": [
- "contacts_dask = [da.from_npy_stack('data/pdbbind_contacts_{}'.format(c)) for c in cutoffs]\n",
- "shape = contacts_dask[0][0].shape\n",
- "print(shape)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "id": "9c7c9849-2345-4baf-89e7-d412f52353b6",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/html": [
- "<dask array HTML repr omitted: Bytes 2.72 GiB, Shape (700, 2046, 510), 25 Tasks / 1 Chunk, dtype float32, plus chunk-diagram SVG>"
- ],
- "text/plain": [
- "dask.array<blocks, shape=(700, 2046, 510), dtype=float32, chunksize=(700, 2046, 510), chunktype=numpy.ndarray>"
- ]
- },
- "execution_count": 12,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "contacts_dask[0].blocks[1]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "id": "0bd8e9b9-9713-4572-bd7f-dc47da9fce91",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "[16232, 16228, 16226, 16223]"
- ]
- },
- "execution_count": 13,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "[len(c) for c in contacts_dask]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 14,
- "id": "87493934-3839-476a-a975-7da057c320da",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "16232"
- ]
- },
- "execution_count": 14,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "contacts_dask[0].shape[0]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 15,
- "id": "42e95d84-ef27-4417-9479-8b356462b8c3",
- "metadata": {},
- "outputs": [],
- "source": [
- "import numpy as np\n",
- "all_partitions = []\n",
- "for c, cutoff in zip(contacts_dask,cutoffs):\n",
- "    def chunk_to_sparse(rcut, chunk, idx_chunk):\n",
- "        res = dfs_complex[rcut].iloc[idx_chunk][['name']].copy()\n",
- "        # pad to account for [CLS] and [SEP]\n",
- "        res['contacts_{}A'.format(rcut)] = [np.where(np.pad(a,pad_width=(1,1)).flatten())[0] for a in chunk]\n",
- "        return res\n",
- "\n",
- "    partitions = [delayed(chunk_to_sparse)(cutoff,b,k)\n",
- "                  for b,k in zip(c.blocks, da.arange(c.shape[0],chunks=c.chunks[0:1]).blocks)\n",
- "                  ]\n",
- "    all_partitions.append(partitions)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 16,
- "id": "5520a925-693f-43f0-9e76-df2e128f272e",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/html": [
- "<styled HTML repr of the DataFrame head omitted; see text/plain below>"
- ],
- "text/plain": [
- "  name                                        contacts_5A\n",
- "0 10gs [3083, 3084, 3086, 3087, 3088, 3089, 3094, 309...\n",
- "1 184l [39945, 39946, 39947, 39948, 43010, 43012, 430...\n",
- "2 186l [39943, 39944, 39945, 43010, 43011, 43012, 430...\n",
- "3 187l [39937, 39938, 39947, 43009, 43010, 43012, 430...\n",
- "4 188l [39937, 39938, 39940, 39941, 43009, 43010, 430..."
- ]
- },
- "execution_count": 16,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "all_partitions[0][0].compute().head()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 17,
- "id": "4982c3b1-5ce9-4f17-9834-a02c4e136bc2",
- "metadata": {},
- "outputs": [],
- "source": [
- "ddfs = [dd.from_delayed(p) for p in all_partitions]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 18,
- "id": "f6cdee43-33c6-445c-8619-ace20f90638c",
- "metadata": {},
- "outputs": [],
- "source": [
- "ddf_all = None\n",
- "for d in ddfs:\n",
- "    if ddf_all is not None:\n",
- "        ddf_all = ddf_all.merge(d, on='name')\n",
- "    else:\n",
- "        ddf_all = d\n",
- "ddf_all = ddf_all.merge(df_filter,on='name')\n",
- "ddf_all = ddf_all.merge(list(dfs_complex.values())[0],on='name')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 19,
- "id": "8f49f871-76f6-4fb2-b2db-c0794d4c07bf",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "CPU times: user 8min 53s, sys: 11min 31s, total: 20min 24s\n",
- "Wall time: 3min 29s\n"
- ]
- }
- ],
- "source": [
- "%%time\n",
- "df_all_contacts = ddf_all.compute()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 20,
- "id": "45e4b4fa-6338-4abe-bd6e-8aea46e2a09c",
- "metadata": {},
- "outputs": [],
- "source": [
- "df_all_contacts['neg_log10_affinity_M'] = 6-np.log10(df_all_contacts['affinity_uM'])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 21,
- "id": "7c3db301-6565-4053-bbd4-139bb41dd1c4",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "(array([6.34387834]), array([3.57815698]))"
- ]
- },
- "execution_count": 21,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "from sklearn.preprocessing import StandardScaler\n",
- "scaler = StandardScaler()\n",
- "df_all_contacts['affinity'] = scaler.fit_transform(df_all_contacts['neg_log10_affinity_M'].values.reshape(-1,1))\n",
- "scaler.mean_, scaler.var_"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 22,
- "id": "c9d674bb-d6a2-4810-aa2b-e3bc3b4bbc98",
- "metadata": {},
- "outputs": [],
- "source": [
- "# save to parquet\n",
- "df_all_contacts.drop(columns=['name','affinity_quantity']).astype({'affinity': 'float32','neg_log10_affinity_M': 'float32'}).to_parquet('data/pdbbind_with_contacts.parquet',index=False)"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.6"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
- }
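
The unit handling in the deleted notebook is easy to miss inside the JSON; here it is condensed into a self-contained sketch (the example affinity string is illustrative, not taken from the index files):

```
import numpy as np
from pint import UnitRegistry

ureg = UnitRegistry()

def to_uM(affinity):
    """Convert an affinity value string such as '2nM' to micromolar."""
    val = ureg(affinity)
    try:
        return val.m_as(ureg.uM)
    except Exception:
        # association constants (Ka) are given as inverse concentrations
        return 1 / val.m_as(1 / ureg.uM)

quantity, value = "Kd=2nM".split("=")  # illustrative PDBbind-style entry
uM = to_uM(value)                      # 0.002 uM
pK = 6 - np.log10(uM)                  # = -log10(2e-9 M), about 8.7, as in the notebook
print(quantity, uM, pK)
```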
 
pdbbind.py DELETED
@@ -1,177 +0,0 @@
- from mpi4py import MPI
- from mpi4py.futures import MPICommExecutor
-
- import warnings
- from Bio.PDB import PDBParser, PPBuilder, CaPPBuilder
- from Bio.PDB.NeighborSearch import NeighborSearch
- from Bio.PDB.Selection import unfold_entities
-
- import numpy as np
- import dask.array as da
-
- from rdkit import Chem
-
- from spyrmsd import molecule
- from spyrmsd import graph
- import networkx as nx
-
- import os
- import re
- import sys
-
- # all punctuation
- punctuation_regex = r"""(\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"""
-
- # tokenization regex (Schwaller)
- molecule_regex = r"""(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"""
-
- max_seq = 2046 # = 2048 - 2 (accounting for [CLS] and [SEP])
- max_smiles = 510 # = 512 - 2
- chunk_size = '1G'
-
- def rot_from_two_vecs(e0_unnormalized, e1_unnormalized):
-     """Create rotation matrices from unnormalized vectors for the x and y-axes.
-     This creates a rotation matrix from two vectors using Gram-Schmidt
-     orthogonalization.
-     Args:
-         e0_unnormalized: vectors lying along x-axis of resulting rotation
-         e1_unnormalized: vectors lying in xy-plane of resulting rotation
-     Returns:
-         Rotations resulting from Gram-Schmidt procedure.
-     """
-     # Normalize the unit vector for the x-axis, e0.
-     e0 = e0_unnormalized / np.linalg.norm(e0_unnormalized)
-
-     # make e1 perpendicular to e0.
-     c = np.dot(e1_unnormalized, e0)
-     e1 = e1_unnormalized - c * e0
-     e1 = e1 / np.linalg.norm(e1)
-
-     # Compute e2 as cross product of e0 and e1.
-     e2 = np.cross(e0, e1)
-
-     # local to space frame
-     return np.stack([e0,e1,e2]).T
-
- def get_local_frames(mol):
-     # get the two nearest neighbors of every atom on the molecular graph
-     # ties are broken using canonical ordering
-     g = molecule.Molecule.from_rdkit(mol).to_graph()
-
-     R = []
-     for node in g:
-         length = nx.single_source_shortest_path_length(g, node)
-
-         neighbor_a = [n for n,l in length.items() if l==1][0]
-
-         try:
-             neighbor_b = [n for n,l in length.items() if l==1][1]
-         except IndexError:
-             # get next nearest neighbor
-             neighbor_b = [n for n,l in length.items() if l==2][0]
-
-         xyz = np.array(mol.GetConformer().GetAtomPosition(node))
-         xyz_a = np.array(mol.GetConformer().GetAtomPosition(neighbor_a))
-         xyz_b = np.array(mol.GetConformer().GetAtomPosition(neighbor_b))
-
-         R.append(rot_from_two_vecs(xyz_a-xyz, xyz_b-xyz))
-
-     return R
-
- def parse_complex(fn):
-     try:
-         name = os.path.basename(fn)
-
-         # parse protein sequence and coordinates
-         parser = PDBParser()
-         with warnings.catch_warnings():
-             warnings.simplefilter("ignore")
-             structure = parser.get_structure('protein',fn+'/'+name+'_protein.pdb')
-
-         res_frames = []
-
-         # extract sequence, Calpha positions and local coordinate frames using the AF2 convention
-         ppb = CaPPBuilder()
-         seq = []
-         xyz_receptor = []
-         R_receptor = []
-         for pp in ppb.build_peptides(structure):
-             seq.append(str(pp.get_sequence()))
-             xyz_receptor += [tuple(a.get_vector()) for a in pp.get_ca_list()]
-
-             for res in pp:
-                 N = np.array(tuple(res['N'].get_vector()))
-                 C = np.array(tuple(res['C'].get_vector()))
-                 CA = np.array(tuple(res['CA'].get_vector()))
-
-                 R_receptor.append(rot_from_two_vecs(N-CA,C-CA).flatten().tolist())
-
-         seq = ''.join(seq)
-
-         # parse ligand, convert to SMILES and map atoms
-         suppl = Chem.SDMolSupplier(fn+'/'+name+'_ligand.sdf')
-         mol = next(suppl)
-
-         # bring molecule atoms in canonical order (to determine local frames uniquely)
-         m_neworder = tuple(zip(*sorted([(j, i) for i, j in enumerate(Chem.CanonicalRankAtoms(mol))])))[1]
-         mol = Chem.RenumberAtoms(mol, m_neworder)
-
-         # position of atoms in SMILES (not counting punctuation)
-         smi = Chem.MolToSmiles(mol)
-         atom_order = [int(s) for s in list(filter(None,re.sub(r'[\[\]]','',mol.GetProp("_smilesAtomOutputOrder")).split(',')))]
-
-         # tokenize the SMILES
-         tokens = list(filter(None, re.split(molecule_regex, smi)))
-
-         # remove punctuation
-         masked_tokens = [re.sub(punctuation_regex,'',s) for s in tokens]
-
-         k = 0
-         token_pos = []
-         token_rot = []
-
-         frames = get_local_frames(mol)
-
-         for i,token in enumerate(masked_tokens):
-             if token != '':
-                 token_pos.append(tuple(mol.GetConformer().GetAtomPosition(atom_order[k])))
-                 token_rot.append(frames[atom_order[k]].flatten().tolist())
-                 k += 1
-             else:
-                 token_pos.append((np.nan, np.nan, np.nan))
-                 token_rot.append(np.eye(3).flatten().tolist())
-
-         return name, seq, smi, xyz_receptor, token_pos, token_rot, R_receptor
-
-     except Exception as e:
-         print(e)
-         return None
-
-
- if __name__ == '__main__':
-     import glob
-
-     filenames = glob.glob('data/pdbbind/v2020-other-PL/*')
-     filenames.extend(glob.glob('data/pdbbind/refined-set/*'))
-     filenames = sorted(filenames)
-     comm = MPI.COMM_WORLD
-     with MPICommExecutor(comm, root=0) as executor:
-         if executor is not None:
-             result = executor.map(parse_complex, filenames, chunksize=32)
-             result = list(result)
-             names = [r[0] for r in result if r is not None]
-             seqs = [r[1] for r in result if r is not None]
-             all_smiles = [r[2] for r in result if r is not None]
-             all_xyz_receptor = [r[3] for r in result if r is not None]
-             all_xyz_ligand = [r[4] for r in result if r is not None]
-             all_rot_ligand = [r[5] for r in result if r is not None]
-             all_rot_receptor = [r[6] for r in result if r is not None]
-
-             import pandas as pd
-             df = pd.DataFrame({'name': names, 'seq': seqs,
-                                'smiles': all_smiles,
-                                'receptor_xyz': all_xyz_receptor,
-                                'ligand_xyz': all_xyz_ligand,
-                                'ligand_rot': all_rot_ligand,
-                                'receptor_rot': all_rot_receptor})
-             df.to_parquet('data/pdbbind.parquet',index=False)
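
The token/atom correspondence that `parse_complex` builds can be exercised on its own with the two regexes from the script above; a short sketch with an illustrative SMILES:

```
import re

# regexes copied from the deleted pdbbind.py above
punctuation_regex = r"""(\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"""
molecule_regex = r"""(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"""

smi = "CC(=O)Oc1ccccc1C(=O)O"  # aspirin, for illustration
tokens = list(filter(None, re.split(molecule_regex, smi)))
# atom tokens survive punctuation removal; non-atom tokens get nan coordinates
is_atom = [re.sub(punctuation_regex, "", t) != "" for t in tokens]
print(list(zip(tokens, is_atom)))
```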
 
pdbbind.slurm DELETED
@@ -1,9 +0,0 @@
- #!/bin/bash
- #SBATCH -J preprocess_pdbbind
- #SBATCH -p batch
- #SBATCH -A STF006
- #SBATCH -t 3:00:00
- #SBATCH -N 4
- #SBATCH --ntasks-per-node=8
-
- srun python pdbbind.py
 
pdbbind_complexes.py DELETED
@@ -1,131 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """TODO: A dataset of protein sequences, ligand SMILES, and complex coordinates."""
-
- import huggingface_hub
- import os
- import pyarrow.parquet as pq
- import datasets
-
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {jglaser/pdbbind_complexes},
- author={Jens Glaser, ORNL
- },
- year={2022}
- }
- """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- A dataset to fine-tune language models on protein-ligand binding affinity and contact prediction.
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = ""
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = "BSD two-clause"
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace datasets library doesn't host the datasets but only points to the original files
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URL = "https://huggingface.co/datasets/jglaser/pdbbind_complexes/resolve/main/"
- _data_dir = "data/"
- _file_names = {'default': _data_dir+'pdbbind.parquet'}
-
- _URLs = {name: _URL+_file_names[name] for name in _file_names}
-
-
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
- class PDBBindComplexes(datasets.ArrowBasedBuilder):
-     """List of protein sequences, ligand SMILES, and complex coordinates."""
-
-     VERSION = datasets.Version("1.5.0")
-
-     def _info(self):
-         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
-         #if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-         #    features = datasets.Features(
-         #        {
-         #            "sentence": datasets.Value("string"),
-         #            "option1": datasets.Value("string"),
-         #            "answer": datasets.Value("string")
-         #            # These are the features of your dataset like images, labels ...
-         #        }
-         #    )
-         #else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
-         features = datasets.Features(
-             {
-                 "name": datasets.Value("string"),
-                 "seq": datasets.Value("string"),
-                 "smiles": datasets.Value("string"),
-                 "receptor_xyz": datasets.Sequence(datasets.Sequence(datasets.Value('float32'))),
-                 "ligand_xyz": datasets.Sequence(datasets.Sequence(datasets.Value('float32'))),
-                 "ligand_rot": datasets.Sequence(datasets.Sequence(datasets.Value('float32'))),
-                 "receptor_rot": datasets.Sequence(datasets.Sequence(datasets.Value('float32'))),
-                 # These are the features of your dataset like images, labels ...
-             }
-         )
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
-         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-         files = dl_manager.download_and_extract(_URLs)
-
-         return [
-             datasets.SplitGenerator(
-                 # These kwargs will be passed to _generate_examples
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     'filepath': files["default"],
-                 },
-             ),
-         ]
-
-     def _generate_tables(self, filepath):
-         from pyarrow import fs
-         local = fs.LocalFileSystem()
-
-         for i, f in enumerate([filepath]):
-             yield i, pq.read_table(f, filesystem=local)