arshy committed on
Commit c892f97
1 Parent(s): d1a784e

initial commit
.gitattributes CHANGED
@@ -3,6 +3,7 @@
  *.bin filter=lfs diff=lfs merge=lfs -text
  *.bz2 filter=lfs diff=lfs merge=lfs -text
  *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.csv filter=lfs diff=lfs merge=lfs -text
  *.ftz filter=lfs diff=lfs merge=lfs -text
  *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
@@ -33,3 +34,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ data/fpmmTrades.csv filter=lfs diff=lfs merge=lfs -text
+ data/fpmms.csv filter=lfs diff=lfs merge=lfs -text
+ data/requests.csv filter=lfs diff=lfs merge=lfs -text
+ data/summary_profitability.csv filter=lfs diff=lfs merge=lfs -text
+ data/tools.csv filter=lfs diff=lfs merge=lfs -text
+ data/all_trades_profitability.csv filter=lfs diff=lfs merge=lfs -text
+ data/delivers.csv filter=lfs diff=lfs merge=lfs -text
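The hunk above routes *.csv (and the specific data/ artifacts listed further down in this commit) through Git LFS, so a checkout without the LFS objects contains three-line pointer stubs instead of real CSVs. A minimal sketch for spotting that case before trying to parse such a file; data/tools.csv is simply one of the files tracked here:

# Hedged sketch: returns True when a checked-out file is still a Git LFS
# pointer stub (the "version/oid/size" lines added later in this commit)
# rather than the actual payload.
from pathlib import Path

LFS_SPEC_PREFIX = "version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    try:
        with Path(path).open("r", errors="ignore") as handle:
            return handle.readline().strip().startswith(LFS_SPEC_PREFIX)
    except OSError:
        return False

print(is_lfs_pointer("data/tools.csv"))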
.gitignore ADDED
@@ -0,0 +1,160 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
app.py ADDED
@@ -0,0 +1,311 @@
+ import subprocess
+ import logging
+ import gradio as gr
+ import pandas as pd
+ from apscheduler.schedulers.background import BackgroundScheduler
+ from apscheduler.triggers.cron import CronTrigger
+ from pytz import utc
+ from tabs.trades import (
+     prepare_trades,
+     get_overall_trades,
+     get_overall_winning_trades,
+     plot_trades_by_week,
+     plot_winning_trades_by_week,
+     plot_trade_details
+ )
+ from tabs.tool_win import (
+     get_tool_winning_rate,
+     get_overall_winning_rate,
+     plot_tool_winnings_overall,
+     plot_tool_winnings_by_tool
+ )
+ from tabs.error import (
+     get_error_data,
+     get_error_data_overall,
+     plot_error_data,
+     plot_tool_error_data,
+     plot_week_error_data
+ )
+ from tabs.about import about_olas_predict
+
+
+ def refresh_data():
+     # Run the pull_data.py script and wait for it to finish
+     try:
+         result = subprocess.run(["python", "./scripts/pull_data.py"], check=True)
+         logging.info("Script executed successfully: %s", result)
+     except subprocess.CalledProcessError as e:
+         logging.error("Failed to run script: %s", e)
+         return  # Stop execution if the script fails
+
+     # Reload dataframes
+     try:
+         global tools_df, trades_df, error_df, error_overall_df, winning_rate_df, winning_rate_overall_df, trades_count_df, trades_winning_rate_df
+         logging.info("Refreshing data...")
+         tools_df = pd.read_csv("./data/tools.csv", low_memory=False)
+         trades_df = pd.read_csv("./data/all_trades_profitability.csv")
+         trades_df = prepare_trades(trades_df)
+         error_df = get_error_data(tools_df=tools_df, inc_tools=INC_TOOLS)
+         error_overall_df = get_error_data_overall(error_df=error_df)
+         winning_rate_df = get_tool_winning_rate(tools_df=tools_df, inc_tools=INC_TOOLS)
+         winning_rate_overall_df = get_overall_winning_rate(wins_df=winning_rate_df)
+         trades_count_df = get_overall_trades(trades_df=trades_df)
+         trades_winning_rate_df = get_overall_winning_trades(trades_df=trades_df)
+         logging.info("Data refreshed.")
+     except Exception as e:
+         logging.error("Failed to refresh data: %s", e)
+
+ tools_df = pd.read_csv("./data/tools.csv", low_memory=False)
+ trades_df = pd.read_csv("./data/all_trades_profitability.csv")
+ trades_df = prepare_trades(trades_df)
+
+ demo = gr.Blocks()
+
+ INC_TOOLS = [
+     'prediction-online',
+     'prediction-offline',
+     'claude-prediction-online',
+     'claude-prediction-offline',
+     'prediction-offline-sme',
+     'prediction-online-sme',
+     'prediction-request-rag',
+     'prediction-request-reasoning',
+     'prediction-url-cot-claude',
+     'prediction-request-rag-claude',
+     'prediction-request-reasoning-claude'
+ ]
+
+
+ # TOOLS DATA
+ error_df = get_error_data(
+     tools_df=tools_df,
+     inc_tools=INC_TOOLS
+ )
+ error_overall_df = get_error_data_overall(
+     error_df=error_df
+ )
+ winning_rate_df = get_tool_winning_rate(
+     tools_df=tools_df,
+     inc_tools=INC_TOOLS
+ )
+ winning_rate_overall_df = get_overall_winning_rate(
+     wins_df=winning_rate_df
+ )
+ trades_count_df = get_overall_trades(
+     trades_df=trades_df
+ )
+ trades_winning_rate_df = get_overall_winning_trades(
+     trades_df=trades_df
+ )
+
+ with demo:
+     gr.HTML("<h1>Olas Predict Actual Performance</h1>")
+     gr.Markdown("This app shows the actual performance of Olas Predict tools on the live market.")
+
+     with gr.Tabs():
+         with gr.TabItem("🔥Trades Dashboard"):
+             with gr.Row():
+                 gr.Markdown("# Plot of number of trades by week")
+             with gr.Row():
+                 plot_trades_by_week = plot_trades_by_week(
+                     trades_df=trades_count_df
+                 )
+             with gr.Row():
+                 gr.Markdown("# Plot of winning trades by week")
+             with gr.Row():
+                 plot_winning_trades_by_week = plot_winning_trades_by_week(
+                     trades_df=trades_winning_rate_df
+                 )
+             with gr.Row():
+                 gr.Markdown("# Plot of trade details")
+             with gr.Row():
+                 trade_details_selector = gr.Dropdown(
+                     label="Select a trade",
+                     choices=[
+                         "mech calls",
+                         "collateral amount",
+                         "earnings",
+                         "net earnings",
+                         "ROI"
+                     ],
+                     value="mech calls"
+                 )
+             with gr.Row():
+                 trade_details_plot = plot_trade_details(
+                     trade_detail="mech calls",
+                     trades_df=trades_df
+                 )
+
+             def update_trade_details(trade_detail):
+                 return plot_trade_details(
+                     trade_detail=trade_detail,
+                     trades_df=trades_df
+                 )
+
+             trade_details_selector.change(
+                 update_trade_details,
+                 inputs=trade_details_selector,
+                 outputs=trade_details_plot
+             )
+
+             with gr.Row():
+                 trade_details_selector
+             with gr.Row():
+                 trade_details_plot
+
+         with gr.TabItem("🚀 Tool Winning Dashboard"):
+             with gr.Row():
+                 gr.Markdown("# Plot showing overall winning rate")
+
+             with gr.Row():
+                 winning_selector = gr.Dropdown(
+                     label="Select Metric",
+                     choices=['losses', 'wins', 'total_request', 'win_perc'],
+                     value='win_perc',
+                 )
+
+             with gr.Row():
+                 winning_plot = plot_tool_winnings_overall(
+                     wins_df=winning_rate_overall_df,
+                     winning_selector="win_perc"
+                 )
+
+             def update_tool_winnings_overall_plot(winning_selector):
+                 return plot_tool_winnings_overall(
+                     wins_df=winning_rate_overall_df,
+                     winning_selector=winning_selector
+                 )
+
+             winning_selector.change(
+                 update_tool_winnings_overall_plot,
+                 inputs=winning_selector,
+                 outputs=winning_plot
+             )
+
+             with gr.Row():
+                 winning_selector
+             with gr.Row():
+                 winning_plot
+
+             with gr.Row():
+                 gr.Markdown("# Plot showing winning rate by tool")
+
+             with gr.Row():
+                 sel_tool = gr.Dropdown(
+                     label="Select a tool",
+                     choices=INC_TOOLS,
+                     value=INC_TOOLS[0]
+                 )
+
+             with gr.Row():
+                 plot_tool_win_rate = plot_tool_winnings_by_tool(
+                     wins_df=winning_rate_df,
+                     tool=INC_TOOLS[0]
+                 )
+
+             def update_tool_winnings_by_tool_plot(tool):
+                 return plot_tool_winnings_by_tool(
+                     wins_df=winning_rate_df,
+                     tool=tool
+                 )
+
+             sel_tool.change(
+                 update_tool_winnings_by_tool_plot,
+                 inputs=sel_tool,
+                 outputs=plot_tool_win_rate
+             )
+
+             with gr.Row():
+                 sel_tool
+             with gr.Row():
+                 plot_tool_win_rate
+
+         with gr.TabItem("🏥 Tool Error Dashboard"):
+             with gr.Row():
+                 gr.Markdown("# Plot showing overall error")
+             with gr.Row():
+                 plot_error_data(
+                     error_all_df=error_overall_df
+                 )
+             with gr.Row():
+                 gr.Markdown("# Plot showing error by tool")
+             with gr.Row():
+                 sel_tool = gr.Dropdown(
+                     label="Select a tool",
+                     choices=INC_TOOLS,
+                     value=INC_TOOLS[0]
+                 )
+
+             with gr.Row():
+                 plot_tool_error = plot_tool_error_data(
+                     error_df=error_df,
+                     tool=INC_TOOLS[0]
+                 )
+
+
+             def update_tool_error_plot(tool):
+                 return plot_tool_error_data(
+                     error_df=error_df,
+                     tool=tool
+                 )
+
+             sel_tool.change(
+                 update_tool_error_plot,
+                 inputs=sel_tool,
+                 outputs=plot_tool_error
+             )
+             with gr.Row():
+                 sel_tool
+             with gr.Row():
+                 plot_tool_error
+
+             with gr.Row():
+                 gr.Markdown("# Plot showing error by week")
+
+             with gr.Row():
+                 choices = error_overall_df['request_month_year_week'].unique().tolist()
+                 # sort the choices by the latest week to be on the top
+                 choices = sorted(choices)
+                 sel_week = gr.Dropdown(
+                     label="Select a week",
+                     choices=choices,
+                     value=choices[-1]
+                 )
+
+             with gr.Row():
+                 plot_week_error = plot_week_error_data(
+                     error_df=error_df,
+                     week=choices[-1]
+                 )
+
+             def update_week_error_plot(selected_week):
+                 return plot_week_error_data(
+                     error_df=error_df,
+                     week=selected_week
+                 )
+
+             sel_tool.change(update_tool_error_plot, inputs=sel_tool, outputs=plot_tool_error)
+             sel_week.change(update_week_error_plot, inputs=sel_week, outputs=plot_week_error)
+
+             with gr.Row():
+                 sel_tool
+             with gr.Row():
+                 plot_tool_error
+             with gr.Row():
+                 sel_week
+             with gr.Row():
+                 plot_week_error
+
+         with gr.TabItem("ℹ️ About"):
+             with gr.Accordion("About Olas Predict"):
+                 gr.Markdown(about_olas_predict)
+
+ # Create the scheduler
+ scheduler = BackgroundScheduler(timezone=utc)
+ scheduler.add_job(refresh_data, CronTrigger(hour=0, minute=0))  # Runs daily at 12 AM UTC
+ scheduler.start()
+ # scheduler = BackgroundScheduler(timezone=utc)
+ # scheduler.add_job(refresh_data, CronTrigger(hour='*'))  # Runs every hour
+ # scheduler.start()
+
+ demo.queue(default_concurrency_limit=40).launch()
contracts/new_mech_abi.json ADDED
@@ -0,0 +1,718 @@
1
+ [
2
+ {
3
+ "inputs": [
4
+ {
5
+ "internalType": "address",
6
+ "name": "_token",
7
+ "type": "address"
8
+ },
9
+ {
10
+ "internalType": "uint256",
11
+ "name": "_tokenId",
12
+ "type": "uint256"
13
+ },
14
+ {
15
+ "internalType": "uint256",
16
+ "name": "_price",
17
+ "type": "uint256"
18
+ }
19
+ ],
20
+ "stateMutability": "nonpayable",
21
+ "type": "constructor"
22
+ },
23
+ {
24
+ "inputs": [
25
+ {
26
+ "internalType": "uint256",
27
+ "name": "agentId",
28
+ "type": "uint256"
29
+ }
30
+ ],
31
+ "name": "AgentNotFound",
32
+ "type": "error"
33
+ },
34
+ {
35
+ "inputs": [
36
+ {
37
+ "internalType": "uint256",
38
+ "name": "provided",
39
+ "type": "uint256"
40
+ },
41
+ {
42
+ "internalType": "uint256",
43
+ "name": "expected",
44
+ "type": "uint256"
45
+ }
46
+ ],
47
+ "name": "NotEnoughPaid",
48
+ "type": "error"
49
+ },
50
+ {
51
+ "inputs": [
52
+ {
53
+ "internalType": "uint256",
54
+ "name": "provided",
55
+ "type": "uint256"
56
+ },
57
+ {
58
+ "internalType": "uint256",
59
+ "name": "max",
60
+ "type": "uint256"
61
+ }
62
+ ],
63
+ "name": "Overflow",
64
+ "type": "error"
65
+ },
66
+ {
67
+ "inputs": [
68
+ {
69
+ "internalType": "uint256",
70
+ "name": "requestId",
71
+ "type": "uint256"
72
+ }
73
+ ],
74
+ "name": "RequestIdNotFound",
75
+ "type": "error"
76
+ },
77
+ {
78
+ "inputs": [],
79
+ "name": "ZeroAddress",
80
+ "type": "error"
81
+ },
82
+ {
83
+ "anonymous": false,
84
+ "inputs": [
85
+ {
86
+ "indexed": true,
87
+ "internalType": "address",
88
+ "name": "sender",
89
+ "type": "address"
90
+ },
91
+ {
92
+ "indexed": false,
93
+ "internalType": "uint256",
94
+ "name": "requestId",
95
+ "type": "uint256"
96
+ },
97
+ {
98
+ "indexed": false,
99
+ "internalType": "bytes",
100
+ "name": "data",
101
+ "type": "bytes"
102
+ }
103
+ ],
104
+ "name": "Deliver",
105
+ "type": "event"
106
+ },
107
+ {
108
+ "anonymous": false,
109
+ "inputs": [
110
+ {
111
+ "indexed": false,
112
+ "internalType": "uint256",
113
+ "name": "price",
114
+ "type": "uint256"
115
+ }
116
+ ],
117
+ "name": "PriceUpdated",
118
+ "type": "event"
119
+ },
120
+ {
121
+ "anonymous": false,
122
+ "inputs": [
123
+ {
124
+ "indexed": true,
125
+ "internalType": "address",
126
+ "name": "sender",
127
+ "type": "address"
128
+ },
129
+ {
130
+ "indexed": false,
131
+ "internalType": "uint256",
132
+ "name": "requestId",
133
+ "type": "uint256"
134
+ },
135
+ {
136
+ "indexed": false,
137
+ "internalType": "bytes",
138
+ "name": "data",
139
+ "type": "bytes"
140
+ }
141
+ ],
142
+ "name": "Request",
143
+ "type": "event"
144
+ },
145
+ {
146
+ "inputs": [
147
+ {
148
+ "internalType": "uint256",
149
+ "name": "requestId",
150
+ "type": "uint256"
151
+ },
152
+ {
153
+ "internalType": "bytes",
154
+ "name": "data",
155
+ "type": "bytes"
156
+ }
157
+ ],
158
+ "name": "deliver",
159
+ "outputs": [],
160
+ "stateMutability": "nonpayable",
161
+ "type": "function"
162
+ },
163
+ {
164
+ "inputs": [],
165
+ "name": "entryPoint",
166
+ "outputs": [
167
+ {
168
+ "internalType": "contract IEntryPoint",
169
+ "name": "",
170
+ "type": "address"
171
+ }
172
+ ],
173
+ "stateMutability": "view",
174
+ "type": "function"
175
+ },
176
+ {
177
+ "inputs": [
178
+ {
179
+ "internalType": "address",
180
+ "name": "to",
181
+ "type": "address"
182
+ },
183
+ {
184
+ "internalType": "uint256",
185
+ "name": "value",
186
+ "type": "uint256"
187
+ },
188
+ {
189
+ "internalType": "bytes",
190
+ "name": "data",
191
+ "type": "bytes"
192
+ },
193
+ {
194
+ "internalType": "enum Enum.Operation",
195
+ "name": "operation",
196
+ "type": "uint8"
197
+ },
198
+ {
199
+ "internalType": "uint256",
200
+ "name": "txGas",
201
+ "type": "uint256"
202
+ }
203
+ ],
204
+ "name": "exec",
205
+ "outputs": [
206
+ {
207
+ "internalType": "bytes",
208
+ "name": "returnData",
209
+ "type": "bytes"
210
+ }
211
+ ],
212
+ "stateMutability": "nonpayable",
213
+ "type": "function"
214
+ },
215
+ {
216
+ "inputs": [
217
+ {
218
+ "internalType": "address",
219
+ "name": "account",
220
+ "type": "address"
221
+ },
222
+ {
223
+ "internalType": "bytes",
224
+ "name": "data",
225
+ "type": "bytes"
226
+ }
227
+ ],
228
+ "name": "getRequestId",
229
+ "outputs": [
230
+ {
231
+ "internalType": "uint256",
232
+ "name": "requestId",
233
+ "type": "uint256"
234
+ }
235
+ ],
236
+ "stateMutability": "pure",
237
+ "type": "function"
238
+ },
239
+ {
240
+ "inputs": [
241
+ {
242
+ "internalType": "address",
243
+ "name": "account",
244
+ "type": "address"
245
+ }
246
+ ],
247
+ "name": "getRequestsCount",
248
+ "outputs": [
249
+ {
250
+ "internalType": "uint256",
251
+ "name": "requestsCount",
252
+ "type": "uint256"
253
+ }
254
+ ],
255
+ "stateMutability": "view",
256
+ "type": "function"
257
+ },
258
+ {
259
+ "inputs": [
260
+ {
261
+ "internalType": "uint256",
262
+ "name": "size",
263
+ "type": "uint256"
264
+ },
265
+ {
266
+ "internalType": "uint256",
267
+ "name": "offset",
268
+ "type": "uint256"
269
+ }
270
+ ],
271
+ "name": "getUndeliveredRequestIds",
272
+ "outputs": [
273
+ {
274
+ "internalType": "uint256[]",
275
+ "name": "requestIds",
276
+ "type": "uint256[]"
277
+ }
278
+ ],
279
+ "stateMutability": "view",
280
+ "type": "function"
281
+ },
282
+ {
283
+ "inputs": [
284
+ {
285
+ "internalType": "address",
286
+ "name": "signer",
287
+ "type": "address"
288
+ }
289
+ ],
290
+ "name": "isOperator",
291
+ "outputs": [
292
+ {
293
+ "internalType": "bool",
294
+ "name": "",
295
+ "type": "bool"
296
+ }
297
+ ],
298
+ "stateMutability": "view",
299
+ "type": "function"
300
+ },
301
+ {
302
+ "inputs": [
303
+ {
304
+ "internalType": "bytes32",
305
+ "name": "hash",
306
+ "type": "bytes32"
307
+ },
308
+ {
309
+ "internalType": "bytes",
310
+ "name": "signature",
311
+ "type": "bytes"
312
+ }
313
+ ],
314
+ "name": "isValidSignature",
315
+ "outputs": [
316
+ {
317
+ "internalType": "bytes4",
318
+ "name": "magicValue",
319
+ "type": "bytes4"
320
+ }
321
+ ],
322
+ "stateMutability": "view",
323
+ "type": "function"
324
+ },
325
+ {
326
+ "inputs": [
327
+ {
328
+ "internalType": "uint256",
329
+ "name": "",
330
+ "type": "uint256"
331
+ },
332
+ {
333
+ "internalType": "uint256",
334
+ "name": "",
335
+ "type": "uint256"
336
+ }
337
+ ],
338
+ "name": "mapRequestIds",
339
+ "outputs": [
340
+ {
341
+ "internalType": "uint256",
342
+ "name": "",
343
+ "type": "uint256"
344
+ }
345
+ ],
346
+ "stateMutability": "view",
347
+ "type": "function"
348
+ },
349
+ {
350
+ "inputs": [
351
+ {
352
+ "internalType": "address",
353
+ "name": "",
354
+ "type": "address"
355
+ }
356
+ ],
357
+ "name": "mapRequestsCounts",
358
+ "outputs": [
359
+ {
360
+ "internalType": "uint256",
361
+ "name": "",
362
+ "type": "uint256"
363
+ }
364
+ ],
365
+ "stateMutability": "view",
366
+ "type": "function"
367
+ },
368
+ {
369
+ "inputs": [],
370
+ "name": "nonce",
371
+ "outputs": [
372
+ {
373
+ "internalType": "uint256",
374
+ "name": "",
375
+ "type": "uint256"
376
+ }
377
+ ],
378
+ "stateMutability": "view",
379
+ "type": "function"
380
+ },
381
+ {
382
+ "inputs": [],
383
+ "name": "numUndeliveredRequests",
384
+ "outputs": [
385
+ {
386
+ "internalType": "uint256",
387
+ "name": "",
388
+ "type": "uint256"
389
+ }
390
+ ],
391
+ "stateMutability": "view",
392
+ "type": "function"
393
+ },
394
+ {
395
+ "inputs": [
396
+ {
397
+ "internalType": "address",
398
+ "name": "",
399
+ "type": "address"
400
+ },
401
+ {
402
+ "internalType": "address",
403
+ "name": "",
404
+ "type": "address"
405
+ },
406
+ {
407
+ "internalType": "uint256[]",
408
+ "name": "",
409
+ "type": "uint256[]"
410
+ },
411
+ {
412
+ "internalType": "uint256[]",
413
+ "name": "",
414
+ "type": "uint256[]"
415
+ },
416
+ {
417
+ "internalType": "bytes",
418
+ "name": "",
419
+ "type": "bytes"
420
+ }
421
+ ],
422
+ "name": "onERC1155BatchReceived",
423
+ "outputs": [
424
+ {
425
+ "internalType": "bytes4",
426
+ "name": "",
427
+ "type": "bytes4"
428
+ }
429
+ ],
430
+ "stateMutability": "pure",
431
+ "type": "function"
432
+ },
433
+ {
434
+ "inputs": [
435
+ {
436
+ "internalType": "address",
437
+ "name": "",
438
+ "type": "address"
439
+ },
440
+ {
441
+ "internalType": "address",
442
+ "name": "",
443
+ "type": "address"
444
+ },
445
+ {
446
+ "internalType": "uint256",
447
+ "name": "",
448
+ "type": "uint256"
449
+ },
450
+ {
451
+ "internalType": "uint256",
452
+ "name": "",
453
+ "type": "uint256"
454
+ },
455
+ {
456
+ "internalType": "bytes",
457
+ "name": "",
458
+ "type": "bytes"
459
+ }
460
+ ],
461
+ "name": "onERC1155Received",
462
+ "outputs": [
463
+ {
464
+ "internalType": "bytes4",
465
+ "name": "",
466
+ "type": "bytes4"
467
+ }
468
+ ],
469
+ "stateMutability": "pure",
470
+ "type": "function"
471
+ },
472
+ {
473
+ "inputs": [
474
+ {
475
+ "internalType": "address",
476
+ "name": "",
477
+ "type": "address"
478
+ },
479
+ {
480
+ "internalType": "address",
481
+ "name": "",
482
+ "type": "address"
483
+ },
484
+ {
485
+ "internalType": "uint256",
486
+ "name": "",
487
+ "type": "uint256"
488
+ },
489
+ {
490
+ "internalType": "bytes",
491
+ "name": "",
492
+ "type": "bytes"
493
+ }
494
+ ],
495
+ "name": "onERC721Received",
496
+ "outputs": [
497
+ {
498
+ "internalType": "bytes4",
499
+ "name": "",
500
+ "type": "bytes4"
501
+ }
502
+ ],
503
+ "stateMutability": "pure",
504
+ "type": "function"
505
+ },
506
+ {
507
+ "inputs": [],
508
+ "name": "price",
509
+ "outputs": [
510
+ {
511
+ "internalType": "uint256",
512
+ "name": "",
513
+ "type": "uint256"
514
+ }
515
+ ],
516
+ "stateMutability": "view",
517
+ "type": "function"
518
+ },
519
+ {
520
+ "inputs": [
521
+ {
522
+ "internalType": "bytes",
523
+ "name": "data",
524
+ "type": "bytes"
525
+ }
526
+ ],
527
+ "name": "request",
528
+ "outputs": [
529
+ {
530
+ "internalType": "uint256",
531
+ "name": "requestId",
532
+ "type": "uint256"
533
+ }
534
+ ],
535
+ "stateMutability": "payable",
536
+ "type": "function"
537
+ },
538
+ {
539
+ "inputs": [
540
+ {
541
+ "internalType": "uint256",
542
+ "name": "newPrice",
543
+ "type": "uint256"
544
+ }
545
+ ],
546
+ "name": "setPrice",
547
+ "outputs": [],
548
+ "stateMutability": "nonpayable",
549
+ "type": "function"
550
+ },
551
+ {
552
+ "inputs": [
553
+ {
554
+ "internalType": "bytes",
555
+ "name": "initParams",
556
+ "type": "bytes"
557
+ }
558
+ ],
559
+ "name": "setUp",
560
+ "outputs": [],
561
+ "stateMutability": "nonpayable",
562
+ "type": "function"
563
+ },
564
+ {
565
+ "inputs": [],
566
+ "name": "token",
567
+ "outputs": [
568
+ {
569
+ "internalType": "contract IERC721",
570
+ "name": "",
571
+ "type": "address"
572
+ }
573
+ ],
574
+ "stateMutability": "view",
575
+ "type": "function"
576
+ },
577
+ {
578
+ "inputs": [],
579
+ "name": "tokenId",
580
+ "outputs": [
581
+ {
582
+ "internalType": "uint256",
583
+ "name": "",
584
+ "type": "uint256"
585
+ }
586
+ ],
587
+ "stateMutability": "view",
588
+ "type": "function"
589
+ },
590
+ {
591
+ "inputs": [
592
+ {
593
+ "internalType": "address",
594
+ "name": "",
595
+ "type": "address"
596
+ },
597
+ {
598
+ "internalType": "address",
599
+ "name": "",
600
+ "type": "address"
601
+ },
602
+ {
603
+ "internalType": "address",
604
+ "name": "",
605
+ "type": "address"
606
+ },
607
+ {
608
+ "internalType": "uint256",
609
+ "name": "",
610
+ "type": "uint256"
611
+ },
612
+ {
613
+ "internalType": "bytes",
614
+ "name": "",
615
+ "type": "bytes"
616
+ },
617
+ {
618
+ "internalType": "bytes",
619
+ "name": "",
620
+ "type": "bytes"
621
+ }
622
+ ],
623
+ "name": "tokensReceived",
624
+ "outputs": [],
625
+ "stateMutability": "pure",
626
+ "type": "function"
627
+ },
628
+ {
629
+ "inputs": [
630
+ {
631
+ "components": [
632
+ {
633
+ "internalType": "address",
634
+ "name": "sender",
635
+ "type": "address"
636
+ },
637
+ {
638
+ "internalType": "uint256",
639
+ "name": "nonce",
640
+ "type": "uint256"
641
+ },
642
+ {
643
+ "internalType": "bytes",
644
+ "name": "initCode",
645
+ "type": "bytes"
646
+ },
647
+ {
648
+ "internalType": "bytes",
649
+ "name": "callData",
650
+ "type": "bytes"
651
+ },
652
+ {
653
+ "internalType": "uint256",
654
+ "name": "callGasLimit",
655
+ "type": "uint256"
656
+ },
657
+ {
658
+ "internalType": "uint256",
659
+ "name": "verificationGasLimit",
660
+ "type": "uint256"
661
+ },
662
+ {
663
+ "internalType": "uint256",
664
+ "name": "preVerificationGas",
665
+ "type": "uint256"
666
+ },
667
+ {
668
+ "internalType": "uint256",
669
+ "name": "maxFeePerGas",
670
+ "type": "uint256"
671
+ },
672
+ {
673
+ "internalType": "uint256",
674
+ "name": "maxPriorityFeePerGas",
675
+ "type": "uint256"
676
+ },
677
+ {
678
+ "internalType": "bytes",
679
+ "name": "paymasterAndData",
680
+ "type": "bytes"
681
+ },
682
+ {
683
+ "internalType": "bytes",
684
+ "name": "signature",
685
+ "type": "bytes"
686
+ }
687
+ ],
688
+ "internalType": "struct UserOperation",
689
+ "name": "userOp",
690
+ "type": "tuple"
691
+ },
692
+ {
693
+ "internalType": "bytes32",
694
+ "name": "userOpHash",
695
+ "type": "bytes32"
696
+ },
697
+ {
698
+ "internalType": "uint256",
699
+ "name": "missingAccountFunds",
700
+ "type": "uint256"
701
+ }
702
+ ],
703
+ "name": "validateUserOp",
704
+ "outputs": [
705
+ {
706
+ "internalType": "uint256",
707
+ "name": "validationData",
708
+ "type": "uint256"
709
+ }
710
+ ],
711
+ "stateMutability": "nonpayable",
712
+ "type": "function"
713
+ },
714
+ {
715
+ "stateMutability": "payable",
716
+ "type": "receive"
717
+ }
718
+ ]
contracts/old_mech_abi.json ADDED
@@ -0,0 +1,605 @@
1
+ [
2
+ {
3
+ "inputs": [
4
+ {
5
+ "internalType": "address",
6
+ "name": "_token",
7
+ "type": "address"
8
+ },
9
+ {
10
+ "internalType": "uint256",
11
+ "name": "_tokenId",
12
+ "type": "uint256"
13
+ },
14
+ {
15
+ "internalType": "uint256",
16
+ "name": "_price",
17
+ "type": "uint256"
18
+ }
19
+ ],
20
+ "stateMutability": "nonpayable",
21
+ "type": "constructor"
22
+ },
23
+ {
24
+ "inputs": [
25
+ {
26
+ "internalType": "uint256",
27
+ "name": "agentId",
28
+ "type": "uint256"
29
+ }
30
+ ],
31
+ "name": "AgentNotFound",
32
+ "type": "error"
33
+ },
34
+ {
35
+ "inputs": [
36
+ {
37
+ "internalType": "uint256",
38
+ "name": "provided",
39
+ "type": "uint256"
40
+ },
41
+ {
42
+ "internalType": "uint256",
43
+ "name": "expected",
44
+ "type": "uint256"
45
+ }
46
+ ],
47
+ "name": "NotEnoughPaid",
48
+ "type": "error"
49
+ },
50
+ {
51
+ "inputs": [],
52
+ "name": "ZeroAddress",
53
+ "type": "error"
54
+ },
55
+ {
56
+ "anonymous": false,
57
+ "inputs": [
58
+ {
59
+ "indexed": false,
60
+ "internalType": "uint256",
61
+ "name": "requestId",
62
+ "type": "uint256"
63
+ },
64
+ {
65
+ "indexed": false,
66
+ "internalType": "bytes",
67
+ "name": "data",
68
+ "type": "bytes"
69
+ }
70
+ ],
71
+ "name": "Deliver",
72
+ "type": "event"
73
+ },
74
+ {
75
+ "anonymous": false,
76
+ "inputs": [
77
+ {
78
+ "indexed": true,
79
+ "internalType": "address",
80
+ "name": "sender",
81
+ "type": "address"
82
+ },
83
+ {
84
+ "indexed": false,
85
+ "internalType": "bytes32",
86
+ "name": "taskHash",
87
+ "type": "bytes32"
88
+ }
89
+ ],
90
+ "name": "Perform",
91
+ "type": "event"
92
+ },
93
+ {
94
+ "anonymous": false,
95
+ "inputs": [
96
+ {
97
+ "indexed": false,
98
+ "internalType": "uint256",
99
+ "name": "price",
100
+ "type": "uint256"
101
+ }
102
+ ],
103
+ "name": "PriceUpdated",
104
+ "type": "event"
105
+ },
106
+ {
107
+ "anonymous": false,
108
+ "inputs": [
109
+ {
110
+ "indexed": true,
111
+ "internalType": "address",
112
+ "name": "sender",
113
+ "type": "address"
114
+ },
115
+ {
116
+ "indexed": false,
117
+ "internalType": "uint256",
118
+ "name": "requestId",
119
+ "type": "uint256"
120
+ },
121
+ {
122
+ "indexed": false,
123
+ "internalType": "bytes",
124
+ "name": "data",
125
+ "type": "bytes"
126
+ }
127
+ ],
128
+ "name": "Request",
129
+ "type": "event"
130
+ },
131
+ {
132
+ "inputs": [
133
+ {
134
+ "internalType": "uint256",
135
+ "name": "requestId",
136
+ "type": "uint256"
137
+ },
138
+ {
139
+ "internalType": "bytes",
140
+ "name": "data",
141
+ "type": "bytes"
142
+ }
143
+ ],
144
+ "name": "deliver",
145
+ "outputs": [],
146
+ "stateMutability": "nonpayable",
147
+ "type": "function"
148
+ },
149
+ {
150
+ "inputs": [],
151
+ "name": "entryPoint",
152
+ "outputs": [
153
+ {
154
+ "internalType": "contract IEntryPoint",
155
+ "name": "",
156
+ "type": "address"
157
+ }
158
+ ],
159
+ "stateMutability": "view",
160
+ "type": "function"
161
+ },
162
+ {
163
+ "inputs": [
164
+ {
165
+ "internalType": "address",
166
+ "name": "to",
167
+ "type": "address"
168
+ },
169
+ {
170
+ "internalType": "uint256",
171
+ "name": "value",
172
+ "type": "uint256"
173
+ },
174
+ {
175
+ "internalType": "bytes",
176
+ "name": "data",
177
+ "type": "bytes"
178
+ },
179
+ {
180
+ "internalType": "enum Enum.Operation",
181
+ "name": "operation",
182
+ "type": "uint8"
183
+ },
184
+ {
185
+ "internalType": "uint256",
186
+ "name": "txGas",
187
+ "type": "uint256"
188
+ }
189
+ ],
190
+ "name": "exec",
191
+ "outputs": [
192
+ {
193
+ "internalType": "bytes",
194
+ "name": "returnData",
195
+ "type": "bytes"
196
+ }
197
+ ],
198
+ "stateMutability": "nonpayable",
199
+ "type": "function"
200
+ },
201
+ {
202
+ "inputs": [
203
+ {
204
+ "internalType": "address",
205
+ "name": "account",
206
+ "type": "address"
207
+ },
208
+ {
209
+ "internalType": "bytes",
210
+ "name": "data",
211
+ "type": "bytes"
212
+ }
213
+ ],
214
+ "name": "getRequestId",
215
+ "outputs": [
216
+ {
217
+ "internalType": "uint256",
218
+ "name": "requestId",
219
+ "type": "uint256"
220
+ }
221
+ ],
222
+ "stateMutability": "pure",
223
+ "type": "function"
224
+ },
225
+ {
226
+ "inputs": [
227
+ {
228
+ "internalType": "address",
229
+ "name": "signer",
230
+ "type": "address"
231
+ }
232
+ ],
233
+ "name": "isOperator",
234
+ "outputs": [
235
+ {
236
+ "internalType": "bool",
237
+ "name": "",
238
+ "type": "bool"
239
+ }
240
+ ],
241
+ "stateMutability": "view",
242
+ "type": "function"
243
+ },
244
+ {
245
+ "inputs": [
246
+ {
247
+ "internalType": "bytes32",
248
+ "name": "hash",
249
+ "type": "bytes32"
250
+ },
251
+ {
252
+ "internalType": "bytes",
253
+ "name": "signature",
254
+ "type": "bytes"
255
+ }
256
+ ],
257
+ "name": "isValidSignature",
258
+ "outputs": [
259
+ {
260
+ "internalType": "bytes4",
261
+ "name": "magicValue",
262
+ "type": "bytes4"
263
+ }
264
+ ],
265
+ "stateMutability": "view",
266
+ "type": "function"
267
+ },
268
+ {
269
+ "inputs": [],
270
+ "name": "nonce",
271
+ "outputs": [
272
+ {
273
+ "internalType": "uint256",
274
+ "name": "",
275
+ "type": "uint256"
276
+ }
277
+ ],
278
+ "stateMutability": "view",
279
+ "type": "function"
280
+ },
281
+ {
282
+ "inputs": [
283
+ {
284
+ "internalType": "address",
285
+ "name": "",
286
+ "type": "address"
287
+ },
288
+ {
289
+ "internalType": "address",
290
+ "name": "",
291
+ "type": "address"
292
+ },
293
+ {
294
+ "internalType": "uint256[]",
295
+ "name": "",
296
+ "type": "uint256[]"
297
+ },
298
+ {
299
+ "internalType": "uint256[]",
300
+ "name": "",
301
+ "type": "uint256[]"
302
+ },
303
+ {
304
+ "internalType": "bytes",
305
+ "name": "",
306
+ "type": "bytes"
307
+ }
308
+ ],
309
+ "name": "onERC1155BatchReceived",
310
+ "outputs": [
311
+ {
312
+ "internalType": "bytes4",
313
+ "name": "",
314
+ "type": "bytes4"
315
+ }
316
+ ],
317
+ "stateMutability": "pure",
318
+ "type": "function"
319
+ },
320
+ {
321
+ "inputs": [
322
+ {
323
+ "internalType": "address",
324
+ "name": "",
325
+ "type": "address"
326
+ },
327
+ {
328
+ "internalType": "address",
329
+ "name": "",
330
+ "type": "address"
331
+ },
332
+ {
333
+ "internalType": "uint256",
334
+ "name": "",
335
+ "type": "uint256"
336
+ },
337
+ {
338
+ "internalType": "uint256",
339
+ "name": "",
340
+ "type": "uint256"
341
+ },
342
+ {
343
+ "internalType": "bytes",
344
+ "name": "",
345
+ "type": "bytes"
346
+ }
347
+ ],
348
+ "name": "onERC1155Received",
349
+ "outputs": [
350
+ {
351
+ "internalType": "bytes4",
352
+ "name": "",
353
+ "type": "bytes4"
354
+ }
355
+ ],
356
+ "stateMutability": "pure",
357
+ "type": "function"
358
+ },
359
+ {
360
+ "inputs": [
361
+ {
362
+ "internalType": "address",
363
+ "name": "",
364
+ "type": "address"
365
+ },
366
+ {
367
+ "internalType": "address",
368
+ "name": "",
369
+ "type": "address"
370
+ },
371
+ {
372
+ "internalType": "uint256",
373
+ "name": "",
374
+ "type": "uint256"
375
+ },
376
+ {
377
+ "internalType": "bytes",
378
+ "name": "",
379
+ "type": "bytes"
380
+ }
381
+ ],
382
+ "name": "onERC721Received",
383
+ "outputs": [
384
+ {
385
+ "internalType": "bytes4",
386
+ "name": "",
387
+ "type": "bytes4"
388
+ }
389
+ ],
390
+ "stateMutability": "pure",
391
+ "type": "function"
392
+ },
393
+ {
394
+ "inputs": [],
395
+ "name": "price",
396
+ "outputs": [
397
+ {
398
+ "internalType": "uint256",
399
+ "name": "",
400
+ "type": "uint256"
401
+ }
402
+ ],
403
+ "stateMutability": "view",
404
+ "type": "function"
405
+ },
406
+ {
407
+ "inputs": [
408
+ {
409
+ "internalType": "bytes",
410
+ "name": "data",
411
+ "type": "bytes"
412
+ }
413
+ ],
414
+ "name": "request",
415
+ "outputs": [
416
+ {
417
+ "internalType": "uint256",
418
+ "name": "requestId",
419
+ "type": "uint256"
420
+ }
421
+ ],
422
+ "stateMutability": "payable",
423
+ "type": "function"
424
+ },
425
+ {
426
+ "inputs": [
427
+ {
428
+ "internalType": "uint256",
429
+ "name": "newPrice",
430
+ "type": "uint256"
431
+ }
432
+ ],
433
+ "name": "setPrice",
434
+ "outputs": [],
435
+ "stateMutability": "nonpayable",
436
+ "type": "function"
437
+ },
438
+ {
439
+ "inputs": [
440
+ {
441
+ "internalType": "bytes",
442
+ "name": "initParams",
443
+ "type": "bytes"
444
+ }
445
+ ],
446
+ "name": "setUp",
447
+ "outputs": [],
448
+ "stateMutability": "nonpayable",
449
+ "type": "function"
450
+ },
451
+ {
452
+ "inputs": [],
453
+ "name": "token",
454
+ "outputs": [
455
+ {
456
+ "internalType": "contract IERC721",
457
+ "name": "",
458
+ "type": "address"
459
+ }
460
+ ],
461
+ "stateMutability": "view",
462
+ "type": "function"
463
+ },
464
+ {
465
+ "inputs": [],
466
+ "name": "tokenId",
467
+ "outputs": [
468
+ {
469
+ "internalType": "uint256",
470
+ "name": "",
471
+ "type": "uint256"
472
+ }
473
+ ],
474
+ "stateMutability": "view",
475
+ "type": "function"
476
+ },
477
+ {
478
+ "inputs": [
479
+ {
480
+ "internalType": "address",
481
+ "name": "",
482
+ "type": "address"
483
+ },
484
+ {
485
+ "internalType": "address",
486
+ "name": "",
487
+ "type": "address"
488
+ },
489
+ {
490
+ "internalType": "address",
491
+ "name": "",
492
+ "type": "address"
493
+ },
494
+ {
495
+ "internalType": "uint256",
496
+ "name": "",
497
+ "type": "uint256"
498
+ },
499
+ {
500
+ "internalType": "bytes",
501
+ "name": "",
502
+ "type": "bytes"
503
+ },
504
+ {
505
+ "internalType": "bytes",
506
+ "name": "",
507
+ "type": "bytes"
508
+ }
509
+ ],
510
+ "name": "tokensReceived",
511
+ "outputs": [],
512
+ "stateMutability": "pure",
513
+ "type": "function"
514
+ },
515
+ {
516
+ "inputs": [
517
+ {
518
+ "components": [
519
+ {
520
+ "internalType": "address",
521
+ "name": "sender",
522
+ "type": "address"
523
+ },
524
+ {
525
+ "internalType": "uint256",
526
+ "name": "nonce",
527
+ "type": "uint256"
528
+ },
529
+ {
530
+ "internalType": "bytes",
531
+ "name": "initCode",
532
+ "type": "bytes"
533
+ },
534
+ {
535
+ "internalType": "bytes",
536
+ "name": "callData",
537
+ "type": "bytes"
538
+ },
539
+ {
540
+ "internalType": "uint256",
541
+ "name": "callGasLimit",
542
+ "type": "uint256"
543
+ },
544
+ {
545
+ "internalType": "uint256",
546
+ "name": "verificationGasLimit",
547
+ "type": "uint256"
548
+ },
549
+ {
550
+ "internalType": "uint256",
551
+ "name": "preVerificationGas",
552
+ "type": "uint256"
553
+ },
554
+ {
555
+ "internalType": "uint256",
556
+ "name": "maxFeePerGas",
557
+ "type": "uint256"
558
+ },
559
+ {
560
+ "internalType": "uint256",
561
+ "name": "maxPriorityFeePerGas",
562
+ "type": "uint256"
563
+ },
564
+ {
565
+ "internalType": "bytes",
566
+ "name": "paymasterAndData",
567
+ "type": "bytes"
568
+ },
569
+ {
570
+ "internalType": "bytes",
571
+ "name": "signature",
572
+ "type": "bytes"
573
+ }
574
+ ],
575
+ "internalType": "struct UserOperation",
576
+ "name": "userOp",
577
+ "type": "tuple"
578
+ },
579
+ {
580
+ "internalType": "bytes32",
581
+ "name": "userOpHash",
582
+ "type": "bytes32"
583
+ },
584
+ {
585
+ "internalType": "uint256",
586
+ "name": "missingAccountFunds",
587
+ "type": "uint256"
588
+ }
589
+ ],
590
+ "name": "validateUserOp",
591
+ "outputs": [
592
+ {
593
+ "internalType": "uint256",
594
+ "name": "validationData",
595
+ "type": "uint256"
596
+ }
597
+ ],
598
+ "stateMutability": "nonpayable",
599
+ "type": "function"
600
+ },
601
+ {
602
+ "stateMutability": "payable",
603
+ "type": "receive"
604
+ }
605
+ ]
data/all_trades_profitability.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c33e317ca70cc8797a84900df008aca7e359beb263244ba4c808ba872d02a5b4
+ size 29242992
data/delivers.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28cdb01625315f40a2beb7849f2c792341f9f2c8acf70b622f61a5dcf76a0b57
+ size 1407762470
data/fpmmTrades.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5f3f7c2270b0d8d74f4442242e8978d803e26c082b8446ec69bf95fa2d593c5
+ size 64770123
data/fpmms.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02e46f743c4650d53fd33cb4edfc7bcaed91d738df5d1b51c5e12173aa4d5e3b
+ size 408719
data/requests.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba420bf57f185dcc186a29d84d09b2b77b209cfc8abf14c9821ca8ecdf54187a
+ size 131633578
data/summary_profitability.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a58af20c40c7b05a48809fadd3ec20812c05a0fea75c80104cd3ca67029ccc5
+ size 48347
data/t_map.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3dda22af540a436e3e36653d7c7fcca7b104fc26ca9de5c56a309f3995941bd4
+ size 6031686
data/tools.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abef4e250d6e626b362587f9394c40a274fc9f45e6e8f879b933ff7115af01c2
+ size 1503054932
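All of the data/ payloads above are Git LFS pointers, so the real files have to come from the hosting repository. A hedged sketch of fetching one of them with huggingface-hub (already listed in requirements.txt) and loading it the way app.py does; the repo id is a placeholder and repo_type="space" is an assumption about where this commit lives:

# Hedged sketch: download an LFS-backed file from the Hub and read it the way
# app.py does (low_memory=False). REPO_ID is hypothetical, not taken from this commit.
import pandas as pd
from huggingface_hub import hf_hub_download

REPO_ID = "<owner>/<repo>"  # placeholder; replace with the actual repo id

tools_path = hf_hub_download(
    repo_id=REPO_ID,
    filename="data/tools.csv",
    repo_type="space",  # assumption about the hosting repo type
)
tools_df = pd.read_csv(tools_path, low_memory=False)
print(tools_df.shape)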
nbs/test.ipynb ADDED
@@ -0,0 +1,363 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 3,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import pickle\n",
10
+ "import pandas as pd\n",
11
+ "from pathlib import Path\n",
12
+ "from web3 import Web3\n",
13
+ "from concurrent.futures import ThreadPoolExecutor\n",
14
+ "from tqdm import tqdm\n",
15
+ "from functools import partial\n",
16
+ "from datetime import datetime\n"
17
+ ]
18
+ },
19
+ {
20
+ "cell_type": "markdown",
21
+ "metadata": {},
22
+ "source": [
23
+ "### Make t_map"
24
+ ]
25
+ },
26
+ {
27
+ "cell_type": "code",
28
+ "execution_count": null,
29
+ "metadata": {},
30
+ "outputs": [],
31
+ "source": [
32
+ "tools = pd.read_csv(\"../data/tools.csv\")"
33
+ ]
34
+ },
35
+ {
36
+ "cell_type": "code",
37
+ "execution_count": null,
38
+ "metadata": {},
39
+ "outputs": [],
40
+ "source": [
41
+ "tools.columns"
42
+ ]
43
+ },
44
+ {
45
+ "cell_type": "code",
46
+ "execution_count": null,
47
+ "metadata": {},
48
+ "outputs": [],
49
+ "source": [
50
+ "import pickle\n",
51
+ "t_map = tools[['request_block', 'request_time']].set_index('request_block').to_dict()['request_time']\n",
52
+ "\n",
53
+ "with open('../data/t_map.pkl', 'wb') as f:\n",
54
+ " pickle.dump(t_map, f)\n",
55
+ "\n"
56
+ ]
57
+ },
58
+ {
59
+ "cell_type": "code",
60
+ "execution_count": null,
61
+ "metadata": {},
62
+ "outputs": [],
63
+ "source": [
64
+ "with open('../data/t_map.pkl', 'rb') as f:\n",
65
+ " t_map = pickle.load(f)"
66
+ ]
67
+ },
68
+ {
69
+ "cell_type": "markdown",
70
+ "metadata": {},
71
+ "source": [
72
+ "### Markets"
73
+ ]
74
+ },
75
+ {
76
+ "cell_type": "code",
77
+ "execution_count": 4,
78
+ "metadata": {},
79
+ "outputs": [
80
+ {
81
+ "data": {
82
+ "text/plain": [
83
+ "Index(['id', 'currentAnswer', 'title'], dtype='object')"
84
+ ]
85
+ },
86
+ "execution_count": 4,
87
+ "metadata": {},
88
+ "output_type": "execute_result"
89
+ }
90
+ ],
91
+ "source": [
92
+ "fpmms = pd.read_csv(\"../data/fpmms.csv\")\n",
93
+ "fpmms.columns"
94
+ ]
95
+ },
96
+ {
97
+ "cell_type": "code",
98
+ "execution_count": 6,
99
+ "metadata": {},
100
+ "outputs": [
101
+ {
102
+ "name": "stderr",
103
+ "output_type": "stream",
104
+ "text": [
105
+ "/var/folders/l_/g22b1g_n0gn4tmx9lkxqv5x00000gn/T/ipykernel_42934/371090584.py:1: DtypeWarning: Columns (2) have mixed types. Specify dtype option on import or set low_memory=False.\n",
106
+ " delivers = pd.read_csv(\"../data/delivers.csv\")\n"
107
+ ]
108
+ },
109
+ {
110
+ "data": {
111
+ "text/plain": [
112
+ "(263613, 12)"
113
+ ]
114
+ },
115
+ "execution_count": 6,
116
+ "metadata": {},
117
+ "output_type": "execute_result"
118
+ }
119
+ ],
120
+ "source": [
121
+ "delivers = pd.read_csv(\"../data/delivers.csv\")\n",
122
+ "delivers.shape\n"
123
+ ]
124
+ },
125
+ {
126
+ "cell_type": "code",
127
+ "execution_count": 7,
128
+ "metadata": {},
129
+ "outputs": [
130
+ {
131
+ "data": {
132
+ "text/plain": [
133
+ "(245092, 6)"
134
+ ]
135
+ },
136
+ "execution_count": 7,
137
+ "metadata": {},
138
+ "output_type": "execute_result"
139
+ }
140
+ ],
141
+ "source": [
142
+ "requests = pd.read_csv(\"../data/requests.csv\")\n",
143
+ "requests.columns\n",
144
+ "\n",
145
+ "requests.shape"
146
+ ]
147
+ },
148
+ {
149
+ "cell_type": "code",
150
+ "execution_count": 8,
151
+ "metadata": {},
152
+ "outputs": [
153
+ {
154
+ "name": "stderr",
155
+ "output_type": "stream",
156
+ "text": [
157
+ "/var/folders/l_/g22b1g_n0gn4tmx9lkxqv5x00000gn/T/ipykernel_42934/3254331204.py:1: DtypeWarning: Columns (7,10) have mixed types. Specify dtype option on import or set low_memory=False.\n",
158
+ " tools = pd.read_csv(\"../data/tools.csv\")\n"
159
+ ]
160
+ },
161
+ {
162
+ "data": {
163
+ "text/plain": [
164
+ "Index(['request_id', 'request_block', 'prompt_request', 'tool', 'nonce',\n",
165
+ " 'trader_address', 'deliver_block', 'error', 'error_message',\n",
166
+ " 'prompt_response', 'mech_address', 'p_yes', 'p_no', 'confidence',\n",
167
+ " 'info_utility', 'vote', 'win_probability', 'title', 'currentAnswer',\n",
168
+ " 'request_time', 'request_month_year', 'request_month_year_week'],\n",
169
+ " dtype='object')"
170
+ ]
171
+ },
172
+ "execution_count": 8,
173
+ "metadata": {},
174
+ "output_type": "execute_result"
175
+ }
176
+ ],
177
+ "source": [
178
+ "tools = pd.read_csv(\"../data/tools.csv\")\n",
179
+ "tools.columns"
180
+ ]
181
+ },
182
+ {
183
+ "cell_type": "code",
184
+ "execution_count": 9,
185
+ "metadata": {},
186
+ "outputs": [
187
+ {
188
+ "data": {
189
+ "text/plain": [
190
+ "841"
191
+ ]
192
+ },
193
+ "execution_count": 9,
194
+ "metadata": {},
195
+ "output_type": "execute_result"
196
+ }
197
+ ],
198
+ "source": [
199
+ "tools['request_time'].isna().sum()"
200
+ ]
201
+ },
202
+ {
203
+ "cell_type": "code",
204
+ "execution_count": 10,
205
+ "metadata": {},
206
+ "outputs": [],
207
+ "source": [
208
+ "def block_number_to_timestamp(block_number: int, web3: Web3) -> str:\n",
209
+ " \"\"\"Convert a block number to a timestamp.\"\"\"\n",
210
+ " block = web3.eth.get_block(block_number)\n",
211
+ " timestamp = datetime.utcfromtimestamp(block['timestamp'])\n",
212
+ " return timestamp.strftime('%Y-%m-%d %H:%M:%S')\n",
213
+ "\n",
214
+ "\n",
215
+ "def parallelize_timestamp_conversion(df: pd.DataFrame, function: callable) -> list:\n",
216
+ " \"\"\"Parallelize the timestamp conversion.\"\"\"\n",
217
+ " block_numbers = df['request_block'].tolist()\n",
218
+ " with ThreadPoolExecutor(max_workers=10) as executor:\n",
219
+ " results = list(tqdm(executor.map(function, block_numbers), total=len(block_numbers))) \n",
220
+ " return results\n"
221
+ ]
222
+ },
223
+ {
224
+ "cell_type": "code",
225
+ "execution_count": 11,
226
+ "metadata": {},
227
+ "outputs": [],
228
+ "source": [
229
+ "rpc = \"https://lb.nodies.app/v1/406d8dcc043f4cb3959ed7d6673d311a\"\n",
230
+ "web3 = Web3(Web3.HTTPProvider(rpc))\n",
231
+ "\n",
232
+ "partial_block_number_to_timestamp = partial(block_number_to_timestamp, web3=web3)"
233
+ ]
234
+ },
235
+ {
236
+ "cell_type": "code",
237
+ "execution_count": 15,
238
+ "metadata": {},
239
+ "outputs": [
240
+ {
241
+ "name": "stderr",
242
+ "output_type": "stream",
243
+ "text": [
244
+ "100%|██████████| 841/841 [00:25<00:00, 33.18it/s]\n"
245
+ ]
246
+ }
247
+ ],
248
+ "source": [
249
+ "missing_time_indices = tools[tools['request_time'].isna()].index\n",
250
+ "if not missing_time_indices.empty:\n",
251
+ " partial_block_number_to_timestamp = partial(block_number_to_timestamp, web3=web3)\n",
252
+ " missing_timestamps = parallelize_timestamp_conversion(tools.loc[missing_time_indices], partial_block_number_to_timestamp)\n",
253
+ " \n",
254
+ " # Update the original DataFrame with the missing timestamps\n",
255
+ " for i, timestamp in zip(missing_time_indices, missing_timestamps):\n",
256
+ " tools.at[i, 'request_time'] = timestamp"
257
+ ]
258
+ },
259
+ {
260
+ "cell_type": "code",
261
+ "execution_count": 16,
262
+ "metadata": {},
263
+ "outputs": [
264
+ {
265
+ "data": {
266
+ "text/plain": [
267
+ "0"
268
+ ]
269
+ },
270
+ "execution_count": 16,
271
+ "metadata": {},
272
+ "output_type": "execute_result"
273
+ }
274
+ ],
275
+ "source": [
276
+ "tools['request_time'].isna().sum()"
277
+ ]
278
+ },
279
+ {
280
+ "cell_type": "code",
281
+ "execution_count": 17,
282
+ "metadata": {},
283
+ "outputs": [],
284
+ "source": [
285
+ "tools['request_month_year'] = pd.to_datetime(tools['request_time']).dt.strftime('%Y-%m')\n",
286
+ "tools['request_month_year_week'] = pd.to_datetime(tools['request_time']).dt.to_period('W').astype(str)"
287
+ ]
288
+ },
289
+ {
290
+ "cell_type": "code",
291
+ "execution_count": 18,
292
+ "metadata": {},
293
+ "outputs": [
294
+ {
295
+ "data": {
296
+ "text/plain": [
297
+ "0"
298
+ ]
299
+ },
300
+ "execution_count": 18,
301
+ "metadata": {},
302
+ "output_type": "execute_result"
303
+ }
304
+ ],
305
+ "source": [
306
+ "tools['request_month_year_week'].isna().sum()\n"
307
+ ]
308
+ },
309
+ {
310
+ "cell_type": "code",
311
+ "execution_count": 19,
312
+ "metadata": {},
313
+ "outputs": [],
314
+ "source": [
315
+ "tools.to_csv(\"../data/tools.csv\", index=False)"
316
+ ]
317
+ },
318
+ {
319
+ "cell_type": "code",
320
+ "execution_count": 23,
321
+ "metadata": {},
322
+ "outputs": [],
323
+ "source": [
324
+ "with open('../data/t_map.pkl', 'rb') as f:\n",
325
+ " t_map = pickle.load(f)\n",
326
+ "new_timestamps = tools[['request_block', 'request_time']].dropna().set_index('request_block').to_dict()['request_time']\n",
327
+ "t_map.update(new_timestamps)\n",
328
+ "\n",
329
+ "with open('../data/t_map.pkl', 'wb') as f:\n",
330
+ " pickle.dump(t_map, f)\n",
331
+ "\n"
332
+ ]
333
+ },
334
+ {
335
+ "cell_type": "code",
336
+ "execution_count": null,
337
+ "metadata": {},
338
+ "outputs": [],
339
+ "source": []
340
+ }
341
+ ],
342
+ "metadata": {
343
+ "kernelspec": {
344
+ "display_name": "autogen",
345
+ "language": "python",
346
+ "name": "python3"
347
+ },
348
+ "language_info": {
349
+ "codemirror_mode": {
350
+ "name": "ipython",
351
+ "version": 3
352
+ },
353
+ "file_extension": ".py",
354
+ "mimetype": "text/x-python",
355
+ "name": "python",
356
+ "nbconvert_exporter": "python",
357
+ "pygments_lexer": "ipython3",
358
+ "version": "3.10.13"
359
+ }
360
+ },
361
+ "nbformat": 4,
362
+ "nbformat_minor": 2
363
+ }
nbs/weekly_analysis.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ pandas
+ matplotlib
+ huggingface-hub
+ pyarrow
+ web3
+ requests
+ gradio
+ apscheduler
+ pytz
scripts/markets.py ADDED
@@ -0,0 +1,227 @@
+ # -*- coding: utf-8 -*-
+ # ------------------------------------------------------------------------------
+ #
+ #   Copyright 2023 Valory AG
+ #
+ #   Licensed under the Apache License, Version 2.0 (the "License");
+ #   you may not use this file except in compliance with the License.
+ #   You may obtain a copy of the License at
+ #
+ #       http://www.apache.org/licenses/LICENSE-2.0
+ #
+ #   Unless required by applicable law or agreed to in writing, software
+ #   distributed under the License is distributed on an "AS IS" BASIS,
+ #   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ #   See the License for the specific language governing permissions and
+ #   limitations under the License.
+ #
+ # ------------------------------------------------------------------------------
+
+ import functools
+ import warnings
+ from string import Template
+ from typing import Optional, Generator, Callable
+
+ import pandas as pd
+ import requests
+ from tqdm import tqdm
+
+ from typing import List, Dict
+ from pathlib import Path
+
+ ResponseItemType = List[Dict[str, str]]
+ SubgraphResponseType = Dict[str, ResponseItemType]
+
+
+ CREATOR = "0x89c5cc945dd550BcFfb72Fe42BfF002429F46Fec"
+ BATCH_SIZE = 1000
+ OMEN_SUBGRAPH = "https://api.thegraph.com/subgraphs/name/protofire/omen-xdai"
+ FPMMS_FIELD = "fixedProductMarketMakers"
+ QUERY_FIELD = "query"
+ ERROR_FIELD = "errors"
+ DATA_FIELD = "data"
+ ID_FIELD = "id"
+ ANSWER_FIELD = "currentAnswer"
+ QUESTION_FIELD = "question"
+ OUTCOMES_FIELD = "outcomes"
+ TITLE_FIELD = "title"
+ MAX_UINT_HEX = "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+ DEFAULT_FILENAME = "fpmms.csv"
+ SCRIPTS_DIR = Path(__file__).parent
+ ROOT_DIR = SCRIPTS_DIR.parent
+ DATA_DIR = ROOT_DIR / "data"
+ FPMMS_QUERY = Template(
+     """
+     {
+       ${fpmms_field}(
+         where: {
+           creator: "${creator}",
+           id_gt: "${fpmm_id}",
+           isPendingArbitration: false
+         },
+         orderBy: ${id_field}
+         first: ${first}
+       ){
+         ${id_field}
+         ${answer_field}
+         ${question_field} {
+           ${outcomes_field}
+         }
+         ${title_field}
+       }
+     }
+     """
+ )
+
+
+ class RetriesExceeded(Exception):
+     """Exception to raise when retries are exceeded during data-fetching."""
+
+     def __init__(
+         self, msg="Maximum retries were exceeded while trying to fetch the data!"
+     ):
+         super().__init__(msg)
+
+
+ def hacky_retry(func: Callable, n_retries: int = 3) -> Callable:
+     """Create a hacky retry strategy.
+     Unfortunately, we cannot use `requests.packages.urllib3.util.retry.Retry`,
+     because the subgraph does not return the appropriate status codes in case of failure.
+     Instead, it always returns code 200. Thus, we raise exceptions manually inside `make_request`,
+     catch those exceptions in the hacky retry decorator and try again.
+     Finally, if the allowed number of retries is exceeded, we raise a custom `RetriesExceeded` exception.
+
+     :param func: the input request function.
+     :param n_retries: the maximum allowed number of retries.
+     :return: The request method with the hacky retry strategy applied.
+     """
+
+     @functools.wraps(func)
+     def wrapper_hacky_retry(*args, **kwargs) -> SubgraphResponseType:
+         """The wrapper for the hacky retry.
+
+         :return: a response dictionary.
+         """
+         retried = 0
+
+         while retried <= n_retries:
+             try:
+                 if retried > 0:
+                     warnings.warn(f"Retrying {retried}/{n_retries}...")
+
+                 return func(*args, **kwargs)
+             except (ValueError, ConnectionError) as e:
+                 warnings.warn(e.args[0])
+             finally:
+                 retried += 1
+
+         raise RetriesExceeded()
+
+     return wrapper_hacky_retry
+
+
+ @hacky_retry
+ def query_subgraph(url: str, query: str, key: str) -> SubgraphResponseType:
+     """Query a subgraph.
+
+     Args:
+         url: the subgraph's URL.
+         query: the query to be used.
+         key: the key to use in order to access the required data.
+
+     Returns:
+         a response dictionary.
+     """
+     content = {QUERY_FIELD: query}
+     headers = {
+         "Accept": "application/json",
+         "Content-Type": "application/json",
+     }
+     res = requests.post(url, json=content, headers=headers)
+
+     if res.status_code != 200:
+         raise ConnectionError(
+             "Something went wrong while trying to communicate with the subgraph "
+             f"(Error: {res.status_code})!\n{res.text}"
+         )
+
+     body = res.json()
+     if ERROR_FIELD in body.keys():
+         raise ValueError(f"The given query is not correct: {body[ERROR_FIELD]}")
+
+     data = body.get(DATA_FIELD, {}).get(key, None)
+     if data is None:
+         raise ValueError(f"Unknown error encountered!\nRaw response: \n{body}")
+
+     return data
+
+
+ def fpmms_fetcher() -> Generator[ResponseItemType, int, None]:
+     """An indefinite fetcher for the FPMMs."""
+     while True:
+         fpmm_id = yield
+         fpmms_query = FPMMS_QUERY.substitute(
164
+ creator=CREATOR,
165
+ fpmm_id=fpmm_id,
166
+ fpmms_field=FPMMS_FIELD,
167
+ first=BATCH_SIZE,
168
+ id_field=ID_FIELD,
169
+ answer_field=ANSWER_FIELD,
170
+ question_field=QUESTION_FIELD,
171
+ outcomes_field=OUTCOMES_FIELD,
172
+ title_field=TITLE_FIELD,
173
+ )
174
+ yield query_subgraph(OMEN_SUBGRAPH, fpmms_query, FPMMS_FIELD)
175
+
176
+
177
+ def fetch_fpmms() -> pd.DataFrame:
178
+ """Fetch all the fpmms of the creator."""
179
+ latest_id = ""
180
+ fpmms = []
181
+ fetcher = fpmms_fetcher()
182
+ for _ in tqdm(fetcher, unit="fpmms", unit_scale=BATCH_SIZE):
183
+ batch = fetcher.send(latest_id)
184
+ if len(batch) == 0:
185
+ break
186
+
187
+ latest_id = batch[-1].get(ID_FIELD, "")
188
+ if latest_id == "":
189
+ raise ValueError(f"Unexpected data format retrieved: {batch}")
190
+
191
+ fpmms.extend(batch)
192
+
193
+ return pd.DataFrame(fpmms)
194
+
195
+
196
+ def get_answer(fpmm: pd.Series) -> str:
197
+ """Get an answer from its index, using the Series of an FPMM."""
198
+ return fpmm[QUESTION_FIELD][OUTCOMES_FIELD][fpmm[ANSWER_FIELD]]
199
+
200
+
201
+ def transform_fpmms(fpmms: pd.DataFrame) -> pd.DataFrame:
202
+ """Transform an FPMMS dataframe."""
203
+ transformed = fpmms.dropna()
204
+ transformed = transformed.drop_duplicates([ID_FIELD])
205
+ transformed = transformed.loc[transformed[ANSWER_FIELD] != MAX_UINT_HEX]
206
+ transformed.loc[:, ANSWER_FIELD] = (
207
+ transformed[ANSWER_FIELD].str.slice(-1).astype(int)
208
+ )
209
+ transformed.loc[:, ANSWER_FIELD] = transformed.apply(get_answer, axis=1)
210
+ transformed = transformed.drop(columns=[QUESTION_FIELD])
211
+
212
+ return transformed
213
+
214
+
215
+ def etl(filename: Optional[str] = None) -> pd.DataFrame:
216
+ """Fetch, process, store and return the markets as a Dataframe."""
217
+ fpmms = fetch_fpmms()
218
+ fpmms = transform_fpmms(fpmms)
219
+
220
+ if filename:
221
+ fpmms.to_csv(DATA_DIR / filename, index=False)
222
+
223
+ return fpmms
224
+
225
+
226
+ if __name__ == "__main__":
227
+ etl(DEFAULT_FILENAME)
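The fpmms_fetcher/fetch_fpmms pair above implements keyset pagination against the subgraph: the generator first yields to receive the last seen id, then yields the next batch fetched with an id_gt cursor, and fetch_fpmms drains it until an empty batch comes back. A minimal, self-contained sketch of the same two-step generator pattern (the fetch callable stands in for query_subgraph and is an assumption, not part of the script):

from typing import Callable, Dict, Generator, List

def paged_fetcher(fetch: Callable[[str], List[Dict]]) -> Generator[List[Dict], str, None]:
    """Indefinitely yield batches, receiving the last seen id as a cursor."""
    while True:
        cursor = yield           # first yield: receive the cursor via send()
        yield fetch(cursor)      # second yield: emit the next batch

def fetch_all(fetch: Callable[[str], List[Dict]]) -> List[Dict]:
    """Drain the fetcher until an empty batch is returned."""
    items: List[Dict] = []
    latest_id = ""
    fetcher = paged_fetcher(fetch)
    for _ in fetcher:            # advances the generator to its first yield
        batch = fetcher.send(latest_id)
        if not batch:
            break
        latest_id = batch[-1]["id"]
        items.extend(batch)
    return items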
scripts/profitability.py ADDED
@@ -0,0 +1,633 @@
1
+ # -*- coding: utf-8 -*-
2
+ # ------------------------------------------------------------------------------
3
+ #
4
+ # Copyright 2023 Valory AG
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ #
18
+ # ------------------------------------------------------------------------------
19
+
20
+ import time
21
+ import requests
22
+ import datetime
23
+ import pandas as pd
24
+ from collections import defaultdict
25
+ from typing import Any, Union
26
+ from string import Template
27
+ from enum import Enum
28
+ from tqdm import tqdm
29
+ import numpy as np
30
+ from pathlib import Path
31
+
32
+ IRRELEVANT_TOOLS = [
33
+ "openai-text-davinci-002",
34
+ "openai-text-davinci-003",
35
+ "openai-gpt-3.5-turbo",
36
+ "openai-gpt-4",
37
+ "stabilityai-stable-diffusion-v1-5",
38
+ "stabilityai-stable-diffusion-xl-beta-v2-2-2",
39
+ "stabilityai-stable-diffusion-512-v2-1",
40
+ "stabilityai-stable-diffusion-768-v2-1",
41
+ "deepmind-optimization-strong",
42
+ "deepmind-optimization",
43
+ ]
44
+ QUERY_BATCH_SIZE = 1000
45
+ DUST_THRESHOLD = 10000000000000
46
+ INVALID_ANSWER_HEX = (
47
+ "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
48
+ )
49
+ INVALID_ANSWER = -1
50
+ FPMM_CREATOR = "0x89c5cc945dd550bcffb72fe42bff002429f46fec"
51
+ DEFAULT_FROM_DATE = "1970-01-01T00:00:00"
52
+ DEFAULT_TO_DATE = "2038-01-19T03:14:07"
53
+ DEFAULT_FROM_TIMESTAMP = 0
54
+ DEFAULT_TO_TIMESTAMP = 2147483647
55
+ WXDAI_CONTRACT_ADDRESS = "0xe91D153E0b41518A2Ce8Dd3D7944Fa863463a97d"
56
+ DEFAULT_MECH_FEE = 0.01
57
+ DUST_THRESHOLD = 10000000000000
58
+ SCRIPTS_DIR = Path(__file__).parent
59
+ ROOT_DIR = SCRIPTS_DIR.parent
60
+ DATA_DIR = ROOT_DIR / "data"
61
+
62
+ class MarketState(Enum):
63
+ """Market state"""
64
+
65
+ OPEN = 1
66
+ PENDING = 2
67
+ FINALIZING = 3
68
+ ARBITRATING = 4
69
+ CLOSED = 5
70
+
71
+ def __str__(self) -> str:
72
+ """Prints the market status."""
73
+ return self.name.capitalize()
74
+
75
+
76
+ class MarketAttribute(Enum):
77
+ """Attribute"""
78
+
79
+ NUM_TRADES = "Num_trades"
80
+ WINNER_TRADES = "Winner_trades"
81
+ NUM_REDEEMED = "Num_redeemed"
82
+ INVESTMENT = "Investment"
83
+ FEES = "Fees"
84
+ MECH_CALLS = "Mech_calls"
85
+ MECH_FEES = "Mech_fees"
86
+ EARNINGS = "Earnings"
87
+ NET_EARNINGS = "Net_earnings"
88
+ REDEMPTIONS = "Redemptions"
89
+ ROI = "ROI"
90
+
91
+ def __str__(self) -> str:
92
+ """Prints the attribute."""
93
+ return self.value
94
+
95
+ def __repr__(self) -> str:
96
+ """Prints the attribute representation."""
97
+ return self.name
98
+
99
+ @staticmethod
100
+ def argparse(s: str) -> "MarketAttribute":
101
+ """Performs string conversion to MarketAttribute."""
102
+ try:
103
+ return MarketAttribute[s.upper()]
104
+ except KeyError as e:
105
+ raise ValueError(f"Invalid MarketAttribute: {s}") from e
106
+
107
+
108
+ ALL_TRADES_STATS_DF_COLS = [
109
+ "trader_address",
110
+ "trade_id",
111
+ "creation_timestamp",
112
+ "title",
113
+ "market_status",
114
+ "collateral_amount",
115
+ "outcome_index",
116
+ "trade_fee_amount",
117
+ "outcomes_tokens_traded",
118
+ "current_answer",
119
+ "is_invalid",
120
+ "winning_trade",
121
+ "earnings",
122
+ "redeemed",
123
+ "redeemed_amount",
124
+ "num_mech_calls",
125
+ "mech_fee_amount",
126
+ "net_earnings",
127
+ "roi",
128
+ ]
129
+
130
+ SUMMARY_STATS_DF_COLS = [
131
+ "trader_address",
132
+ "num_trades",
133
+ "num_winning_trades",
134
+ "num_redeemed",
135
+ "total_investment",
136
+ "total_trade_fees",
137
+ "num_mech_calls",
138
+ "total_mech_fees",
139
+ "total_earnings",
140
+ "total_redeemed_amount",
141
+ "total_net_earnings",
142
+ "total_net_earnings_wo_mech_fees",
143
+ "total_roi",
144
+ "total_roi_wo_mech_fees",
145
+ "mean_mech_calls_per_trade",
146
+ "mean_mech_fee_amount_per_trade",
147
+ ]
148
+ headers = {
149
+ "Accept": "application/json, multipart/mixed",
150
+ "Content-Type": "application/json",
151
+ }
152
+
153
+
154
+ omen_xdai_trades_query = Template(
155
+ """
156
+ {
157
+ fpmmTrades(
158
+ where: {
159
+ type: Buy,
160
+ fpmm_: {
161
+ creator: "${fpmm_creator}"
162
+ creationTimestamp_gte: "${fpmm_creationTimestamp_gte}",
163
+ creationTimestamp_lt: "${fpmm_creationTimestamp_lte}"
164
+ },
165
+ creationTimestamp_gte: "${creationTimestamp_gte}",
166
+ creationTimestamp_lte: "${creationTimestamp_lte}"
167
+ id_gt: "${id_gt}"
168
+ }
169
+ first: ${first}
170
+ orderBy: id
171
+ orderDirection: asc
172
+ ) {
173
+ id
174
+ title
175
+ collateralToken
176
+ outcomeTokenMarginalPrice
177
+ oldOutcomeTokenMarginalPrice
178
+ type
179
+ creator {
180
+ id
181
+ }
182
+ creationTimestamp
183
+ collateralAmount
184
+ collateralAmountUSD
185
+ feeAmount
186
+ outcomeIndex
187
+ outcomeTokensTraded
188
+ transactionHash
189
+ fpmm {
190
+ id
191
+ outcomes
192
+ title
193
+ answerFinalizedTimestamp
194
+ currentAnswer
195
+ isPendingArbitration
196
+ arbitrationOccurred
197
+ openingTimestamp
198
+ condition {
199
+ id
200
+ }
201
+ }
202
+ }
203
+ }
204
+ """
205
+ )
206
+
207
+
208
+ conditional_tokens_gc_user_query = Template(
209
+ """
210
+ {
211
+ user(id: "${id}") {
212
+ userPositions(
213
+ first: ${first}
214
+ where: {
215
+ id_gt: "${userPositions_id_gt}"
216
+ }
217
+ orderBy: id
218
+ ) {
219
+ balance
220
+ id
221
+ position {
222
+ id
223
+ conditionIds
224
+ }
225
+ totalBalance
226
+ wrappedBalance
227
+ }
228
+ }
229
+ }
230
+ """
231
+ )
232
+
233
+
234
+ def _to_content(q: str) -> dict[str, Any]:
235
+ """Convert the given query string to the JSON payload expected by the subgraph, i.e., wrap it under a "query" key."""
236
+ finalized_query = {
237
+ "query": q,
238
+ "variables": None,
239
+ "extensions": {"headers": None},
240
+ }
241
+ return finalized_query
242
+
243
+
244
+ def _query_omen_xdai_subgraph(
245
+ from_timestamp: float,
246
+ to_timestamp: float,
247
+ fpmm_from_timestamp: float,
248
+ fpmm_to_timestamp: float,
249
+ ) -> dict[str, Any]:
250
+ """Query the subgraph."""
251
+ url = "https://api.thegraph.com/subgraphs/name/protofire/omen-xdai"
252
+
253
+ grouped_results = defaultdict(list)
254
+ id_gt = ""
255
+
256
+ while True:
257
+ query = omen_xdai_trades_query.substitute(
258
+ fpmm_creator=FPMM_CREATOR.lower(),
259
+ creationTimestamp_gte=int(from_timestamp),
260
+ creationTimestamp_lte=int(to_timestamp),
261
+ fpmm_creationTimestamp_gte=int(fpmm_from_timestamp),
262
+ fpmm_creationTimestamp_lte=int(fpmm_to_timestamp),
263
+ first=QUERY_BATCH_SIZE,
264
+ id_gt=id_gt,
265
+ )
266
+ content_json = _to_content(query)
267
+ res = requests.post(url, headers=headers, json=content_json)
268
+ result_json = res.json()
269
+ user_trades = result_json.get("data", {}).get("fpmmTrades", [])
270
+
271
+ if not user_trades:
272
+ break
273
+
274
+ for trade in user_trades:
275
+ fpmm_id = trade.get("fpmm", {}).get("id")
276
+ grouped_results[fpmm_id].append(trade)
277
+
278
+ id_gt = user_trades[len(user_trades) - 1]["id"]
279
+
280
+ all_results = {
281
+ "data": {
282
+ "fpmmTrades": [
283
+ trade
284
+ for trades_list in grouped_results.values()
285
+ for trade in trades_list
286
+ ]
287
+ }
288
+ }
289
+
290
+ return all_results
291
+
292
+
293
+ def _query_conditional_tokens_gc_subgraph(creator: str) -> dict[str, Any]:
294
+ """Query the subgraph."""
295
+ url = "https://api.thegraph.com/subgraphs/name/gnosis/conditional-tokens-gc"
296
+
297
+ all_results: dict[str, Any] = {"data": {"user": {"userPositions": []}}}
298
+ userPositions_id_gt = ""
299
+ while True:
300
+ query = conditional_tokens_gc_user_query.substitute(
301
+ id=creator.lower(),
302
+ first=QUERY_BATCH_SIZE,
303
+ userPositions_id_gt=userPositions_id_gt,
304
+ )
305
+ content_json = {"query": query}
306
+ res = requests.post(url, headers=headers, json=content_json)
307
+ result_json = res.json()
308
+ user_data = result_json.get("data", {}).get("user", {})
309
+
310
+ if not user_data:
311
+ break
312
+
313
+ user_positions = user_data.get("userPositions", [])
314
+
315
+ if user_positions:
316
+ all_results["data"]["user"]["userPositions"].extend(user_positions)
317
+ userPositions_id_gt = user_positions[len(user_positions) - 1]["id"]
318
+ else:
319
+ break
320
+
321
+ if len(all_results["data"]["user"]["userPositions"]) == 0:
322
+ return {"data": {"user": None}}
323
+
324
+ return all_results
325
+
326
+
327
+ def convert_hex_to_int(x: Union[str, float]) -> Union[int, float]:
328
+ """Convert hex to int"""
329
+ if isinstance(x, float):
330
+ return np.nan
331
+ elif isinstance(x, str):
332
+ if x == INVALID_ANSWER_HEX:
333
+ return -1
334
+ else:
335
+ return int(x, 16)
336
+
337
+
338
+ def wei_to_unit(wei: int) -> float:
339
+ """Converts wei to currency unit."""
340
+ return wei / 10**18
341
+
342
+
343
+ def _is_redeemed(user_json: dict[str, Any], fpmmTrade: dict[str, Any]) -> bool:
344
+ """Returns whether the user has redeemed the position."""
345
+ user_positions = user_json["data"]["user"]["userPositions"]
346
+ outcomes_tokens_traded = int(fpmmTrade["outcomeTokensTraded"])
347
+ condition_id = fpmmTrade["fpmm.condition.id"]
348
+
349
+ for position in user_positions:
350
+ position_condition_ids = position["position"]["conditionIds"]
351
+ balance = int(position["balance"])
352
+
353
+ if condition_id in position_condition_ids:
354
+ if balance == 0:
355
+ return True
356
+ # return early
357
+ return False
358
+ return False
359
+
360
+
361
+ def create_fpmmTrades(rpc: str):
362
+ """Create fpmmTrades for all trades."""
363
+ trades_json = _query_omen_xdai_subgraph(
364
+ from_timestamp=DEFAULT_FROM_TIMESTAMP,
365
+ to_timestamp=DEFAULT_TO_TIMESTAMP,
366
+ fpmm_from_timestamp=DEFAULT_FROM_TIMESTAMP,
367
+ fpmm_to_timestamp=DEFAULT_TO_TIMESTAMP,
368
+ )
369
+
370
+ # convert to dataframe
371
+ df = pd.DataFrame(trades_json["data"]["fpmmTrades"])
372
+
373
+ # convert creator to address
374
+ df["creator"] = df["creator"].apply(lambda x: x["id"])
375
+
376
+ # normalize fpmm column
377
+ fpmm = pd.json_normalize(df["fpmm"])
378
+ fpmm.columns = [f"fpmm.{col}" for col in fpmm.columns]
379
+ df = pd.concat([df, fpmm], axis=1)
380
+
381
+ # drop fpmm column
382
+ df.drop(["fpmm"], axis=1, inplace=True)
383
+
384
+ # rename creator to trader_address
385
+ df.rename(columns={"creator": "trader_address"}, inplace=True)
386
+
387
+ # save to csv
388
+ df.to_csv(DATA_DIR / "fpmmTrades.csv", index=False)
389
+
390
+ return df
391
+
392
+
393
+ def prepare_profitalibity_data(rpc: str):
394
+ """Prepare data for profitability analysis."""
395
+
396
+ # Check if tools.csv exists in the data directory
397
+ try:
398
+ # load tools.csv
399
+ tools = pd.read_csv(DATA_DIR / "tools.csv")
400
+
401
+ # make sure trader_address is in the columns
402
+ assert "trader_address" in tools.columns, "trader_address column not found"
403
+
404
+ # lowercase and strip trader_address
405
+ tools["trader_address"] = tools["trader_address"].str.lower().str.strip()
406
+
407
+ # drop duplicates
408
+ tools.drop_duplicates(inplace=True)
409
+
410
+ print("tools.csv loaded")
411
+ except FileNotFoundError:
412
+ print("tools.csv not found. Please run tools.py first.")
413
+ return
414
+
415
+ # Check if fpmmTrades.csv exists in the data directory
416
+ try:
417
+ # load fpmmTrades.csv
418
+ fpmmTrades = pd.read_csv(DATA_DIR / "fpmmTrades.csv")
419
+ print("fpmmTrades.csv loaded")
420
+ except FileNotFoundError:
421
+ print("fpmmTrades.csv not found. Creating fpmmTrades.csv...")
422
+ fpmmTrades = create_fpmmTrades(rpc)
423
+ fpmmTrades.to_csv(DATA_DIR / "fpmmTrades.csv", index=False)
424
+ fpmmTrades = pd.read_csv(DATA_DIR / "fpmmTrades.csv")
425
+
426
+ # make sure trader_address is in the columns
427
+ assert "trader_address" in fpmmTrades.columns, "trader_address column not found"
428
+
429
+ # lowercase and strip trader_address
430
+ fpmmTrades["trader_address"] = fpmmTrades["trader_address"].str.lower().str.strip()
431
+
432
+ return fpmmTrades, tools
433
+
434
+
435
+ def determine_market_status(trade, current_answer):
436
+ """Determine the market status of a trade."""
437
+ if pd.isna(current_answer) and time.time() >= trade["fpmm.openingTimestamp"]:
438
+ return MarketState.PENDING
439
+ elif pd.isna(current_answer):
440
+ return MarketState.OPEN
441
+ elif trade["fpmm.isPendingArbitration"]:
442
+ return MarketState.ARBITRATING
443
+ elif time.time() < trade["fpmm.answerFinalizedTimestamp"]:
444
+ return MarketState.FINALIZING
445
+ return MarketState.CLOSED
446
+
447
+
448
+ def analyse_trader(
449
+ trader_address: str, fpmmTrades: pd.DataFrame, tools: pd.DataFrame
450
+ ) -> pd.DataFrame:
451
+ """Analyse a trader's trades"""
452
+ # Filter trades and tools for the given trader
453
+ trades = fpmmTrades[fpmmTrades["trader_address"] == trader_address]
454
+ tools_usage = tools[tools["trader_address"] == trader_address]
455
+
456
+ # Prepare the DataFrame
457
+ trades_df = pd.DataFrame(columns=ALL_TRADES_STATS_DF_COLS)
458
+ if trades.empty:
459
+ return trades_df
460
+
461
+ # Fetch user's conditional tokens gc graph
462
+ try:
463
+ user_json = _query_conditional_tokens_gc_subgraph(trader_address)
464
+ except Exception as e:
465
+ print(f"Error fetching user data: {e}")
466
+ return trades_df
467
+
468
+ # Iterate over the trades
469
+ for i, trade in tqdm(trades.iterrows(), total=len(trades), desc="Analysing trades"):
470
+ try:
471
+ # Parsing and computing shared values
472
+ creation_timestamp_utc = datetime.datetime.fromtimestamp(
473
+ trade["creationTimestamp"], tz=datetime.timezone.utc
474
+ )
475
+ collateral_amount = wei_to_unit(float(trade["collateralAmount"]))
476
+ fee_amount = wei_to_unit(float(trade["feeAmount"]))
477
+ outcome_tokens_traded = wei_to_unit(float(trade["outcomeTokensTraded"]))
478
+ earnings, winner_trade = (0, False)
479
+ redemption = _is_redeemed(user_json, trade)
480
+ current_answer = trade["fpmm.currentAnswer"]
481
+
482
+ # Determine market status
483
+ market_status = determine_market_status(trade, current_answer)
484
+
485
+ # Skip non-closed markets
486
+ if market_status != MarketState.CLOSED:
487
+ print(
488
+ f"Skipping trade {i} because market is not closed. Market Status: {market_status}"
489
+ )
490
+ continue
491
+ current_answer = convert_hex_to_int(current_answer)
492
+
493
+ # Compute invalidity
494
+ is_invalid = current_answer == INVALID_ANSWER
495
+
496
+ # Compute earnings and winner trade status
497
+ if is_invalid:
498
+ earnings = collateral_amount
499
+ winner_trade = False
500
+ elif trade["outcomeIndex"] == current_answer:
501
+ earnings = outcome_tokens_traded
502
+ winner_trade = True
503
+
504
+ # Compute mech calls
505
+ num_mech_calls = (
506
+ tools_usage["prompt_request"].apply(lambda x: trade["title"] in x).sum()
507
+ )
508
+ net_earnings = (
509
+ earnings
510
+ - fee_amount
511
+ - (num_mech_calls * DEFAULT_MECH_FEE)
512
+ - collateral_amount
513
+ )
514
+
515
+ # Assign values to DataFrame
516
+ trades_df.loc[i] = {
517
+ "trader_address": trader_address,
518
+ "trade_id": trade["id"],
519
+ "market_status": market_status.name,
520
+ "creation_timestamp": creation_timestamp_utc,
521
+ "title": trade["title"],
522
+ "collateral_amount": collateral_amount,
523
+ "outcome_index": trade["outcomeIndex"],
524
+ "trade_fee_amount": fee_amount,
525
+ "outcomes_tokens_traded": outcome_tokens_traded,
526
+ "current_answer": current_answer,
527
+ "is_invalid": is_invalid,
528
+ "winning_trade": winner_trade,
529
+ "earnings": earnings,
530
+ "redeemed": redemption,
531
+ "redeemed_amount": earnings if redemption else 0,
532
+ "num_mech_calls": num_mech_calls,
533
+ "mech_fee_amount": num_mech_calls * DEFAULT_MECH_FEE,
534
+ "net_earnings": net_earnings,
535
+ "roi": net_earnings / collateral_amount,
536
+ }
537
+
538
+ except Exception as e:
539
+ print(f"Error processing trade {i}: {e}")
540
+ continue
541
+
542
+ return trades_df
543
+
544
+
545
+ def analyse_all_traders(trades: pd.DataFrame, tools: pd.DataFrame) -> pd.DataFrame:
546
+ """Analyse all traders."""
547
+ all_traders = []
548
+ for trader in tqdm(
549
+ trades["trader_address"].unique(),
550
+ total=len(trades["trader_address"].unique()),
551
+ desc="Analysing creators",
552
+ ):
553
+ all_traders.append(analyse_trader(trader, trades, tools))
554
+
555
+ # concat all creators
556
+ all_creators_df = pd.concat(all_traders)
557
+
558
+ return all_creators_df
559
+
560
+
561
+ def summary_analyse(df):
562
+ """Summarise profitability analysis."""
563
+ # Ensure DataFrame is not empty
564
+ if df.empty:
565
+ return pd.DataFrame(columns=SUMMARY_STATS_DF_COLS)
566
+
567
+ # Group by trader_address
568
+ grouped = df.groupby("trader_address")
569
+
570
+ # Create summary DataFrame
571
+ summary_df = grouped.agg(
572
+ num_trades=("trader_address", "size"),
573
+ num_winning_trades=("winning_trade", lambda x: float((x).sum())),
574
+ num_redeemed=("redeemed", lambda x: float(x.sum())),
575
+ total_investment=("collateral_amount", "sum"),
576
+ total_trade_fees=("trade_fee_amount", "sum"),
577
+ num_mech_calls=("num_mech_calls", "sum"),
578
+ total_mech_fees=("mech_fee_amount", "sum"),
579
+ total_earnings=("earnings", "sum"),
580
+ total_redeemed_amount=("redeemed_amount", "sum"),
581
+ total_net_earnings=("net_earnings", "sum"),
582
+ )
583
+
584
+ # Calculating additional columns
585
+ summary_df["total_roi"] = (
586
+ summary_df["total_net_earnings"] / summary_df["total_investment"]
587
+ )
588
+ summary_df["mean_mech_calls_per_trade"] = (
589
+ summary_df["num_mech_calls"] / summary_df["num_trades"]
590
+ )
591
+ summary_df["mean_mech_fee_amount_per_trade"] = (
592
+ summary_df["total_mech_fees"] / summary_df["num_trades"]
593
+ )
594
+ summary_df["total_net_earnings_wo_mech_fees"] = (
595
+ summary_df["total_net_earnings"] + summary_df["total_mech_fees"]
596
+ )
597
+ summary_df["total_roi_wo_mech_fees"] = (
598
+ summary_df["total_net_earnings_wo_mech_fees"] / summary_df["total_investment"]
599
+ )
600
+
601
+ # Resetting index to include trader_address
602
+ summary_df.reset_index(inplace=True)
603
+
604
+ return summary_df
605
+
606
+
607
+ def run_profitability_analysis(rpc):
608
+ """Create all trades analysis."""
609
+
610
+ # load dfs from csv for analysis
611
+ print("Preparing data...")
612
+ fpmmTrades, tools = prepare_profitalibity_data(rpc)
613
+
614
+ # all trades profitability df
615
+ print("Analysing trades...")
616
+ all_trades_df = analyse_all_traders(fpmmTrades, tools)
617
+
618
+ # summarize profitability df
619
+ print("Summarising trades...")
620
+ summary_df = summary_analyse(all_trades_df)
621
+
622
+ # save to csv
623
+ all_trades_df.to_csv(DATA_DIR / "all_trades_profitability.csv", index=False)
624
+ summary_df.to_csv(DATA_DIR / "summary_profitability.csv", index=False)
625
+
626
+ print("Done!")
627
+
628
+ return all_trades_df, summary_df
629
+
630
+
631
+ if __name__ == "__main__":
632
+ rpc = "https://lb.nodies.app/v1/406d8dcc043f4cb3959ed7d6673d311a"
633
+ run_profitability_analysis(rpc)
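For each closed market, analyse_trader above credits the full outcomeTokensTraded as earnings when the trader picked the resolved outcome (or refunds the collateral when the market resolved as invalid), then nets out the market fee, the mech fees (num_mech_calls * DEFAULT_MECH_FEE) and the collateral itself before computing ROI. A small worked example of that arithmetic with made-up numbers:

DEFAULT_MECH_FEE = 0.01  # xDAI per mech call, as defined above

# Hypothetical winning trade on a closed, valid market.
collateral_amount = 1.00        # xDAI paid into the position
fee_amount = 0.02               # market fee on the buy
outcome_tokens_traded = 1.90    # tokens bought; each pays 1 xDAI if correct
num_mech_calls = 3              # mech requests attributed to this market title

earnings = outcome_tokens_traded  # winner: full token payout
net_earnings = earnings - fee_amount - num_mech_calls * DEFAULT_MECH_FEE - collateral_amount
roi = net_earnings / collateral_amount
print(f"net earnings: {net_earnings:.2f} xDAI, ROI: {roi:.0%}")  # 0.85 xDAI, 85%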
scripts/pull_data.py ADDED
@@ -0,0 +1,145 @@
1
+ import logging
2
+ import re
3
+ import os
4
+ import pickle
5
+ from datetime import datetime
6
+ from concurrent.futures import ThreadPoolExecutor
7
+ from tqdm import tqdm
8
+ from web3 import Web3
9
+ from typing import Optional
10
+ import pandas as pd
11
+ from pathlib import Path
12
+ from functools import partial
13
+ from markets import (
14
+ etl as mkt_etl,
15
+ DEFAULT_FILENAME as MARKETS_FILENAME,
16
+ )
17
+ from tools import (
18
+ etl as tools_etl,
19
+ DEFAULT_FILENAME as TOOLS_FILENAME,
20
+ )
21
+ from profitability import run_profitability_analysis
22
+ import gc
23
+
24
+ logging.basicConfig(level=logging.INFO)
25
+
26
+ SCRIPTS_DIR = Path(__file__).parent
27
+ ROOT_DIR = SCRIPTS_DIR.parent
28
+ DATA_DIR = ROOT_DIR / "data"
29
+
30
+ def get_question(text: str) -> str:
31
+ """Get the question from a text."""
32
+ # Regex to find text within double quotes
33
+ pattern = r'"([^"]*)"'
34
+
35
+ # Find all occurrences
36
+ questions = re.findall(pattern, text)
37
+
38
+ # Assuming you want the first question if there are multiple
39
+ question = questions[0] if questions else None
40
+
41
+ return question
42
+
43
+
44
+ def current_answer(text: str, fpmms: pd.DataFrame) -> Optional[str]:
45
+ """Get the current answer for a question."""
46
+ row = fpmms[fpmms['title'] == text]
47
+ if row.shape[0] == 0:
48
+ return None
49
+ return row['currentAnswer'].values[0]
50
+
51
+
52
+ def block_number_to_timestamp(block_number: int, web3: Web3) -> str:
53
+ """Convert a block number to a timestamp."""
54
+ block = web3.eth.get_block(block_number)
55
+ timestamp = datetime.utcfromtimestamp(block['timestamp'])
56
+ return timestamp.strftime('%Y-%m-%d %H:%M:%S')
57
+
58
+
59
+ def parallelize_timestamp_conversion(df: pd.DataFrame, function: callable) -> list:
60
+ """Parallelize the timestamp conversion."""
61
+ block_numbers = df['request_block'].tolist()
62
+ with ThreadPoolExecutor(max_workers=10) as executor:
63
+ results = list(tqdm(executor.map(function, block_numbers), total=len(block_numbers)))
64
+ return results
65
+
66
+
67
+ def weekly_analysis():
68
+ """Run weekly analysis for the FPMMS project."""
69
+ rpc = "https://lb.nodies.app/v1/406d8dcc043f4cb3959ed7d6673d311a"
70
+ web3 = Web3(Web3.HTTPProvider(rpc))
71
+
72
+ # Run markets ETL
73
+ logging.info("Running markets ETL")
74
+ mkt_etl(MARKETS_FILENAME)
75
+ logging.info("Markets ETL completed")
76
+
77
+ # Run tools ETL
78
+ logging.info("Running tools ETL")
79
+ tools_etl(
80
+ rpcs=[rpc],
81
+ filename=TOOLS_FILENAME,
82
+ full_contents=True,
83
+ )
84
+ logging.info("Tools ETL completed")
85
+
86
+ # Run profitability analysis
87
+ logging.info("Running profitability analysis")
88
+ if os.path.exists(DATA_DIR / "fpmmTrades.csv"):
89
+ os.remove(DATA_DIR / "fpmmTrades.csv")
90
+ run_profitability_analysis(
91
+ rpc=rpc,
92
+ )
93
+ logging.info("Profitability analysis completed")
94
+
95
+ # Get currentAnswer from FPMMS
96
+ fpmms = pd.read_csv(DATA_DIR / MARKETS_FILENAME)
97
+ tools = pd.read_csv(DATA_DIR / TOOLS_FILENAME)
98
+
99
+ # Get the question from the tools
100
+ logging.info("Getting the question and current answer for the tools")
101
+ tools['title'] = tools['prompt_request'].apply(lambda x: get_question(x))
102
+ tools['currentAnswer'] = tools['title'].apply(lambda x: current_answer(x, fpmms))
103
+
104
+ tools['currentAnswer'] = tools['currentAnswer'].str.replace('yes', 'Yes')
105
+ tools['currentAnswer'] = tools['currentAnswer'].str.replace('no', 'No')
106
+
107
+ # Convert block number to timestamp
108
+ logging.info("Converting block number to timestamp")
109
+ t_map = pickle.load(open(DATA_DIR / "t_map.pkl", "rb"))
110
+ tools['request_time'] = tools['request_block'].map(t_map)
111
+
112
+ # Identify tools with missing request_time and fill them
113
+ missing_time_indices = tools[tools['request_time'].isna()].index
114
+ if not missing_time_indices.empty:
115
+ partial_block_number_to_timestamp = partial(block_number_to_timestamp, web3=web3)
116
+ missing_timestamps = parallelize_timestamp_conversion(tools.loc[missing_time_indices], partial_block_number_to_timestamp)
117
+
118
+ # Update the original DataFrame with the missing timestamps
119
+ for i, timestamp in zip(missing_time_indices, missing_timestamps):
120
+ tools.at[i, 'request_time'] = timestamp
121
+
122
+ tools['request_month_year'] = pd.to_datetime(tools['request_time']).dt.strftime('%Y-%m')
123
+ tools['request_month_year_week'] = pd.to_datetime(tools['request_time']).dt.to_period('W').astype(str)
124
+
125
+ # Save the tools
126
+ tools.to_csv(DATA_DIR / TOOLS_FILENAME, index=False)
127
+
128
+ # Update t_map with new timestamps
129
+ new_timestamps = tools[['request_block', 'request_time']].dropna().set_index('request_block').to_dict()['request_time']
130
+ t_map.update(new_timestamps)
131
+
132
+ with open(DATA_DIR / "t_map.pkl", "wb") as f:
133
+ pickle.dump(t_map, f)
134
+ # clean and release all memory
135
+ del tools
136
+ del fpmms
137
+ del t_map
138
+ gc.collect()
139
+
140
+ logging.info("Weekly analysis files generated and saved")
141
+
142
+
143
+ if __name__ == "__main__":
144
+ weekly_analysis()
145
+
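weekly_analysis above resolves request_block to request_time through the pickled t_map cache first and only falls back to RPC lookups, run in a thread pool, for blocks that are not cached yet; the new results are then merged back into the cache. A minimal sketch of that cache-then-fallback step, reusing block_number_to_timestamp and parallelize_timestamp_conversion defined in the script above (the RPC URL is a placeholder):

from functools import partial
import pandas as pd
from web3 import Web3

web3 = Web3(Web3.HTTPProvider("https://rpc.example.org"))  # placeholder RPC URL

def resolve_request_times(tools: pd.DataFrame, t_map: dict) -> pd.DataFrame:
    """Fill request_time from the cache, then from the chain for cache misses."""
    tools["request_time"] = tools["request_block"].map(t_map)
    missing = tools[tools["request_time"].isna()].index
    if not missing.empty:
        to_ts = partial(block_number_to_timestamp, web3=web3)
        timestamps = parallelize_timestamp_conversion(tools.loc[missing], to_ts)
        for i, ts in zip(missing, timestamps):
            tools.at[i, "request_time"] = ts
    return tools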
scripts/tools.py ADDED
@@ -0,0 +1,765 @@
1
+ # -*- coding: utf-8 -*-
2
+ # ------------------------------------------------------------------------------
3
+ #
4
+ # Copyright 2023 Valory AG
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ #
18
+ # ------------------------------------------------------------------------------
19
+
20
+ import json
21
+ import os.path
22
+ import re
23
+ import sys
24
+ import time
25
+ import random
26
+ from dataclasses import dataclass
27
+ from enum import Enum
28
+ from io import StringIO
29
+ from typing import (
30
+ Optional,
31
+ List,
32
+ Dict,
33
+ Any,
34
+ Union,
35
+ Callable,
36
+ Tuple,
37
+ )
38
+
39
+ import pandas as pd
40
+ import requests
41
+ from json.decoder import JSONDecodeError
42
+ from eth_typing import ChecksumAddress
43
+ from eth_utils import to_checksum_address
44
+ from requests.adapters import HTTPAdapter
45
+ from requests.exceptions import (
46
+ ReadTimeout as RequestsReadTimeoutError,
47
+ HTTPError as RequestsHTTPError,
48
+ )
49
+ from tqdm import tqdm
50
+ from urllib3 import Retry
51
+ from urllib3.exceptions import (
52
+ ReadTimeoutError as Urllib3ReadTimeoutError,
53
+ HTTPError as Urllib3HTTPError,
54
+ )
55
+ from web3 import Web3, HTTPProvider
56
+ from web3.exceptions import MismatchedABI
57
+ from web3.types import BlockParams
58
+ from concurrent.futures import ThreadPoolExecutor, as_completed
59
+ from pathlib import Path
60
+
61
+
62
+ CONTRACTS_PATH = "contracts"
63
+ MECH_TO_INFO = {
64
+ # this block number is when the creator had its first tx ever, and after this mech's creation
65
+ "0xff82123dfb52ab75c417195c5fdb87630145ae81": ("old_mech_abi.json", 28911547),
66
+ # this block number is when this mech was created
67
+ "0x77af31de935740567cf4ff1986d04b2c964a786a": ("new_mech_abi.json", 30776879),
68
+ }
69
+ # optionally set the latest block to stop searching for the delivered events
70
+ LATEST_BLOCK: Optional[int] = None
71
+ LATEST_BLOCK_NAME: BlockParams = "latest"
72
+ BLOCK_DATA_NUMBER = "number"
73
+ BLOCKS_CHUNK_SIZE = 10_000
74
+ REDUCE_FACTOR = 0.25
75
+ EVENT_ARGUMENTS = "args"
76
+ DATA = "data"
77
+ REQUEST_ID = "requestId"
78
+ REQUEST_ID_FIELD = "request_id"
79
+ REQUEST_SENDER = "sender"
80
+ PROMPT_FIELD = "prompt"
81
+ BLOCK_FIELD = "block"
82
+ CID_PREFIX = "f01701220"
83
+ HTTP = "http://"
84
+ HTTPS = HTTP[:4] + "s" + HTTP[4:]
85
+ IPFS_ADDRESS = f"{HTTPS}gateway.autonolas.tech/ipfs/"
86
+ IPFS_LINKS_SERIES_NAME = "ipfs_links"
87
+ BACKOFF_FACTOR = 1
88
+ STATUS_FORCELIST = [404, 500, 502, 503, 504]
89
+ DEFAULT_FILENAME = "tools.csv"
90
+ RE_RPC_FILTER_ERROR = r"Filter with id: '\d+' does not exist."
91
+ ABI_ERROR = "The event signature did not match the provided ABI"
92
+ SLEEP = 0.5
93
+ HTTP_TIMEOUT = 10
94
+ N_IPFS_RETRIES = 1
95
+ N_RPC_RETRIES = 100
96
+ RPC_POLL_INTERVAL = 0.05
97
+ IPFS_POLL_INTERVAL = 0.05
98
+ FORMAT_UPDATE_BLOCK_NUMBER = 30411638
99
+ IRRELEVANT_TOOLS = [
100
+ "openai-text-davinci-002",
101
+ "openai-text-davinci-003",
102
+ "openai-gpt-3.5-turbo",
103
+ "openai-gpt-4",
104
+ "stabilityai-stable-diffusion-v1-5",
105
+ "stabilityai-stable-diffusion-xl-beta-v2-2-2",
106
+ "stabilityai-stable-diffusion-512-v2-1",
107
+ "stabilityai-stable-diffusion-768-v2-1",
108
+ "deepmind-optimization-strong",
109
+ "deepmind-optimization",
110
+ ]
111
+ # this is how frequently we will keep a snapshot of the progress so far in terms of blocks' batches
112
+ # for example, the value 1 means that for every `BLOCKS_CHUNK_SIZE` blocks that we search, we also store the snapshot
113
+ SNAPSHOT_RATE = 10
114
+ NUM_WORKERS = 10
115
+ GET_CONTENTS_BATCH_SIZE = 1000
116
+ SCRIPTS_DIR = Path(__file__).parent
117
+ ROOT_DIR = SCRIPTS_DIR.parent
118
+ DATA_DIR = ROOT_DIR / "data"
119
+
120
+ class MechEventName(Enum):
121
+ """The mech's event names."""
122
+
123
+ REQUEST = "Request"
124
+ DELIVER = "Deliver"
125
+
126
+
127
+ @dataclass
128
+ class MechEvent:
129
+ """A mech's on-chain event representation."""
130
+
131
+ for_block: int
132
+ requestId: int
133
+ data: bytes
134
+ sender: str
135
+
136
+ def _ipfs_link(self) -> Optional[str]:
137
+ """Get the ipfs link for the data."""
138
+ return f"{IPFS_ADDRESS}{CID_PREFIX}{self.data.hex()}"
139
+
140
+ @property
141
+ def ipfs_request_link(self) -> Optional[str]:
142
+ """Get the IPFS link for the request."""
143
+ return f"{self._ipfs_link()}/metadata.json"
144
+
145
+ @property
146
+ def ipfs_deliver_link(self) -> Optional[str]:
147
+ """Get the IPFS link for the deliver."""
148
+ if self.requestId is None:
149
+ return None
150
+ return f"{self._ipfs_link()}/{self.requestId}"
151
+
152
+ def ipfs_link(self, event_name: MechEventName) -> Optional[str]:
153
+ """Get the ipfs link based on the event."""
154
+ if event_name == MechEventName.REQUEST:
155
+ if self.for_block < FORMAT_UPDATE_BLOCK_NUMBER:
156
+ return self._ipfs_link()
157
+ return self.ipfs_request_link
158
+ if event_name == MechEventName.DELIVER:
159
+ return self.ipfs_deliver_link
160
+ return None
161
+
162
+
163
+ @dataclass(init=False)
164
+ class MechRequest:
165
+ """A structure for a request to a mech."""
166
+
167
+ request_id: Optional[int]
168
+ request_block: Optional[int]
169
+ prompt_request: Optional[str]
170
+ tool: Optional[str]
171
+ nonce: Optional[str]
172
+ trader_address: Optional[str]
173
+
174
+ def __init__(self, **kwargs: Any) -> None:
175
+ """Initialize the request ignoring extra keys."""
176
+ self.request_id = int(kwargs.pop(REQUEST_ID, 0))
177
+ self.request_block = int(kwargs.pop(BLOCK_FIELD, 0))
178
+ self.prompt_request = kwargs.pop(PROMPT_FIELD, None)
179
+ self.tool = kwargs.pop("tool", None)
180
+ self.nonce = kwargs.pop("nonce", None)
181
+ self.trader_address = kwargs.pop("sender", None)
182
+
183
+
184
+ @dataclass(init=False)
185
+ class PredictionResponse:
186
+ """A response of a prediction."""
187
+
188
+ p_yes: float
189
+ p_no: float
190
+ confidence: float
191
+ info_utility: float
192
+ vote: Optional[str]
193
+ win_probability: Optional[float]
194
+
195
+ def __init__(self, **kwargs: Any) -> None:
196
+ """Initialize the mech's prediction ignoring extra keys."""
197
+ try:
198
+ self.p_yes = float(kwargs.pop("p_yes"))
199
+ self.p_no = float(kwargs.pop("p_no"))
200
+ self.confidence = float(kwargs.pop("confidence"))
201
+ self.info_utility = float(kwargs.pop("info_utility"))
202
+ self.win_probability = 0
203
+
204
+ # Validate probabilities
205
+ probabilities = {
206
+ "p_yes": self.p_yes,
207
+ "p_no": self.p_no,
208
+ "confidence": self.confidence,
209
+ "info_utility": self.info_utility,
210
+ }
211
+
212
+ for name, prob in probabilities.items():
213
+ if not 0 <= prob <= 1:
214
+ raise ValueError(f"{name} probability is out of bounds: {prob}")
215
+
216
+ if self.p_yes + self.p_no != 1:
217
+ raise ValueError(
218
+ f"Sum of p_yes and p_no is not 1: {self.p_yes} + {self.p_no}"
219
+ )
220
+
221
+ self.vote = self.get_vote()
222
+ self.win_probability = self.get_win_probability()
223
+
224
+ except KeyError as e:
225
+ raise KeyError(f"Missing key in PredictionResponse: {e}")
226
+ except ValueError as e:
227
+ raise ValueError(f"Invalid value in PredictionResponse: {e}")
228
+
229
+ def get_vote(self) -> Optional[str]:
230
+ """Return the vote."""
231
+ if self.p_no == self.p_yes:
232
+ return None
233
+ if self.p_no > self.p_yes:
234
+ return "No"
235
+ return "Yes"
236
+
237
+ def get_win_probability(self) -> Optional[float]:
238
+ """Return the probability estimation for winning with vote."""
239
+ return max(self.p_no, self.p_yes)
240
+
241
+
242
+ @dataclass(init=False)
243
+ class MechResponse:
244
+ """A structure for the response of a mech."""
245
+
246
+ request_id: int
247
+ deliver_block: Optional[int]
248
+ result: Optional[PredictionResponse]
249
+ error: Optional[str]
250
+ error_message: Optional[str]
251
+ prompt_response: Optional[str]
252
+ mech_address: Optional[str]
253
+
254
+ def __init__(self, **kwargs: Any) -> None:
255
+ """Initialize the mech's response ignoring extra keys."""
256
+ self.error = kwargs.get("error", None)
257
+ self.request_id = int(kwargs.get(REQUEST_ID, 0))
258
+ self.deliver_block = int(kwargs.get(BLOCK_FIELD, 0))
259
+ self.result = kwargs.get("result", None)
260
+ self.prompt_response = kwargs.get(PROMPT_FIELD, None)
261
+ self.mech_address = kwargs.get("sender", None)
262
+
263
+ if self.result != "Invalid response":
264
+ self.error_message = kwargs.get("error_message", None)
265
+
266
+ try:
267
+ if isinstance(self.result, str):
268
+ kwargs = json.loads(self.result)
269
+ self.result = PredictionResponse(**kwargs)
270
+ self.error = str(False)
271
+
272
+ except JSONDecodeError:
273
+ self.error_message = "Response parsing error"
274
+ self.error = str(True)
275
+
276
+ except Exception as e:
277
+ self.error_message = str(e)
278
+ self.error = str(True)
279
+
280
+ else:
281
+ self.error_message = "Invalid response from tool"
282
+ self.error = str(True)
283
+ self.result = None
284
+
285
+
286
+ EVENT_TO_MECH_STRUCT = {
287
+ MechEventName.REQUEST: MechRequest,
288
+ MechEventName.DELIVER: MechResponse,
289
+ }
290
+
291
+
292
+ def parse_args() -> str:
293
+ """Parse the arguments and return the RPC."""
294
+ if len(sys.argv) != 2:
295
+ raise ValueError("Expected the RPC as a positional argument.")
296
+ return sys.argv[1]
297
+
298
+
299
+ def read_abi(abi_path: str) -> str:
300
+ """Read and return the contract's ABI at the given path."""
301
+ with open(abi_path) as abi_file:
302
+ return abi_file.read()
303
+
304
+
305
+ def reduce_window(contract_instance, event, from_block, batch_size, latest_block):
306
+ """Dynamically reduce the batch size window."""
307
+ keep_fraction = 1 - REDUCE_FACTOR
308
+ events_filter = contract_instance.events[event].build_filter()
309
+ events_filter.fromBlock = from_block
310
+ batch_size = int(batch_size * keep_fraction)
311
+ events_filter.toBlock = min(from_block + batch_size, latest_block)
312
+ tqdm.write(f"RPC timed out! Resizing batch size to {batch_size}.")
313
+ time.sleep(SLEEP)
314
+ return events_filter, batch_size
315
+
316
+
317
+ def get_events(
318
+ w3: Web3,
319
+ event: str,
320
+ mech_address: ChecksumAddress,
321
+ mech_abi_path: str,
322
+ earliest_block: int,
323
+ latest_block: int,
324
+ ) -> List:
325
+ """Get the delivered events."""
326
+ abi = read_abi(mech_abi_path)
327
+ contract_instance = w3.eth.contract(address=mech_address, abi=abi)
328
+
329
+ events = []
330
+ from_block = earliest_block
331
+ batch_size = BLOCKS_CHUNK_SIZE
332
+ with tqdm(
333
+ total=latest_block - from_block,
334
+ desc=f"Searching {event} events for mech {mech_address}",
335
+ unit="blocks",
336
+ ) as pbar:
337
+ while from_block < latest_block:
338
+ events_filter = contract_instance.events[event].build_filter()
339
+ events_filter.fromBlock = from_block
340
+ events_filter.toBlock = min(from_block + batch_size, latest_block)
341
+
342
+ entries = None
343
+ retries = 0
344
+ while entries is None:
345
+ try:
346
+ entries = events_filter.deploy(w3).get_all_entries()
347
+ retries = 0
348
+ except (RequestsHTTPError, Urllib3HTTPError) as exc:
349
+ if "Request Entity Too Large" in exc.args[0]:
350
+ events_filter, batch_size = reduce_window(
351
+ contract_instance,
352
+ event,
353
+ from_block,
354
+ batch_size,
355
+ latest_block,
356
+ )
357
+ except (Urllib3ReadTimeoutError, RequestsReadTimeoutError):
358
+ events_filter, batch_size = reduce_window(
359
+ contract_instance, event, from_block, batch_size, latest_block
360
+ )
361
+ except Exception as exc:
362
+ retries += 1
363
+ if retries == N_RPC_RETRIES:
364
+ tqdm.write(
365
+ f"Skipping events for blocks {events_filter.fromBlock} - {events_filter.toBlock} "
366
+ f"as the retries have been exceeded."
367
+ )
368
+ break
369
+ sleep = SLEEP * retries
370
+ if (
371
+ (
372
+ isinstance(exc, ValueError)
373
+ and re.match(
374
+ RE_RPC_FILTER_ERROR, exc.args[0].get("message", "")
375
+ )
376
+ is None
377
+ )
378
+ and not isinstance(exc, ValueError)
379
+ and not isinstance(exc, MismatchedABI)
380
+ ):
381
+ tqdm.write(
382
+ f"An error was raised from the RPC: {exc}\n Retrying in {sleep} seconds."
383
+ )
384
+ time.sleep(sleep)
385
+
386
+ from_block += batch_size
387
+ pbar.update(batch_size)
388
+
389
+ if entries is None:
390
+ continue
391
+
392
+ chunk = list(entries)
393
+ events.extend(chunk)
394
+ time.sleep(RPC_POLL_INTERVAL)
395
+
396
+ return events
397
+
398
+
399
+ def parse_events(raw_events: List) -> List[MechEvent]:
400
+ """Parse all the specified MechEvents."""
401
+ parsed_events = []
402
+ for event in raw_events:
403
+ for_block = event.get("blockNumber", 0)
404
+ args = event.get(EVENT_ARGUMENTS, {})
405
+ request_id = args.get(REQUEST_ID, 0)
406
+ data = args.get(DATA, b"")
407
+ sender = args.get(REQUEST_SENDER, "")
408
+ parsed_event = MechEvent(for_block, request_id, data, sender)
409
+ parsed_events.append(parsed_event)
410
+
411
+ return parsed_events
412
+
413
+
414
+ def create_session() -> requests.Session:
415
+ """Create a session with a retry strategy."""
416
+ session = requests.Session()
417
+ retry_strategy = Retry(
418
+ total=N_IPFS_RETRIES + 1,
419
+ backoff_factor=BACKOFF_FACTOR,
420
+ status_forcelist=STATUS_FORCELIST,
421
+ )
422
+ adapter = HTTPAdapter(max_retries=retry_strategy)
423
+ for protocol in (HTTP, HTTPS):
424
+ session.mount(protocol, adapter)
425
+
426
+ return session
427
+
428
+
429
+ def request(
430
+ session: requests.Session, url: str, timeout: int = HTTP_TIMEOUT
431
+ ) -> Optional[requests.Response]:
432
+ """Perform a request with a session."""
433
+ try:
434
+ response = session.get(url, timeout=timeout)
435
+ response.raise_for_status()
436
+ except requests.exceptions.HTTPError as exc:
437
+ tqdm.write(f"HTTP error occurred: {exc}.")
438
+ except Exception as exc:
439
+ tqdm.write(f"Unexpected error occurred: {exc}.")
440
+ else:
441
+ return response
442
+ return None
443
+
444
+
445
+ def limit_text(text: str, limit: int = 200) -> str:
446
+ """Limit the given text"""
447
+ if len(text) > limit:
448
+ return f"{text[:limit]}..."
449
+ return text
450
+
451
+
452
+ def parse_ipfs_response(
453
+ session: requests.Session,
454
+ url: str,
455
+ event: MechEvent,
456
+ event_name: MechEventName,
457
+ response: requests.Response,
458
+ ) -> Optional[Dict[str, str]]:
459
+ """Parse a response from IPFS."""
460
+ try:
461
+ return response.json()
462
+ except requests.exceptions.JSONDecodeError:
463
+ # this is a workaround because the `metadata.json` file was introduced and removed multiple times
464
+ if event_name == MechEventName.REQUEST and url != event.ipfs_request_link:
465
+ url = event.ipfs_request_link
466
+ response = request(session, url)
467
+ if response is None:
468
+ tqdm.write(f"Skipping {event=}.")
469
+ return None
470
+
471
+ try:
472
+ return response.json()
473
+ except requests.exceptions.JSONDecodeError:
474
+ pass
475
+
476
+ tqdm.write(f"Failed to parse response into json for {url=}.")
477
+ return None
478
+
479
+
480
+ def parse_ipfs_tools_content(
481
+ raw_content: Dict[str, str], event: MechEvent, event_name: MechEventName
482
+ ) -> Optional[Union[MechRequest, MechResponse]]:
483
+ """Parse tools content from IPFS."""
484
+ struct = EVENT_TO_MECH_STRUCT.get(event_name)
485
+ raw_content[REQUEST_ID] = str(event.requestId)
486
+ raw_content[BLOCK_FIELD] = str(event.for_block)
487
+ raw_content["sender"] = str(event.sender)
488
+
489
+ try:
490
+ mech_response = struct(**raw_content)
491
+ except (ValueError, TypeError, KeyError):
492
+ tqdm.write(f"Could not parse {limit_text(str(raw_content))}")
493
+ return None
494
+
495
+ if event_name == MechEventName.REQUEST and mech_response.tool in IRRELEVANT_TOOLS:
496
+ return None
497
+
498
+ return mech_response
499
+
500
+
501
+ def get_contents(
502
+ session: requests.Session, events: List[MechEvent], event_name: MechEventName
503
+ ) -> pd.DataFrame:
504
+ """Fetch the tools' responses."""
505
+ contents = []
506
+ for event in tqdm(events, desc=f"Tools' results", unit="results"):
507
+ url = event.ipfs_link(event_name)
508
+ response = request(session, url)
509
+ if response is None:
510
+ tqdm.write(f"Skipping {event=}.")
511
+ continue
512
+
513
+ raw_content = parse_ipfs_response(session, url, event, event_name, response)
514
+ if raw_content is None:
515
+ continue
516
+
517
+ mech_response = parse_ipfs_tools_content(raw_content, event, event_name)
518
+ if mech_response is None:
519
+ continue
520
+ contents.append(mech_response)
521
+ time.sleep(IPFS_POLL_INTERVAL)
522
+
523
+ return pd.DataFrame(contents)
524
+
525
+
526
+ def check_for_dicts(df: pd.DataFrame) -> List[str]:
527
+ """Check for columns that contain dictionaries."""
528
+ dict_columns = []
529
+ for column in df.columns:
530
+ if df[column].apply(lambda x: isinstance(x, dict)).any():
531
+ dict_columns.append(column)
532
+ return dict_columns
533
+
534
+
535
+ def drop_dict_rows(df: pd.DataFrame,
536
+ dict_columns: List[str]) -> pd.DataFrame:
537
+ """Drop rows that contain dictionaries."""
538
+ for column in dict_columns:
539
+ df = df[~df[column].apply(lambda x: isinstance(x, dict))]
540
+ return df
541
+
542
+
543
+ def clean(df: pd.DataFrame) -> pd.DataFrame:
544
+ """Clean the dataframe."""
545
+ dict_columns = check_for_dicts(df)
546
+ df = drop_dict_rows(df, dict_columns)
547
+ cleaned = df.drop_duplicates()
548
+ cleaned[REQUEST_ID_FIELD] = cleaned[REQUEST_ID_FIELD].astype("str")
549
+ return cleaned
550
+
551
+
552
+ def transform_request(contents: pd.DataFrame) -> pd.DataFrame:
553
+ """Transform the requests dataframe."""
554
+ return clean(contents)
555
+
556
+
557
+ def transform_deliver(contents: pd.DataFrame, full_contents=False) -> pd.DataFrame:
558
+ """Transform the delivers dataframe."""
559
+ unpacked_result = pd.json_normalize(contents.result)
560
+ # drop result column if it exists
561
+ if "result" in unpacked_result.columns:
562
+ unpacked_result.drop(columns=["result"], inplace=True)
563
+
564
+ # drop prompt column if it exists
565
+ if "prompt" in unpacked_result.columns:
566
+ unpacked_result.drop(columns=["prompt"], inplace=True)
567
+
568
+ # rename prompt column to prompt_deliver
569
+ unpacked_result.rename(columns={"prompt": "prompt_deliver"}, inplace=True)
570
+ contents = pd.concat((contents, unpacked_result), axis=1)
571
+
572
+ if "result" in contents.columns:
573
+ contents.drop(columns=["result"], inplace=True)
574
+
575
+ if "prompt" in contents.columns:
576
+ contents.drop(columns=["prompt"], inplace=True)
577
+
578
+ return clean(contents)
579
+
580
+
581
+ def gen_event_filename(event_name: MechEventName) -> str:
582
+ """Generate the filename of an event."""
583
+ return f"{event_name.value.lower()}s.csv"
584
+
585
+
586
+ def read_n_last_lines(filename: str, n: int = 1) -> str:
587
+ """Return the `n` last lines' content of a file."""
588
+ num_newlines = 0
589
+ with open(filename, "rb") as f:
590
+ try:
591
+ f.seek(-2, os.SEEK_END)
592
+ while num_newlines < n:
593
+ f.seek(-2, os.SEEK_CUR)
594
+ if f.read(1) == b"\n":
595
+ num_newlines += 1
596
+ except OSError:
597
+ f.seek(0)
598
+ last_line = f.readline().decode()
599
+ return last_line
600
+
601
+
602
+ def get_earliest_block(event_name: MechEventName) -> int:
603
+ """Get the earliest block number to use when filtering for events."""
604
+ filename = gen_event_filename(event_name)
605
+ if not os.path.exists(DATA_DIR / filename):
606
+ return 0
607
+
608
+ cols = pd.read_csv(DATA_DIR / filename, index_col=0, nrows=0).columns.tolist()
609
+ last_line_buff = StringIO(read_n_last_lines(DATA_DIR/filename))
610
+ last_line_series = pd.read_csv(last_line_buff, names=cols)
611
+ block_field = f"{event_name.value.lower()}_{BLOCK_FIELD}"
612
+ return int(last_line_series[block_field].values[0])
613
+
614
+
615
+ def store_progress(
616
+ filename: str,
617
+ event_to_contents: Dict[MechEventName, pd.DataFrame],
618
+ tools: pd.DataFrame,
619
+ ) -> None:
620
+ """Store the given progress."""
621
+ if filename:
622
+ for event_name, content in event_to_contents.items():
623
+ event_filename = gen_event_filename(event_name)
624
+
625
+ if "result" in content.columns:
626
+ content.drop(columns=["result"], inplace=True)
627
+
628
+ content.to_csv(DATA_DIR / event_filename, index=False, escapechar="\\")
629
+
630
+ # drop result and error columns
631
+ if "result" in tools.columns:
632
+ tools.drop(columns=["result"], inplace=True)
633
+
634
+ tools.to_csv(DATA_DIR / filename, index=False, escapechar="\\")
635
+
636
+
637
+ def etl(
638
+ rpcs: List[str], filename: Optional[str] = None, full_contents: bool = True
639
+ ) -> pd.DataFrame:
640
+ """Fetch from on-chain events, process, store and return the tools' results on all the questions as a Dataframe."""
641
+ w3s = [Web3(HTTPProvider(r)) for r in rpcs]
642
+ session = create_session()
643
+ event_to_transformer = {
644
+ MechEventName.REQUEST: transform_request,
645
+ MechEventName.DELIVER: transform_deliver,
646
+ }
647
+ mech_to_info = {
648
+ to_checksum_address(address): (
649
+ os.path.join(CONTRACTS_PATH, filename),
650
+ earliest_block,
651
+ )
652
+ for address, (filename, earliest_block) in MECH_TO_INFO.items()
653
+ }
654
+ event_to_contents = {}
655
+
656
+ latest_block = LATEST_BLOCK
657
+ if latest_block is None:
658
+ latest_block = w3s[0].eth.get_block(LATEST_BLOCK_NAME)[BLOCK_DATA_NUMBER]
659
+
660
+ next_start_block = None
661
+
662
+ # Loop through events in event_to_transformer
663
+ for event_name, transformer in event_to_transformer.items():
664
+ if next_start_block is None:
665
+ next_start_block_base = get_earliest_block(event_name)
666
+
667
+ # Loop through mech addresses in mech_to_info
668
+ events = []
669
+ for address, (abi, earliest_block) in mech_to_info.items():
670
+ if next_start_block_base == 0:
671
+ next_start_block = earliest_block
672
+ else:
673
+ next_start_block = next_start_block_base
674
+
675
+ print(
676
+ f"Searching for {event_name.value} events for mech {address} from block {next_start_block} to {latest_block}."
677
+ )
678
+
679
+ # parallelize the fetching of events
680
+ with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
681
+ futures = []
682
+ for i in range(
683
+ next_start_block, latest_block, BLOCKS_CHUNK_SIZE * SNAPSHOT_RATE
684
+ ):
685
+ futures.append(
686
+ executor.submit(
687
+ get_events,
688
+ random.choice(w3s),
689
+ event_name.value,
690
+ address,
691
+ abi,
692
+ i,
693
+ min(i + BLOCKS_CHUNK_SIZE * SNAPSHOT_RATE, latest_block),
694
+ )
695
+ )
696
+
697
+ for future in tqdm(
698
+ as_completed(futures),
699
+ total=len(futures),
700
+ desc=f"Fetching {event_name.value} Events",
701
+ ):
702
+ current_mech_events = future.result()
703
+ events.extend(current_mech_events)
704
+
705
+ parsed = parse_events(events)
706
+
707
+ contents = []
708
+ with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
709
+ futures = []
710
+ for i in range(0, len(parsed), GET_CONTENTS_BATCH_SIZE):
711
+ futures.append(
712
+ executor.submit(
713
+ get_contents,
714
+ session,
715
+ parsed[i : i + GET_CONTENTS_BATCH_SIZE],
716
+ event_name,
717
+ )
718
+ )
719
+
720
+ for future in tqdm(
721
+ as_completed(futures),
722
+ total=len(futures),
723
+ desc=f"Fetching {event_name.value} Contents",
724
+ ):
725
+ current_mech_contents = future.result()
726
+ contents.append(current_mech_contents)
727
+
728
+ contents = pd.concat(contents, ignore_index=True)
729
+
730
+ # use the full_contents flag passed to etl() instead of overriding it here
731
+ if event_name == MechEventName.REQUEST:
732
+ transformed = transformer(contents)
733
+ elif event_name == MechEventName.DELIVER:
734
+ transformed = transformer(contents, full_contents=full_contents)
735
+
736
+ events_filename = gen_event_filename(event_name)
737
+
738
+ if os.path.exists(DATA_DIR / events_filename):
739
+ old = pd.read_csv(DATA_DIR / events_filename)
740
+
741
+ # Reset index to avoid index conflicts
742
+ old.reset_index(drop=True, inplace=True)
743
+ transformed.reset_index(drop=True, inplace=True)
744
+
745
+ # Concatenate DataFrames
746
+ transformed = pd.concat([old, transformed], ignore_index=True)
747
+
748
+ # Drop duplicates if necessary
749
+ transformed.drop_duplicates(subset=REQUEST_ID_FIELD, inplace=True)
750
+
751
+ event_to_contents[event_name] = transformed.copy()
752
+
753
+ # Store progress
754
+ tools = pd.merge(*event_to_contents.values(), on=REQUEST_ID_FIELD)
755
+ store_progress(filename, event_to_contents, tools)
756
+
757
+ return tools
758
+
759
+
760
+ if __name__ == "__main__":
761
+ RPCs = [
762
+ "https://lb.nodies.app/v1/406d8dcc043f4cb3959ed7d6673d311a",
763
+ ]
764
+
765
+ tools = etl(rpcs=RPCs, filename=DEFAULT_FILENAME, full_contents=True)
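A minimal sketch, assuming the run above has completed, of how the stored outputs could be inspected. The file paths are assumptions for illustration; the real names are produced by gen_event_filename() and the DATA_DIR / DEFAULT_FILENAME constants used in the script above.

import pandas as pd

# assumed locations of the CSVs written by store_progress() above
tools = pd.read_csv("data/tools.csv")
requests = pd.read_csv("data/requests.csv")
delivers = pd.read_csv("data/delivers.csv")
print(f"{len(tools)} merged rows, {len(requests)} requests, {len(delivers)} delivers")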
tabs/about.py ADDED
@@ -0,0 +1,6 @@
1
+
2
+ about_olas_predict = """\
3
+ Olas is a network of autonomous services that can run complex logic in a decentralized manner, interacting with on- and off-chain data autonomously and continuously. For other use cases, check out [olas.network](https://olas.network/).
4
+ Since 'Olas' means 'waves' in Spanish, it is sometimes referred to as the 'ocean of services' 🌊.
5
+ The project is co-created by [Valory](https://www.valory.xyz/). Valory aspires to enable communities, organizations and countries to co-own AI systems, beginning with decentralized autonomous agents.
6
+ """
tabs/error.py ADDED
@@ -0,0 +1,90 @@
1
+ import pandas as pd
2
+ import gradio as gr
3
+ from typing import List
4
+
5
+
6
+ HEIGHT=600
7
+ WIDTH=1000
8
+
9
+ def set_error(row: pd.Series) -> bool:
10
+ """Sets the error for the given row."""
11
+ if row.error not in [True, False]:
12
+ if not row.prompt_response:
13
+ return True
14
+ return False
15
+ return row.error
16
+
17
+ def get_error_data(tools_df: pd.DataFrame, inc_tools: List[str]) -> pd.DataFrame:
18
+ """Gets the error data for the given tools and calculates the error percentage."""
19
+ tools_inc = tools_df[tools_df['tool'].isin(inc_tools)].copy()
20
+ tools_inc['error'] = tools_inc.apply(set_error, axis=1)
21
+ error = tools_inc.groupby(['tool', 'request_month_year_week', 'error']).size().unstack().fillna(0).reset_index()
22
+ error['error_perc'] = (error[True] / (error[False] + error[True])) * 100
23
+ error['total_requests'] = error[False] + error[True]
24
+ return error
25
+
26
+ def get_error_data_overall(error_df: pd.DataFrame) -> pd.DataFrame:
27
+ """Gets the error data for the given tools and calculates the error percentage."""
28
+ error_total = error_df.groupby('request_month_year_week').agg({'total_requests': 'sum', False: 'sum', True: 'sum'}).reset_index()
29
+ error_total['error_perc'] = (error_total[True] / error_total['total_requests']) * 100
30
+ error_total.columns = error_total.columns.astype(str)
31
+ error_total['error_perc'] = error_total['error_perc'].apply(lambda x: round(x, 4))
32
+ return error_total
33
+
34
+ def plot_error_data(error_all_df: pd.DataFrame) -> gr.BarPlot:
35
+ """Plots the error data for the given tools and calculates the error percentage."""
36
+ return gr.BarPlot(
37
+ value=error_all_df,
38
+ x="request_month_year_week",
39
+ y="error_perc",
40
+ title="Error Percentage",
41
+ x_title="Week",
42
+ y_title="Error Percentage",
43
+ show_label=True,
44
+ interactive=True,
45
+ show_actions_button=True,
46
+ tooltip=["request_month_year_week", "error_perc"],
47
+ height=HEIGHT,
48
+ width=WIDTH
49
+ )
50
+
51
+ def plot_tool_error_data(error_df: pd.DataFrame, tool: str) -> gr.BarPlot:
52
+ """Plots the error data for the given tool."""
53
+ error_tool = error_df[error_df['tool'] == tool].copy()
54
+ error_tool.columns = error_tool.columns.astype(str)
55
+ error_tool['error_perc'] = error_tool['error_perc'].apply(lambda x: round(x, 4))
56
+
57
+ return gr.BarPlot(
58
+ title="Error Percentage",
59
+ x_title="Week",
60
+ y_title="Error Percentage",
61
+ show_label=True,
62
+ interactive=True,
63
+ show_actions_button=True,
64
+ tooltip=["request_month_year_week", "error_perc"],
65
+ value=error_tool,
66
+ x="request_month_year_week",
67
+ y="error_perc",
68
+ height=HEIGHT,
69
+ width=WIDTH
70
+ )
71
+
72
+ def plot_week_error_data(error_df: pd.DataFrame, week: str) -> gr.BarPlot:
73
+ """Plots the error data for the given week."""
74
+ error_week = error_df[error_df['request_month_year_week'] == week].copy()
75
+ error_week.columns = error_week.columns.astype(str)
76
+ error_week['error_perc'] = error_week['error_perc'].apply(lambda x: round(x, 4))
77
+ return gr.BarPlot(
78
+ value=error_week,
79
+ x="tool",
80
+ y="error_perc",
81
+ title="Error Percentage",
82
+ x_title="Tool",
83
+ y_title="Error Percentage",
84
+ show_label=True,
85
+ interactive=True,
86
+ show_actions_button=True,
87
+ tooltip=["tool", "error_perc"],
88
+ height=HEIGHT,
89
+ width=WIDTH
90
+ )
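A minimal usage sketch for the functions above, assuming tabs/ is importable as a package and that a tools CSV with the columns referenced here ('tool', 'request_month_year_week', 'error', 'prompt_response') is available locally:

import pandas as pd
from tabs.error import get_error_data, get_error_data_overall, plot_error_data

tools_df = pd.read_csv("data/tools.csv")            # assumed path to the ETL output
inc_tools = tools_df["tool"].unique().tolist()      # include every tool for this example
error_df = get_error_data(tools_df, inc_tools)
error_overall_df = get_error_data_overall(error_df)
overall_plot = plot_error_data(error_overall_df)    # gr.BarPlot, intended to sit inside a gr.Blocks layout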
tabs/tool_win.py ADDED
@@ -0,0 +1,83 @@
1
+ import pandas as pd
2
+ import gradio as gr
3
+ from typing import List
4
+
5
+
6
+ HEIGHT=600
7
+ WIDTH=1000
8
+
9
+
10
+ def set_error(row: pd.Series) -> bool:
11
+ """Sets the error for the given row."""
12
+ if row.error not in [True, False]:
13
+ if not row.prompt_response:
14
+ return True
15
+ return False
16
+ return row.error
17
+
18
+
19
+ def get_tool_winning_rate(tools_df: pd.DataFrame, inc_tools: List[str]) -> pd.DataFrame:
20
+ """Gets the tool winning rate data for the given tools and calculates the winning percentage."""
21
+ tools_inc = tools_df[tools_df['tool'].isin(inc_tools)].copy()
22
+ tools_inc['error'] = tools_inc.apply(set_error, axis=1)
23
+ tools_non_error = tools_inc[tools_inc['error'] != True]
24
+ tools_non_error.loc[:, 'currentAnswer'] = tools_non_error['currentAnswer'].replace({'no': 'No', 'yes': 'Yes'})
25
+ tools_non_error = tools_non_error[tools_non_error['currentAnswer'].isin(['Yes', 'No'])]
26
+ tools_non_error = tools_non_error[tools_non_error['vote'].isin(['Yes', 'No'])]
27
+ tools_non_error['win'] = (tools_non_error['currentAnswer'] == tools_non_error['vote']).astype(int)
28
+ tools_non_error.columns = tools_non_error.columns.astype(str)
29
+ wins = tools_non_error.groupby(['tool', 'request_month_year_week', 'win']).size().unstack().fillna(0)
30
+ wins['win_perc'] = (wins[1] / (wins[0] + wins[1])) * 100
31
+ wins.reset_index(inplace=True)
32
+ wins['total_request'] = wins[0] + wins[1]
33
+ wins.columns = wins.columns.astype(str)
34
+ # Convert request_month_year_week to string and explicitly set type for Altair
35
+ wins['request_month_year_week'] = wins['request_month_year_week'].astype(str)
36
+ return wins
37
+
38
+
39
+ def get_overall_winning_rate(wins_df: pd.DataFrame) -> pd.DataFrame:
40
+ """Gets the overall winning rate data for the given tools and calculates the winning percentage."""
41
+ overall_wins = wins_df.groupby('request_month_year_week').agg({
42
+ "0": 'sum',
43
+ "1": 'sum',
44
+ "win_perc": 'mean',
45
+ "total_request": 'sum'
46
+ }).rename(columns={"0": 'losses', "1": 'wins'}).reset_index()
47
+ return overall_wins
48
+
49
+
50
+ def plot_tool_winnings_overall(wins_df: pd.DataFrame, winning_selector: str = "win_perc") -> gr.BarPlot:
51
+ """Plots the overall winning rate data for the given tools and calculates the winning percentage."""
52
+ return gr.BarPlot(
53
+ title="Winning Rate",
54
+ x_title="Date",
55
+ y_title=winning_selector,
56
+ show_label=True,
57
+ interactive=True,
58
+ show_actions_button=True,
59
+ tooltip=["request_month_year_week", winning_selector],
60
+ value=wins_df,
61
+ x="request_month_year_week",
62
+ y=winning_selector,
63
+ height=HEIGHT,
64
+ width=WIDTH
65
+ )
66
+
67
+
68
+ def plot_tool_winnings_by_tool(wins_df: pd.DataFrame, tool: str) -> gr.BarPlot:
69
+ """Plots the winning rate data for the given tool."""
70
+ return gr.BarPlot(
71
+ title="Winning Rate",
72
+ x_title="Week",
73
+ y_title="Winning Rate",
74
+ x="request_month_year_week",
75
+ y="win_perc",
76
+ value=wins_df[wins_df['tool'] == tool],
77
+ show_label=True,
78
+ interactive=True,
79
+ show_actions_button=True,
80
+ tooltip=["request_month_year_week", "win_perc"],
81
+ height=HEIGHT,
82
+ width=WIDTH
83
+ )
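A similar usage sketch for the winning-rate helpers above, under the same assumptions about package layout and the tools CSV:

import pandas as pd
from tabs.tool_win import (
    get_tool_winning_rate,
    get_overall_winning_rate,
    plot_tool_winnings_overall,
)

tools_df = pd.read_csv("data/tools.csv")            # assumed path
inc_tools = tools_df["tool"].unique().tolist()
wins_df = get_tool_winning_rate(tools_df, inc_tools)
overall_df = get_overall_winning_rate(wins_df)
overall_plot = plot_tool_winnings_overall(overall_df, winning_selector="win_perc")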
tabs/trades.py ADDED
@@ -0,0 +1,198 @@
1
+ import gradio as gr
2
+ import pandas as pd
3
+
4
+
5
+ HEIGHT=600
6
+ WIDTH=1000
7
+
8
+ def prepare_trades(trades_df: pd.DataFrame) -> pd.DataFrame:
9
+ """Prepares the trades data for analysis."""
10
+ trades_df['creation_timestamp'] = pd.to_datetime(trades_df['creation_timestamp'])
11
+ trades_df['creation_timestamp'] = trades_df['creation_timestamp'].dt.tz_convert('UTC')
12
+ trades_df['month_year'] = trades_df['creation_timestamp'].dt.to_period('M').astype(str)
13
+ trades_df['month_year_week'] = trades_df['creation_timestamp'].dt.to_period('W').astype(str)
14
+ trades_df['winning_trade'] = trades_df['winning_trade'].astype(int)
15
+ return trades_df
16
+
17
+
18
+ def get_overall_trades(trades_df: pd.DataFrame) -> pd.DataFrame:
19
+ """Gets the overall trades data for the given tools and calculates the winning percentage."""
20
+ trades_count = trades_df.groupby('month_year_week').size().reset_index()
21
+ trades_count.columns = trades_count.columns.astype(str)
22
+ trades_count.rename(columns={'0': 'trades'}, inplace=True)
23
+ return trades_count
24
+
25
+ def get_overall_winning_trades(trades_df: pd.DataFrame) -> pd.DataFrame:
26
+ """Gets the overall winning trades data for the given tools and calculates the winning percentage."""
27
+ winning_trades = trades_df.groupby(['month_year_week'])['winning_trade'].sum() / trades_df.groupby(['month_year_week'])['winning_trade'].count() * 100
28
+ # winning_trades is a Series; convert it to a DataFrame
29
+ winning_trades = winning_trades.reset_index()
30
+ winning_trades.columns = winning_trades.columns.astype(str)
31
+ winning_trades.columns = ['month_year_week', 'winning_trade']
32
+ return winning_trades
33
+
34
+ def plot_trade_details(trade_detail: str, trades_df: pd.DataFrame) -> gr.LinePlot:
35
+ """Plots the trade details for the given trade detail."""
36
+ if trade_detail == "mech calls":
37
+ # filter out data recorded before 2023-09-01
38
+ trades_filtered = trades_df[trades_df["creation_timestamp"] >"2023-09-01"]
39
+ trades_filtered = trades_filtered.groupby("month_year_week")["num_mech_calls"].quantile([0.25, 0.5, 0.75]).unstack()
40
+ trades_filtered.columns = trades_filtered.columns.astype(str)
41
+ trades_filtered.reset_index(inplace=True)
42
+ trades_filtered.columns = [
43
+ "month_year_week",
44
+ "25th_percentile",
45
+ "50th_percentile",
46
+ "75th_percentile"
47
+ ]
48
+ # reformat the data as percentile, date, value
49
+ trades_filtered = trades_filtered.melt(id_vars=["month_year_week"], var_name="percentile", value_name="mech_calls")
50
+
51
+ return gr.LinePlot(
52
+ value=trades_filtered,
53
+ x="month_year_week",
54
+ y="mech_calls",
55
+ color="percentile",
56
+ show_label=True,
57
+ interactive=True,
58
+ show_actions_button=True,
59
+ tooltip=["month_year_week", "percentile", "mech_calls"],
60
+ height=HEIGHT,
61
+ width=WIDTH
62
+ )
63
+
64
+ if trade_detail == "collateral amount":
65
+ trades_filtered = trades_df[trades_df["creation_timestamp"] >"2023-09-01"]
66
+ trades_filtered = trades_filtered.groupby("month_year_week")["collateral_amount"].quantile([0.25, 0.5, 0.75]).unstack()
67
+ trades_filtered.columns = trades_filtered.columns.astype(str)
68
+ trades_filtered.reset_index(inplace=True)
69
+ trades_filtered.columns = [
70
+ "month_year_week",
71
+ "25th_percentile",
72
+ "50th_percentile",
73
+ "75th_percentile"
74
+ ]
75
+ # reformat the data as percentile, date, value
76
+ trades_filtered = trades_filtered.melt(id_vars=["month_year_week"], var_name="percentile", value_name="collateral_amount")
77
+
78
+ return gr.LinePlot(
79
+ value=trades_filtered,
80
+ x="month_year_week",
81
+ y="collateral_amount",
82
+ color="percentile",
83
+ show_label=True,
84
+ interactive=True,
85
+ show_actions_button=True,
86
+ tooltip=["month_year_week", "percentile", "collateral_amount"],
87
+ height=HEIGHT,
88
+ width=WIDTH
89
+ )
90
+
91
+ if trade_detail == "earnings":
92
+ trades_filtered = trades_df[trades_df["creation_timestamp"] >"2023-09-01"]
93
+ trades_filtered = trades_filtered.groupby("month_year_week")["earnings"].quantile([0.25, 0.5, 0.75]).unstack()
94
+ trades_filtered.columns = trades_filtered.columns.astype(str)
95
+ trades_filtered.reset_index(inplace=True)
96
+ trades_filtered.columns = [
97
+ "month_year_week",
98
+ "25th_percentile",
99
+ "50th_percentile",
100
+ "75th_percentile"
101
+ ]
102
+ # reformat the data as percentile, date, value
103
+ trades_filtered = trades_filtered.melt(id_vars=["month_year_week"], var_name="percentile", value_name="earnings")
104
+
105
+ return gr.LinePlot(
106
+ value=trades_filtered,
107
+ x="month_year_week",
108
+ y="earnings",
109
+ color="percentile",
110
+ show_label=True,
111
+ interactive=True,
112
+ show_actions_button=True,
113
+ tooltip=["month_year_week", "percentile", "earnings"],
114
+ height=HEIGHT,
115
+ width=WIDTH
116
+ )
117
+
118
+ if trade_detail == "net earnings":
119
+ trades_filtered = trades_df[trades_df["creation_timestamp"] >"2023-09-01"]
120
+ trades_filtered = trades_filtered.groupby("month_year_week")["net_earnings"].quantile([0.25, 0.5, 0.75]).unstack()
121
+ trades_filtered.columns = trades_filtered.columns.astype(str)
122
+ trades_filtered.reset_index(inplace=True)
123
+ trades_filtered.columns = [
124
+ "month_year_week",
125
+ "25th_percentile",
126
+ "50th_percentile",
127
+ "75th_percentile"
128
+ ]
129
+ # reformat the data as percentile, date, value
130
+ trades_filtered = trades_filtered.melt(id_vars=["month_year_week"], var_name="percentile", value_name="net_earnings")
131
+
132
+ return gr.LinePlot(
133
+ value=trades_filtered,
134
+ x="month_year_week",
135
+ y="net_earnings",
136
+ color="percentile",
137
+ show_label=True,
138
+ interactive=True,
139
+ show_actions_button=True,
140
+ tooltip=["month_year_week", "percentile", "net_earnings"],
141
+ height=HEIGHT,
142
+ width=WIDTH
143
+ )
144
+
145
+ if trade_detail == "ROI":
146
+ trades_filtered = trades_df[trades_df["creation_timestamp"] >"2023-09-01"]
147
+ trades_filtered = trades_filtered.groupby("month_year_week")["roi"].quantile([0.25, 0.5, 0.75]).unstack()
148
+ trades_filtered.columns = trades_filtered.columns.astype(str)
149
+ trades_filtered.reset_index(inplace=True)
150
+ trades_filtered.columns = [
151
+ "month_year_week",
152
+ "25th_percentile",
153
+ "50th_percentile",
154
+ "75th_percentile"
155
+ ]
156
+ # reformat the data as percentile, date, value
157
+ trades_filtered = trades_filtered.melt(id_vars=["month_year_week"], var_name="percentile", value_name="ROI")
158
+
159
+ return gr.LinePlot(
160
+ value=trades_filtered,
161
+ x="month_year_week",
162
+ y="ROI",
163
+ color="percentile",
164
+ show_label=True,
165
+ interactive=True,
166
+ show_actions_button=True,
167
+ tooltip=["month_year_week", "percentile", "ROI"],
168
+ height=HEIGHT,
169
+ width=WIDTH
170
+ )
171
+
172
+ def plot_trades_by_week(trades_df: pd.DataFrame) -> gr.BarPlot:
173
+ """Plots the trades data for the given tools and calculates the winning percentage."""
174
+ return gr.BarPlot(
175
+ value=trades_df,
176
+ x="month_year_week",
177
+ y="trades",
178
+ show_label=True,
179
+ interactive=True,
180
+ show_actions_button=True,
181
+ tooltip=["month_year_week", "trades"],
182
+ height=HEIGHT,
183
+ width=WIDTH
184
+ )
185
+
186
+ def plot_winning_trades_by_week(trades_df: pd.DataFrame) -> gr.BarPlot:
187
+ """Plots the winning trades data for the given tools and calculates the winning percentage."""
188
+ return gr.BarPlot(
189
+ value=trades_df,
190
+ x="month_year_week",
191
+ y="winning_trade",
192
+ show_label=True,
193
+ interactive=True,
194
+ show_actions_button=True,
195
+ tooltip=["month_year_week", "winning_trade"],
196
+ height=HEIGHT,
197
+ width=WIDTH
198
+ )
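plot_trade_details repeats the same groupby/quantile/melt pipeline once per metric. A minimal sketch of how that pipeline could be factored into a single helper; the column names come from the code above, while the helper name is illustrative only:

import pandas as pd

def weekly_percentiles(trades_df: pd.DataFrame, column: str, label: str) -> pd.DataFrame:
    """Return the 25th/50th/75th weekly percentiles of `column` in long (melted) form."""
    # same cut-off date as the branches above
    filtered = trades_df[trades_df["creation_timestamp"] > "2023-09-01"]
    pct = filtered.groupby("month_year_week")[column].quantile([0.25, 0.5, 0.75]).unstack()
    pct.columns = ["25th_percentile", "50th_percentile", "75th_percentile"]
    pct = pct.reset_index()
    return pct.melt(id_vars=["month_year_week"], var_name="percentile", value_name=label)

Each branch of plot_trade_details could then look up a (column, label) pair for the selected detail, e.g. "mech calls" -> ("num_mech_calls", "mech_calls"), and pass the helper's output to a single gr.LinePlot call.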