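"""Prepare closed Omen market data for visualization.

Loads closed markets from the trades parquet file, fetches each market's final
outcome-token liquidity from the Omen subgraph, converts the token amounts into
implied outcome probabilities, and scores every market against its resolved
answer with the Kullback–Leibler divergence and an "off by" percentage. The
result is written to closed_markets_div.parquet.
"""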
import os
import pandas as pd
import numpy as np
from typing import Any, Optional, Union
from string import Template
import requests
import pickle
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import time
from datetime import datetime
from utils import ROOT_DIR, TMP_DIR

NUM_WORKERS = 10
IPFS_POLL_INTERVAL = 0.2
INVALID_ANSWER_HEX = (
    "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
)
INVALID_ANSWER = -1
SUBGRAPH_API_KEY = os.environ.get("SUBGRAPH_API_KEY", None)
OMEN_SUBGRAPH_URL = Template(
    """https://gateway-arbitrum.network.thegraph.com/api/${subgraph_api_key}/subgraphs/id/9fUVQpFwzpdWS9bq5WkAnmKbNNcoBwatMR4yZq81pbbz"""
)
get_token_amounts_query = Template(
    """
    {
      fpmmLiquidities(
        where: {
          fpmm_: {
            creator: "${fpmm_creator}",
            id: "${fpmm_id}",
          },
          id_gt: ""
        }
        orderBy: creationTimestamp
        orderDirection: asc
      ) {
        id
        outcomeTokenAmounts
        creationTimestamp
        additionalLiquidityParameter
      }
    }
    """
)
CREATOR = "0x89c5cc945dd550BcFfb72Fe42BfF002429F46Fec"
PEARL_CREATOR = "0xFfc8029154ECD55ABED15BD428bA596E7D23f557"
market_creators_map = {"quickstart": CREATOR, "pearl": PEARL_CREATOR}
headers = {
    "Accept": "application/json, multipart/mixed",
    "Content-Type": "application/json",
}


def _to_content(q: str) -> dict[str, Any]:
    """Wrap the given GraphQL query string into a JSON request payload."""
    finalized_query = {
        "query": q,
        "variables": None,
        "extensions": {"headers": None},
    }
    return finalized_query
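
# Illustrative example: _to_content("{ fpmmLiquidities { id } }") produces
# {"query": "{ fpmmLiquidities { id } }", "variables": None,
#  "extensions": {"headers": None}}, which is sent as the JSON body of the POST.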


def collect_liquidity_info(
    index: int, fpmm_id: str, market_creator: str
) -> Optional[dict[str, Any]]:
    """Fetch the final outcome-token amounts for a market from the Omen subgraph."""
    omen_subgraph = OMEN_SUBGRAPH_URL.substitute(subgraph_api_key=SUBGRAPH_API_KEY)
    market_creator_id = market_creators_map[market_creator]
    query = get_token_amounts_query.substitute(
        fpmm_creator=market_creator_id.lower(),
        fpmm_id=fpmm_id,
    )
    content_json = _to_content(query)
    res = requests.post(omen_subgraph, headers=headers, json=content_json)
    result_json = res.json()
    tokens_info = result_json.get("data", {}).get("fpmmLiquidities", [])
    if not tokens_info:
        return None

    # The last liquidity event holds the market's final token amounts.
    last_info = tokens_info[-1]
    token_amounts = [int(x) for x in last_info["outcomeTokenAmounts"]]
    # Throttle requests so the subgraph endpoint is not hammered.
    time.sleep(IPFS_POLL_INTERVAL)
    return {fpmm_id: token_amounts}


def convert_hex_to_int(x: Union[str, float]) -> Union[str, float]:
    """Map a hex-encoded market answer to a "yes"/"no"/"invalid" label."""
    if isinstance(x, float):
        # Missing answers arrive as NaN floats; propagate NaN.
        return np.nan
    if isinstance(x, str):
        if x == INVALID_ANSWER_HEX:
            return "invalid"
        # Answer index 0 corresponds to the first ("yes") outcome.
        return "yes" if int(x, 16) == 0 else "no"


def get_closed_markets() -> pd.DataFrame:
    """Load closed markets from the trades parquet file and clean them up."""
    print("Reading parquet file with closed markets data from trades")
    try:
        markets = pd.read_parquet(TMP_DIR / "fpmmTrades.parquet")
    except Exception as e:
        print(f"Error reading the parquet file: {e}")
        raise

    columns_of_interest = [
        "fpmm.currentAnswer",
        "fpmm.id",
        "fpmm.openingTimestamp",
        "market_creator",
    ]
    markets = markets[columns_of_interest]
    markets.rename(
        columns={
            "fpmm.currentAnswer": "currentAnswer",
            "fpmm.openingTimestamp": "openingTimestamp",
            "fpmm.id": "id",
        },
        inplace=True,
    )
    markets = markets.drop_duplicates(subset=["id"], keep="last")
    # Remove markets that resolved to the invalid answer.
    markets = markets.loc[markets["currentAnswer"] != INVALID_ANSWER_HEX]
    markets["currentAnswer"] = markets["currentAnswer"].apply(convert_hex_to_int)
    markets.dropna(inplace=True)
    markets["opening_datetime"] = markets["openingTimestamp"].apply(
        lambda x: datetime.fromtimestamp(int(x))
    )
    markets = markets.sort_values(by="opening_datetime", ascending=True)
    return markets


def kl_divergence(P, Q):
    """
    Compute the KL divergence for a single sample with two probability distributions.

    :param P: true distribution
    :param Q: approximating distribution
    :return: KL divergence value
    """
    # Edge case: identical distributions have zero divergence.
    if P[0] == Q[0]:
        return 0.0
    # If P is the complete opposite of Q, cap the divergence at a maximum value.
    # 20 allows for Q = [mu, 1 - mu] or Q = [1 - mu, mu] where mu = 10^-8.
    if P[0] == Q[1]:
        return 20

    # Sum only over entries where P is nonzero (0 * log 0 is taken as 0).
    nonzero = P > 0.0
    kl_div = np.sum(P[nonzero] * np.log(P[nonzero] / Q[nonzero]))

    return kl_div
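
# Illustrative example: for a market resolved "yes" whose final implied
# probability for "yes" was 0.9,
# kl_divergence(np.array([1.0, 0.0]), np.array([0.9, 0.1]))
# = 1.0 * log(1.0 / 0.9) ≈ 0.105.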


def market_KL_divergence(market_row: pd.Series) -> float:
    """Compute the divergence between a market's resolved answer and its final
    implied probability.
    Formula in https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence"""
    current_answer = market_row.currentAnswer  # "yes" or "no"
    approx_prob = market_row.first_outcome_prob
    true_prob = 1.0  # P("yes") for a market resolved "yes"
    if current_answer == "no":
        true_prob = 0.0  # P("yes") = 0 and P("no") = 1

    # We have only one sample: the final probability implied by the token amounts.
    # Both distributions sum to 1 by construction.
    P = np.array([true_prob, 1 - true_prob])
    Q = np.array([approx_prob, 1 - approx_prob])
    return kl_divergence(P, Q)


def off_by_values(market_row: pd.Series) -> float:
    """Compute the absolute difference, in percentage points, between the
    resolved answer and the market's final implied probability."""
    current_answer = market_row.currentAnswer  # "yes" or "no"
    approx_prob = market_row.first_outcome_prob
    true_prob = 1.0  # P("yes") for a market resolved "yes"
    if current_answer == "no":
        true_prob = 0.0  # P("yes") = 0 and P("no") = 1

    P = np.array([true_prob, 1 - true_prob])
    Q = np.array([approx_prob, 1 - approx_prob])
    return abs(P[0] - Q[0]) * 100.0
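
# Illustrative example: a market resolved "yes" with first_outcome_prob = 0.8
# is off by abs(1.0 - 0.8) * 100 = 20.0 percentage points.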


def compute_tokens_prob(token_amounts: list) -> list:
    """Convert the pool's outcome-token amounts into implied probabilities.

    In an FPMM, the outcome with fewer tokens left in the pool is the one
    traders bought more of, so its implied probability is higher.
    """
    first_token_amounts = token_amounts[0]
    second_token_amounts = token_amounts[1]
    total_tokens = first_token_amounts + second_token_amounts
    first_token_prob = 1 - round(first_token_amounts / total_tokens, 4)
    return [first_token_prob, 1 - first_token_prob]
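
# Illustrative example: compute_tokens_prob([300, 100]) returns [0.25, 0.75]:
# the first outcome holds 75% of the pooled tokens, so its implied
# probability is 1 - 0.75 = 0.25.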


def prepare_closed_markets_data() -> None:
    """Fetch final liquidity for all closed markets and compute divergence metrics."""
    closed_markets = get_closed_markets()
    closed_markets["first_outcome_prob"] = -1.0
    closed_markets["second_outcome_prob"] = -1.0
    total_markets = len(closed_markets)
    markets_no_info = []
    no_info = 0
    with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
        # Map each future back to its dataframe position so that markets with
        # missing liquidity info can be reported against the right row.
        future_to_index = {}
        for i in range(total_markets):
            future = executor.submit(
                collect_liquidity_info,
                i,
                closed_markets.iloc[i].id,
                closed_markets.iloc[i].market_creator,
            )
            future_to_index[future] = i
        markets_with_info = 0
        for future in tqdm(
            as_completed(future_to_index),
            total=len(future_to_index),
            desc="Fetching market liquidity info",
        ):
            token_amounts_dict = future.result()
            if token_amounts_dict:
                fpmm_id, token_amounts = token_amounts_dict.popitem()
                if token_amounts:
                    tokens_prob = compute_tokens_prob(token_amounts)
                    closed_markets.loc[
                        closed_markets["id"] == fpmm_id, "first_outcome_prob"
                    ] = tokens_prob[0]
                    closed_markets.loc[
                        closed_markets["id"] == fpmm_id, "second_outcome_prob"
                    ] = tokens_prob[1]
                    markets_with_info += 1
                else:
                    tqdm.write(f"Skipping market with no liquidity info")
                    markets_no_info.append(i)
            else:
                tqdm.write(f"Skipping market with no liquidity info")
                no_info += 1

    print(f"Markets with info = {markets_with_info}")
    # Removing markets with no liq info
    closed_markets = closed_markets.loc[closed_markets["first_outcome_prob"] != -1.0]
    print(
        f"Finished computing all markets liquidity info. Final length = {len(closed_markets)}"
    )
    if len(markets_no_info) > 0:
        print(
            f"There were {len(markets_no_info)} markets with no liquidity info. Printing some index of the dataframe"
        )
        with open("no_liq_info.pickle", "wb") as file:
            pickle.dump(markets_no_info, file)
        print(markets_no_info[:1])
    print(closed_markets.head())
    # Add the Kullback–Leibler divergence values
    print("Computing Kullback–Leibler (KL) divergence")
    closed_markets["kl_divergence"] = closed_markets.apply(
        lambda x: market_KL_divergence(x), axis=1
    )
    closed_markets["off_by_perc"] = closed_markets.apply(
        lambda x: off_by_values(x), axis=1
    )
    closed_markets.to_parquet(ROOT_DIR / "closed_markets_div.parquet", index=False)
    print("Finished preparing final dataset for visualization")
    print(closed_markets.head())


if __name__ == "__main__":
    prepare_closed_markets_data()