# scripts/closed_markets_divergence.py
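"""Compute divergence metrics for closed Omen prediction markets.

The script reads the closed markets found in the trades parquet file, fetches
each market's final liquidity from the Omen subgraph, converts the final
outcome-token amounts into outcome probabilities, and stores the
Kullback-Leibler divergence (and the absolute percentage error) between those
probabilities and the resolved outcome.
"""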
import os
import pandas as pd
import numpy as np
from typing import Any, Optional, Union
from string import Template
import requests
import pickle
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import time
from datetime import datetime
from utils import ROOT_DIR, TMP_DIR

NUM_WORKERS = 10
IPFS_POLL_INTERVAL = 0.2  # seconds to wait between consecutive subgraph requests
INVALID_ANSWER_HEX = (
"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
)
INVALID_ANSWER = -1
SUBGRAPH_API_KEY = os.environ.get("SUBGRAPH_API_KEY", None)
OMEN_SUBGRAPH_URL = Template(
"""https://gateway-arbitrum.network.thegraph.com/api/${subgraph_api_key}/subgraphs/id/9fUVQpFwzpdWS9bq5WkAnmKbNNcoBwatMR4yZq81pbbz"""
)
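# Note: the API key is effectively required; if SUBGRAPH_API_KEY is unset,
# the substituted URL contains the literal string "None" and the requests
# below will fail.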
get_token_amounts_query = Template(
"""
{
fpmmLiquidities(
where: {
fpmm_: {
creator: "${fpmm_creator}",
id: "${fpmm_id}",
},
id_gt: ""
}
orderBy: creationTimestamp
orderDirection: asc
)
{
id
outcomeTokenAmounts
creationTimestamp
additionalLiquidityParameter
}
}
"""
)
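# Based on the fields requested above, a successful response is expected to
# look roughly like this (illustrative values only):
# {"data": {"fpmmLiquidities": [
#     {"id": "0x...", "outcomeTokenAmounts": ["123", "456"],
#      "creationTimestamp": "1700000000", "additionalLiquidityParameter": null}
# ]}}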
CREATOR = "0x89c5cc945dd550BcFfb72Fe42BfF002429F46Fec"
PEARL_CREATOR = "0xFfc8029154ECD55ABED15BD428bA596E7D23f557"
market_creators_map = {"quickstart": CREATOR, "pearl": PEARL_CREATOR}
headers = {
"Accept": "application/json, multipart/mixed",
"Content-Type": "application/json",
}


def _to_content(q: str) -> dict[str, Any]:
    """Wrap the given query string in the JSON payload expected by the subgraph."""
finalized_query = {
"query": q,
"variables": None,
"extensions": {"headers": None},
}
return finalized_query


def collect_liquidity_info(
    index: int, fpmm_id: str, market_creator: str
) -> Optional[dict[str, Any]]:
    """Fetch the final outcome-token amounts of one market from the Omen subgraph.

    Returns a mapping {fpmm_id: [first_token_amount, second_token_amount]},
    or None when the subgraph has no liquidity information for the market.
    """
omen_subgraph = OMEN_SUBGRAPH_URL.substitute(subgraph_api_key=SUBGRAPH_API_KEY)
market_creator_id = market_creators_map[market_creator]
query = get_token_amounts_query.substitute(
fpmm_creator=market_creator_id.lower(),
fpmm_id=fpmm_id,
)
content_json = _to_content(query)
# print(f"Executing liquidity query {query}")
res = requests.post(omen_subgraph, headers=headers, json=content_json)
result_json = res.json()
tokens_info = result_json.get("data", {}).get("fpmmLiquidities", [])
if not tokens_info:
return None
# the last item is the final information of the market
last_info = tokens_info[-1]
token_amounts = [int(x) for x in last_info["outcomeTokenAmounts"]]
time.sleep(IPFS_POLL_INTERVAL)
return {fpmm_id: token_amounts}
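# Illustrative return value of the function above (hypothetical id and
# amounts): {"0xabc...def": [1000000000000000000, 250000000000000000]}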


def convert_hex_to_int(x: Union[str, float]) -> Union[str, float]:
    """Decode a hex-encoded market answer into "yes", "no" or "invalid".

    NaN floats are passed through; the return annotation reflects that the
    function yields outcome labels rather than raw integers.
    """
    if isinstance(x, float):
        return np.nan
    if x == INVALID_ANSWER_HEX:
        return "invalid"
    return "yes" if int(x, 16) == 0 else "no"


def get_closed_markets():
    """Read closed-market trade data from the parquet file and tidy it up."""
    print("Reading parquet file with closed markets data from trades")
    try:
        markets = pd.read_parquet(TMP_DIR / "fpmmTrades.parquet")
    except Exception as e:
        # without the trades file there is nothing to compute, so fail loudly
        print(f"Error reading the parquet file: {e}")
        raise
columns_of_interest = [
"fpmm.currentAnswer",
"fpmm.id",
"fpmm.openingTimestamp",
"market_creator",
]
markets = markets[columns_of_interest]
markets.rename(
columns={
"fpmm.currentAnswer": "currentAnswer",
"fpmm.openingTimestamp": "openingTimestamp",
"fpmm.id": "id",
},
inplace=True,
)
markets = markets.drop_duplicates(subset=["id"], keep="last")
# remove invalid answers
markets = markets.loc[markets["currentAnswer"] != INVALID_ANSWER_HEX]
markets["currentAnswer"] = markets["currentAnswer"].apply(
lambda x: convert_hex_to_int(x)
)
markets.dropna(inplace=True)
markets["opening_datetime"] = markets["openingTimestamp"].apply(
lambda x: datetime.fromtimestamp(int(x))
)
markets = markets.sort_values(by="opening_datetime", ascending=True)
return markets
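# The frame returned above keeps one row per market with the columns id,
# currentAnswer ("yes" or "no"), openingTimestamp, market_creator and the
# derived opening_datetime, sorted by opening date ascending.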


def kl_divergence(P, Q):
    """
    Compute the KL divergence for a single sample with two probability distributions.

    :param P: true distribution
    :param Q: approximating distribution
    :return: KL divergence value
    """
    # Handle the edge cases first
    if P[0] == Q[0]:
        return 0.0
    # If P is the complete opposite of Q, return a fixed maximum divergence.
    # The value 20 roughly corresponds to Q = [mu, 1 - mu] or Q = [1 - mu, mu]
    # with mu = 1e-8.
    if P[0] == Q[1]:
        return 20
nonzero = P > 0.0
# Compute KL divergence
kl_div = np.sum(P[nonzero] * np.log(P[nonzero] / Q[nonzero]))
return kl_div
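# Illustrative check (hypothetical values): for a market resolved "yes",
# P = [1, 0], and a final pool probability Q = [0.9, 0.1], only the first
# component of P is nonzero, so KL(P || Q) = 1 * ln(1 / 0.9) ≈ 0.105.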


def market_KL_divergence(market_row: pd.Series) -> float:
    """Compute the KL divergence between the resolved outcome and the market's
    final probabilities; see
    https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence"""
    current_answer = market_row.currentAnswer  # "yes" or "no"
approx_prob = market_row.first_outcome_prob
true_prob = 1.0 # for yes outcome
if current_answer == "no":
true_prob = 0.0 # = 0% for yes outcome and 100% for no
# we have only one sample, the final probability based on tokens
# Ensure probabilities sum to 1
P = np.array([true_prob, 1 - true_prob])
Q = np.array([approx_prob, 1 - approx_prob])
return kl_divergence(P, Q)


def off_by_values(market_row: pd.Series) -> float:
    """Compute how many percentage points the market's final probability was off by."""
    current_answer = market_row.currentAnswer  # "yes" or "no"
approx_prob = market_row.first_outcome_prob
true_prob = 1.0 # for yes outcome
if current_answer == "no":
true_prob = 0.0 # = 0% for yes outcome and 100% for no
# we have only one sample, the final probability based on tokens
# Ensure probabilities sum to 1
P = np.array([true_prob, 1 - true_prob])
Q = np.array([approx_prob, 1 - approx_prob])
return abs(P[0] - Q[0]) * 100.0
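# Example with hypothetical values: a market resolved "yes" (P = [1, 0]) with
# first_outcome_prob = 0.8 gives |1.0 - 0.8| * 100 = 20.0 percentage points.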


def compute_tokens_prob(token_amounts: list) -> list:
    """Derive the two outcome probabilities from the final pool token amounts."""
    first_token_amounts = token_amounts[0]
    second_token_amounts = token_amounts[1]
    total_tokens = first_token_amounts + second_token_amounts
    # in an FPMM the implied probability of an outcome is inversely related
    # to its share of the pool, hence 1 - share
    first_token_prob = 1 - round(first_token_amounts / total_tokens, 4)
    return [first_token_prob, 1 - first_token_prob]
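# Worked example (hypothetical amounts): token_amounts = [100, 300] gives
# total 400 and first_token_prob = 1 - 100/400 = 0.75, so the function
# returns [0.75, 0.25]; the less-held outcome is the more probable one.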


def prepare_closed_markets_data():
    """Build the closed-markets dataset with divergence metrics and save it."""
    closed_markets = get_closed_markets()
    closed_markets["first_outcome_prob"] = -1.0
    closed_markets["second_outcome_prob"] = -1.0
    total_markets = len(closed_markets)
    markets_no_info = []
    with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
        # map each future back to the dataframe position it was submitted for,
        # so skipped markets are recorded against the right row
        future_to_index = {
            executor.submit(
                collect_liquidity_info,
                i,
                closed_markets.iloc[i].id,
                closed_markets.iloc[i].market_creator,
            ): i
            for i in range(total_markets)
        }
        markets_with_info = 0
        for future in tqdm(
            as_completed(future_to_index),
            total=len(future_to_index),
            desc="Fetching market liquidity info",
        ):
            index = future_to_index[future]
            token_amounts_dict = future.result()
            if token_amounts_dict:
                fpmm_id, token_amounts = token_amounts_dict.popitem()
                if token_amounts:
                    tokens_prob = compute_tokens_prob(token_amounts)
                    closed_markets.loc[
                        closed_markets["id"] == fpmm_id, "first_outcome_prob"
                    ] = tokens_prob[0]
                    closed_markets.loc[
                        closed_markets["id"] == fpmm_id, "second_outcome_prob"
                    ] = tokens_prob[1]
                    markets_with_info += 1
                else:
                    tqdm.write("Skipping market with no liquidity info")
                    markets_no_info.append(index)
            else:
                tqdm.write("Skipping market with no liquidity info")
                markets_no_info.append(index)
print(f"Markets with info = {markets_with_info}")
    # remove markets with no liquidity info
    closed_markets = closed_markets.loc[closed_markets["first_outcome_prob"] != -1.0]
    print(
        f"Finished computing liquidity info for all markets. Final length = {len(closed_markets)}"
    )
    if len(markets_no_info) > 0:
        print(
            f"There were {len(markets_no_info)} markets with no liquidity info. "
            "Saving their dataframe positions into no_liq_info.pickle"
        )
        with open("no_liq_info.pickle", "wb") as file:
            pickle.dump(markets_no_info, file)
        print(f"First affected position: {markets_no_info[:1]}")
print(closed_markets.head())
# Add the Kullback–Leibler divergence values
print("Computing Kullback–Leibler (KL) divergence")
closed_markets["kl_divergence"] = closed_markets.apply(
lambda x: market_KL_divergence(x), axis=1
)
closed_markets["off_by_perc"] = closed_markets.apply(
lambda x: off_by_values(x), axis=1
)
closed_markets.to_parquet(ROOT_DIR / "closed_markets_div.parquet", index=False)
print("Finished preparing final dataset for visualization")
print(closed_markets.head())


if __name__ == "__main__":
prepare_closed_markets_data()