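"""
bt_rewards.py

Compute Bridge Traveler reward scores: load transfers that travelers made on
other bridges, price them in USD, exclude addresses that already used Across,
LP'd for Across before the program, or were flagged as sybils, then score the
remaining travelers by bridged volume.

Assumed to be run from the repo root after the upstream data steps have
produced the inputs under raw/ and intermediate/ (inferred from the relative
paths used below):

    python bt_rewards.py
"""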
import json

import pandas as pd
import web3
from pyaml_env import parse_config

from acx.utils import cutAndPowerScore


if __name__ == "__main__":
    # Load parameters
    params = parse_config("parameters.yaml")

    # Inclusion parameters
    transferCount = params["traveler"]["parameters"]["inclusion"]["transfer_count"]
    transferCountVol = params["traveler"]["parameters"]["inclusion"]["transfer_count_volume"]
    volumeInclusion = params["traveler"]["parameters"]["inclusion"]["volume_inclusion"]

    # Score parameters
    power = params["traveler"]["parameters"]["score"]["power"]
    ub = params["traveler"]["parameters"]["score"]["ub"]

    # Travel window bounds
    travelStartBlock = params["traveler"]["travel_start_block"]
    travelEndBlock = params["traveler"]["travel_end_block"]
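    # For reference, parameters.yaml is assumed to look roughly like this.
    # The key layout is inferred from the lookups above; the values are
    # illustrative, not the actual production settings:
    #
    #     traveler:
    #       travel_start_block: 15000000
    #       travel_end_block: 16000000
    #       parameters:
    #         inclusion:
    #           transfer_count: 2
    #           transfer_count_volume: 1000
    #           volume_inclusion: 5000
    #         score:
    #           power: 0.5
    #           ub: 250000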
    # Read all transfers that occurred on other bridges
    df = pd.read_parquet("intermediate/travelerTransfers.parquet")

    # Load prices and merge them into the transfer dataframe
    prices = (
        pd.read_json("raw/prices.json", orient="records")
        .set_index(["date", "symbol"])
    )
    df = df.merge(
        prices, left_on=["date", "symbol"], right_index=True, how="left"
    )
    df["amountUSD"] = df.eval("price * amount")
    # Load the recipients of Across transfers
    acrossAddresses = (
        pd.read_json("intermediate/bridgoorTransactions.json", orient="records")
        ["recipient"]
        .unique()
    )
    # Normalize to checksummed form (web3.py v5 naming; newer releases
    # expose this as Web3.to_checksum_address)
    acrossAddresses = list(map(web3.Web3.toChecksumAddress, acrossAddresses))

    # Filter out any users that have used Across already
    df = df.query("traveler not in @acrossAddresses")
    # Load Across LPs -- only look at pre-traveler LPs for exclusion
    originalLpCutoff = 15_649_594
    # A column counts as an LP position if its cumulative balance before the
    # cutoff block ever exceeds a dust tolerance of 1e-18
    v1LpPositions = (
        pd.read_parquet("intermediate/v1CumulativeLp.parquet")
        .loc[:originalLpCutoff, :]
        .max()
        > 1e-18
    )
    v2LpPositions = (
        pd.read_parquet("intermediate/v2CumulativeLp.parquet")
        .loc[:originalLpCutoff, :]
        .max()
        > 1e-18
    )
    # The boolean position series are indexed by the parquet columns; level 1
    # of that (Multi)Index is taken to hold the LP address
    acrossLps = list(
        set(v1LpPositions.index[v1LpPositions].get_level_values(1).unique())
        .union(v2LpPositions.index[v2LpPositions].get_level_values(1).unique())
    )

    # Filter out any users that have LP'd for Across (prior to BT)
    df = df.query("traveler not in @acrossLps")
    # Filter out users who have been identified as sybils; sybil.json is
    # assumed to hold a flat JSON array of traveler addresses
    with open("raw/sybil.json", "r") as f:
        sybils = json.load(f)
    df = df.query("traveler not in @sybils")
    # Now we can aggregate per traveler and calculate scores
    travelers = df.groupby("traveler").agg(
        nTransfers=("tx", "count"),
        totalVolume=("amountUSD", "sum"),
    )
    # Filter to only include bridgoors who met the qualification thresholds:
    # enough transfers with enough volume, or a large enough volume outright
    inclusionQuery = (
        "(nTransfers >= @transferCount & totalVolume >= @transferCountVol)"
        " | (totalVolume >= @volumeInclusion)"
    )
    travelers = travelers.query(inclusionQuery)

    travelers["score"] = cutAndPowerScore(
        travelers["totalVolume"], 0.0, ub, power
    )
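    # cutAndPowerScore comes from acx.utils. A minimal sketch of the assumed
    # behavior, inferred from the call signature (series, lb, ub, power) and
    # the percentage scaling below; the real implementation may differ:
    #
    #     def cutAndPowerScore(x, lb, ub, power):
    #         clipped = x.clip(lower=lb, upper=ub)  # cut volumes to [lb, ub]
    #         powered = clipped ** power            # dampen large volumes
    #         return powered / powered.sum()        # normalize to sum to 1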
    # Save output, largest volumes first, with scores scaled by 100
    travelers = travelers.sort_values("totalVolume", ascending=False)
    (100 * travelers["score"]).to_json("final/traveler_score.json")