edited_code | original_code |
---|---|
"""
Input: tsv file in the form
Input Video filename | topic | subtopic | title greek | title english | start time | end time | delete segments
input.mp4 | 1 | 1 | έξοδος | output | 00:10:05 | 00:30:10 | 00:11:15-00:12:30,00:20:35-00:22:10
"""
import os
import subprocess
import sys
import yaml
def run_cmd(command: str):
"""run_cmd Run given shell command
Args:
command (str): Shell command to run
Returns:
(int, str): Status code, stdout of shell command
Examples:
>>> run_cmd("ls /")
(0, 'bin\nboot\ndev\netc\nhome\ninit\nlib\nlib32\nlib64\nlibx32\nlost+found\nmedia\nmnt\nopt\nproc\nroot\nrun\nsbin\nsnap\nsrv\nsys\ntmp\nusr\nvar\n')
"""
command = f'{os.getenv('SHELL')} -c "{command}"'
pipe = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
stdout = ""
if pipe.stdout is not None:
stdout = "".join(
[line.decode("utf-8") for line in iter(pipe.stdout.readline, b"")]
)
pipe.stdout.close()
returncode = pipe.wait()
print(stdout)
return returncode, stdout
def out_video(segment, greek=True):
title_idx = 3 if greek else 4
title, topic, subtopic = segment[title_idx], segment[1], segment[2]
name = f"{title}_{topic}-{subtopic}.mp4"
return name
def input_video(segment):
return segment[0]
def manage_timestamps(segment):
try:
st, et = segment[5], segment[6]
except IndexError:
st = segment[5]
return [st]
try:
delete_timestamps = segment[7]
except IndexError:
return [st, et]
if not delete_timestamps:
return [st, et]
else:
return (
[st]
+ [
t
for s in delete_timestamps.split(",")
for t in (s.split("-")[0], s.split("-")[1])
]
+ [et]
)
def to_cut_fmt(timestamp):
out = ""
labels = ["h", "m", "s"]
lb_idx = 0
for c in timestamp:
if c == ":":
out += labels[lb_idx]
lb_idx += 1
else:
out += c
return out
def to_cut_yaml(inmp4, outmp4, ymlname, timestamps):
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return list(zip(a, a))
timestamps = [to_cut_fmt(t) for t in timestamps]
timeframe = []
if len(timestamps) == 1:
timeframe = [{"from": "start", "to": timestamps[0]}]
else:
for s, e in pairwise(["start"] + timestamps + ["end"]):
timeframe += [{"from": s, "to": e}]
out = {
"input": inmp4,
"output": outmp4,
"cut_method": "delete",
"timeframe": timeframe,
}
with open(ymlname, "w") as fd:
yaml.dump(out, fd, default_flow_style=False, sort_keys=False)
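# Illustrative output for the sample row in the module docstring: with
# cut_method "delete", the generated YAML's timeframe lists the pieces to drop,
#   {from: start, to: 00h10m05}, {from: 00h11m15, to: 00h12m30},
#   {from: 00h20m35, to: 00h22m10}, {from: 00h30m10, to: end},
# with timestamps rewritten to the 00h10m05 form by to_cut_fmt() above.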
def format_timestamp_args(timestamps):
if len(timestamps) == 1:
return [f"-ss {timestamps[0]} "]
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return list(zip(a, a))
cmds = [f"-ss {s} -to {e}" for s, e in pairwise(timestamps)]
return cmds
def ffmpeg(inp, out, timestamps_args):
if len(timestamps_args) == 1:
run_cmd(f"ffmpeg -y -i '{inp}' " + timestamps_args[0] + f" -c:v h265_nvenc -crf 24 -preset fast -c:a copy '{out}'")
return
mp4s = []
for i, arg in enumerate(timestamps_args):
mp4s.append(f"{i}.mp4")
cmd = f"ffmpeg -i '{inp}' " + arg + f" -c:v h265_nvenc -crf 24 -preset fast -c:a copy '{i}.mp4'"
print(cmd)
run_cmd(cmd)
tmp = ".tmp_files.txt"
with open(tmp, "w") as fd:
for f in mp4s:
fd.write(f"file '{f}'\n")
run_cmd(f"ffmpeg -y -f concat -i .tmp_files.txt '{out}'")
run_cmd(f"rm {tmp} " + " ".join(mp4s))
def read_split_tsv(timestamp_file):
with open(timestamp_file) as f:
segments = [ln.strip().split("\t") for ln in f]
return segments
def main():
timestamp_file = sys.argv[1]
segments = read_split_tsv(timestamp_file)
for segment in segments:
inmp4 = input_video(segment)
outmp4 = "out/" + out_video(segment, greek=True)
timestamps = manage_timestamps(segment)
timestamp_args = format_timestamp_args(timestamps)
ffmpeg(inmp4, outmp4, timestamp_args)
def main1():
timestamp_file = sys.argv[1]
segments = read_split_tsv(timestamp_file)
for i, segment in enumerate(segments):
inmp4 = input_video(segment)
outmp4 = out_video(segment, greek=True)
timestamps = manage_timestamps(segment)
to_cut_yaml(inmp4, outmp4, f"{i}.yml", timestamps)
if __name__ == "__main__":
main()
| """
Input: tsv file in the form
Input Video filename | topic | subtopic | title greek | title english | start time | end time | delete segments
input.mp4 | 1 | 1 | έξοδος | output | 00:10:05 | 00:30:10 | 00:11:15-00:12:30,00:20:35-00:22:10
"""
import os
import subprocess
import sys
import yaml
def run_cmd(command: str):
"""run_cmd Run given shell command
Args:
command (str): Shell command to run
Returns:
(int, str): Status code, stdout of shell command
Examples:
>>> run_cmd("ls /")
(0, 'bin\nboot\ndev\netc\nhome\ninit\nlib\nlib32\nlib64\nlibx32\nlost+found\nmedia\nmnt\nopt\nproc\nroot\nrun\nsbin\nsnap\nsrv\nsys\ntmp\nusr\nvar\n')
"""
command = f'{os.getenv("SHELL")} -c "{command}"'
pipe = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
stdout = ""
if pipe.stdout is not None:
stdout = "".join(
[line.decode("utf-8") for line in iter(pipe.stdout.readline, b"")]
)
pipe.stdout.close()
returncode = pipe.wait()
print(stdout)
return returncode, stdout
def out_video(segment, greek=True):
title_idx = 3 if greek else 4
title, topic, subtopic = segment[title_idx], segment[1], segment[2]
name = f"{title}_{topic}-{subtopic}.mp4"
return name
def input_video(segment):
return segment[0]
def manage_timestamps(segment):
try:
st, et = segment[5], segment[6]
except IndexError:
st = segment[5]
return [st]
try:
delete_timestamps = segment[7]
except IndexError:
return [st, et]
if not delete_timestamps:
return [st, et]
else:
return (
[st]
+ [
t
for s in delete_timestamps.split(",")
for t in (s.split("-")[0], s.split("-")[1])
]
+ [et]
)
def to_cut_fmt(timestamp):
out = ""
labels = ["h", "m", "s"]
lb_idx = 0
for c in timestamp:
if c == ":":
out += labels[lb_idx]
lb_idx += 1
else:
out += c
return out
def to_cut_yaml(inmp4, outmp4, ymlname, timestamps):
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return list(zip(a, a))
timestamps = [to_cut_fmt(t) for t in timestamps]
timeframe = []
if len(timestamps) == 1:
timeframe = [{"from": "start", "to": timestamps[0]}]
else:
for s, e in pairwise(["start"] + timestamps + ["end"]):
timeframe += [{"from": s, "to": e}]
out = {
"input": inmp4,
"output": outmp4,
"cut_method": "delete",
"timeframe": timeframe,
}
with open(ymlname, "w") as fd:
yaml.dump(out, fd, default_flow_style=False, sort_keys=False)
def format_timestamp_args(timestamps):
if len(timestamps) == 1:
return [f"-ss {timestamps[0]} "]
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return list(zip(a, a))
cmds = [f"-ss {s} -to {e}" for s, e in pairwise(timestamps)]
return cmds
def ffmpeg(inp, out, timestamps_args):
if len(timestamps_args) == 1:
run_cmd(f"ffmpeg -y -i '{inp}' " + timestamps_args[0] + f" -c:v h265_nvenc -crf 24 -preset fast -c:a copy '{out}'")
return
mp4s = []
for i, arg in enumerate(timestamps_args):
mp4s.append(f"{i}.mp4")
cmd = f"ffmpeg -i '{inp}' " + arg + f" -c:v h265_nvenc -crf 24 -preset fast -c:a copy '{i}.mp4'"
print(cmd)
run_cmd(cmd)
tmp = ".tmp_files.txt"
with open(tmp, "w") as fd:
for f in mp4s:
fd.write(f"file '{f}'\n")
run_cmd(f"ffmpeg -y -f concat -i .tmp_files.txt '{out}'")
run_cmd(f"rm {tmp} " + " ".join(mp4s))
def read_split_tsv(timestamp_file):
with open(timestamp_file) as f:
segments = [ln.strip().split("\t") for ln in f]
return segments
def main():
timestamp_file = sys.argv[1]
segments = read_split_tsv(timestamp_file)
for segment in segments:
inmp4 = input_video(segment)
outmp4 = "out/" + out_video(segment, greek=True)
timestamps = manage_timestamps(segment)
timestamp_args = format_timestamp_args(timestamps)
ffmpeg(inmp4, outmp4, timestamp_args)
def main1():
timestamp_file = sys.argv[1]
segments = read_split_tsv(timestamp_file)
for i, segment in enumerate(segments):
inmp4 = input_video(segment)
outmp4 = out_video(segment, greek=True)
timestamps = manage_timestamps(segment)
to_cut_yaml(inmp4, outmp4, f"{i}.yml", timestamps)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Analyze CSV file into scores.
Created on Sat Feb 12 22:15:29 2022 // @hk_nien
"""
from pathlib import Path
import os
import re
import sys
import pandas as pd
import numpy as np
PCODES = dict([
# Regio Noord
(1011, 'Amsterdam'),
(1625, 'Hoorn|Zwaag'),
(1811, 'Alkmaar'),
(7471, 'Goor'),
(7556, 'Hengelo'),
(7903, 'Hoogeveen'),
(7942, 'Meppel'),
(8011, 'Zwolle'),
(8232, 'Lelystad'),
(8442, 'Heerenveen'),
(8911, 'Leeuwarden'),
(9291, 'Kollum'),
(9501, 'Stadskanaal'),
(9726, 'Groningen'),
# Regio Midden
(2406, 'Alphen a/d Rijn'),
(2515, 'Den Haag'),
(3013, 'Rotterdam'),
(3511, 'Utrecht'),
(3901, 'Veenendaal'),
((7137, 7131), 'Lichtenvoorde|Groenlo'),
(7311, 'Apeldoorn'),
# Regio Zuid
(4325, 'Renesse'),
(4462, 'Goes'),
(4701, 'Roosendaal'),
(5038, 'Tilburg'),
(5401, 'Uden'),
(5611, 'Eindhoven'),
(5801, 'Oostrum'),
(6101, 'Echt'),
(6229, 'Maastricht'),
(6541, 'Nijmegen'),
])
def get_bad_scan_times():
"""Return list of Timestamps with bad scan times, from CSV data."""
df = pd.read_csv('data-ggd/ggd_bad_scans.txt', comment='#')
tstamps = pd.to_datetime(df['Timestamp']).to_list()
return tstamps
def _mean_time(ts_list):
"""Return mean timestamp value from list of timestamps."""
ts0 = ts_list[0]
delta_sum = pd.Timedelta(0)
for ts in ts_list:
delta_sum += (ts -ts0)
ts_mean = ts0 + delta_sum / len(ts_list)
return ts_mean
def _delta_time_hhmm(hm):
"""Convert 'hh:mm' string to TimeDelta."""
return pd.Timedelta(f'{hm}:00')
def _summary_to_scores(summary):
"""Convert summary from _read_log to scores dict and effective timestamp.
Parameters:
- summary: dict with int(pc4) -> [(query_time, appt_time), ...]
Return:
- scores dict: int(pc4) -> score (int or float or '?')
- timestamp: middle query timestamp of this run.
"""
# Convert to number codes.
scores = {k: '?' for k in PCODES}
multi_pcs = {} # pc4 -> (pc4[0], pc4[1], ...)
for pc in PCODES:
if isinstance(pc, tuple):
for pc1 in pc:
multi_pcs[pc1] = pc
qtms = []
dhm = _delta_time_hhmm
for pc4, vlist in summary.items():
pc4 = int(pc4)
if pc4 not in scores:
if pc4 in multi_pcs:
pc4_key = multi_pcs[pc4]
else:
print(f'{pc4} not in list...')
continue
else:
pc4_key = pc4
if len(vlist) == 0:
scores[pc4_key] = 7
continue
qtm = _mean_time([v[0] for v in vlist]) # query time
qtms.append(qtm)
atm = min(v[1] for v in vlist) # earliest appointment time
qtm_00 = pd.Timestamp(qtm.strftime('%Y-%m-%dT00:00'))
thresholds = [
(3, qtm_00 + dhm('23:59')),
(4, qtm + dhm('24:00')),
(5, qtm_00 + dhm('48:00')),
(6, qtm + dhm('48:00')),
(6.3, qtm_00 + dhm('72:00')),
(6.7, qtm + dhm('72:00')),
(7, atm)
]
if qtm.hour < 9:
thresholds.insert(0, (1, qtm_00 + dhm('13:00')))
elif qtm.hour < 13:
thresholds.insert(0, (1, qtm + dhm('4:00')))
elif qtm.hour < 17:
thresholds.insert(0, (1, qtm_00 + dhm('24:00')))
thresholds.insert(1, (2, qtm + dhm('20:00')))
else:
thresholds.insert(0, (1, qtm_00 + dhm('24:00')))
thresholds.insert(1, (2, qtm_00 + dhm('37:00')))
for s, tm in thresholds:
if atm < tm:
scores[pc4_key] = s
break
if len(qtms) == 0:
qtm_mid = pd.Timestamp(None)
else:
qtm_min = min(qtms)
qtm_mid = qtm_min + (max(qtms) - qtm_min)/2
return scores, qtm_mid
def _get_min_wait(summary):
"""Return minimum and median wait Timedelta between scan time and appointment.
summary is a dict of pc4 -> list of (query_time, appointment_time) tuples.
No data -> 999 h.
For the median, NaT is counted as infinite.
"""
wtimes = []
for _, vlist in summary.items():
wtimes_this = [atm - qtm for qtm, atm in vlist]
wtimes.append(
min(wtimes_this) if wtimes_this else pd.Timedelta(99, 'h')
)
minwait = min(wtimes) if wtimes else 999
medwait = pd.Timedelta(np.median(wtimes))
return minwait, medwait
def load_csv(csv_fname):
"""Return DataFrame and list of start times (+1)."""
df = pd.read_csv(csv_fname, comment='#')
df['req_pc4'] = df['req_pc4'].astype(int)
for c in df.columns:
if c.endswith('_time') or c.endswith('_date'):
df[c] = pd.to_datetime(df[c])
else:
df.loc[df[c].isna(), c] = None
# start_tms: list of scan start times (plus one extra at the end)
start_tms = df.loc[df['scan_time'].diff() > pd.Timedelta('10 min'), 'scan_time']
start_tms = [df.iloc[0]['scan_time']] + list(start_tms)
start_tms += [df.iloc[-1]['scan_time'] + pd.Timedelta('1 min')]
return df, start_tms
def load_multi_csvs(csv_fnames):
"""Return DataFrame and list of start times (+1)"""
dfs = []
start_tms = []
for f in csv_fnames:
df, st = load_csv(f)
dfs.append(df)
start_tms.extend(st[:-1])
df = pd.concat(dfs).reset_index()
start_tms.append(df.iloc[-1]['scan_time'] + pd.Timedelta('1 min'))
return df, start_tms
def get_scan_scores(df, tm_range):
"""Get scan scores as pc4 -> score dict.
Parameters:
- df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,
opt0_time, opt0_loc_id, etc.
- tm_range: (tm_start, tm_stop) timestamps.
Return:
- tstamp: timestamp of the scan (mid-point)
- scores: dict of pc4->score
- min_wait: Timedelta of minimum wait time from scan to appointment
- med_wait: Timedelta of median wait time from scan to appointment
"""
mask = (df['scan_time'] >= tm_range[0]) & (df['scan_time'] < tm_range[1])
df1 = df.loc[mask]
summary = {}
for pc4, city_re in PCODES.items():
pc4_tup = (pc4,) if isinstance(pc4, int) else pc4
options = []
req_pc4 = None
for _, row in df1.loc[df1['req_pc4'].isin(pc4_tup)].iterrows():
req_pc4 = int(row['req_pc4'])
for i in range(3):
addr = row[f'opt{i}_short_addr']
if addr and re.match(f'{city_re}$', addr[5:]):
options.append((row['scan_time'], row[f'opt{i}_time']))
if req_pc4 is not None:
summary[req_pc4] = options
scores, tstamp = _summary_to_scores(summary)
if pd.isna(tstamp):
tstamp = df1.iloc[len(df1)//2]['scan_time']
minwait, medwait = _get_min_wait(summary)
if medwait == 999:
medwait = pd.Timedelta(None)
return tstamp, scores, minwait, medwait
def get_scan_scores_df(df, tm_ranges, decimal_comma=True):
"""Get scan scores as dataframe, from csv dataframe.
Blacklisted scan times are dropped.
Parameters:
- df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,
opt0_time, opt0_loc_id, etc.
- tm_ranges: list of timestamps (+one at the end) with boundaries
of timestamp ranges.
- decimal_comma: True to have string values 6,3 rather than float 6.3.
Return:
- Dataframe with scores, date_str, time_str, pc4, min_wait, med_wait as columns.
"""
n = len(tm_ranges)
records = []
index = []
minwait_hs = []
medwait_hs = []
bad_stimes = get_bad_scan_times()
for i in range(n-1):
tm_ra = tm_ranges[i:i+2]
is_ok = True
for tm in bad_stimes:
if tm_ra[0] <= tm < tm_ra[1]:
is_ok = False
break
if not is_ok:
print(f'Dropped scan at {tm_ra[0].strftime('%Y-%m-%d %H:%M')}')
continue
tm, scores, minwait, medwait = get_scan_scores(df, tm_ra)
records.append(scores)
index.append(tm)
minwait_hs.append(minwait.total_seconds() / 3600)
medwait_hs.append(medwait.total_seconds() / 3600)
dates = [t.strftime('%Y-%m-%d') for t in index]
times = [t.strftime('%H:%M') for t in index]
sdf = pd.DataFrame.from_records(records)
sdf.insert(0, 'Time', times)
sdf.insert(0, 'Date', dates)
sdf['min_wait_h'] = np.around(minwait_hs, 2)
sdf['med_wait_h'] = np.around(medwait_hs, 2)
sdf.loc[sdf['min_wait_h'].isna(), 'min_wait_h'] = 999
sdf.columns = [
('/'.join([str(x) for x in c]) if isinstance(c, tuple) else c)
for c in sdf.columns
]
if decimal_comma:
for c in sdf.columns[2:]:
sdf[c] = sdf[c].astype(str)
sdf[c] = sdf[c].str.replace('.', ',', regex=False)
sdf[c] = sdf[c].str.replace(',0$', '', regex=True)
sdf[c] = sdf[c].str.replace('?', '', regex=False)
return sdf
if __name__ == '__main__':
in_spyder = ('SPYDER_ARGS' in os.environ)
csv_fnames = sorted(Path('data-ggd').glob('ggd_scan-????-W??.csv'))
do_all = ('--all' in sys.argv)
do_all = do_all or in_spyder and input('(A)ll or latest?').lower() == 'a'
if do_all:
df, start_tms = load_multi_csvs(csv_fnames)
sdf = get_scan_scores_df(df, start_tms).iloc[::-1]
else:
df, start_tms = load_csv(csv_fnames[-1])
sdf = get_scan_scores_df(df, start_tms[-2:])
print(sdf)
if len(sdf) > 1:
sdf.to_clipboard(index=False)
print('Copied to clipboard including headers')
elif len(sdf) == 1:
sdf.iloc[[0], 2:].to_clipboard(header=False, index=False)
print('Copied to clipboard, scores only.')
else:
print('No output.')
if not in_spyder:
# Note: in Spyder, copy/paste will stall while input is blocked.
input('Press Enter to quit and clear clipboard.')
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Analyze CSV file into scores.
Created on Sat Feb 12 22:15:29 2022 // @hk_nien
"""
from pathlib import Path
import os
import re
import sys
import pandas as pd
import numpy as np
PCODES = dict([
# Regio Noord
(1011, 'Amsterdam'),
(1625, 'Hoorn|Zwaag'),
(1811, 'Alkmaar'),
(7471, 'Goor'),
(7556, 'Hengelo'),
(7903, 'Hoogeveen'),
(7942, 'Meppel'),
(8011, 'Zwolle'),
(8232, 'Lelystad'),
(8442, 'Heerenveen'),
(8911, 'Leeuwarden'),
(9291, 'Kollum'),
(9501, 'Stadskanaal'),
(9726, 'Groningen'),
# Regio Midden
(2406, 'Alphen a/d Rijn'),
(2515, 'Den Haag'),
(3013, 'Rotterdam'),
(3511, 'Utrecht'),
(3901, 'Veenendaal'),
((7137, 7131), 'Lichtenvoorde|Groenlo'),
(7311, 'Apeldoorn'),
# Regio Zuid
(4325, 'Renesse'),
(4462, 'Goes'),
(4701, 'Roosendaal'),
(5038, 'Tilburg'),
(5401, 'Uden'),
(5611, 'Eindhoven'),
(5801, 'Oostrum'),
(6101, 'Echt'),
(6229, 'Maastricht'),
(6541, 'Nijmegen'),
])
def get_bad_scan_times():
"""Return list of Timestamps with bad scan times, from CSV data."""
df = pd.read_csv('data-ggd/ggd_bad_scans.txt', comment='#')
tstamps = pd.to_datetime(df['Timestamp']).to_list()
return tstamps
def _mean_time(ts_list):
"""Return mean timestamp value from list of timestamps."""
ts0 = ts_list[0]
delta_sum = pd.Timedelta(0)
for ts in ts_list:
delta_sum += (ts -ts0)
ts_mean = ts0 + delta_sum / len(ts_list)
return ts_mean
def _delta_time_hhmm(hm):
"""Convert 'hh:mm' string to TimeDelta."""
return pd.Timedelta(f'{hm}:00')
def _summary_to_scores(summary):
"""Convert summary from _read_log to scores dict and effective timestamp.
Parameters:
- summary: dict with int(pc4) -> [(query_time, appt_time), ...]
Return:
- scores dict: int(pc4) -> score (int or float or '?')
- timestamp: middle query timestamp of this run.
"""
# Convert to number codes.
scores = {k: '?' for k in PCODES}
multi_pcs = {} # pc4 -> (pc4[0], pc4[1], ...)
for pc in PCODES:
if isinstance(pc, tuple):
for pc1 in pc:
multi_pcs[pc1] = pc
qtms = []
dhm = _delta_time_hhmm
for pc4, vlist in summary.items():
pc4 = int(pc4)
if pc4 not in scores:
if pc4 in multi_pcs:
pc4_key = multi_pcs[pc4]
else:
print(f'{pc4} not in list...')
continue
else:
pc4_key = pc4
if len(vlist) == 0:
scores[pc4_key] = 7
continue
qtm = _mean_time([v[0] for v in vlist]) # query time
qtms.append(qtm)
atm = min(v[1] for v in vlist) # earliest appointment time
qtm_00 = pd.Timestamp(qtm.strftime('%Y-%m-%dT00:00'))
thresholds = [
(3, qtm_00 + dhm('23:59')),
(4, qtm + dhm('24:00')),
(5, qtm_00 + dhm('48:00')),
(6, qtm + dhm('48:00')),
(6.3, qtm_00 + dhm('72:00')),
(6.7, qtm + dhm('72:00')),
(7, atm)
]
if qtm.hour < 9:
thresholds.insert(0, (1, qtm_00 + dhm('13:00')))
elif qtm.hour < 13:
thresholds.insert(0, (1, qtm + dhm('4:00')))
elif qtm.hour < 17:
thresholds.insert(0, (1, qtm_00 + dhm('24:00')))
thresholds.insert(1, (2, qtm + dhm('20:00')))
else:
thresholds.insert(0, (1, qtm_00 + dhm('24:00')))
thresholds.insert(1, (2, qtm_00 + dhm('37:00')))
for s, tm in thresholds:
if atm < tm:
scores[pc4_key] = s
break
if len(qtms) == 0:
qtm_mid = pd.Timestamp(None)
else:
qtm_min = min(qtms)
qtm_mid = qtm_min + (max(qtms) - qtm_min)/2
return scores, qtm_mid
def _get_min_wait(summary):
"""Return minimum and median wait Timedelta between scan time and appointment.
summary is a dict of pc4 -> list of (query_time, appointment_time) tuples.
No data -> 999 h.
For the median, NaT is counted as infinite.
"""
wtimes = []
for _, vlist in summary.items():
wtimes_this = [atm - qtm for qtm, atm in vlist]
wtimes.append(
min(wtimes_this) if wtimes_this else pd.Timedelta(99, 'h')
)
minwait = min(wtimes) if wtimes else 999
medwait = pd.Timedelta(np.median(wtimes))
return minwait, medwait
def load_csv(csv_fname):
"""Return DataFrame and list of start times (+1)."""
df = pd.read_csv(csv_fname, comment='#')
df['req_pc4'] = df['req_pc4'].astype(int)
for c in df.columns:
if c.endswith('_time') or c.endswith('_date'):
df[c] = pd.to_datetime(df[c])
else:
df.loc[df[c].isna(), c] = None
# start_tms: list of scan start times (plus one extra at the end)
start_tms = df.loc[df['scan_time'].diff() > pd.Timedelta('10 min'), 'scan_time']
start_tms = [df.iloc[0]['scan_time']] + list(start_tms)
start_tms += [df.iloc[-1]['scan_time'] + pd.Timedelta('1 min')]
return df, start_tms
def load_multi_csvs(csv_fnames):
"""Return DataFrame and list of start times (+1)"""
dfs = []
start_tms = []
for f in csv_fnames:
df, st = load_csv(f)
dfs.append(df)
start_tms.extend(st[:-1])
df = pd.concat(dfs).reset_index()
start_tms.append(df.iloc[-1]['scan_time'] + pd.Timedelta('1 min'))
return df, start_tms
def get_scan_scores(df, tm_range):
"""Get scan scores as pc4 -> score dict.
Parameters:
- df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,
opt0_time, opt0_loc_id, etc.
- tm_range: (tm_start, tm_stop) timestamps.
Return:
- tstamp: timestamp of the scan (mid-point)
- scores: dict of pc4->score
- min_wait: Timedelta of minimum wait time from scan to appointment
- med_wait: Timedelta of median wait time from scan to appointment
"""
mask = (df['scan_time'] >= tm_range[0]) & (df['scan_time'] < tm_range[1])
df1 = df.loc[mask]
summary = {}
for pc4, city_re in PCODES.items():
pc4_tup = (pc4,) if isinstance(pc4, int) else pc4
options = []
req_pc4 = None
for _, row in df1.loc[df1['req_pc4'].isin(pc4_tup)].iterrows():
req_pc4 = int(row['req_pc4'])
for i in range(3):
addr = row[f'opt{i}_short_addr']
if addr and re.match(f'{city_re}$', addr[5:]):
options.append((row['scan_time'], row[f'opt{i}_time']))
if req_pc4 is not None:
summary[req_pc4] = options
scores, tstamp = _summary_to_scores(summary)
if pd.isna(tstamp):
tstamp = df1.iloc[len(df1)//2]['scan_time']
minwait, medwait = _get_min_wait(summary)
if medwait == 999:
medwait = pd.Timedelta(None)
return tstamp, scores, minwait, medwait
def get_scan_scores_df(df, tm_ranges, decimal_comma=True):
"""Get scan scores as dataframe, from csv dataframe.
Blacklisted scan times are dropped.
Parameters:
- df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,
opt0_time, opt0_loc_id, etc.
- tm_ranges: list of timestamps (+one at the end) with boundaries
of timestamp ranges.
- decimal_comma: True to have string values 6,3 rather than float 6.3.
Return:
- Dataframe with scores, date_str, time_str, pc4, min_wait, med_wait as columns.
"""
n = len(tm_ranges)
records = []
index = []
minwait_hs = []
medwait_hs = []
bad_stimes = get_bad_scan_times()
for i in range(n-1):
tm_ra = tm_ranges[i:i+2]
is_ok = True
for tm in bad_stimes:
if tm_ra[0] <= tm < tm_ra[1]:
is_ok = False
break
if not is_ok:
print(f'Dropped scan at {tm_ra[0].strftime("%Y-%m-%d %H:%M")}')
continue
tm, scores, minwait, medwait = get_scan_scores(df, tm_ra)
records.append(scores)
index.append(tm)
minwait_hs.append(minwait.total_seconds() / 3600)
medwait_hs.append(medwait.total_seconds() / 3600)
dates = [t.strftime('%Y-%m-%d') for t in index]
times = [t.strftime('%H:%M') for t in index]
sdf = pd.DataFrame.from_records(records)
sdf.insert(0, 'Time', times)
sdf.insert(0, 'Date', dates)
sdf['min_wait_h'] = np.around(minwait_hs, 2)
sdf['med_wait_h'] = np.around(medwait_hs, 2)
sdf.loc[sdf['min_wait_h'].isna(), 'min_wait_h'] = 999
sdf.columns = [
('/'.join([str(x) for x in c]) if isinstance(c, tuple) else c)
for c in sdf.columns
]
if decimal_comma:
for c in sdf.columns[2:]:
sdf[c] = sdf[c].astype(str)
sdf[c] = sdf[c].str.replace('.', ',', regex=False)
sdf[c] = sdf[c].str.replace(',0$', '', regex=True)
sdf[c] = sdf[c].str.replace('?', '', regex=False)
return sdf
if __name__ == '__main__':
in_spyder = ('SPYDER_ARGS' in os.environ)
csv_fnames = sorted(Path('data-ggd').glob('ggd_scan-????-W??.csv'))
do_all = ('--all' in sys.argv)
do_all = do_all or in_spyder and input('(A)ll or latest?').lower() == 'a'
if do_all:
df, start_tms = load_multi_csvs(csv_fnames)
sdf = get_scan_scores_df(df, start_tms).iloc[::-1]
else:
df, start_tms = load_csv(csv_fnames[-1])
sdf = get_scan_scores_df(df, start_tms[-2:])
print(sdf)
if len(sdf) > 1:
sdf.to_clipboard(index=False)
print('Copied to clipboard including headers')
elif len(sdf) == 1:
sdf.iloc[[0], 2:].to_clipboard(header=False, index=False)
print('Copied to clipboard, scores only.')
else:
print('No output.')
if not in_spyder:
# Note: in Spyder, copy/paste will stall while input is blocked.
input('Press Enter to quit and clear clipboard.')
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import atexit
import collections
import collections.abc
import enum
import fcntl
import logging
import os
import os.path
import pathlib
import queue
import re
import select
import shlex
import shutil
import subprocess
import sys
import tarfile
import tempfile
import threading
import time
import json
import serial
import serial.tools.list_ports
import yaml
from tvm.micro.project_api import server
_LOG = logging.getLogger(__name__)
API_SERVER_DIR = pathlib.Path(os.path.dirname(__file__) or os.path.getcwd())
BUILD_DIR = API_SERVER_DIR / "build"
MODEL_LIBRARY_FORMAT_RELPATH = "model.tar"
IS_TEMPLATE = not (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH).exists()
BOARDS = API_SERVER_DIR / "boards.json"
# Data structure to hold the information microtvm_api_server.py needs
# to communicate with each of these boards.
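# Illustrative shape only (the real boards.json ships next to this script): it maps
# each zephyr_board name to a property dict. This file relies at least on the board
# names being the keys (used for the zephyr_board option choices) and on a boolean
# "fpu" field (see Handler._has_fpu), e.g. {"<board>": {"fpu": true, ...}}.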
try:
with open(BOARDS) as boards:
BOARD_PROPERTIES = json.load(boards)
except FileNotFoundError:
raise FileNotFoundError(f"Board file {{{BOARDS}}} does not exist.")
def check_call(cmd_args, *args, **kwargs):
cwd_str = "" if "cwd" not in kwargs else f" (in cwd: {kwargs["cwd"]})"
_LOG.info("run%s: %s", cwd_str, " ".join(shlex.quote(a) for a in cmd_args))
return subprocess.check_call(cmd_args, *args, **kwargs)
CACHE_ENTRY_RE = re.compile(r"(?P<name>[^:]+):(?P<type>[^=]+)=(?P<value>.*)")
CMAKE_BOOL_MAP = dict(
[(k, True) for k in ("1", "ON", "YES", "TRUE", "Y")]
+ [(k, False) for k in ("0", "OFF", "NO", "FALSE", "N", "IGNORE", "NOTFOUND", "")]
)
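# Example of what these parse (standard CMakeCache.txt syntax): the line
# "BOARD:STRING=qemu_x86" yields name "BOARD", type "STRING", value "qemu_x86",
# while a line such as "FOO:BOOL=ON" is mapped to True through CMAKE_BOOL_MAP.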
class CMakeCache(collections.abc.Mapping):
def __init__(self, path):
self._path = path
self._dict = None
def __iter__(self):
return iter(self._dict)
def __getitem__(self, key):
if self._dict is None:
self._dict = self._read_cmake_cache()
return self._dict[key]
def __len__(self):
return len(self._dict)
def _read_cmake_cache(self):
"""Read a CMakeCache.txt-like file and return a dictionary of values."""
entries = collections.OrderedDict()
with open(self._path, encoding="utf-8") as f:
for line in f:
m = CACHE_ENTRY_RE.match(line.rstrip("\n"))
if not m:
continue
if m.group("type") == "BOOL":
value = CMAKE_BOOL_MAP[m.group("value").upper()]
else:
value = m.group("value")
entries[m.group("name")] = value
return entries
CMAKE_CACHE = CMakeCache(BUILD_DIR / "CMakeCache.txt")
class BoardError(Exception):
"""Raised when an attached board cannot be opened (i.e. missing /dev nodes, etc)."""
class BoardAutodetectFailed(Exception):
"""Raised when no attached hardware is found matching the board= given to ZephyrCompiler."""
def _get_flash_runner():
flash_runner = CMAKE_CACHE.get("ZEPHYR_BOARD_FLASH_RUNNER")
if flash_runner is not None:
return flash_runner
with open(CMAKE_CACHE["ZEPHYR_RUNNERS_YAML"]) as f:
doc = yaml.load(f, Loader=yaml.FullLoader)
return doc["flash-runner"]
def _get_device_args(options):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return _get_nrf_device_args(options)
if flash_runner == "openocd":
return _get_openocd_device_args(options)
raise BoardError(
f"Don't know how to find serial terminal for board {CMAKE_CACHE["BOARD"]} with flash "
f"runner {flash_runner}"
)
# kwargs passed to usb.core.find to find attached boards for the openocd flash runner.
BOARD_USB_FIND_KW = {
"nucleo_l4r5zi": {"idVendor": 0x0483, "idProduct": 0x374B},
"nucleo_f746zg": {"idVendor": 0x0483, "idProduct": 0x374B},
"stm32f746g_disco": {"idVendor": 0x0483, "idProduct": 0x374B},
"mimxrt1050_evk": {"idVendor": 0x1366, "idProduct": 0x0105},
}
def openocd_serial(options):
"""Find the serial port to use for a board with OpenOCD flash strategy."""
if "openocd_serial" in options:
return options["openocd_serial"]
import usb # pylint: disable=import-outside-toplevel
find_kw = BOARD_USB_FIND_KW[CMAKE_CACHE["BOARD"]]
boards = usb.core.find(find_all=True, **find_kw)
serials = []
for b in boards:
serials.append(b.serial_number)
if len(serials) == 0:
raise BoardAutodetectFailed(f"No attached USB devices matching: {find_kw!r}")
serials.sort()
autodetected_openocd_serial = serials[0]
_LOG.debug("zephyr openocd driver: autodetected serial %s", serials[0])
return autodetected_openocd_serial
def _get_openocd_device_args(options):
return ["--serial", openocd_serial(options)]
def _get_nrf_device_args(options):
nrfjprog_args = ["nrfjprog", "--ids"]
nrfjprog_ids = subprocess.check_output(nrfjprog_args, encoding="utf-8")
if not nrfjprog_ids.strip("\n"):
raise BoardAutodetectFailed(f'No attached boards recognized by {' '.join(nrfjprog_args)}')
boards = nrfjprog_ids.split("\n")[:-1]
if len(boards) > 1:
if options["nrfjprog_snr"] is None:
raise BoardError(
"Multiple boards connected; specify one with nrfjprog_snr=: " f'{', '.join(boards)}'
)
if str(options["nrfjprog_snr"]) not in boards:
raise BoardError(
f"nrfjprog_snr ({options["nrfjprog_snr"]}) not found in {nrfjprog_args}: {boards}"
)
return ["--snr", options["nrfjprog_snr"]]
if not boards:
return []
return ["--snr", boards[0]]
PROJECT_TYPES = []
if IS_TEMPLATE:
for d in (API_SERVER_DIR / "src").iterdir():
if d.is_dir():
PROJECT_TYPES.append(d.name)
PROJECT_OPTIONS = [
server.ProjectOption(
"extra_files_tar",
help="If given, during generate_project, uncompress the tarball at this path into the project dir.",
),
server.ProjectOption(
"gdbserver_port", help=("If given, port number to use when running the local gdbserver.")
),
server.ProjectOption(
"nrfjprog_snr",
help=("When used with nRF targets, serial # of the attached board to use, from nrfjprog."),
),
server.ProjectOption(
"openocd_serial",
help=("When used with OpenOCD targets, serial # of the attached board to use."),
),
server.ProjectOption(
"project_type",
help="Type of project to generate.",
choices=tuple(PROJECT_TYPES),
),
server.ProjectOption("verbose", help="Run build with verbose output.", choices=(True, False)),
server.ProjectOption(
"west_cmd",
help=(
"Path to the west tool. If given, supersedes both the zephyr_base "
"option and ZEPHYR_BASE environment variable."
),
),
server.ProjectOption("zephyr_base", help="Path to the zephyr base directory."),
server.ProjectOption(
"zephyr_board",
choices=list(BOARD_PROPERTIES),
help="Name of the Zephyr board to build for.",
),
server.ProjectOption(
"config_main_stack_size",
help="Sets CONFIG_MAIN_STACK_SIZE for Zephyr board.",
),
]
class Handler(server.ProjectAPIHandler):
def __init__(self):
super(Handler, self).__init__()
self._proc = None
def server_info_query(self, tvm_version):
return server.ServerInfo(
platform_name="zephyr",
is_template=IS_TEMPLATE,
model_library_format_path=""
if IS_TEMPLATE
else (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH),
project_options=PROJECT_OPTIONS,
)
# These files and directories will be recursively copied into generated projects from the CRT.
CRT_COPY_ITEMS = ("include", "Makefile", "src")
# Maps extra line added to prj.conf to a tuple or list of zephyr_board for which it is needed.
EXTRA_PRJ_CONF_DIRECTIVES = {
"CONFIG_TIMER_RANDOM_GENERATOR=y": (
"qemu_x86",
"qemu_riscv32",
"qemu_cortex_r5",
"qemu_riscv64",
),
"CONFIG_ENTROPY_GENERATOR=y": (
"mps2_an521",
"nrf5340dk_nrf5340_cpuapp",
"nucleo_f746zg",
"nucleo_l4r5zi",
"stm32f746g_disco",
),
}
def _create_prj_conf(self, project_dir, options):
with open(project_dir / "prj.conf", "w") as f:
f.write(
"# For UART used from main().\n"
"CONFIG_RING_BUFFER=y\n"
"CONFIG_UART_CONSOLE=n\n"
"CONFIG_UART_INTERRUPT_DRIVEN=y\n"
"\n"
)
f.write("# For TVMPlatformAbort().\n" "CONFIG_REBOOT=y\n" "\n")
if options["project_type"] == "host_driven":
f.write("# For RPC server C++ bindings.\n" "CONFIG_CPLUSPLUS=y\n" "\n")
f.write("# For math routines\n" "CONFIG_NEWLIB_LIBC=y\n" "\n")
if self._has_fpu(options["zephyr_board"]):
f.write("# For models with floating point.\n" "CONFIG_FPU=y\n" "\n")
# Set main stack size, if needed.
if options.get("config_main_stack_size") is not None:
f.write(f"CONFIG_MAIN_STACK_SIZE={options["config_main_stack_size"]}\n")
f.write("# For random number generation.\n" "CONFIG_TEST_RANDOM_GENERATOR=y\n")
f.write("\n# Extra prj.conf directives\n")
for line, board_list in self.EXTRA_PRJ_CONF_DIRECTIVES.items():
if options["zephyr_board"] in board_list:
f.write(f"{line}\n")
f.write("\n")
API_SERVER_CRT_LIBS_TOKEN = "<API_SERVER_CRT_LIBS>"
CRT_LIBS_BY_PROJECT_TYPE = {
"host_driven": "microtvm_rpc_server microtvm_rpc_common common",
"aot_demo": "memory microtvm_rpc_common common",
}
def generate_project(self, model_library_format_path, standalone_crt_dir, project_dir, options):
project_dir = pathlib.Path(project_dir)
# Make project directory.
project_dir.mkdir()
# Copy ourselves to the generated project. TVM may perform further build steps on the generated project
# by launching the copy.
shutil.copy2(__file__, project_dir / os.path.basename(__file__))
# Copy boards.json file to generated project.
shutil.copy2(BOARDS, project_dir / BOARDS.name)
# Place Model Library Format tarball in the special location, which this script uses to decide
# whether it's being invoked in a template or generated project.
project_model_library_format_tar_path = project_dir / MODEL_LIBRARY_FORMAT_RELPATH
shutil.copy2(model_library_format_path, project_model_library_format_tar_path)
# Extract Model Library Format tarball into <project_dir>/model.
extract_path = os.path.splitext(project_model_library_format_tar_path)[0]
with tarfile.TarFile(project_model_library_format_tar_path) as tf:
os.makedirs(extract_path)
tf.extractall(path=extract_path)
if self._is_qemu(options):
shutil.copytree(API_SERVER_DIR / "qemu-hack", project_dir / "qemu-hack")
# Populate CRT.
crt_path = project_dir / "crt"
crt_path.mkdir()
for item in self.CRT_COPY_ITEMS:
src_path = os.path.join(standalone_crt_dir, item)
dst_path = crt_path / item
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path)
else:
shutil.copy2(src_path, dst_path)
# Populate CMakeLists.txt from its template.
with open(API_SERVER_DIR / "CMakeLists.txt.template", "r") as cmake_template_f:
with open(project_dir / "CMakeLists.txt", "w") as cmake_f:
for line in cmake_template_f:
if self.API_SERVER_CRT_LIBS_TOKEN in line:
crt_libs = self.CRT_LIBS_BY_PROJECT_TYPE[options["project_type"]]
line = line.replace("<API_SERVER_CRT_LIBS>", crt_libs)
cmake_f.write(line)
self._create_prj_conf(project_dir, options)
# Populate crt-config.h
crt_config_dir = project_dir / "crt_config"
crt_config_dir.mkdir()
shutil.copy2(
API_SERVER_DIR / "crt_config" / "crt_config.h", crt_config_dir / "crt_config.h"
)
# Populate src/
src_dir = project_dir / "src"
shutil.copytree(API_SERVER_DIR / "src" / options["project_type"], src_dir)
# Populate extra_files
if options.get("extra_files_tar"):
with tarfile.open(options["extra_files_tar"], mode="r:*") as tf:
tf.extractall(project_dir)
def build(self, options):
BUILD_DIR.mkdir()
cmake_args = ["cmake", ".."]
if options.get("verbose"):
cmake_args.append("-DCMAKE_VERBOSE_MAKEFILE:BOOL=TRUE")
if options.get("zephyr_base"):
cmake_args.append(f"-DZEPHYR_BASE:STRING={options["zephyr_base"]}")
if options.get("west_cmd"):
cmake_args.append(f"-DWEST={options["west_cmd"]}")
cmake_args.append(f"-DBOARD:STRING={options["zephyr_board"]}")
check_call(cmake_args, cwd=BUILD_DIR)
args = ["make", "-j2"]
if options.get("verbose"):
args.append("VERBOSE=1")
check_call(args, cwd=BUILD_DIR)
# A list of all zephyr_board values which are known to launch using QEMU. Many platforms which
# launch through QEMU by default include "qemu" in their name. However, not all do. This list
# includes those tested platforms which do not include qemu.
_KNOWN_QEMU_ZEPHYR_BOARDS = ("mps2_an521",)
@classmethod
def _is_qemu(cls, options):
return (
"qemu" in options["zephyr_board"]
or options["zephyr_board"] in cls._KNOWN_QEMU_ZEPHYR_BOARDS
)
@classmethod
def _has_fpu(cls, zephyr_board):
fpu_boards = [name for name, board in BOARD_PROPERTIES.items() if board["fpu"]]
return zephyr_board in fpu_boards
def flash(self, options):
if self._is_qemu(options):
return # NOTE: qemu requires no flash step--it is launched from open_transport.
zephyr_board = options["zephyr_board"]
# The nRF5340DK requires an additional `nrfjprog --recover` before each flash cycle.
# This is because readback protection is enabled by default when this device is flashed.
# Otherwise, flashing may fail with an error such as the following:
# ERROR: The operation attempted is unavailable due to readback protection in
# ERROR: your device. Please use --recover to unlock the device.
if zephyr_board.startswith("nrf5340dk") and _get_flash_runner() == "nrfjprog":
recover_args = ["nrfjprog", "--recover"]
recover_args.extend(_get_nrf_device_args(options))
check_call(recover_args, cwd=API_SERVER_DIR / "build")
check_call(["make", "flash"], cwd=API_SERVER_DIR / "build")
def open_transport(self, options):
if self._is_qemu(options):
transport = ZephyrQemuTransport(options)
else:
transport = ZephyrSerialTransport(options)
to_return = transport.open()
self._transport = transport
atexit.register(lambda: self.close_transport())
return to_return
def close_transport(self):
if self._transport is not None:
self._transport.close()
self._transport = None
def read_transport(self, n, timeout_sec):
if self._transport is None:
raise server.TransportClosedError()
return self._transport.read(n, timeout_sec)
def write_transport(self, data, timeout_sec):
if self._transport is None:
raise server.TransportClosedError()
return self._transport.write(data, timeout_sec)
def _set_nonblock(fd):
flag = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
new_flag = fcntl.fcntl(fd, fcntl.F_GETFL)
assert (new_flag & os.O_NONBLOCK) != 0, f"Cannot set file descriptor {fd} to non-blocking"
class ZephyrSerialTransport:
@classmethod
def _lookup_baud_rate(cls, options):
zephyr_base = options.get("zephyr_base", os.environ["ZEPHYR_BASE"])
sys.path.insert(0, os.path.join(zephyr_base, "scripts", "dts"))
try:
import dtlib # pylint: disable=import-outside-toplevel
finally:
sys.path.pop(0)
dt_inst = dtlib.DT(BUILD_DIR / "zephyr" / "zephyr.dts")
uart_baud = (
dt_inst.get_node("/chosen")
.props["zephyr,console"]
.to_path()
.props["current-speed"]
.to_num()
)
_LOG.debug("zephyr transport: found UART baudrate from devicetree: %d", uart_baud)
return uart_baud
@classmethod
def _find_nrf_serial_port(cls, options):
com_ports = subprocess.check_output(
["nrfjprog", "--com"] + _get_device_args(options), encoding="utf-8"
)
ports_by_vcom = {}
for line in com_ports.split("\n")[:-1]:
parts = line.split()
ports_by_vcom[parts[2]] = parts[1]
return ports_by_vcom["VCOM2"]
@classmethod
def _find_openocd_serial_port(cls, options):
serial_number = openocd_serial(options)
ports = [p for p in serial.tools.list_ports.grep(serial_number)]
if len(ports) != 1:
raise Exception(
f"_find_openocd_serial_port: expected 1 port to match {serial_number}, "
f"found: {ports!r}"
)
return ports[0].device
@classmethod
def _find_jlink_serial_port(cls, options):
return cls._find_openocd_serial_port(options)
@classmethod
def _find_serial_port(cls, options):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return cls._find_nrf_serial_port(options)
if flash_runner == "openocd":
return cls._find_openocd_serial_port(options)
if flash_runner == "jlink":
return cls._find_jlink_serial_port(options)
raise RuntimeError(f"Don't know how to deduce serial port for flash runner {flash_runner}")
def __init__(self, options):
self._options = options
self._port = None
def open(self):
port_path = self._find_serial_port(self._options)
self._port = serial.Serial(port_path, baudrate=self._lookup_baud_rate(self._options))
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=5.0,
session_established_timeout_sec=5.0,
)
def close(self):
self._port.close()
self._port = None
def read(self, n, timeout_sec):
self._port.timeout = timeout_sec
to_return = self._port.read(n)
if not to_return:
raise server.IoTimeoutError()
return to_return
def write(self, data, timeout_sec):
self._port.write_timeout = timeout_sec
bytes_written = 0
while bytes_written < len(data):
n = self._port.write(data)
data = data[n:]
bytes_written += n
class ZephyrQemuMakeResult(enum.Enum):
QEMU_STARTED = "qemu_started"
MAKE_FAILED = "make_failed"
EOF = "eof"
class ZephyrQemuTransport:
"""The user-facing Zephyr QEMU transport class."""
def __init__(self, options):
self.options = options
self.proc = None
self.pipe_dir = None
self.read_fd = None
self.write_fd = None
self._queue = queue.Queue()
def open(self):
self.pipe_dir = pathlib.Path(tempfile.mkdtemp())
self.pipe = self.pipe_dir / "fifo"
self.write_pipe = self.pipe_dir / "fifo.in"
self.read_pipe = self.pipe_dir / "fifo.out"
os.mkfifo(self.write_pipe)
os.mkfifo(self.read_pipe)
if "gdbserver_port" in self.options:
if "env" in self.kwargs:
self.kwargs["env"] = copy.copy(self.kwargs["env"])
else:
self.kwargs["env"] = os.environ.copy()
self.kwargs["env"]["TVM_QEMU_GDBSERVER_PORT"] = str(self.options["gdbserver_port"])
self.proc = subprocess.Popen(
["make", "run", f"QEMU_PIPE={self.pipe}"],
cwd=BUILD_DIR,
stdout=subprocess.PIPE,
)
self._wait_for_qemu()
# NOTE: although each pipe is unidirectional, open both as RDWR to work around a select
# limitation on linux. Without this, non-blocking I/O can't use timeouts because named
# FIFOs are always considered ready to read when no one has opened them for writing.
self.read_fd = os.open(self.read_pipe, os.O_RDWR | os.O_NONBLOCK)
self.write_fd = os.open(self.write_pipe, os.O_RDWR | os.O_NONBLOCK)
_set_nonblock(self.read_fd)
_set_nonblock(self.write_fd)
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=10.0,
session_established_timeout_sec=10.0,
)
def close(self):
did_write = False
if self.write_fd is not None:
try:
server.write_with_timeout(
self.write_fd, b"\x01x", 1.0
) # Use a short timeout since we will kill the process
did_write = True
except server.IoTimeoutError:
pass
os.close(self.write_fd)
self.write_fd = None
if self.proc:
if not did_write:
self.proc.terminate()
try:
self.proc.wait(5.0)
except subprocess.TimeoutExpired:
self.proc.kill()
if self.read_fd:
os.close(self.read_fd)
self.read_fd = None
if self.pipe_dir is not None:
shutil.rmtree(self.pipe_dir)
self.pipe_dir = None
def read(self, n, timeout_sec):
return server.read_with_timeout(self.read_fd, n, timeout_sec)
def write(self, data, timeout_sec):
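# Data bytes equal to 0x01 are escaped by doubling them before being written to
# the QEMU pipe, presumably so payload bytes are not mistaken for the control
# sequence (b"\x01x") that close() sends over the same FIFO; the matching
# unescaping is assumed to happen on the qemu-hack side, not in this file.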
to_write = bytearray()
escape_pos = []
for i, b in enumerate(data):
if b == 0x01:
to_write.append(b)
escape_pos.append(i)
to_write.append(b)
while to_write:
num_written = server.write_with_timeout(self.write_fd, to_write, timeout_sec)
to_write = to_write[num_written:]
def _qemu_check_stdout(self):
for line in self.proc.stdout:
line = str(line)
_LOG.info("%s", line)
if "[QEMU] CPU" in line:
self._queue.put(ZephyrQemuMakeResult.QEMU_STARTED)
else:
line = re.sub("[^a-zA-Z0-9 \n]", "", line)
pattern = r"recipe for target (\w*) failed"
if re.search(pattern, line, re.IGNORECASE):
self._queue.put(ZephyrQemuMakeResult.MAKE_FAILED)
self._queue.put(ZephyrQemuMakeResult.EOF)
def _wait_for_qemu(self):
threading.Thread(target=self._qemu_check_stdout, daemon=True).start()
while True:
try:
item = self._queue.get(timeout=120)
except Exception:
raise TimeoutError("QEMU setup timeout.")
if item == ZephyrQemuMakeResult.QEMU_STARTED:
break
if item in [ZephyrQemuMakeResult.MAKE_FAILED, ZephyrQemuMakeResult.EOF]:
raise RuntimeError("QEMU setup failed.")
raise ValueError(f"{item} not expected.")
if __name__ == "__main__":
server.main(Handler())
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import atexit
import collections
import collections.abc
import enum
import fcntl
import logging
import os
import os.path
import pathlib
import queue
import re
import select
import shlex
import shutil
import subprocess
import sys
import tarfile
import tempfile
import threading
import time
import json
import serial
import serial.tools.list_ports
import yaml
from tvm.micro.project_api import server
_LOG = logging.getLogger(__name__)
API_SERVER_DIR = pathlib.Path(os.path.dirname(__file__) or os.path.getcwd())
BUILD_DIR = API_SERVER_DIR / "build"
MODEL_LIBRARY_FORMAT_RELPATH = "model.tar"
IS_TEMPLATE = not (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH).exists()
BOARDS = API_SERVER_DIR / "boards.json"
# Data structure to hold the information microtvm_api_server.py needs
# to communicate with each of these boards.
try:
with open(BOARDS) as boards:
BOARD_PROPERTIES = json.load(boards)
except FileNotFoundError:
raise FileNotFoundError(f"Board file {{{BOARDS}}} does not exist.")
def check_call(cmd_args, *args, **kwargs):
cwd_str = "" if "cwd" not in kwargs else f" (in cwd: {kwargs['cwd']})"
_LOG.info("run%s: %s", cwd_str, " ".join(shlex.quote(a) for a in cmd_args))
return subprocess.check_call(cmd_args, *args, **kwargs)
CACHE_ENTRY_RE = re.compile(r"(?P<name>[^:]+):(?P<type>[^=]+)=(?P<value>.*)")
CMAKE_BOOL_MAP = dict(
[(k, True) for k in ("1", "ON", "YES", "TRUE", "Y")]
+ [(k, False) for k in ("0", "OFF", "NO", "FALSE", "N", "IGNORE", "NOTFOUND", "")]
)
class CMakeCache(collections.abc.Mapping):
def __init__(self, path):
self._path = path
self._dict = None
def __iter__(self):
return iter(self._dict)
def __getitem__(self, key):
if self._dict is None:
self._dict = self._read_cmake_cache()
return self._dict[key]
def __len__(self):
return len(self._dict)
def _read_cmake_cache(self):
"""Read a CMakeCache.txt-like file and return a dictionary of values."""
entries = collections.OrderedDict()
with open(self._path, encoding="utf-8") as f:
for line in f:
m = CACHE_ENTRY_RE.match(line.rstrip("\n"))
if not m:
continue
if m.group("type") == "BOOL":
value = CMAKE_BOOL_MAP[m.group("value").upper()]
else:
value = m.group("value")
entries[m.group("name")] = value
return entries
CMAKE_CACHE = CMakeCache(BUILD_DIR / "CMakeCache.txt")
class BoardError(Exception):
"""Raised when an attached board cannot be opened (i.e. missing /dev nodes, etc)."""
class BoardAutodetectFailed(Exception):
"""Raised when no attached hardware is found matching the board= given to ZephyrCompiler."""
def _get_flash_runner():
flash_runner = CMAKE_CACHE.get("ZEPHYR_BOARD_FLASH_RUNNER")
if flash_runner is not None:
return flash_runner
with open(CMAKE_CACHE["ZEPHYR_RUNNERS_YAML"]) as f:
doc = yaml.load(f, Loader=yaml.FullLoader)
return doc["flash-runner"]
def _get_device_args(options):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return _get_nrf_device_args(options)
if flash_runner == "openocd":
return _get_openocd_device_args(options)
raise BoardError(
f"Don't know how to find serial terminal for board {CMAKE_CACHE['BOARD']} with flash "
f"runner {flash_runner}"
)
# kwargs passed to usb.core.find to find attached boards for the openocd flash runner.
BOARD_USB_FIND_KW = {
"nucleo_l4r5zi": {"idVendor": 0x0483, "idProduct": 0x374B},
"nucleo_f746zg": {"idVendor": 0x0483, "idProduct": 0x374B},
"stm32f746g_disco": {"idVendor": 0x0483, "idProduct": 0x374B},
"mimxrt1050_evk": {"idVendor": 0x1366, "idProduct": 0x0105},
}
def openocd_serial(options):
"""Find the serial port to use for a board with OpenOCD flash strategy."""
if "openocd_serial" in options:
return options["openocd_serial"]
import usb # pylint: disable=import-outside-toplevel
find_kw = BOARD_USB_FIND_KW[CMAKE_CACHE["BOARD"]]
boards = usb.core.find(find_all=True, **find_kw)
serials = []
for b in boards:
serials.append(b.serial_number)
if len(serials) == 0:
raise BoardAutodetectFailed(f"No attached USB devices matching: {find_kw!r}")
serials.sort()
autodetected_openocd_serial = serials[0]
_LOG.debug("zephyr openocd driver: autodetected serial %s", serials[0])
return autodetected_openocd_serial
def _get_openocd_device_args(options):
return ["--serial", openocd_serial(options)]
def _get_nrf_device_args(options):
nrfjprog_args = ["nrfjprog", "--ids"]
nrfjprog_ids = subprocess.check_output(nrfjprog_args, encoding="utf-8")
if not nrfjprog_ids.strip("\n"):
raise BoardAutodetectFailed(f'No attached boards recognized by {" ".join(nrfjprog_args)}')
boards = nrfjprog_ids.split("\n")[:-1]
if len(boards) > 1:
if options["nrfjprog_snr"] is None:
raise BoardError(
"Multiple boards connected; specify one with nrfjprog_snr=: " f'{", ".join(boards)}'
)
if str(options["nrfjprog_snr"]) not in boards:
raise BoardError(
f"nrfjprog_snr ({options['nrfjprog_snr']}) not found in {nrfjprog_args}: {boards}"
)
return ["--snr", options["nrfjprog_snr"]]
if not boards:
return []
return ["--snr", boards[0]]
PROJECT_TYPES = []
if IS_TEMPLATE:
for d in (API_SERVER_DIR / "src").iterdir():
if d.is_dir():
PROJECT_TYPES.append(d.name)
PROJECT_OPTIONS = [
server.ProjectOption(
"extra_files_tar",
help="If given, during generate_project, uncompress the tarball at this path into the project dir.",
),
server.ProjectOption(
"gdbserver_port", help=("If given, port number to use when running the local gdbserver.")
),
server.ProjectOption(
"nrfjprog_snr",
help=("When used with nRF targets, serial # of the attached board to use, from nrfjprog."),
),
server.ProjectOption(
"openocd_serial",
help=("When used with OpenOCD targets, serial # of the attached board to use."),
),
server.ProjectOption(
"project_type",
help="Type of project to generate.",
choices=tuple(PROJECT_TYPES),
),
server.ProjectOption("verbose", help="Run build with verbose output.", choices=(True, False)),
server.ProjectOption(
"west_cmd",
help=(
"Path to the west tool. If given, supersedes both the zephyr_base "
"option and ZEPHYR_BASE environment variable."
),
),
server.ProjectOption("zephyr_base", help="Path to the zephyr base directory."),
server.ProjectOption(
"zephyr_board",
choices=list(BOARD_PROPERTIES),
help="Name of the Zephyr board to build for.",
),
server.ProjectOption(
"config_main_stack_size",
help="Sets CONFIG_MAIN_STACK_SIZE for Zephyr board.",
),
]
class Handler(server.ProjectAPIHandler):
def __init__(self):
super(Handler, self).__init__()
self._proc = None
def server_info_query(self, tvm_version):
return server.ServerInfo(
platform_name="zephyr",
is_template=IS_TEMPLATE,
model_library_format_path=""
if IS_TEMPLATE
else (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH),
project_options=PROJECT_OPTIONS,
)
# These files and directories will be recursively copied into generated projects from the CRT.
CRT_COPY_ITEMS = ("include", "Makefile", "src")
# Maps extra line added to prj.conf to a tuple or list of zephyr_board for which it is needed.
EXTRA_PRJ_CONF_DIRECTIVES = {
"CONFIG_TIMER_RANDOM_GENERATOR=y": (
"qemu_x86",
"qemu_riscv32",
"qemu_cortex_r5",
"qemu_riscv64",
),
"CONFIG_ENTROPY_GENERATOR=y": (
"mps2_an521",
"nrf5340dk_nrf5340_cpuapp",
"nucleo_f746zg",
"nucleo_l4r5zi",
"stm32f746g_disco",
),
}
def _create_prj_conf(self, project_dir, options):
with open(project_dir / "prj.conf", "w") as f:
f.write(
"# For UART used from main().\n"
"CONFIG_RING_BUFFER=y\n"
"CONFIG_UART_CONSOLE=n\n"
"CONFIG_UART_INTERRUPT_DRIVEN=y\n"
"\n"
)
f.write("# For TVMPlatformAbort().\n" "CONFIG_REBOOT=y\n" "\n")
if options["project_type"] == "host_driven":
f.write("# For RPC server C++ bindings.\n" "CONFIG_CPLUSPLUS=y\n" "\n")
f.write("# For math routines\n" "CONFIG_NEWLIB_LIBC=y\n" "\n")
if self._has_fpu(options["zephyr_board"]):
f.write("# For models with floating point.\n" "CONFIG_FPU=y\n" "\n")
# Set main stack size, if needed.
if options.get("config_main_stack_size") is not None:
f.write(f"CONFIG_MAIN_STACK_SIZE={options['config_main_stack_size']}\n")
f.write("# For random number generation.\n" "CONFIG_TEST_RANDOM_GENERATOR=y\n")
f.write("\n# Extra prj.conf directives\n")
for line, board_list in self.EXTRA_PRJ_CONF_DIRECTIVES.items():
if options["zephyr_board"] in board_list:
f.write(f"{line}\n")
f.write("\n")
API_SERVER_CRT_LIBS_TOKEN = "<API_SERVER_CRT_LIBS>"
CRT_LIBS_BY_PROJECT_TYPE = {
"host_driven": "microtvm_rpc_server microtvm_rpc_common common",
"aot_demo": "memory microtvm_rpc_common common",
}
def generate_project(self, model_library_format_path, standalone_crt_dir, project_dir, options):
project_dir = pathlib.Path(project_dir)
# Make project directory.
project_dir.mkdir()
# Copy ourselves to the generated project. TVM may perform further build steps on the generated project
# by launching the copy.
shutil.copy2(__file__, project_dir / os.path.basename(__file__))
# Copy boards.json file to generated project.
shutil.copy2(BOARDS, project_dir / BOARDS.name)
# Place Model Library Format tarball in the special location, which this script uses to decide
# whether it's being invoked in a template or generated project.
project_model_library_format_tar_path = project_dir / MODEL_LIBRARY_FORMAT_RELPATH
shutil.copy2(model_library_format_path, project_model_library_format_tar_path)
# Extract Model Library Format tarball into <project_dir>/model.
extract_path = os.path.splitext(project_model_library_format_tar_path)[0]
with tarfile.TarFile(project_model_library_format_tar_path) as tf:
os.makedirs(extract_path)
tf.extractall(path=extract_path)
if self._is_qemu(options):
shutil.copytree(API_SERVER_DIR / "qemu-hack", project_dir / "qemu-hack")
# Populate CRT.
crt_path = project_dir / "crt"
crt_path.mkdir()
for item in self.CRT_COPY_ITEMS:
src_path = os.path.join(standalone_crt_dir, item)
dst_path = crt_path / item
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path)
else:
shutil.copy2(src_path, dst_path)
# Populate CMakeLists.txt from its template.
with open(API_SERVER_DIR / "CMakeLists.txt.template", "r") as cmake_template_f:
with open(project_dir / "CMakeLists.txt", "w") as cmake_f:
for line in cmake_template_f:
if self.API_SERVER_CRT_LIBS_TOKEN in line:
crt_libs = self.CRT_LIBS_BY_PROJECT_TYPE[options["project_type"]]
line = line.replace("<API_SERVER_CRT_LIBS>", crt_libs)
cmake_f.write(line)
self._create_prj_conf(project_dir, options)
# Populate crt-config.h
crt_config_dir = project_dir / "crt_config"
crt_config_dir.mkdir()
shutil.copy2(
API_SERVER_DIR / "crt_config" / "crt_config.h", crt_config_dir / "crt_config.h"
)
# Populate src/
src_dir = project_dir / "src"
shutil.copytree(API_SERVER_DIR / "src" / options["project_type"], src_dir)
# Populate extra_files
if options.get("extra_files_tar"):
with tarfile.open(options["extra_files_tar"], mode="r:*") as tf:
tf.extractall(project_dir)
def build(self, options):
BUILD_DIR.mkdir()
cmake_args = ["cmake", ".."]
if options.get("verbose"):
cmake_args.append("-DCMAKE_VERBOSE_MAKEFILE:BOOL=TRUE")
if options.get("zephyr_base"):
cmake_args.append(f"-DZEPHYR_BASE:STRING={options['zephyr_base']}")
if options.get("west_cmd"):
cmake_args.append(f"-DWEST={options['west_cmd']}")
cmake_args.append(f"-DBOARD:STRING={options['zephyr_board']}")
check_call(cmake_args, cwd=BUILD_DIR)
args = ["make", "-j2"]
if options.get("verbose"):
args.append("VERBOSE=1")
check_call(args, cwd=BUILD_DIR)
# A list of all zephyr_board values which are known to launch using QEMU. Many platforms which
# launch through QEMU by default include "qemu" in their name. However, not all do. This list
# includes those tested platforms which do not include qemu.
_KNOWN_QEMU_ZEPHYR_BOARDS = ("mps2_an521",)
@classmethod
def _is_qemu(cls, options):
return (
"qemu" in options["zephyr_board"]
or options["zephyr_board"] in cls._KNOWN_QEMU_ZEPHYR_BOARDS
)
@classmethod
def _has_fpu(cls, zephyr_board):
fpu_boards = [name for name, board in BOARD_PROPERTIES.items() if board["fpu"]]
return zephyr_board in fpu_boards
def flash(self, options):
if self._is_qemu(options):
return # NOTE: qemu requires no flash step--it is launched from open_transport.
zephyr_board = options["zephyr_board"]
# The nRF5340DK requires an additional `nrfjprog --recover` before each flash cycle.
# This is because readback protection is enabled by default when this device is flashed.
# Otherwise, flashing may fail with an error such as the following:
# ERROR: The operation attempted is unavailable due to readback protection in
# ERROR: your device. Please use --recover to unlock the device.
if zephyr_board.startswith("nrf5340dk") and _get_flash_runner() == "nrfjprog":
recover_args = ["nrfjprog", "--recover"]
recover_args.extend(_get_nrf_device_args(options))
check_call(recover_args, cwd=API_SERVER_DIR / "build")
check_call(["make", "flash"], cwd=API_SERVER_DIR / "build")
def open_transport(self, options):
if self._is_qemu(options):
transport = ZephyrQemuTransport(options)
else:
transport = ZephyrSerialTransport(options)
to_return = transport.open()
self._transport = transport
atexit.register(lambda: self.close_transport())
return to_return
def close_transport(self):
if self._transport is not None:
self._transport.close()
self._transport = None
def read_transport(self, n, timeout_sec):
if self._transport is None:
raise server.TransportClosedError()
return self._transport.read(n, timeout_sec)
def write_transport(self, data, timeout_sec):
if self._transport is None:
raise server.TransportClosedError()
return self._transport.write(data, timeout_sec)
def _set_nonblock(fd):
flag = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
new_flag = fcntl.fcntl(fd, fcntl.F_GETFL)
assert (new_flag & os.O_NONBLOCK) != 0, f"Cannot set file descriptor {fd} to non-blocking"
class ZephyrSerialTransport:
@classmethod
def _lookup_baud_rate(cls, options):
zephyr_base = options.get("zephyr_base", os.environ["ZEPHYR_BASE"])
sys.path.insert(0, os.path.join(zephyr_base, "scripts", "dts"))
try:
import dtlib # pylint: disable=import-outside-toplevel
finally:
sys.path.pop(0)
dt_inst = dtlib.DT(BUILD_DIR / "zephyr" / "zephyr.dts")
uart_baud = (
dt_inst.get_node("/chosen")
.props["zephyr,console"]
.to_path()
.props["current-speed"]
.to_num()
)
_LOG.debug("zephyr transport: found UART baudrate from devicetree: %d", uart_baud)
return uart_baud
@classmethod
def _find_nrf_serial_port(cls, options):
com_ports = subprocess.check_output(
["nrfjprog", "--com"] + _get_device_args(options), encoding="utf-8"
)
ports_by_vcom = {}
for line in com_ports.split("\n")[:-1]:
parts = line.split()
ports_by_vcom[parts[2]] = parts[1]
return ports_by_vcom["VCOM2"]
@classmethod
def _find_openocd_serial_port(cls, options):
serial_number = openocd_serial(options)
ports = [p for p in serial.tools.list_ports.grep(serial_number)]
if len(ports) != 1:
raise Exception(
f"_find_openocd_serial_port: expected 1 port to match {serial_number}, "
f"found: {ports!r}"
)
return ports[0].device
@classmethod
def _find_jlink_serial_port(cls, options):
return cls._find_openocd_serial_port(options)
@classmethod
def _find_serial_port(cls, options):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return cls._find_nrf_serial_port(options)
if flash_runner == "openocd":
return cls._find_openocd_serial_port(options)
if flash_runner == "jlink":
return cls._find_jlink_serial_port(options)
raise RuntimeError(f"Don't know how to deduce serial port for flash runner {flash_runner}")
def __init__(self, options):
self._options = options
self._port = None
def open(self):
port_path = self._find_serial_port(self._options)
self._port = serial.Serial(port_path, baudrate=self._lookup_baud_rate(self._options))
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=5.0,
session_established_timeout_sec=5.0,
)
def close(self):
self._port.close()
self._port = None
def read(self, n, timeout_sec):
self._port.timeout = timeout_sec
to_return = self._port.read(n)
if not to_return:
raise server.IoTimeoutError()
return to_return
def write(self, data, timeout_sec):
self._port.write_timeout = timeout_sec
bytes_written = 0
while bytes_written < len(data):
n = self._port.write(data)
data = data[n:]
bytes_written += n
class ZephyrQemuMakeResult(enum.Enum):
QEMU_STARTED = "qemu_started"
MAKE_FAILED = "make_failed"
EOF = "eof"
class ZephyrQemuTransport:
"""The user-facing Zephyr QEMU transport class."""
def __init__(self, options):
self.options = options
self.proc = None
self.pipe_dir = None
self.read_fd = None
self.write_fd = None
self._queue = queue.Queue()
def open(self):
self.pipe_dir = pathlib.Path(tempfile.mkdtemp())
self.pipe = self.pipe_dir / "fifo"
self.write_pipe = self.pipe_dir / "fifo.in"
self.read_pipe = self.pipe_dir / "fifo.out"
os.mkfifo(self.write_pipe)
os.mkfifo(self.read_pipe)
if "gdbserver_port" in self.options:
if "env" in self.kwargs:
self.kwargs["env"] = copy.copy(self.kwargs["env"])
else:
self.kwargs["env"] = os.environ.copy()
self.kwargs["env"]["TVM_QEMU_GDBSERVER_PORT"] = str(self.options["gdbserver_port"])
self.proc = subprocess.Popen(
["make", "run", f"QEMU_PIPE={self.pipe}"],
cwd=BUILD_DIR,
stdout=subprocess.PIPE,
)
self._wait_for_qemu()
# NOTE: although each pipe is unidirectional, open both as RDWR to work around a select
# limitation on Linux. Without this, non-blocking I/O can't use timeouts, because named
# FIFOs are always considered ready to read when no one has opened them for writing.
self.read_fd = os.open(self.read_pipe, os.O_RDWR | os.O_NONBLOCK)
self.write_fd = os.open(self.write_pipe, os.O_RDWR | os.O_NONBLOCK)
_set_nonblock(self.read_fd)
_set_nonblock(self.write_fd)
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=10.0,
session_established_timeout_sec=10.0,
)
def close(self):
did_write = False
if self.write_fd is not None:
try:
server.write_with_timeout(
self.write_fd, b"\x01x", 1.0
) # Use a short timeout since we will kill the process
did_write = True
except server.IoTimeoutError:
pass
os.close(self.write_fd)
self.write_fd = None
if self.proc:
if not did_write:
self.proc.terminate()
try:
self.proc.wait(5.0)
except subprocess.TimeoutExpired:
self.proc.kill()
if self.read_fd:
os.close(self.read_fd)
self.read_fd = None
if self.pipe_dir is not None:
shutil.rmtree(self.pipe_dir)
self.pipe_dir = None
def read(self, n, timeout_sec):
return server.read_with_timeout(self.read_fd, n, timeout_sec)
def write(self, data, timeout_sec):
to_write = bytearray()
escape_pos = []
for i, b in enumerate(data):
if b == 0x01:
to_write.append(b)
escape_pos.append(i)
to_write.append(b)
while to_write:
num_written = server.write_with_timeout(self.write_fd, to_write, timeout_sec)
to_write = to_write[num_written:]
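# Sketch of the escaping above: 0x01 bytes are doubled before being written, since a
# lone 0x01 carries control messages such as the b"\x01x" shutdown request sent in
# close(). For example, b"\x01A" goes out on the wire as b"\x01\x01A".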
def _qemu_check_stdout(self):
for line in self.proc.stdout:
line = str(line)
_LOG.info("%s", line)
if "[QEMU] CPU" in line:
self._queue.put(ZephyrQemuMakeResult.QEMU_STARTED)
else:
line = re.sub("[^a-zA-Z0-9 \n]", "", line)
pattern = r"recipe for target (\w*) failed"
if re.search(pattern, line, re.IGNORECASE):
self._queue.put(ZephyrQemuMakeResult.MAKE_FAILED)
self._queue.put(ZephyrQemuMakeResult.EOF)
def _wait_for_qemu(self):
threading.Thread(target=self._qemu_check_stdout, daemon=True).start()
while True:
try:
item = self._queue.get(timeout=120)
except Exception:
raise TimeoutError("QEMU setup timeout.")
if item == ZephyrQemuMakeResult.QEMU_STARTED:
break
if item in [ZephyrQemuMakeResult.MAKE_FAILED, ZephyrQemuMakeResult.EOF]:
raise RuntimeError("QEMU setup failed.")
raise ValueError(f"{item} not expected.")
if __name__ == "__main__":
server.main(Handler())
|
import logging
import azure.functions as func
import json
import os
from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
# Connect to Azure Table Storage
table_service = TableService(connection_string= os.environ['AzureWebJobsStorage'])
table_service.create_table('intents') if not table_service.exists('intents') else None
req_body = req.get_json()
if req_body:
# Create row to be saved on Azure Table Storage
print(req_body.get('ConversationId'))
data = req_body
data["PartitionKey"] = req_body.get('ConversationId')
data["RowKey"] = req_body.get('MessageId')
# Save row on Azure Table Storage
table_service.insert_or_replace_entity('intents', data)
return func.HttpResponse(f"Row {req_body.get("MessageId")} for {req_body.get("ConversationId")} added")
else:
return func.HttpResponse(
"Please pass valid request body",
status_code=400
) | import logging
import azure.functions as func
import json
import os
from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
# Connect to Azure Table Storage
table_service = TableService(connection_string= os.environ['AzureWebJobsStorage'])
table_service.create_table('intents') if not table_service.exists('intents') else None
req_body = req.get_json()
if req_body:
# Create row to be saved on Azure Table Storage
print(req_body.get('ConversationId'))
data = req_body
data["PartitionKey"] = req_body.get('ConversationId')
data["RowKey"] = req_body.get('MessageId')
# Save row on Azure Table Storage
table_service.insert_or_replace_entity('intents', data)
return func.HttpResponse(f"Row {req_body.get('MessageId')} for {req_body.get('ConversationId')} added")
else:
return func.HttpResponse(
"Please pass valid request body",
status_code=400
) |
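# Illustrative request body for the function above (field values are hypothetical);
# ConversationId and MessageId double as the Table Storage PartitionKey and RowKey:
# {"ConversationId": "abc-123", "MessageId": "42", "intent": "greeting"}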
import datetime
import logging
import traceback
from dis_snek.models import ComponentContext
from dis_snek.models import InteractionContext
from ElevatorBot.misc.formating import embed_message
def get_now_with_tz() -> datetime.datetime:
"""Returns the current datetime (timezone aware)"""
return datetime.datetime.now(tz=datetime.timezone.utc)
def localize_datetime(obj: datetime.datetime) -> datetime.datetime:
"""Returns a timezone aware object, localized to the system timezone"""
return obj.astimezone()
async def log_error(
ctx: InteractionContext | ComponentContext,
error: Exception,
situation: str,
) -> None:
"""Respond to the context and log error"""
if not ctx.responded:
await ctx.send(
embeds=embed_message(
"Error",
f"Sorry, something went wrong\nThe Error has been logged and will be worked on",
str(error),
)
)
# log the error
logger = logging.getLogger(situation)
logger.exception(
f"InteractionID '{ctx.interaction_id}" - Error {error} - Traceback: \n{"".join(traceback.format_tb(error.__traceback__))}"
)
# raise the error again to make development easier
raise error
| import datetime
import logging
import traceback
from dis_snek.models import ComponentContext
from dis_snek.models import InteractionContext
from ElevatorBot.misc.formating import embed_message
def get_now_with_tz() -> datetime.datetime:
"""Returns the current datetime (timezone aware)"""
return datetime.datetime.now(tz=datetime.timezone.utc)
def localize_datetime(obj: datetime.datetime) -> datetime.datetime:
"""Returns a timezone aware object, localized to the system timezone"""
return obj.astimezone()
async def log_error(
ctx: InteractionContext | ComponentContext,
error: Exception,
situation: str,
) -> None:
"""Respond to the context and log error"""
if not ctx.responded:
await ctx.send(
embeds=embed_message(
"Error",
f"Sorry, something went wrong\nThe Error has been logged and will be worked on",
str(error),
)
)
# log the error
logger = logging.getLogger(situation)
logger.exception(
f"InteractionID '{ctx.interaction_id}' - Error {error} - Traceback: \n{''.join(traceback.format_tb(error.__traceback__))}"
)
# raise the error again to make development easier
raise error
|
from datetime import datetime
import cv2
import re
import base64
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
import numpy as np
from io import BytesIO
from PIL import Image, ImageOps
import os,sys
import requests
from graphpipe import remote
from matplotlib import pylab as plt
app = Flask(__name__)
CORS(app) # To Post by Ajax
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
ans,t1,t2,t3 = get_answer(request)
return jsonify({'ans': ans, 't1': t1, 't2': t2, 't3': t3})
else:
return render_template('index.html')
def result(img):
img = img.reshape(1, 784)
img = img.astype(np.float32)
img = np.multiply(img, 1.0 / 255.0)
pred = remote.execute("http://localhost:9001", img)
r = np.argmax(pred, axis=1)
pp = pred*100
top1 = str(np.argsort(-pp)[0][0])+ " (" +str(int(np.sort(-pp)[0][0]*-1))+"%)"
top2 = str(np.argsort(-pp)[0][1])+ " (" +str(int(np.sort(-pp)[0][1]*-1))+"%)"
top3 = str(np.argsort(-pp)[0][2])+ " (" +str(int(np.sort(-pp)[0][2]*-1))+"%)"
# return int(r)
return r,top1,top2,top3
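# Note: result() expects a 28x28 grayscale digit image; it is flattened to shape
# (1, 784), scaled to [0, 1], and sent to the graphpipe model server on localhost:9001.
# The helper returns the argmax class plus three "label (confidence%)" strings for the
# top predictions.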
def get_answer(req):
img_str = re.search(r'base64,(.*)', req.form['img']).group(1)
nparr = np.fromstring(base64.b64decode(img_str), np.uint8)
img_src = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
img_negaposi = 255 - img_src
img_gray = cv2.cvtColor(img_negaposi, cv2.COLOR_BGR2GRAY)
img_resize = cv2.resize(img_gray,(28,28))
cv2.imwrite(f"images/{datetime.now().strftime("%s")}.jpg",img_resize)
ans,t1,t2,t3 = result(img_resize)
return int(ans),t1,t2,t3
if __name__ == "__main__":
app.run(debug=False, host='0.0.0.0', port=8001)
| from datetime import datetime
import cv2
import re
import base64
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
import numpy as np
from io import BytesIO
from PIL import Image, ImageOps
import os,sys
import requests
from graphpipe import remote
from matplotlib import pylab as plt
app = Flask(__name__)
CORS(app) # To Post by Ajax
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
ans,t1,t2,t3 = get_answer(request)
return jsonify({'ans': ans, 't1': t1, 't2': t2, 't3': t3})
else:
return render_template('index.html')
def result(img):
img = img.reshape(1, 784)
img = img.astype(np.float32)
img = np.multiply(img, 1.0 / 255.0)
pred = remote.execute("http://localhost:9001", img)
r = np.argmax(pred, axis=1)
pp = pred*100
top1 = str(np.argsort(-pp)[0][0])+ " (" +str(int(np.sort(-pp)[0][0]*-1))+"%)"
top2 = str(np.argsort(-pp)[0][1])+ " (" +str(int(np.sort(-pp)[0][1]*-1))+"%)"
top3 = str(np.argsort(-pp)[0][2])+ " (" +str(int(np.sort(-pp)[0][2]*-1))+"%)"
# return int(r)
return r,top1,top2,top3
def get_answer(req):
img_str = re.search(r'base64,(.*)', req.form['img']).group(1)
nparr = np.fromstring(base64.b64decode(img_str), np.uint8)
img_src = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
img_negaposi = 255 - img_src
img_gray = cv2.cvtColor(img_negaposi, cv2.COLOR_BGR2GRAY)
img_resize = cv2.resize(img_gray,(28,28))
cv2.imwrite(f"images/{datetime.now().strftime('%s')}.jpg",img_resize)
ans,t1,t2,t3 = result(img_resize)
return int(ans),t1,t2,t3
if __name__ == "__main__":
app.run(debug=False, host='0.0.0.0', port=8001)
|
import abc
import enum
import itertools
import logging
import uuid
from copy import deepcopy
from typing import Any, Dict, List, MutableMapping, Optional, Union
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap
from ruamel.yaml.compat import StringIO
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.util import convert_to_json_serializable, nested_update
from great_expectations.marshmallow__shade import (
INCLUDE,
Schema,
ValidationError,
fields,
post_dump,
post_load,
pre_load,
validates_schema,
)
from great_expectations.marshmallow__shade.validate import OneOf
from great_expectations.types import DictDot, SerializableDictDot
from great_expectations.types.configurations import ClassConfigSchema
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
logger = logging.getLogger(__name__)
CURRENT_GE_CONFIG_VERSION = 3
FIRST_GE_CONFIG_VERSION_WITH_CHECKPOINT_STORE = 3
CURRENT_CHECKPOINT_CONFIG_VERSION = 1
MINIMUM_SUPPORTED_CONFIG_VERSION = 2
DEFAULT_USAGE_STATISTICS_URL = (
"https://stats.greatexpectations.io/great_expectations/v1/usage_statistics"
)
def object_to_yaml_str(obj):
output_str: str
with StringIO() as string_stream:
yaml.dump(obj, string_stream)
output_str = string_stream.getvalue()
return output_str
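# Minimal usage sketch: for a plain mapping, the round-trip dumper configured above
# produces block-style YAML, e.g.
#   object_to_yaml_str({"class_name": "Datasource"}) == "class_name: Datasource\n"
# (CommentedMap instances additionally preserve their comments and key order.)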
class BaseYamlConfig(SerializableDictDot):
_config_schema_class = None
def __init__(self, commented_map: CommentedMap = None):
if commented_map is None:
commented_map = CommentedMap()
self._commented_map = commented_map
@classmethod
def _get_schema_instance(cls) -> Schema:
if not issubclass(cls.get_schema_class(), Schema):
raise ge_exceptions.InvalidConfigError(
"Invalid type: A configuration schema class needs to inherit from the Marshmallow Schema class."
)
if not issubclass(cls.get_config_class(), BaseYamlConfig):
raise ge_exceptions.InvalidConfigError(
"Invalid type: A configuration class needs to inherit from the BaseYamlConfig class."
)
if hasattr(cls.get_config_class(), "_schema_instance"):
# noinspection PyProtectedMember
schema_instance: Schema = cls.get_config_class()._schema_instance
if schema_instance is None:
cls.get_config_class()._schema_instance = (cls.get_schema_class())()
else:
return schema_instance
else:
cls.get_config_class().schema_instance = (cls.get_schema_class())()
return cls.get_config_class().schema_instance
@classmethod
def from_commented_map(cls, commented_map: CommentedMap):
try:
config: Union[dict, BaseYamlConfig]
config = cls._get_schema_instance().load(commented_map)
if isinstance(config, dict):
return cls.get_config_class()(commented_map=commented_map, **config)
return config
except ValidationError:
logger.error(
"Encountered errors during loading config. See ValidationError for more details."
)
raise
def _get_schema_validated_updated_commented_map(self) -> CommentedMap:
commented_map: CommentedMap = deepcopy(self._commented_map)
commented_map.update(self._get_schema_instance().dump(self))
return commented_map
def to_yaml(self, outfile):
"""
:returns None (but writes a YAML file containing the project configuration)
"""
yaml.dump(self.commented_map, outfile)
def to_yaml_str(self) -> str:
"""
:returns a YAML string containing the project configuration
"""
return object_to_yaml_str(self.commented_map)
def to_json_dict(self) -> dict:
"""
:returns a JSON-serializable dict containing the project configuration
"""
commented_map: CommentedMap = self.commented_map
return convert_to_json_serializable(data=commented_map)
@property
def commented_map(self) -> CommentedMap:
return self._get_schema_validated_updated_commented_map()
@classmethod
def get_config_class(cls):
raise NotImplementedError
@classmethod
def get_schema_class(cls):
raise NotImplementedError
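# Usage sketch (hypothetical subclass): concrete configs pair themselves with a
# marshmallow schema by overriding the two hooks above, e.g.
#   class MyConfig(BaseYamlConfig):
#       @classmethod
#       def get_config_class(cls):
#           return MyConfig
#       @classmethod
#       def get_schema_class(cls):
#           return MyConfigSchema
# from_commented_map() then validates a CommentedMap with MyConfigSchema and
# instantiates MyConfig from the validated result.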
class AssetConfig(DictDot):
def __init__(
self,
name=None,
class_name=None,
module_name=None,
bucket=None,
prefix=None,
delimiter=None,
max_keys=None,
batch_spec_passthrough=None,
**kwargs,
):
if name is not None:
self.name = name
self._class_name = class_name
self._module_name = module_name
if bucket is not None:
self.bucket = bucket
if prefix is not None:
self.prefix = prefix
if delimiter is not None:
self.delimiter = delimiter
if max_keys is not None:
self.max_keys = max_keys
if batch_spec_passthrough is not None:
self.batch_spec_passthrough = batch_spec_passthrough
for k, v in kwargs.items():
setattr(self, k, v)
@property
def class_name(self):
return self._class_name
@property
def module_name(self):
return self._module_name
class AssetConfigSchema(Schema):
class Meta:
unknown = INCLUDE
name = fields.String(required=False, allow_none=True)
class_name = fields.String(required=False, allow_none=True, missing="Asset")
module_name = fields.String(
required=False,
allow_none=True,
missing="great_expectations.datasource.data_connector.asset",
)
base_directory = fields.String(required=False, allow_none=True)
glob_directive = fields.String(required=False, allow_none=True)
pattern = fields.String(required=False, allow_none=True)
group_names = fields.List(
cls_or_instance=fields.Str(), required=False, allow_none=True
)
bucket = fields.String(required=False, allow_none=True)
prefix = fields.String(required=False, allow_none=True)
delimiter = fields.String(required=False, allow_none=True)
max_keys = fields.Integer(required=False, allow_none=True)
batch_spec_passthrough = fields.Dict(required=False, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
pass
# noinspection PyUnusedLocal
@post_load
def make_asset_config(self, data, **kwargs):
return AssetConfig(**data)
class SorterConfig(DictDot):
def __init__(
self,
name,
class_name=None,
module_name=None,
orderby="asc",
reference_list=None,
datetime_format=None,
**kwargs,
):
self._name = name
self._class_name = class_name
self._module_name = module_name
self._orderby = orderby
for k, v in kwargs.items():
setattr(self, k, v)
if reference_list is not None:
self._reference_list = reference_list
if datetime_format is not None:
self._datetime_format = datetime_format
@property
def name(self):
return self._name
@property
def module_name(self):
return self._module_name
@property
def class_name(self):
return self._class_name
@property
def orderby(self):
return self._orderby
@property
def reference_list(self):
return self._reference_list
@property
def datetime_format(self):
return self._datetime_format
class SorterConfigSchema(Schema):
class Meta:
unknown = INCLUDE
name = fields.String(required=True)
class_name = fields.String(required=True)
module_name = fields.String(
missing="great_expectations.datasource.data_connector.sorter"
)
orderby = fields.String(required=False, missing="asc", allow_none=False)
# allow_none = True because it is only used by some Sorters
reference_list = fields.List(
cls_or_instance=fields.Str(), required=False, missing=None, allow_none=True
)
datetime_format = fields.String(required=False, missing=None, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
pass
# noinspection PyUnusedLocal
@post_load
def make_sorter_config(self, data, **kwargs):
return SorterConfig(**data)
class DataConnectorConfig(DictDot):
def __init__(
self,
class_name,
module_name=None,
credentials=None,
assets=None,
base_directory=None,
glob_directive=None,
default_regex=None,
batch_identifiers=None,
sorters=None,
batch_spec_passthrough=None,
# S3
boto3_options=None,
bucket=None,
max_keys=None,
# Azure
azure_options=None,
container=None,
name_starts_with=None,
# GCS
bucket_or_name=None,
max_results=None,
# Both S3/GCS
prefix=None,
# Both S3/Azure
delimiter=None,
**kwargs,
):
self._class_name = class_name
self._module_name = module_name
if credentials is not None:
self.credentials = credentials
if assets is not None:
self.assets = assets
if base_directory is not None:
self.base_directory = base_directory
if glob_directive is not None:
self.glob_directive = glob_directive
if default_regex is not None:
self.default_regex = default_regex
if batch_identifiers is not None:
self.batch_identifiers = batch_identifiers
if sorters is not None:
self.sorters = sorters
if batch_spec_passthrough is not None:
self.batch_spec_passthrough = batch_spec_passthrough
# S3
if boto3_options is not None:
self.boto3_options = boto3_options
if bucket is not None:
self.bucket = bucket
if max_keys is not None:
self.max_keys = max_keys
# Azure
if azure_options is not None:
self.azure_options = azure_options
if container is not None:
self.container = container
if name_starts_with is not None:
self.name_starts_with = name_starts_with
# GCS
if bucket_or_name is not None:
self.bucket_or_name = bucket_or_name
if max_results is not None:
self.max_results = max_results
# Both S3/GCS
if prefix is not None:
self.prefix = prefix
# Both S3/Azure
if delimiter is not None:
self.delimiter = delimiter
for k, v in kwargs.items():
setattr(self, k, v)
@property
def class_name(self):
return self._class_name
@property
def module_name(self):
return self._module_name
class DataConnectorConfigSchema(Schema):
class Meta:
unknown = INCLUDE
class_name = fields.String(required=True)
module_name = fields.String(missing="great_expectations.datasource.data_connector")
assets = fields.Dict(
keys=fields.Str(),
values=fields.Nested(AssetConfigSchema, required=False, allow_none=True),
required=False,
allow_none=True,
)
base_directory = fields.String(required=False, allow_none=True)
glob_directive = fields.String(required=False, allow_none=True)
sorters = fields.List(
fields.Nested(SorterConfigSchema, required=False, allow_none=True),
required=False,
allow_none=True,
)
default_regex = fields.Dict(required=False, allow_none=True)
credentials = fields.Raw(required=False, allow_none=True)
batch_identifiers = fields.List(
cls_or_instance=fields.Str(), required=False, allow_none=True
)
# S3
boto3_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
bucket = fields.String(required=False, allow_none=True)
max_keys = fields.Integer(required=False, allow_none=True)
# Azure
azure_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
container = fields.String(required=False, allow_none=True)
name_starts_with = fields.String(required=False, allow_none=True)
# GCS
gcs_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
bucket_or_name = fields.String(required=False, allow_none=True)
max_results = fields.String(required=False, allow_none=True)
# Both S3/GCS
prefix = fields.String(required=False, allow_none=True)
# Both S3/Azure
delimiter = fields.String(required=False, allow_none=True)
data_asset_name_prefix = fields.String(required=False, allow_none=True)
data_asset_name_suffix = fields.String(required=False, allow_none=True)
include_schema_name = fields.Boolean(required=False, allow_none=True)
splitter_method = fields.String(required=False, allow_none=True)
splitter_kwargs = fields.Dict(required=False, allow_none=True)
sampling_method = fields.String(required=False, allow_none=True)
sampling_kwargs = fields.Dict(required=False, allow_none=True)
excluded_tables = fields.List(
cls_or_instance=fields.Str(), required=False, allow_none=True
)
included_tables = fields.List(
cls_or_instance=fields.Str(), required=False, allow_none=True
)
skip_inapplicable_tables = fields.Boolean(required=False, allow_none=True)
batch_spec_passthrough = fields.Dict(required=False, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
# If a class_name begins with the dollar sign ("$"), then it is assumed to be a variable name to be substituted.
if data["class_name"][0] == "$":
return
if ("default_regex" in data) and not (
data["class_name"]
in [
"InferredAssetFilesystemDataConnector",
"ConfiguredAssetFilesystemDataConnector",
"InferredAssetS3DataConnector",
"ConfiguredAssetS3DataConnector",
"InferredAssetAzureDataConnector",
"ConfiguredAssetAzureDataConnector",
"InferredAssetGCSDataConnector",
"ConfiguredAssetGCSDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by a
subclass of the FilePathDataConnector class (your data connector is "{data['class_name']}"). Please update your
configuration to continue.
"""
)
if ("glob_directive" in data) and not (
data["class_name"]
in [
"InferredAssetFilesystemDataConnector",
"ConfiguredAssetFilesystemDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by a
filesystem type of the data connector (your data connector is "{data['class_name']}"). Please update your
configuration to continue.
"""
)
if ("delimiter" in data) and not (
data["class_name"]
in [
"InferredAssetS3DataConnector",
"ConfiguredAssetS3DataConnector",
"InferredAssetAzureDataConnector",
"ConfiguredAssetAzureDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by an
S3/Azure type of the data connector (your data connector is "{data['class_name']}"). Please update your configuration to
continue.
"""
)
if ("prefix" in data) and not (
data["class_name"]
in [
"InferredAssetS3DataConnector",
"ConfiguredAssetS3DataConnector",
"InferredAssetGCSDataConnector",
"ConfiguredAssetGCSDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by an
S3/GCS type of the data connector (your data connector is "{data['class_name']}"). Please update your configuration to
continue.
"""
)
if ("bucket" in data or "max_keys" in data) and not (
data["class_name"]
in [
"InferredAssetS3DataConnector",
"ConfiguredAssetS3DataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by an
S3 type of the data connector (your data connector is "{data['class_name']}"). Please update your configuration to
continue.
"""
)
if (
"azure_options" in data or "container" in data or "name_starts_with" in data
) and not (
data["class_name"]
in [
"InferredAssetAzureDataConnector",
"ConfiguredAssetAzureDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by an
Azure type of the data connector (your data connector is "{data['class_name']}"). Please update your configuration to
continue.
"""
)
if "azure_options" in data and data["class_name"] in [
"InferredAssetAzureDataConnector",
"ConfiguredAssetAzureDataConnector",
]:
azure_options = data["azure_options"]
if not (("conn_str" in azure_options) ^ ("account_url" in azure_options)):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration is either missing methods of authentication or is using too many for the Azure type of data connector.
You must only select one between `conn_str` or `account_url`. Please update your configuration to continue.
"""
)
if (
"gcs_options" in data or "bucket_or_name" in data or "max_results" in data
) and not (
data["class_name"]
in [
"InferredAssetGCSDataConnector",
"ConfiguredAssetGCSDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by a
GCS type of the data connector (your data connector is "{data['class_name']}"). Please update your configuration to
continue.
"""
)
if "gcs_options" in data and data["class_name"] in [
"InferredAssetGCSDataConnector",
"ConfiguredAssetGCSDataConnector",
]:
gcs_options = data["gcs_options"]
if "filename" in gcs_options and "info" in gcs_options:
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration can only use a single method of authentication for the GCS type of data connector.
You must only select one between `filename` (from_service_account_file) and `info` (from_service_account_info). Please update your configuration to continue.
"""
)
if (
"data_asset_name_prefix" in data
or "data_asset_name_suffix" in data
or "include_schema_name" in data
or "splitter_method" in data
or "splitter_kwargs" in data
or "sampling_method" in data
or "sampling_kwargs" in data
or "excluded_tables" in data
or "included_tables" in data
or "skip_inapplicable_tables" in data
) and not (
data["class_name"]
in [
"InferredAssetSqlDataConnector",
"ConfiguredAssetSqlDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by an
SQL type of the data connector (your data connector is "{data['class_name']}"). Please update your configuration to
continue.
"""
)
# noinspection PyUnusedLocal
@post_load
def make_data_connector_config(self, data, **kwargs):
return DataConnectorConfig(**data)
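# Illustrative sketch (paths and regex are hypothetical): a minimal filesystem data
# connector mapping that this schema accepts without tripping validate_schema above:
#   {
#       "class_name": "InferredAssetFilesystemDataConnector",
#       "base_directory": "../data",
#       "default_regex": {"pattern": r"(.*)\.csv", "group_names": ["data_asset_name"]},
#   }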
class ExecutionEngineConfig(DictDot):
def __init__(
self,
class_name,
module_name=None,
caching=None,
batch_spec_defaults=None,
connection_string=None,
credentials=None,
spark_config=None,
boto3_options=None,
azure_options=None,
gcs_options=None,
**kwargs,
):
self._class_name = class_name
self._module_name = module_name
if caching is not None:
self.caching = caching
if batch_spec_defaults is not None:
self._batch_spec_defaults = batch_spec_defaults
if connection_string is not None:
self.connection_string = connection_string
if credentials is not None:
self.credentials = credentials
if spark_config is not None:
self.spark_config = spark_config
if boto3_options is not None:
self.boto3_options = boto3_options
if azure_options is not None:
self.azure_options = azure_options
if gcs_options is not None:
self.gcs_options = gcs_options
for k, v in kwargs.items():
setattr(self, k, v)
@property
def module_name(self):
return self._module_name
@property
def class_name(self):
return self._class_name
@property
def batch_spec_defaults(self):
return self._batch_spec_defaults
class ExecutionEngineConfigSchema(Schema):
class Meta:
unknown = INCLUDE
class_name = fields.String(required=True)
module_name = fields.String(missing="great_expectations.execution_engine")
connection_string = fields.String(required=False, allow_none=True)
credentials = fields.Raw(required=False, allow_none=True)
spark_config = fields.Raw(required=False, allow_none=True)
boto3_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
azure_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
gcs_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
caching = fields.Boolean(required=False, allow_none=True)
batch_spec_defaults = fields.Dict(required=False, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
# If a class_name begins with the dollar sign ("$"), then it is assumed to be a variable name to be substituted.
if data["class_name"][0] == "$":
return
if ("connection_string" in data or "credentials" in data) and not (
data["class_name"] == "SqlAlchemyExecutionEngine"
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses the "connection_string" key in an execution engine, but only
SqlAlchemyExecutionEngine requires this attribute (your execution engine is "{data['class_name']}"). Please update your
configuration to continue.
"""
)
if "spark_config" in data and not (
data["class_name"] == "SparkDFExecutionEngine"
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses the "spark_config" key in an execution engine, but only
SparkDFExecutionEngine requires this attribute (your execution engine is "{data['class_name']}"). Please update your
configuration to continue.
"""
)
# noinspection PyUnusedLocal
@post_load
def make_execution_engine_config(self, data, **kwargs):
return ExecutionEngineConfig(**data)
class DatasourceConfig(DictDot):
def __init__(
self,
class_name=None,
module_name: Optional[str] = "great_expectations.datasource",
execution_engine=None,
data_connectors=None,
data_asset_type=None,
batch_kwargs_generators=None,
connection_string=None,
credentials=None,
introspection=None,
tables=None,
boto3_options=None,
azure_options=None,
gcs_options=None,
reader_method=None,
reader_options=None,
limit=None,
**kwargs,
):
# NOTE - JPC - 20200316: Currently, we are mostly inconsistent with respect to this type...
self._class_name = class_name
self._module_name = module_name
if execution_engine is not None:
self.execution_engine = execution_engine
if data_connectors is not None and isinstance(data_connectors, dict):
self.data_connectors = data_connectors
# NOTE - AJB - 20201202: This should use the datasource class build_configuration method as in DataContext.add_datasource()
if data_asset_type is None:
if class_name == "PandasDatasource":
data_asset_type = {
"class_name": "PandasDataset",
"module_name": "great_expectations.dataset",
}
elif class_name == "SqlAlchemyDatasource":
data_asset_type = {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
}
elif class_name == "SparkDFDatasource":
data_asset_type = {
"class_name": "SparkDFDataset",
"module_name": "great_expectations.dataset",
}
if data_asset_type is not None:
self.data_asset_type = data_asset_type
if batch_kwargs_generators is not None:
self.batch_kwargs_generators = batch_kwargs_generators
if connection_string is not None:
self.connection_string = connection_string
if credentials is not None:
self.credentials = credentials
if introspection is not None:
self.introspection = introspection
if tables is not None:
self.tables = tables
if boto3_options is not None:
self.boto3_options = boto3_options
if azure_options is not None:
self.azure_options = azure_options
if gcs_options is not None:
self.gcs_options = gcs_options
if reader_method is not None:
self.reader_method = reader_method
if reader_options is not None:
self.reader_options = reader_options
if limit is not None:
self.limit = limit
for k, v in kwargs.items():
setattr(self, k, v)
@property
def class_name(self):
return self._class_name
@property
def module_name(self):
return self._module_name
class DatasourceConfigSchema(Schema):
class Meta:
unknown = INCLUDE
class_name = fields.String(missing="Datasource")
module_name = fields.String(missing="great_expectations.datasource")
force_reuse_spark_context = fields.Bool(required=False, allow_none=True)
spark_config = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
execution_engine = fields.Nested(
ExecutionEngineConfigSchema, required=False, allow_none=True
)
data_connectors = fields.Dict(
keys=fields.Str(),
values=fields.Nested(DataConnectorConfigSchema),
required=False,
allow_none=True,
)
data_asset_type = fields.Nested(ClassConfigSchema, required=False, allow_none=True)
# TODO: Update to generator-specific
# batch_kwargs_generators = fields.Mapping(keys=fields.Str(), values=fields.Nested(fields.GeneratorSchema))
batch_kwargs_generators = fields.Dict(
keys=fields.Str(), values=fields.Dict(), required=False, allow_none=True
)
connection_string = fields.String(required=False, allow_none=True)
credentials = fields.Raw(required=False, allow_none=True)
introspection = fields.Dict(required=False, allow_none=True)
tables = fields.Dict(required=False, allow_none=True)
boto3_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
azure_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
gcs_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
reader_method = fields.String(required=False, allow_none=True)
reader_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
limit = fields.Integer(required=False, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
if "generators" in data:
raise ge_exceptions.InvalidConfigError(
'Your current configuration uses the "generators" key in a datasource, but in version 0.10 of '
'GE that key is renamed to "batch_kwargs_generators". Please update your configuration to continue.'
)
# If a class_name begins with the dollar sign ("$"), then it is assumed to be a variable name to be substituted.
if data["class_name"][0] == "$":
return
if (
"connection_string" in data
or "credentials" in data
or "introspection" in data
or "tables" in data
) and not (
data["class_name"]
in [
"SqlAlchemyDatasource",
"SimpleSqlalchemyDatasource",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data source that are required only by a
sqlalchemy data source (your data source is "{data['class_name']}"). Please update your configuration to continue.
"""
)
# noinspection PyUnusedLocal
@post_load
def make_datasource_config(self, data, **kwargs):
return DatasourceConfig(**data)
class AnonymizedUsageStatisticsConfig(DictDot):
def __init__(self, enabled=True, data_context_id=None, usage_statistics_url=None):
self._enabled = enabled
if data_context_id is None:
data_context_id = str(uuid.uuid4())
self._explicit_id = False
else:
self._explicit_id = True
self._data_context_id = data_context_id
if usage_statistics_url is None:
usage_statistics_url = DEFAULT_USAGE_STATISTICS_URL
self._explicit_url = False
else:
self._explicit_url = True
self._usage_statistics_url = usage_statistics_url
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, enabled):
if not isinstance(enabled, bool):
raise ValueError("usage statistics enabled property must be boolean")
self._enabled = enabled
@property
def data_context_id(self):
return self._data_context_id
@data_context_id.setter
def data_context_id(self, data_context_id):
try:
uuid.UUID(data_context_id)
except ValueError:
raise ge_exceptions.InvalidConfigError(
"data_context_id must be a valid uuid"
)
self._data_context_id = data_context_id
self._explicit_id = True
@property
def explicit_id(self):
return self._explicit_id
@property
def usage_statistics_url(self):
return self._usage_statistics_url
@usage_statistics_url.setter
def usage_statistics_url(self, usage_statistics_url):
self._usage_statistics_url = usage_statistics_url
self._explicit_url = True
class AnonymizedUsageStatisticsConfigSchema(Schema):
data_context_id = fields.UUID()
enabled = fields.Boolean(default=True)
usage_statistics_url = fields.URL(allow_none=True)
_explicit_url = fields.Boolean(required=False)
# noinspection PyUnusedLocal
@post_load()
def make_usage_statistics_config(self, data, **kwargs):
if "data_context_id" in data:
data["data_context_id"] = str(data["data_context_id"])
return AnonymizedUsageStatisticsConfig(**data)
# noinspection PyUnusedLocal
@post_dump()
def filter_implicit(self, data, **kwargs):
if not data.get("_explicit_url") and "usage_statistics_url" in data:
del data["usage_statistics_url"]
if "_explicit_url" in data:
del data["_explicit_url"]
return data
class NotebookTemplateConfig(DictDot):
def __init__(self, file_name, template_kwargs=None):
self.file_name = file_name
if template_kwargs:
self.template_kwargs = template_kwargs
else:
self.template_kwargs = {}
class NotebookTemplateConfigSchema(Schema):
file_name = fields.String()
template_kwargs = fields.Dict(
keys=fields.Str(), values=fields.Str(), allow_none=True
)
# noinspection PyUnusedLocal
@post_load
def make_notebook_template_config(self, data, **kwargs):
return NotebookTemplateConfig(**data)
class NotebookConfig(DictDot):
def __init__(
self,
class_name,
module_name,
custom_templates_module,
header_markdown=None,
footer_markdown=None,
table_expectations_header_markdown=None,
column_expectations_header_markdown=None,
table_expectations_not_found_markdown=None,
column_expectations_not_found_markdown=None,
authoring_intro_markdown=None,
column_expectations_markdown=None,
header_code=None,
footer_code=None,
table_expectation_code=None,
column_expectation_code=None,
):
self.class_name = class_name
self.module_name = module_name
self.custom_templates_module = custom_templates_module
self.header_markdown = header_markdown
self.footer_markdown = footer_markdown
self.table_expectations_header_markdown = table_expectations_header_markdown
self.column_expectations_header_markdown = column_expectations_header_markdown
self.table_expectations_not_found_markdown = (
table_expectations_not_found_markdown
)
self.column_expectations_not_found_markdown = (
column_expectations_not_found_markdown
)
self.authoring_intro_markdown = authoring_intro_markdown
self.column_expectations_markdown = column_expectations_markdown
self.header_code = header_code
self.footer_code = footer_code
self.table_expectation_code = table_expectation_code
self.column_expectation_code = column_expectation_code
class NotebookConfigSchema(Schema):
class_name = fields.String(missing="SuiteEditNotebookRenderer")
module_name = fields.String(
missing="great_expectations.render.renderer.suite_edit_notebook_renderer"
)
custom_templates_module = fields.String()
header_markdown = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)
footer_markdown = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)
table_expectations_header_markdown = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
column_expectations_header_markdown = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
table_expectations_not_found_markdown = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
column_expectations_not_found_markdown = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
authoring_intro_markdown = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
column_expectations_markdown = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
header_code = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)
footer_code = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)
table_expectation_code = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
column_expectation_code = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
# noinspection PyUnusedLocal
@post_load
def make_notebook_config(self, data, **kwargs):
return NotebookConfig(**data)
class NotebooksConfig(DictDot):
def __init__(self, suite_edit):
self.suite_edit = suite_edit
class NotebooksConfigSchema(Schema):
# for now only suite_edit, could have other customization options for
# notebooks in the future
suite_edit = fields.Nested(NotebookConfigSchema)
# noinspection PyUnusedLocal
@post_load
def make_notebooks_config(self, data, **kwargs):
return NotebooksConfig(**data)
class ConcurrencyConfig(DictDot):
"""WARNING: This class is experimental."""
def __init__(self, enabled: Optional[bool] = False):
"""Initialize a concurrency configuration to control multithreaded execution.
Args:
enabled: Whether or not multithreading is enabled.
"""
self._enabled = enabled
@property
def enabled(self):
"""Whether or not multithreading is enabled."""
return self._enabled
@property
def max_database_query_concurrency(self) -> int:
"""Max number of concurrent database queries to execute with mulithreading."""
# BigQuery has a limit of 100 for "Concurrent rate limit for interactive queries" as described at
# https://cloud.google.com/bigquery/quotas#query_jobs). If necessary, this can later be tuned for other
# databases and/or be manually user configurable.
return 100
def add_sqlalchemy_create_engine_parameters(
self, parameters: MutableMapping[str, Any]
):
"""Update SqlAlchemy parameters to prevent concurrency errors (e.g. http://sqlalche.me/e/14/3o7r) and
bottlenecks.
Args:
parameters: SqlAlchemy create_engine parameters to which we add concurrency appropriate parameters. If the
concurrency parameters are already set, those parameters are left unchanged.
"""
if not self._enabled:
return
if "pool_size" not in parameters:
# https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.pool_size
parameters["pool_size"] = 0
if "max_overflow" not in parameters:
# https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.max_overflow
parameters["max_overflow"] = -1
class ConcurrencyConfigSchema(Schema):
"""WARNING: This class is experimental."""
enabled = fields.Boolean(default=False)
class GeCloudConfig(DictDot):
def __init__(self, base_url: str, account_id: str, access_token: str):
self.base_url = base_url
self.account_id = account_id
self.access_token = access_token
def to_json_dict(self):
return {
"base_url": self.base_url,
"account_id": self.account_id,
"access_token": self.access_token,
}
class DataContextConfigSchema(Schema):
config_version = fields.Number(
validate=lambda x: 0 < x < 100,
error_messages={"invalid": "config version must " "be a number."},
)
datasources = fields.Dict(
keys=fields.Str(),
values=fields.Nested(DatasourceConfigSchema),
required=False,
allow_none=True,
)
expectations_store_name = fields.Str()
validations_store_name = fields.Str()
evaluation_parameter_store_name = fields.Str()
checkpoint_store_name = fields.Str(required=False, allow_none=True)
plugins_directory = fields.Str(allow_none=True)
validation_operators = fields.Dict(
keys=fields.Str(), values=fields.Dict(), required=False, allow_none=True
)
stores = fields.Dict(keys=fields.Str(), values=fields.Dict())
notebooks = fields.Nested(NotebooksConfigSchema, allow_none=True)
data_docs_sites = fields.Dict(
keys=fields.Str(), values=fields.Dict(), allow_none=True
)
config_variables_file_path = fields.Str(allow_none=True)
anonymous_usage_statistics = fields.Nested(AnonymizedUsageStatisticsConfigSchema)
concurrency = fields.Nested(ConcurrencyConfigSchema)
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
def handle_error(self, exc, data, **kwargs):
"""Log and raise our custom exception when (de)serialization fails."""
if (
exc
and exc.messages
and isinstance(exc.messages, dict)
and all([key is None for key in exc.messages.keys()])
):
exc.messages = list(itertools.chain.from_iterable(exc.messages.values()))
message: str = (
f"Error while processing DataContextConfig: {" ".join(exc.messages)}"
)
logger.error(message)
raise ge_exceptions.InvalidDataContextConfigError(
message=message,
)
@validates_schema
def validate_schema(self, data, **kwargs):
if "config_version" not in data:
raise ge_exceptions.InvalidDataContextConfigError(
"The key `config_version` is missing; please check your config file.",
validation_error=ValidationError(message="no config_version key"),
)
if not isinstance(data["config_version"], (int, float)):
raise ge_exceptions.InvalidDataContextConfigError(
"The key `config_version` must be a number. Please check your config file.",
validation_error=ValidationError(message="config version not a number"),
)
# When migrating from 0.7.x to 0.8.0
if data["config_version"] == 0 and any(
[
store_config["class_name"] == "ValidationsStore"
for store_config in data["stores"].values()
]
):
raise ge_exceptions.UnsupportedConfigVersionError(
"You appear to be using a config version from the 0.7.x series. This version is no longer supported."
)
if data["config_version"] < MINIMUM_SUPPORTED_CONFIG_VERSION:
raise ge_exceptions.UnsupportedConfigVersionError(
"You appear to have an invalid config version ({}).\n The version number must be at least {}. "
"Please see the migration guide at https://docs.greatexpectations.io/en/latest/guides/how_to_guides/migrating_versions.html".format(
data["config_version"], MINIMUM_SUPPORTED_CONFIG_VERSION
),
)
if data["config_version"] > CURRENT_GE_CONFIG_VERSION:
raise ge_exceptions.InvalidDataContextConfigError(
"You appear to have an invalid config version ({}).\n The maximum valid version is {}.".format(
data["config_version"], CURRENT_GE_CONFIG_VERSION
),
validation_error=ValidationError(message="config version too high"),
)
if data["config_version"] < CURRENT_GE_CONFIG_VERSION and (
"checkpoint_store_name" in data
or any(
[
store_config["class_name"] == "CheckpointStore"
for store_config in data["stores"].values()
]
)
):
raise ge_exceptions.InvalidDataContextConfigError(
"You appear to be using a Checkpoint store with an invalid config version ({}).\n Your data context with this older configuration version specifies a Checkpoint store, which is a new feature. Please update your configuration to the new version number {} before adding a Checkpoint store.\n Visit https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html to learn more about the upgrade process.".format(
data["config_version"], float(CURRENT_GE_CONFIG_VERSION)
),
validation_error=ValidationError(
message="You appear to be using a Checkpoint store with an invalid config version ({}).\n Your data context with this older configuration version specifies a Checkpoint store, which is a new feature. Please update your configuration to the new version number {} before adding a Checkpoint store.\n Visit https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html to learn more about the upgrade process.".format(
data["config_version"], float(CURRENT_GE_CONFIG_VERSION)
)
),
)
if (
data["config_version"] >= FIRST_GE_CONFIG_VERSION_WITH_CHECKPOINT_STORE
and "validation_operators" in data
and data["validation_operators"] is not None
):
# TODO: <Alex>Add a URL to the migration guide with instructions for how to replace validation_operators with appropriate actions.</Alex>
logger.warning(
"You appear to be using a legacy capability with the latest config version ({}).\n Your data context with this configuration version uses validation_operators, which are being deprecated. Please update your configuration to be compatible with the version number {}.".format(
data["config_version"], CURRENT_GE_CONFIG_VERSION
),
)
class DataContextConfigDefaults(enum.Enum):
DEFAULT_CONFIG_VERSION = CURRENT_GE_CONFIG_VERSION
DEFAULT_EXPECTATIONS_STORE_NAME = "expectations_store"
EXPECTATIONS_BASE_DIRECTORY = "expectations"
DEFAULT_EXPECTATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME = (
f"{EXPECTATIONS_BASE_DIRECTORY}/"
)
DEFAULT_VALIDATIONS_STORE_NAME = "validations_store"
VALIDATIONS_BASE_DIRECTORY = "validations"
DEFAULT_VALIDATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME = (
f"uncommitted/{VALIDATIONS_BASE_DIRECTORY}/"
)
DEFAULT_EVALUATION_PARAMETER_STORE_NAME = "evaluation_parameter_store"
DEFAULT_EVALUATION_PARAMETER_STORE_BASE_DIRECTORY_RELATIVE_NAME = (
"evaluation_parameters/"
)
DEFAULT_CHECKPOINT_STORE_NAME = "checkpoint_store"
CHECKPOINTS_BASE_DIRECTORY = "checkpoints"
DEFAULT_CHECKPOINT_STORE_BASE_DIRECTORY_RELATIVE_NAME = (
f"{CHECKPOINTS_BASE_DIRECTORY}/"
)
DEFAULT_DATA_DOCS_SITE_NAME = "local_site"
DEFAULT_CONFIG_VARIABLES_FILEPATH = "uncommitted/config_variables.yml"
PLUGINS_BASE_DIRECTORY = "plugins"
DEFAULT_PLUGINS_DIRECTORY = f"{PLUGINS_BASE_DIRECTORY}/"
NOTEBOOKS_BASE_DIRECTORY = "notebooks"
DEFAULT_VALIDATION_OPERATORS = {
"action_list_operator": {
"class_name": "ActionListValidationOperator",
"action_list": [
{
"name": "store_validation_result",
"action": {"class_name": "StoreValidationResultAction"},
},
{
"name": "store_evaluation_params",
"action": {"class_name": "StoreEvaluationParametersAction"},
},
{
"name": "update_data_docs",
"action": {"class_name": "UpdateDataDocsAction"},
},
],
}
}
DEFAULT_STORES = {
DEFAULT_EXPECTATIONS_STORE_NAME: {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "TupleFilesystemStoreBackend",
"base_directory": DEFAULT_EXPECTATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME,
},
},
DEFAULT_VALIDATIONS_STORE_NAME: {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "TupleFilesystemStoreBackend",
"base_directory": DEFAULT_VALIDATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME,
},
},
DEFAULT_EVALUATION_PARAMETER_STORE_NAME: {
"class_name": "EvaluationParameterStore"
},
DEFAULT_CHECKPOINT_STORE_NAME: {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "TupleFilesystemStoreBackend",
"suppress_store_backend_id": True,
"base_directory": DEFAULT_CHECKPOINT_STORE_BASE_DIRECTORY_RELATIVE_NAME,
},
},
}
DEFAULT_DATA_DOCS_SITES = {
DEFAULT_DATA_DOCS_SITE_NAME: {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"store_backend": {
"class_name": "TupleFilesystemStoreBackend",
"base_directory": "uncommitted/data_docs/local_site/",
},
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
}
}
class CheckpointConfigDefaults(enum.Enum):
DEFAULT_CONFIG_VERSION = CURRENT_CHECKPOINT_CONFIG_VERSION
class BaseStoreBackendDefaults(DictDot):
"""
Define base defaults for platform specific StoreBackendDefaults.
StoreBackendDefaults define defaults for specific cases of often used configurations.
For example, if you plan to store expectations, validations, and data_docs in S3, use S3StoreBackendDefaults and you may be able to specify fewer parameters.
"""
def __init__(
self,
expectations_store_name: str = DataContextConfigDefaults.DEFAULT_EXPECTATIONS_STORE_NAME.value,
validations_store_name: str = DataContextConfigDefaults.DEFAULT_VALIDATIONS_STORE_NAME.value,
evaluation_parameter_store_name: str = DataContextConfigDefaults.DEFAULT_EVALUATION_PARAMETER_STORE_NAME.value,
checkpoint_store_name: str = DataContextConfigDefaults.DEFAULT_CHECKPOINT_STORE_NAME.value,
data_docs_site_name: str = DataContextConfigDefaults.DEFAULT_DATA_DOCS_SITE_NAME.value,
validation_operators: dict = None,
stores: dict = None,
data_docs_sites: dict = None,
):
self.expectations_store_name = expectations_store_name
self.validations_store_name = validations_store_name
self.evaluation_parameter_store_name = evaluation_parameter_store_name
self.checkpoint_store_name = checkpoint_store_name
self.validation_operators = validation_operators
if stores is None:
stores = deepcopy(DataContextConfigDefaults.DEFAULT_STORES.value)
self.stores = stores
if data_docs_sites is None:
data_docs_sites = deepcopy(
DataContextConfigDefaults.DEFAULT_DATA_DOCS_SITES.value
)
self.data_docs_sites = data_docs_sites
self.data_docs_site_name = data_docs_site_name
class S3StoreBackendDefaults(BaseStoreBackendDefaults):
"""
    Default store configs for S3 backends, with some accessible parameters
Args:
default_bucket_name: Use this bucket name for stores that do not have a bucket name provided
expectations_store_bucket_name: Overrides default_bucket_name if supplied
validations_store_bucket_name: Overrides default_bucket_name if supplied
data_docs_bucket_name: Overrides default_bucket_name if supplied
checkpoint_store_bucket_name: Overrides default_bucket_name if supplied
expectations_store_prefix: Overrides default if supplied
validations_store_prefix: Overrides default if supplied
data_docs_prefix: Overrides default if supplied
checkpoint_store_prefix: Overrides default if supplied
expectations_store_name: Overrides default if supplied
validations_store_name: Overrides default if supplied
evaluation_parameter_store_name: Overrides default if supplied
checkpoint_store_name: Overrides default if supplied
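    Example (illustrative; bucket name and prefix shown are placeholders):
        >>> defaults = S3StoreBackendDefaults(
        ...     default_bucket_name="my_org_bucket",
        ...     validations_store_prefix="my_validations",
        ... )
        >>> defaults.stores["expectations_S3_store"]["store_backend"]["bucket"]
        'my_org_bucket'
        >>> defaults.stores["validations_S3_store"]["store_backend"]["prefix"]
        'my_validations'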
"""
def __init__(
self,
default_bucket_name: Optional[str] = None,
expectations_store_bucket_name: Optional[str] = None,
validations_store_bucket_name: Optional[str] = None,
data_docs_bucket_name: Optional[str] = None,
checkpoint_store_bucket_name: Optional[str] = None,
expectations_store_prefix: str = "expectations",
validations_store_prefix: str = "validations",
data_docs_prefix: str = "data_docs",
checkpoint_store_prefix: str = "checkpoints",
expectations_store_name: str = "expectations_S3_store",
validations_store_name: str = "validations_S3_store",
evaluation_parameter_store_name: str = "evaluation_parameter_store",
checkpoint_store_name: str = "checkpoint_S3_store",
):
# Initialize base defaults
super().__init__()
# Use default_bucket_name if separate store buckets are not provided
if expectations_store_bucket_name is None:
expectations_store_bucket_name = default_bucket_name
if validations_store_bucket_name is None:
validations_store_bucket_name = default_bucket_name
if data_docs_bucket_name is None:
data_docs_bucket_name = default_bucket_name
if checkpoint_store_bucket_name is None:
checkpoint_store_bucket_name = default_bucket_name
# Overwrite defaults
self.expectations_store_name = expectations_store_name
self.validations_store_name = validations_store_name
self.evaluation_parameter_store_name = evaluation_parameter_store_name
self.checkpoint_store_name = checkpoint_store_name
self.stores = {
expectations_store_name: {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": expectations_store_bucket_name,
"prefix": expectations_store_prefix,
},
},
validations_store_name: {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": validations_store_bucket_name,
"prefix": validations_store_prefix,
},
},
evaluation_parameter_store_name: {"class_name": "EvaluationParameterStore"},
checkpoint_store_name: {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": checkpoint_store_bucket_name,
"prefix": checkpoint_store_prefix,
},
},
}
self.data_docs_sites = {
"s3_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": data_docs_bucket_name,
"prefix": data_docs_prefix,
},
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
}
}
class FilesystemStoreBackendDefaults(BaseStoreBackendDefaults):
"""
Default store configs for filesystem backends, with some accessible parameters
Args:
root_directory: Absolute directory prepended to the base_directory for each store
plugins_directory: Overrides default if supplied
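    Example (illustrative; the root directory is a placeholder path):
        >>> defaults = FilesystemStoreBackendDefaults(root_directory="/tmp/ge_project")
        >>> defaults.plugins_directory
        'plugins/'
        >>> defaults.stores[defaults.expectations_store_name]["store_backend"]["root_directory"]
        '/tmp/ge_project'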
"""
def __init__(
self,
root_directory: Optional[str] = None,
plugins_directory: Optional[str] = None,
):
# Initialize base defaults
super().__init__()
if plugins_directory is None:
plugins_directory = (
DataContextConfigDefaults.DEFAULT_PLUGINS_DIRECTORY.value
)
self.plugins_directory = plugins_directory
if root_directory is not None:
self.stores[self.expectations_store_name]["store_backend"][
"root_directory"
] = root_directory
self.stores[self.validations_store_name]["store_backend"][
"root_directory"
] = root_directory
self.stores[self.checkpoint_store_name]["store_backend"][
"root_directory"
] = root_directory
self.data_docs_sites[self.data_docs_site_name]["store_backend"][
"root_directory"
] = root_directory
class InMemoryStoreBackendDefaults(BaseStoreBackendDefaults):
"""
    Default store configs for in-memory backends.
This is useful for testing without persistence.
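    Example (illustrative):
        >>> defaults = InMemoryStoreBackendDefaults()
        >>> defaults.stores[defaults.expectations_store_name]["store_backend"]["class_name"]
        'InMemoryStoreBackend'
        >>> defaults.data_docs_sites
        {}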
"""
def __init__(
self,
):
# Initialize base defaults
super().__init__()
self.stores = {
self.expectations_store_name: {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "InMemoryStoreBackend",
},
},
self.validations_store_name: {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "InMemoryStoreBackend",
},
},
self.evaluation_parameter_store_name: {
"class_name": "EvaluationParameterStore"
},
self.checkpoint_store_name: {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "InMemoryStoreBackend",
},
},
}
self.data_docs_sites = {}
class GCSStoreBackendDefaults(BaseStoreBackendDefaults):
"""
Default store configs for Google Cloud Storage (GCS) backends, with some accessible parameters
Args:
default_bucket_name: Use this bucket name for stores that do not have a bucket name provided
default_project_name: Use this project name for stores that do not have a project name provided
expectations_store_bucket_name: Overrides default_bucket_name if supplied
validations_store_bucket_name: Overrides default_bucket_name if supplied
data_docs_bucket_name: Overrides default_bucket_name if supplied
checkpoint_store_bucket_name: Overrides default_bucket_name if supplied
expectations_store_project_name: Overrides default_project_name if supplied
validations_store_project_name: Overrides default_project_name if supplied
data_docs_project_name: Overrides default_project_name if supplied
checkpoint_store_project_name: Overrides default_project_name if supplied
expectations_store_prefix: Overrides default if supplied
validations_store_prefix: Overrides default if supplied
data_docs_prefix: Overrides default if supplied
checkpoint_store_prefix: Overrides default if supplied
expectations_store_name: Overrides default if supplied
validations_store_name: Overrides default if supplied
evaluation_parameter_store_name: Overrides default if supplied
checkpoint_store_name: Overrides default if supplied
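    Example (illustrative; the bucket and project names are placeholders):
        >>> defaults = GCSStoreBackendDefaults(
        ...     default_bucket_name="my_org_bucket",
        ...     default_project_name="my_gcp_project",
        ... )
        >>> defaults.stores["expectations_GCS_store"]["store_backend"]["project"]
        'my_gcp_project'
        >>> defaults.data_docs_sites["gcs_site"]["store_backend"]["bucket"]
        'my_org_bucket'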
"""
def __init__(
self,
default_bucket_name: Optional[str] = None,
default_project_name: Optional[str] = None,
expectations_store_bucket_name: Optional[str] = None,
validations_store_bucket_name: Optional[str] = None,
data_docs_bucket_name: Optional[str] = None,
checkpoint_store_bucket_name: Optional[str] = None,
expectations_store_project_name: Optional[str] = None,
validations_store_project_name: Optional[str] = None,
data_docs_project_name: Optional[str] = None,
checkpoint_store_project_name: Optional[str] = None,
expectations_store_prefix: str = "expectations",
validations_store_prefix: str = "validations",
data_docs_prefix: str = "data_docs",
checkpoint_store_prefix: str = "checkpoints",
expectations_store_name: str = "expectations_GCS_store",
validations_store_name: str = "validations_GCS_store",
evaluation_parameter_store_name: str = "evaluation_parameter_store",
checkpoint_store_name: str = "checkpoint_GCS_store",
):
# Initialize base defaults
super().__init__()
# Use default_bucket_name if separate store buckets are not provided
if expectations_store_bucket_name is None:
expectations_store_bucket_name = default_bucket_name
if validations_store_bucket_name is None:
validations_store_bucket_name = default_bucket_name
if data_docs_bucket_name is None:
data_docs_bucket_name = default_bucket_name
if checkpoint_store_bucket_name is None:
checkpoint_store_bucket_name = default_bucket_name
# Use default_project_name if separate store projects are not provided
if expectations_store_project_name is None:
expectations_store_project_name = default_project_name
if validations_store_project_name is None:
validations_store_project_name = default_project_name
if data_docs_project_name is None:
data_docs_project_name = default_project_name
if checkpoint_store_project_name is None:
checkpoint_store_project_name = default_project_name
# Overwrite defaults
self.expectations_store_name = expectations_store_name
self.validations_store_name = validations_store_name
self.evaluation_parameter_store_name = evaluation_parameter_store_name
self.checkpoint_store_name = checkpoint_store_name
self.stores = {
expectations_store_name: {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "TupleGCSStoreBackend",
"project": expectations_store_project_name,
"bucket": expectations_store_bucket_name,
"prefix": expectations_store_prefix,
},
},
validations_store_name: {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "TupleGCSStoreBackend",
"project": validations_store_project_name,
"bucket": validations_store_bucket_name,
"prefix": validations_store_prefix,
},
},
evaluation_parameter_store_name: {"class_name": "EvaluationParameterStore"},
checkpoint_store_name: {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "TupleGCSStoreBackend",
"project": checkpoint_store_project_name,
"bucket": checkpoint_store_bucket_name,
"prefix": checkpoint_store_prefix,
},
},
}
self.data_docs_sites = {
"gcs_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"store_backend": {
"class_name": "TupleGCSStoreBackend",
"project": data_docs_project_name,
"bucket": data_docs_bucket_name,
"prefix": data_docs_prefix,
},
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
}
}
class DatabaseStoreBackendDefaults(BaseStoreBackendDefaults):
"""
Default store configs for database backends, with some accessible parameters
Args:
default_credentials: Use these credentials for all stores that do not have credentials provided
expectations_store_credentials: Overrides default_credentials if supplied
validations_store_credentials: Overrides default_credentials if supplied
checkpoint_store_credentials: Overrides default_credentials if supplied
expectations_store_name: Overrides default if supplied
validations_store_name: Overrides default if supplied
evaluation_parameter_store_name: Overrides default if supplied
checkpoint_store_name: Overrides default if supplied
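    Example (illustrative; the credentials dict is a placeholder and is stored as-is, not validated here):
        >>> defaults = DatabaseStoreBackendDefaults(
        ...     default_credentials={"connection_string": "postgresql://user:pass@localhost/ge"},
        ... )
        >>> defaults.stores["expectations_database_store"]["store_backend"]["class_name"]
        'DatabaseStoreBackend'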
"""
def __init__(
self,
default_credentials: Optional[Dict] = None,
expectations_store_credentials: Optional[Dict] = None,
validations_store_credentials: Optional[Dict] = None,
checkpoint_store_credentials: Optional[Dict] = None,
expectations_store_name: str = "expectations_database_store",
validations_store_name: str = "validations_database_store",
evaluation_parameter_store_name: str = "evaluation_parameter_store",
checkpoint_store_name: str = "checkpoint_database_store",
):
# Initialize base defaults
super().__init__()
# Use default credentials if separate credentials not supplied for expectations_store and validations_store
if expectations_store_credentials is None:
expectations_store_credentials = default_credentials
if validations_store_credentials is None:
validations_store_credentials = default_credentials
if checkpoint_store_credentials is None:
checkpoint_store_credentials = default_credentials
# Overwrite defaults
self.expectations_store_name = expectations_store_name
self.validations_store_name = validations_store_name
self.evaluation_parameter_store_name = evaluation_parameter_store_name
self.checkpoint_store_name = checkpoint_store_name
self.stores = {
expectations_store_name: {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "DatabaseStoreBackend",
"credentials": expectations_store_credentials,
},
},
validations_store_name: {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "DatabaseStoreBackend",
"credentials": validations_store_credentials,
},
},
evaluation_parameter_store_name: {"class_name": "EvaluationParameterStore"},
checkpoint_store_name: {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "DatabaseStoreBackend",
"credentials": checkpoint_store_credentials,
},
},
}
class DataContextConfig(BaseYamlConfig):
# TODO: <Alex>ALEX (does not work yet)</Alex>
# _config_schema_class = DataContextConfigSchema
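    """Configuration object for a Data Context.

    When store_backend_defaults is supplied, any of stores, data_docs_sites, and the
    *_store_name arguments that are left as None are taken from it; explicitly passed
    arguments always take precedence. config_version falls back to
    DataContextConfigDefaults.DEFAULT_CONFIG_VERSION when not provided.
    """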
def __init__(
self,
config_version: Optional[float] = None,
datasources: Optional[
Union[
Dict[str, DatasourceConfig],
Dict[str, Dict[str, Union[Dict[str, str], str, dict]]],
]
] = None,
expectations_store_name: Optional[str] = None,
validations_store_name: Optional[str] = None,
evaluation_parameter_store_name: Optional[str] = None,
checkpoint_store_name: Optional[str] = None,
plugins_directory: Optional[str] = None,
validation_operators=None,
stores: Optional[Dict] = None,
data_docs_sites: Optional[Dict] = None,
notebooks=None,
config_variables_file_path: Optional[str] = None,
anonymous_usage_statistics=None,
store_backend_defaults: Optional[BaseStoreBackendDefaults] = None,
commented_map: Optional[CommentedMap] = None,
concurrency: Optional[Union[ConcurrencyConfig, Dict]] = None,
):
# Set defaults
if config_version is None:
config_version = DataContextConfigDefaults.DEFAULT_CONFIG_VERSION.value
# Set defaults via store_backend_defaults if one is passed in
# Override attributes from store_backend_defaults with any items passed into the constructor:
if store_backend_defaults is not None:
if stores is None:
stores = store_backend_defaults.stores
if expectations_store_name is None:
expectations_store_name = store_backend_defaults.expectations_store_name
if validations_store_name is None:
validations_store_name = store_backend_defaults.validations_store_name
if evaluation_parameter_store_name is None:
evaluation_parameter_store_name = (
store_backend_defaults.evaluation_parameter_store_name
)
if data_docs_sites is None:
data_docs_sites = store_backend_defaults.data_docs_sites
if checkpoint_store_name is None:
checkpoint_store_name = store_backend_defaults.checkpoint_store_name
self._config_version = config_version
if datasources is None:
datasources = {}
self.datasources = datasources
self.expectations_store_name = expectations_store_name
self.validations_store_name = validations_store_name
self.evaluation_parameter_store_name = evaluation_parameter_store_name
if checkpoint_store_name is not None:
self.checkpoint_store_name = checkpoint_store_name
self.plugins_directory = plugins_directory
if validation_operators is not None:
self.validation_operators = validation_operators
self.stores = stores
self.notebooks = notebooks
self.data_docs_sites = data_docs_sites
self.config_variables_file_path = config_variables_file_path
if anonymous_usage_statistics is None:
anonymous_usage_statistics = AnonymizedUsageStatisticsConfig()
elif isinstance(anonymous_usage_statistics, dict):
anonymous_usage_statistics = AnonymizedUsageStatisticsConfig(
**anonymous_usage_statistics
)
self.anonymous_usage_statistics = anonymous_usage_statistics
if concurrency is None:
concurrency = ConcurrencyConfig()
elif isinstance(concurrency, dict):
concurrency = ConcurrencyConfig(**concurrency)
self.concurrency: ConcurrencyConfig = concurrency
super().__init__(commented_map=commented_map)
# TODO: <Alex>ALEX (we still need the next two properties)</Alex>
@classmethod
def get_config_class(cls):
return cls # DataContextConfig
@classmethod
def get_schema_class(cls):
return DataContextConfigSchema
@property
def config_version(self):
return self._config_version
class CheckpointConfigSchema(Schema):
class Meta:
unknown = INCLUDE
fields = (
"name",
"config_version",
"template_name",
"module_name",
"class_name",
"run_name_template",
"expectation_suite_name",
"batch_request",
"action_list",
"evaluation_parameters",
"runtime_configuration",
"validations",
"profilers",
# Next two fields are for LegacyCheckpoint configuration
"validation_operator_name",
"batches",
# Next fields are used by configurators
"site_names",
"slack_webhook",
"notify_on",
"notify_with",
"ge_cloud_id",
"expectation_suite_ge_cloud_id",
)
ordered = True
# if keys have None value, remove in post_dump
REMOVE_KEYS_IF_NONE = [
"site_names",
"slack_webhook",
"notify_on",
"notify_with",
]
ge_cloud_id = fields.UUID(required=False, allow_none=True)
name = fields.String(required=False, allow_none=True)
config_version = fields.Number(
validate=lambda x: (0 < x < 100) or x is None,
error_messages={"invalid": "config version must " "be a number or None."},
required=False,
allow_none=True,
)
template_name = fields.String(required=False, allow_none=True)
module_name = fields.String(required=False, missing="great_expectations.checkpoint")
class_name = fields.Str(required=False, allow_none=True)
run_name_template = fields.String(required=False, allow_none=True)
expectation_suite_name = fields.String(required=False, allow_none=True)
expectation_suite_ge_cloud_id = fields.UUID(required=False, allow_none=True)
batch_request = fields.Dict(required=False, allow_none=True)
action_list = fields.List(
cls_or_instance=fields.Dict(), required=False, allow_none=True
)
evaluation_parameters = fields.Dict(required=False, allow_none=True)
runtime_configuration = fields.Dict(required=False, allow_none=True)
validations = fields.List(
cls_or_instance=fields.Dict(), required=False, allow_none=True
)
profilers = fields.List(
cls_or_instance=fields.Dict(), required=False, allow_none=True
)
# Next two fields are for LegacyCheckpoint configuration
validation_operator_name = fields.Str(required=False, allow_none=True)
batches = fields.List(
cls_or_instance=fields.Dict(
keys=fields.Str(
validate=OneOf(["batch_kwargs", "expectation_suite_names"]),
required=False,
allow_none=True,
)
),
required=False,
allow_none=True,
)
# Next fields are used by configurators
site_names = fields.Raw(required=False, allow_none=True)
slack_webhook = fields.String(required=False, allow_none=True)
notify_on = fields.String(required=False, allow_none=True)
notify_with = fields.String(required=False, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
if not (
"name" in data or "validation_operator_name" in data or "batches" in data
):
raise ge_exceptions.InvalidConfigError(
f"""Your current Checkpoint configuration is incomplete. Please update your Checkpoint configuration to
continue.
"""
)
if data.get("config_version"):
if "name" not in data:
raise ge_exceptions.InvalidConfigError(
f"""Your Checkpoint configuration requires the "name" field. Please update your current Checkpoint
configuration to continue.
"""
)
@post_dump
def remove_keys_if_none(self, data, **kwargs):
data = deepcopy(data)
for key in self.REMOVE_KEYS_IF_NONE:
if key in data and data[key] is None:
data.pop(key)
return data
class CheckpointConfig(BaseYamlConfig):
# TODO: <Alex>ALEX (does not work yet)</Alex>
# _config_schema_class = CheckpointConfigSchema
def __init__(
self,
name: Optional[str] = None,
config_version: Optional[Union[int, float]] = None,
template_name: Optional[str] = None,
module_name: Optional[str] = None,
class_name: Optional[str] = None,
run_name_template: Optional[str] = None,
expectation_suite_name: Optional[str] = None,
batch_request: Optional[dict] = None,
action_list: Optional[List[dict]] = None,
evaluation_parameters: Optional[dict] = None,
runtime_configuration: Optional[dict] = None,
validations: Optional[List[dict]] = None,
profilers: Optional[List[dict]] = None,
validation_operator_name: Optional[str] = None,
batches: Optional[List[dict]] = None,
commented_map: Optional[CommentedMap] = None,
ge_cloud_id: Optional[str] = None,
# the following four args are used by SimpleCheckpoint
site_names: Optional[Union[list, str]] = None,
slack_webhook: Optional[str] = None,
notify_on: Optional[str] = None,
notify_with: Optional[str] = None,
expectation_suite_ge_cloud_id: Optional[str] = None,
):
self._name = name
self._config_version = config_version
if self.config_version is None:
class_name = class_name or "LegacyCheckpoint"
self.validation_operator_name = validation_operator_name
if batches is not None and isinstance(batches, list):
self.batches = batches
else:
class_name = class_name or "Checkpoint"
self._template_name = template_name
self._run_name_template = run_name_template
self._expectation_suite_name = expectation_suite_name
self._expectation_suite_ge_cloud_id = expectation_suite_ge_cloud_id
self._batch_request = batch_request
self._action_list = action_list or []
self._evaluation_parameters = evaluation_parameters or {}
self._runtime_configuration = runtime_configuration or {}
self._validations = validations or []
self._profilers = profilers or []
self._ge_cloud_id = ge_cloud_id
# the following attributes are used by SimpleCheckpoint
self._site_names = site_names
self._slack_webhook = slack_webhook
self._notify_on = notify_on
self._notify_with = notify_with
self._module_name = module_name or "great_expectations.checkpoint"
self._class_name = class_name
super().__init__(commented_map=commented_map)
def update(
self,
other_config: Optional["CheckpointConfig"] = None,
runtime_kwargs: Optional[dict] = None,
):
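        """Merge another CheckpointConfig and/or runtime kwargs into this configuration.

        Scalar fields (name, module_name, class_name, run_name_template,
        expectation_suite_name, expectation_suite_ge_cloud_id) are replaced when a
        non-None value is supplied; dict fields (batch_request, evaluation_parameters,
        runtime_configuration) are merged with nested_update; validations and profilers
        are extended, and action_list is merged by action name via
        get_updated_action_list.
        """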
assert other_config is not None or runtime_kwargs is not None, (
"other_config and runtime_kwargs cannot both " "be None"
)
if other_config is not None:
# replace
if other_config.name is not None:
self.name = other_config.name
if other_config.module_name is not None:
self.module_name = other_config.module_name
if other_config.class_name is not None:
self.class_name = other_config.class_name
if other_config.run_name_template is not None:
self.run_name_template = other_config.run_name_template
if other_config.expectation_suite_name is not None:
self.expectation_suite_name = other_config.expectation_suite_name
if other_config.expectation_suite_ge_cloud_id is not None:
self.expectation_suite_ge_cloud_id = (
other_config.expectation_suite_ge_cloud_id
)
# update
if other_config.batch_request is not None:
if self.batch_request is None:
batch_request = {}
else:
batch_request = self.batch_request
other_batch_request = other_config.batch_request
updated_batch_request = nested_update(
batch_request,
other_batch_request,
)
self._batch_request = updated_batch_request
if other_config.action_list is not None:
self.action_list = self.get_updated_action_list(
base_action_list=self.action_list,
other_action_list=other_config.action_list,
)
if other_config.evaluation_parameters is not None:
nested_update(
self.evaluation_parameters,
other_config.evaluation_parameters,
)
if other_config.runtime_configuration is not None:
nested_update(
self.runtime_configuration,
other_config.runtime_configuration,
)
if other_config.validations is not None:
self.validations.extend(
filter(
lambda v: v not in self.validations, other_config.validations
)
)
if other_config.profilers is not None:
self.profilers.extend(other_config.profilers)
if runtime_kwargs is not None and any(runtime_kwargs.values()):
# replace
if runtime_kwargs.get("run_name_template") is not None:
self.run_name_template = runtime_kwargs.get("run_name_template")
if runtime_kwargs.get("expectation_suite_name") is not None:
self.expectation_suite_name = runtime_kwargs.get(
"expectation_suite_name"
)
if runtime_kwargs.get("expectation_suite_ge_cloud_id") is not None:
self.expectation_suite_ge_cloud_id = runtime_kwargs.get(
"expectation_suite_ge_cloud_id"
)
# update
if runtime_kwargs.get("batch_request") is not None:
batch_request = self.batch_request
batch_request = batch_request or {}
runtime_batch_request = runtime_kwargs.get("batch_request")
batch_request = nested_update(batch_request, runtime_batch_request)
self._batch_request = batch_request
if runtime_kwargs.get("action_list") is not None:
self.action_list = self.get_updated_action_list(
base_action_list=self.action_list,
other_action_list=runtime_kwargs.get("action_list"),
)
if runtime_kwargs.get("evaluation_parameters") is not None:
nested_update(
self.evaluation_parameters,
runtime_kwargs.get("evaluation_parameters"),
)
if runtime_kwargs.get("runtime_configuration") is not None:
nested_update(
self.runtime_configuration,
runtime_kwargs.get("runtime_configuration"),
)
if runtime_kwargs.get("validations") is not None:
self.validations.extend(
filter(
lambda v: v not in self.validations,
runtime_kwargs.get("validations"),
)
)
if runtime_kwargs.get("profilers") is not None:
self.profilers.extend(runtime_kwargs.get("profilers"))
# TODO: <Alex>ALEX (we still need the next two properties)</Alex>
@classmethod
def get_config_class(cls):
return cls # CheckpointConfig
@classmethod
def get_schema_class(cls):
return CheckpointConfigSchema
@property
def ge_cloud_id(self):
return self._ge_cloud_id
@ge_cloud_id.setter
def ge_cloud_id(self, value: str):
self._ge_cloud_id = value
@property
def expectation_suite_ge_cloud_id(self):
return self._expectation_suite_ge_cloud_id
@expectation_suite_ge_cloud_id.setter
def expectation_suite_ge_cloud_id(self, value: str):
self._expectation_suite_ge_cloud_id = value
@property
def name(self):
return self._name
@name.setter
def name(self, value: str):
self._name = value
@property
def template_name(self):
return self._template_name
@template_name.setter
def template_name(self, value: str):
self._template_name = value
@property
def config_version(self):
return self._config_version
@property
def validations(self):
return self._validations
@property
def profilers(self):
return self._profilers
@property
def module_name(self):
return self._module_name
@module_name.setter
def module_name(self, value: str):
self._module_name = value
@property
def class_name(self):
return self._class_name
@class_name.setter
def class_name(self, value: str):
self._class_name = value
@property
def run_name_template(self):
return self._run_name_template
@run_name_template.setter
def run_name_template(self, value: str):
self._run_name_template = value
@property
def batch_request(self):
return self._batch_request
@batch_request.setter
def batch_request(self, value: dict):
self._batch_request = value
@property
def expectation_suite_name(self):
return self._expectation_suite_name
@expectation_suite_name.setter
def expectation_suite_name(self, value: str):
self._expectation_suite_name = value
@property
def action_list(self):
return self._action_list
@action_list.setter
def action_list(self, value: List[dict]):
self._action_list = value
@property
def site_names(self):
return self._site_names
@property
def slack_webhook(self):
return self._slack_webhook
@property
def notify_on(self):
return self._notify_on
@property
def notify_with(self):
return self._notify_with
@classmethod
def get_updated_action_list(
cls,
base_action_list: list,
other_action_list: list,
) -> List[dict]:
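        """Merge two action lists, keyed by each action's "name".

        Entries in other_action_list override or extend entries in base_action_list;
        an entry whose "action" value is None removes the matching base entry.
        """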
base_action_list_dict = {action["name"]: action for action in base_action_list}
for other_action in other_action_list:
other_action_name = other_action["name"]
if other_action_name in base_action_list_dict:
if other_action["action"] is None:
base_action_list_dict.pop(other_action_name)
else:
nested_update(
base_action_list_dict[other_action_name],
other_action,
dedup=True,
)
else:
base_action_list_dict[other_action_name] = other_action
return list(base_action_list_dict.values())
@property
def evaluation_parameters(self):
return self._evaluation_parameters
@property
def runtime_configuration(self):
return self._runtime_configuration
class CheckpointValidationConfig(DictDot):
pass
class CheckpointValidationConfigSchema(Schema):
pass
dataContextConfigSchema = DataContextConfigSchema()
datasourceConfigSchema = DatasourceConfigSchema()
dataConnectorConfigSchema = DataConnectorConfigSchema()
assetConfigSchema = AssetConfigSchema()
sorterConfigSchema = SorterConfigSchema()
anonymizedUsageStatisticsSchema = AnonymizedUsageStatisticsConfigSchema()
notebookConfigSchema = NotebookConfigSchema()
checkpointConfigSchema = CheckpointConfigSchema()
concurrencyConfigSchema = ConcurrencyConfigSchema()
import abc
import enum
import itertools
import logging
import uuid
from copy import deepcopy
from typing import Any, Dict, List, MutableMapping, Optional, Union
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap
from ruamel.yaml.compat import StringIO
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.util import convert_to_json_serializable, nested_update
from great_expectations.marshmallow__shade import (
INCLUDE,
Schema,
ValidationError,
fields,
post_dump,
post_load,
pre_load,
validates_schema,
)
from great_expectations.marshmallow__shade.validate import OneOf
from great_expectations.types import DictDot, SerializableDictDot
from great_expectations.types.configurations import ClassConfigSchema
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
logger = logging.getLogger(__name__)
CURRENT_GE_CONFIG_VERSION = 3
FIRST_GE_CONFIG_VERSION_WITH_CHECKPOINT_STORE = 3
CURRENT_CHECKPOINT_CONFIG_VERSION = 1
MINIMUM_SUPPORTED_CONFIG_VERSION = 2
DEFAULT_USAGE_STATISTICS_URL = (
"https://stats.greatexpectations.io/great_expectations/v1/usage_statistics"
)
def object_to_yaml_str(obj):
output_str: str
with StringIO() as string_stream:
yaml.dump(obj, string_stream)
output_str = string_stream.getvalue()
return output_str
class BaseYamlConfig(SerializableDictDot):
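    """Base class for YAML-backed configuration objects.

    Subclasses provide get_config_class() and get_schema_class(); this class handles
    loading from a ruamel CommentedMap, schema validation, and serialization to YAML
    strings and JSON-compatible dicts.
    """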
_config_schema_class = None
def __init__(self, commented_map: CommentedMap = None):
if commented_map is None:
commented_map = CommentedMap()
self._commented_map = commented_map
@classmethod
def _get_schema_instance(cls) -> Schema:
if not issubclass(cls.get_schema_class(), Schema):
raise ge_exceptions.InvalidConfigError(
"Invalid type: A configuration schema class needs to inherit from the Marshmallow Schema class."
)
if not issubclass(cls.get_config_class(), BaseYamlConfig):
raise ge_exceptions.InvalidConfigError(
"Invalid type: A configuration class needs to inherit from the BaseYamlConfig class."
)
if hasattr(cls.get_config_class(), "_schema_instance"):
# noinspection PyProtectedMember
schema_instance: Schema = cls.get_config_class()._schema_instance
if schema_instance is None:
cls.get_config_class()._schema_instance = (cls.get_schema_class())()
else:
return schema_instance
else:
cls.get_config_class().schema_instance = (cls.get_schema_class())()
return cls.get_config_class().schema_instance
@classmethod
def from_commented_map(cls, commented_map: CommentedMap):
try:
config: Union[dict, BaseYamlConfig]
config = cls._get_schema_instance().load(commented_map)
if isinstance(config, dict):
return cls.get_config_class()(commented_map=commented_map, **config)
return config
except ValidationError:
logger.error(
"Encountered errors during loading config. See ValidationError for more details."
)
raise
def _get_schema_validated_updated_commented_map(self) -> CommentedMap:
commented_map: CommentedMap = deepcopy(self._commented_map)
commented_map.update(self._get_schema_instance().dump(self))
return commented_map
def to_yaml(self, outfile):
"""
:returns None (but writes a YAML file containing the project configuration)
"""
yaml.dump(self.commented_map, outfile)
def to_yaml_str(self) -> str:
"""
:returns a YAML string containing the project configuration
"""
return object_to_yaml_str(self.commented_map)
def to_json_dict(self) -> dict:
"""
        :returns a JSON-serializable dict containing the project configuration
"""
commented_map: CommentedMap = self.commented_map
return convert_to_json_serializable(data=commented_map)
@property
def commented_map(self) -> CommentedMap:
return self._get_schema_validated_updated_commented_map()
@classmethod
def get_config_class(cls):
raise NotImplementedError
@classmethod
def get_schema_class(cls):
raise NotImplementedError
class AssetConfig(DictDot):
def __init__(
self,
name=None,
class_name=None,
module_name=None,
bucket=None,
prefix=None,
delimiter=None,
max_keys=None,
batch_spec_passthrough=None,
**kwargs,
):
if name is not None:
self.name = name
self._class_name = class_name
self._module_name = module_name
if bucket is not None:
self.bucket = bucket
if prefix is not None:
self.prefix = prefix
if delimiter is not None:
self.delimiter = delimiter
if max_keys is not None:
self.max_keys = max_keys
if batch_spec_passthrough is not None:
self.batch_spec_passthrough = batch_spec_passthrough
for k, v in kwargs.items():
setattr(self, k, v)
@property
def class_name(self):
return self._class_name
@property
def module_name(self):
return self._module_name
class AssetConfigSchema(Schema):
class Meta:
unknown = INCLUDE
name = fields.String(required=False, allow_none=True)
class_name = fields.String(required=False, allow_none=True, missing="Asset")
module_name = fields.String(
required=False,
        allow_none=True,
missing="great_expectations.datasource.data_connector.asset",
)
base_directory = fields.String(required=False, allow_none=True)
glob_directive = fields.String(required=False, allow_none=True)
pattern = fields.String(required=False, allow_none=True)
group_names = fields.List(
cls_or_instance=fields.Str(), required=False, allow_none=True
)
bucket = fields.String(required=False, allow_none=True)
prefix = fields.String(required=False, allow_none=True)
delimiter = fields.String(required=False, allow_none=True)
max_keys = fields.Integer(required=False, allow_none=True)
batch_spec_passthrough = fields.Dict(required=False, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
pass
# noinspection PyUnusedLocal
@post_load
def make_asset_config(self, data, **kwargs):
return AssetConfig(**data)
class SorterConfig(DictDot):
def __init__(
self,
name,
class_name=None,
module_name=None,
orderby="asc",
reference_list=None,
datetime_format=None,
**kwargs,
):
self._name = name
self._class_name = class_name
self._module_name = module_name
self._orderby = orderby
for k, v in kwargs.items():
setattr(self, k, v)
if reference_list is not None:
self._reference_list = reference_list
if datetime_format is not None:
self._datetime_format = datetime_format
@property
def name(self):
return self._name
@property
def module_name(self):
return self._module_name
@property
def class_name(self):
return self._class_name
@property
def orderby(self):
return self._orderby
@property
def reference_list(self):
return self._reference_list
@property
def datetime_format(self):
return self._datetime_format
class SorterConfigSchema(Schema):
class Meta:
unknown = INCLUDE
name = fields.String(required=True)
class_name = fields.String(required=True)
module_name = fields.String(
missing="great_expectations.datasource.data_connector.sorter"
)
orderby = fields.String(required=False, missing="asc", allow_none=False)
# allow_none = True because it is only used by some Sorters
reference_list = fields.List(
cls_or_instance=fields.Str(), required=False, missing=None, allow_none=True
)
datetime_format = fields.String(required=False, missing=None, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
pass
# noinspection PyUnusedLocal
@post_load
def make_sorter_config(self, data, **kwargs):
return SorterConfig(**data)
class DataConnectorConfig(DictDot):
def __init__(
self,
class_name,
module_name=None,
credentials=None,
assets=None,
base_directory=None,
glob_directive=None,
default_regex=None,
batch_identifiers=None,
sorters=None,
batch_spec_passthrough=None,
# S3
boto3_options=None,
bucket=None,
max_keys=None,
# Azure
azure_options=None,
container=None,
name_starts_with=None,
# GCS
bucket_or_name=None,
max_results=None,
# Both S3/GCS
prefix=None,
# Both S3/Azure
delimiter=None,
**kwargs,
):
self._class_name = class_name
self._module_name = module_name
if credentials is not None:
self.credentials = credentials
if assets is not None:
self.assets = assets
if base_directory is not None:
self.base_directory = base_directory
if glob_directive is not None:
self.glob_directive = glob_directive
if default_regex is not None:
self.default_regex = default_regex
if batch_identifiers is not None:
self.batch_identifiers = batch_identifiers
if sorters is not None:
self.sorters = sorters
if batch_spec_passthrough is not None:
self.batch_spec_passthrough = batch_spec_passthrough
# S3
if boto3_options is not None:
self.boto3_options = boto3_options
if bucket is not None:
self.bucket = bucket
if max_keys is not None:
self.max_keys = max_keys
# Azure
if azure_options is not None:
self.azure_options = azure_options
if container is not None:
self.container = container
if name_starts_with is not None:
self.name_starts_with = name_starts_with
# GCS
if bucket_or_name is not None:
self.bucket_or_name = bucket_or_name
if max_results is not None:
self.max_results = max_results
# Both S3/GCS
if prefix is not None:
self.prefix = prefix
# Both S3/Azure
if delimiter is not None:
self.delimiter = delimiter
for k, v in kwargs.items():
setattr(self, k, v)
@property
def class_name(self):
return self._class_name
@property
def module_name(self):
return self._module_name
class DataConnectorConfigSchema(Schema):
class Meta:
unknown = INCLUDE
class_name = fields.String(required=True)
module_name = fields.String(missing="great_expectations.datasource.data_connector")
assets = fields.Dict(
keys=fields.Str(),
values=fields.Nested(AssetConfigSchema, required=False, allow_none=True),
required=False,
allow_none=True,
)
base_directory = fields.String(required=False, allow_none=True)
glob_directive = fields.String(required=False, allow_none=True)
sorters = fields.List(
fields.Nested(SorterConfigSchema, required=False, allow_none=True),
required=False,
allow_none=True,
)
default_regex = fields.Dict(required=False, allow_none=True)
credentials = fields.Raw(required=False, allow_none=True)
batch_identifiers = fields.List(
cls_or_instance=fields.Str(), required=False, allow_none=True
)
# S3
boto3_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
bucket = fields.String(required=False, allow_none=True)
max_keys = fields.Integer(required=False, allow_none=True)
# Azure
azure_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
container = fields.String(required=False, allow_none=True)
name_starts_with = fields.String(required=False, allow_none=True)
# GCS
gcs_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
bucket_or_name = fields.String(required=False, allow_none=True)
max_results = fields.String(required=False, allow_none=True)
# Both S3/GCS
prefix = fields.String(required=False, allow_none=True)
# Both S3/Azure
delimiter = fields.String(required=False, allow_none=True)
data_asset_name_prefix = fields.String(required=False, allow_none=True)
data_asset_name_suffix = fields.String(required=False, allow_none=True)
include_schema_name = fields.Boolean(required=False, allow_none=True)
splitter_method = fields.String(required=False, allow_none=True)
splitter_kwargs = fields.Dict(required=False, allow_none=True)
sampling_method = fields.String(required=False, allow_none=True)
sampling_kwargs = fields.Dict(required=False, allow_none=True)
excluded_tables = fields.List(
cls_or_instance=fields.Str(), required=False, allow_none=True
)
included_tables = fields.List(
cls_or_instance=fields.Str(), required=False, allow_none=True
)
skip_inapplicable_tables = fields.Boolean(required=False, allow_none=True)
batch_spec_passthrough = fields.Dict(required=False, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
# If a class_name begins with the dollar sign ("$"), then it is assumed to be a variable name to be substituted.
if data["class_name"][0] == "$":
return
if ("default_regex" in data) and not (
data["class_name"]
in [
"InferredAssetFilesystemDataConnector",
"ConfiguredAssetFilesystemDataConnector",
"InferredAssetS3DataConnector",
"ConfiguredAssetS3DataConnector",
"InferredAssetAzureDataConnector",
"ConfiguredAssetAzureDataConnector",
"InferredAssetGCSDataConnector",
"ConfiguredAssetGCSDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by a
subclass of the FilePathDataConnector class (your data connector is "{data['class_name']}"). Please update your
configuration to continue.
"""
)
if ("glob_directive" in data) and not (
data["class_name"]
in [
"InferredAssetFilesystemDataConnector",
"ConfiguredAssetFilesystemDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by a
filesystem type of the data connector (your data connector is "{data['class_name']}"). Please update your
configuration to continue.
"""
)
if ("delimiter" in data) and not (
data["class_name"]
in [
"InferredAssetS3DataConnector",
"ConfiguredAssetS3DataConnector",
"InferredAssetAzureDataConnector",
"ConfiguredAssetAzureDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by an
S3/Azure type of the data connector (your data connector is "{data['class_name']}"). Please update your configuration to
continue.
"""
)
if ("prefix" in data) and not (
data["class_name"]
in [
"InferredAssetS3DataConnector",
"ConfiguredAssetS3DataConnector",
"InferredAssetGCSDataConnector",
"ConfiguredAssetGCSDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by an
S3/GCS type of the data connector (your data connector is "{data['class_name']}"). Please update your configuration to
continue.
"""
)
if ("bucket" in data or "max_keys" in data) and not (
data["class_name"]
in [
"InferredAssetS3DataConnector",
"ConfiguredAssetS3DataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by an
S3 type of the data connector (your data connector is "{data['class_name']}"). Please update your configuration to
continue.
"""
)
if (
"azure_options" in data or "container" in data or "name_starts_with" in data
) and not (
data["class_name"]
in [
"InferredAssetAzureDataConnector",
"ConfiguredAssetAzureDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by an
Azure type of the data connector (your data connector is "{data['class_name']}"). Please update your configuration to
continue.
"""
)
if "azure_options" in data and data["class_name"] in [
"InferredAssetAzureDataConnector",
"ConfiguredAssetAzureDataConnector",
]:
azure_options = data["azure_options"]
if not (("conn_str" in azure_options) ^ ("account_url" in azure_options)):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration is either missing methods of authentication or is using too many for the Azure type of data connector.
You must select exactly one of `conn_str` or `account_url`. Please update your configuration to continue.
"""
)
if (
"gcs_options" in data or "bucket_or_name" in data or "max_results" in data
) and not (
data["class_name"]
in [
"InferredAssetGCSDataConnector",
"ConfiguredAssetGCSDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by a
GCS type of the data connector (your data connector is "{data['class_name']}"). Please update your configuration to
continue.
"""
)
if "gcs_options" in data and data["class_name"] in [
"InferredAssetGCSDataConnector",
"ConfiguredAssetGCSDataConnector",
]:
gcs_options = data["gcs_options"]
if "filename" in gcs_options and "info" in gcs_options:
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration can only use a single method of authentication for the GCS type of data connector.
You must select exactly one of `filename` (from_service_account_file) or `info` (from_service_account_info). Please update your configuration to continue.
"""
)
if (
"data_asset_name_prefix" in data
or "data_asset_name_suffix" in data
or "include_schema_name" in data
or "splitter_method" in data
or "splitter_kwargs" in data
or "sampling_method" in data
or "sampling_kwargs" in data
or "excluded_tables" in data
or "included_tables" in data
or "skip_inapplicable_tables" in data
) and not (
data["class_name"]
in [
"InferredAssetSqlDataConnector",
"ConfiguredAssetSqlDataConnector",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data connector that are required only by an
SQL type of the data connector (your data connector is "{data['class_name']}"). Please update your configuration to
continue.
"""
)
# noinspection PyUnusedLocal
@post_load
def make_data_connector_config(self, data, **kwargs):
return DataConnectorConfig(**data)
class ExecutionEngineConfig(DictDot):
def __init__(
self,
class_name,
module_name=None,
caching=None,
batch_spec_defaults=None,
connection_string=None,
credentials=None,
spark_config=None,
boto3_options=None,
azure_options=None,
gcs_options=None,
**kwargs,
):
self._class_name = class_name
self._module_name = module_name
if caching is not None:
self.caching = caching
if batch_spec_defaults is not None:
self._batch_spec_defaults = batch_spec_defaults
if connection_string is not None:
self.connection_string = connection_string
if credentials is not None:
self.credentials = credentials
if spark_config is not None:
self.spark_config = spark_config
if boto3_options is not None:
self.boto3_options = boto3_options
if azure_options is not None:
self.azure_options = azure_options
if gcs_options is not None:
self.gcs_options = gcs_options
for k, v in kwargs.items():
setattr(self, k, v)
@property
def module_name(self):
return self._module_name
@property
def class_name(self):
return self._class_name
@property
def batch_spec_defaults(self):
return self._batch_spec_defaults
class ExecutionEngineConfigSchema(Schema):
class Meta:
unknown = INCLUDE
class_name = fields.String(required=True)
module_name = fields.String(missing="great_expectations.execution_engine")
connection_string = fields.String(required=False, allow_none=True)
credentials = fields.Raw(required=False, allow_none=True)
spark_config = fields.Raw(required=False, allow_none=True)
boto3_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
azure_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
gcs_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
caching = fields.Boolean(required=False, allow_none=True)
batch_spec_defaults = fields.Dict(required=False, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
# If a class_name begins with the dollar sign ("$"), then it is assumed to be a variable name to be substituted.
if data["class_name"][0] == "$":
return
if ("connection_string" in data or "credentials" in data) and not (
data["class_name"] == "SqlAlchemyExecutionEngine"
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses the "connection_string" key in an execution engine, but only
SqlAlchemyExecutionEngine requires this attribute (your execution engine is "{data['class_name']}"). Please update your
configuration to continue.
"""
)
if "spark_config" in data and not (
data["class_name"] == "SparkDFExecutionEngine"
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses the "spark_config" key in an execution engine, but only
SparkDFExecutionEngine requires this attribute (your execution engine is "{data['class_name']}"). Please update your
configuration to continue.
"""
)
# noinspection PyUnusedLocal
@post_load
def make_execution_engine_config(self, data, **kwargs):
return ExecutionEngineConfig(**data)
class DatasourceConfig(DictDot):
def __init__(
self,
class_name=None,
module_name: Optional[str] = "great_expectations.datasource",
execution_engine=None,
data_connectors=None,
data_asset_type=None,
batch_kwargs_generators=None,
connection_string=None,
credentials=None,
introspection=None,
tables=None,
boto3_options=None,
azure_options=None,
gcs_options=None,
reader_method=None,
reader_options=None,
limit=None,
**kwargs,
):
# NOTE - JPC - 20200316: Currently, we are mostly inconsistent with respect to this type...
self._class_name = class_name
self._module_name = module_name
if execution_engine is not None:
self.execution_engine = execution_engine
if data_connectors is not None and isinstance(data_connectors, dict):
self.data_connectors = data_connectors
# NOTE - AJB - 20201202: This should use the datasource class build_configuration method as in DataContext.add_datasource()
if data_asset_type is None:
if class_name == "PandasDatasource":
data_asset_type = {
"class_name": "PandasDataset",
"module_name": "great_expectations.dataset",
}
elif class_name == "SqlAlchemyDatasource":
data_asset_type = {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
}
elif class_name == "SparkDFDatasource":
data_asset_type = {
"class_name": "SparkDFDataset",
"module_name": "great_expectations.dataset",
}
if data_asset_type is not None:
self.data_asset_type = data_asset_type
if batch_kwargs_generators is not None:
self.batch_kwargs_generators = batch_kwargs_generators
if connection_string is not None:
self.connection_string = connection_string
if credentials is not None:
self.credentials = credentials
if introspection is not None:
self.introspection = introspection
if tables is not None:
self.tables = tables
if boto3_options is not None:
self.boto3_options = boto3_options
if azure_options is not None:
self.azure_options = azure_options
if gcs_options is not None:
self.gcs_options = gcs_options
if reader_method is not None:
self.reader_method = reader_method
if reader_options is not None:
self.reader_options = reader_options
if limit is not None:
self.limit = limit
for k, v in kwargs.items():
setattr(self, k, v)
@property
def class_name(self):
return self._class_name
@property
def module_name(self):
return self._module_name
class DatasourceConfigSchema(Schema):
class Meta:
unknown = INCLUDE
class_name = fields.String(missing="Datasource")
module_name = fields.String(missing="great_expectations.datasource")
force_reuse_spark_context = fields.Bool(required=False, allow_none=True)
spark_config = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
execution_engine = fields.Nested(
ExecutionEngineConfigSchema, required=False, allow_none=True
)
data_connectors = fields.Dict(
keys=fields.Str(),
values=fields.Nested(DataConnectorConfigSchema),
required=False,
allow_none=True,
)
data_asset_type = fields.Nested(ClassConfigSchema, required=False, allow_none=True)
# TODO: Update to generator-specific
# batch_kwargs_generators = fields.Mapping(keys=fields.Str(), values=fields.Nested(fields.GeneratorSchema))
batch_kwargs_generators = fields.Dict(
keys=fields.Str(), values=fields.Dict(), required=False, allow_none=True
)
connection_string = fields.String(required=False, allow_none=True)
credentials = fields.Raw(required=False, allow_none=True)
introspection = fields.Dict(required=False, allow_none=True)
tables = fields.Dict(required=False, allow_none=True)
boto3_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
azure_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
gcs_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
reader_method = fields.String(required=False, allow_none=True)
reader_options = fields.Dict(
keys=fields.Str(), values=fields.Str(), required=False, allow_none=True
)
limit = fields.Integer(required=False, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
if "generators" in data:
raise ge_exceptions.InvalidConfigError(
'Your current configuration uses the "generators" key in a datasource, but in version 0.10 of '
'GE that key is renamed to "batch_kwargs_generators". Please update your configuration to continue.'
)
# If a class_name begins with the dollar sign ("$"), then it is assumed to be a variable name to be substituted.
if data["class_name"][0] == "$":
return
if (
"connection_string" in data
or "credentials" in data
or "introspection" in data
or "tables" in data
) and not (
data["class_name"]
in [
"SqlAlchemyDatasource",
"SimpleSqlalchemyDatasource",
]
):
raise ge_exceptions.InvalidConfigError(
f"""Your current configuration uses one or more keys in a data source that are required only by a
sqlalchemy data source (your data source is "{data['class_name']}"). Please update your configuration to continue.
"""
)
# noinspection PyUnusedLocal
@post_load
def make_datasource_config(self, data, **kwargs):
return DatasourceConfig(**data)
class AnonymizedUsageStatisticsConfig(DictDot):
def __init__(self, enabled=True, data_context_id=None, usage_statistics_url=None):
self._enabled = enabled
if data_context_id is None:
data_context_id = str(uuid.uuid4())
self._explicit_id = False
else:
self._explicit_id = True
self._data_context_id = data_context_id
if usage_statistics_url is None:
usage_statistics_url = DEFAULT_USAGE_STATISTICS_URL
self._explicit_url = False
else:
self._explicit_url = True
self._usage_statistics_url = usage_statistics_url
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, enabled):
if not isinstance(enabled, bool):
raise ValueError("usage statistics enabled property must be boolean")
self._enabled = enabled
@property
def data_context_id(self):
return self._data_context_id
@data_context_id.setter
def data_context_id(self, data_context_id):
try:
uuid.UUID(data_context_id)
except ValueError:
raise ge_exceptions.InvalidConfigError(
"data_context_id must be a valid uuid"
)
self._data_context_id = data_context_id
self._explicit_id = True
@property
def explicit_id(self):
return self._explicit_id
@property
def usage_statistics_url(self):
return self._usage_statistics_url
@usage_statistics_url.setter
def usage_statistics_url(self, usage_statistics_url):
self._usage_statistics_url = usage_statistics_url
self._explicit_url = True
class AnonymizedUsageStatisticsConfigSchema(Schema):
data_context_id = fields.UUID()
enabled = fields.Boolean(default=True)
usage_statistics_url = fields.URL(allow_none=True)
_explicit_url = fields.Boolean(required=False)
# noinspection PyUnusedLocal
@post_load()
def make_usage_statistics_config(self, data, **kwargs):
if "data_context_id" in data:
data["data_context_id"] = str(data["data_context_id"])
return AnonymizedUsageStatisticsConfig(**data)
# noinspection PyUnusedLocal
@post_dump()
def filter_implicit(self, data, **kwargs):
if not data.get("_explicit_url") and "usage_statistics_url" in data:
del data["usage_statistics_url"]
if "_explicit_url" in data:
del data["_explicit_url"]
return data
class NotebookTemplateConfig(DictDot):
def __init__(self, file_name, template_kwargs=None):
self.file_name = file_name
if template_kwargs:
self.template_kwargs = template_kwargs
else:
self.template_kwargs = {}
class NotebookTemplateConfigSchema(Schema):
file_name = fields.String()
template_kwargs = fields.Dict(
keys=fields.Str(), values=fields.Str(), allow_none=True
)
# noinspection PyUnusedLocal
@post_load
def make_notebook_template_config(self, data, **kwargs):
return NotebookTemplateConfig(**data)
class NotebookConfig(DictDot):
def __init__(
self,
class_name,
module_name,
custom_templates_module,
header_markdown=None,
footer_markdown=None,
table_expectations_header_markdown=None,
column_expectations_header_markdown=None,
table_expectations_not_found_markdown=None,
column_expectations_not_found_markdown=None,
authoring_intro_markdown=None,
column_expectations_markdown=None,
header_code=None,
footer_code=None,
table_expectation_code=None,
column_expectation_code=None,
):
self.class_name = class_name
self.module_name = module_name
self.custom_templates_module = custom_templates_module
self.header_markdown = header_markdown
self.footer_markdown = footer_markdown
self.table_expectations_header_markdown = table_expectations_header_markdown
self.column_expectations_header_markdown = column_expectations_header_markdown
self.table_expectations_not_found_markdown = (
table_expectations_not_found_markdown
)
self.column_expectations_not_found_markdown = (
column_expectations_not_found_markdown
)
self.authoring_intro_markdown = authoring_intro_markdown
self.column_expectations_markdown = column_expectations_markdown
self.header_code = header_code
self.footer_code = footer_code
self.table_expectation_code = table_expectation_code
self.column_expectation_code = column_expectation_code
class NotebookConfigSchema(Schema):
class_name = fields.String(missing="SuiteEditNotebookRenderer")
module_name = fields.String(
missing="great_expectations.render.renderer.suite_edit_notebook_renderer"
)
custom_templates_module = fields.String()
header_markdown = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)
footer_markdown = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)
table_expectations_header_markdown = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
column_expectations_header_markdown = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
table_expectations_not_found_markdown = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
column_expectations_not_found_markdown = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
authoring_intro_markdown = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
column_expectations_markdown = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
header_code = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)
footer_code = fields.Nested(NotebookTemplateConfigSchema, allow_none=True)
table_expectation_code = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
column_expectation_code = fields.Nested(
NotebookTemplateConfigSchema, allow_none=True
)
# noinspection PyUnusedLocal
@post_load
def make_notebook_config(self, data, **kwargs):
return NotebookConfig(**data)
class NotebooksConfig(DictDot):
def __init__(self, suite_edit):
self.suite_edit = suite_edit
class NotebooksConfigSchema(Schema):
# for now only suite_edit, could have other customization options for
# notebooks in the future
suite_edit = fields.Nested(NotebookConfigSchema)
# noinspection PyUnusedLocal
@post_load
def make_notebooks_config(self, data, **kwargs):
return NotebooksConfig(**data)
class ConcurrencyConfig(DictDot):
"""WARNING: This class is experimental."""
def __init__(self, enabled: Optional[bool] = False):
"""Initialize a concurrency configuration to control multithreaded execution.
Args:
enabled: Whether or not multithreading is enabled.
"""
self._enabled = enabled
@property
def enabled(self):
"""Whether or not multithreading is enabled."""
return self._enabled
@property
def max_database_query_concurrency(self) -> int:
"""Max number of concurrent database queries to execute with mulithreading."""
# BigQuery has a limit of 100 for "Concurrent rate limit for interactive queries" as described at
        # https://cloud.google.com/bigquery/quotas#query_jobs. If necessary, this can later be tuned for other
# databases and/or be manually user configurable.
return 100
def add_sqlalchemy_create_engine_parameters(
self, parameters: MutableMapping[str, Any]
):
"""Update SqlAlchemy parameters to prevent concurrency errors (e.g. http://sqlalche.me/e/14/3o7r) and
bottlenecks.
Args:
parameters: SqlAlchemy create_engine parameters to which we add concurrency appropriate parameters. If the
concurrency parameters are already set, those parameters are left unchanged.
"""
if not self._enabled:
return
if "pool_size" not in parameters:
# https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.pool_size
parameters["pool_size"] = 0
if "max_overflow" not in parameters:
# https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.max_overflow
parameters["max_overflow"] = -1
class ConcurrencyConfigSchema(Schema):
"""WARNING: This class is experimental."""
enabled = fields.Boolean(default=False)
class GeCloudConfig(DictDot):
def __init__(self, base_url: str, account_id: str, access_token: str):
self.base_url = base_url
self.account_id = account_id
self.access_token = access_token
def to_json_dict(self):
return {
"base_url": self.base_url,
"account_id": self.account_id,
"access_token": self.access_token,
}
class DataContextConfigSchema(Schema):
config_version = fields.Number(
validate=lambda x: 0 < x < 100,
error_messages={"invalid": "config version must " "be a number."},
)
datasources = fields.Dict(
keys=fields.Str(),
values=fields.Nested(DatasourceConfigSchema),
required=False,
allow_none=True,
)
expectations_store_name = fields.Str()
validations_store_name = fields.Str()
evaluation_parameter_store_name = fields.Str()
checkpoint_store_name = fields.Str(required=False, allow_none=True)
plugins_directory = fields.Str(allow_none=True)
validation_operators = fields.Dict(
keys=fields.Str(), values=fields.Dict(), required=False, allow_none=True
)
stores = fields.Dict(keys=fields.Str(), values=fields.Dict())
notebooks = fields.Nested(NotebooksConfigSchema, allow_none=True)
data_docs_sites = fields.Dict(
keys=fields.Str(), values=fields.Dict(), allow_none=True
)
config_variables_file_path = fields.Str(allow_none=True)
anonymous_usage_statistics = fields.Nested(AnonymizedUsageStatisticsConfigSchema)
concurrency = fields.Nested(ConcurrencyConfigSchema)
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
def handle_error(self, exc, data, **kwargs):
"""Log and raise our custom exception when (de)serialization fails."""
if (
exc
and exc.messages
and isinstance(exc.messages, dict)
and all([key is None for key in exc.messages.keys()])
):
exc.messages = list(itertools.chain.from_iterable(exc.messages.values()))
message: str = (
f"Error while processing DataContextConfig: {' '.join(exc.messages)}"
)
logger.error(message)
raise ge_exceptions.InvalidDataContextConfigError(
message=message,
)
@validates_schema
def validate_schema(self, data, **kwargs):
if "config_version" not in data:
raise ge_exceptions.InvalidDataContextConfigError(
"The key `config_version` is missing; please check your config file.",
validation_error=ValidationError(message="no config_version key"),
)
if not isinstance(data["config_version"], (int, float)):
raise ge_exceptions.InvalidDataContextConfigError(
"The key `config_version` must be a number. Please check your config file.",
validation_error=ValidationError(message="config version not a number"),
)
# When migrating from 0.7.x to 0.8.0
if data["config_version"] == 0 and any(
[
store_config["class_name"] == "ValidationsStore"
for store_config in data["stores"].values()
]
):
raise ge_exceptions.UnsupportedConfigVersionError(
"You appear to be using a config version from the 0.7.x series. This version is no longer supported."
)
if data["config_version"] < MINIMUM_SUPPORTED_CONFIG_VERSION:
raise ge_exceptions.UnsupportedConfigVersionError(
"You appear to have an invalid config version ({}).\n The version number must be at least {}. "
"Please see the migration guide at https://docs.greatexpectations.io/en/latest/guides/how_to_guides/migrating_versions.html".format(
data["config_version"], MINIMUM_SUPPORTED_CONFIG_VERSION
),
)
if data["config_version"] > CURRENT_GE_CONFIG_VERSION:
raise ge_exceptions.InvalidDataContextConfigError(
"You appear to have an invalid config version ({}).\n The maximum valid version is {}.".format(
data["config_version"], CURRENT_GE_CONFIG_VERSION
),
validation_error=ValidationError(message="config version too high"),
)
if data["config_version"] < CURRENT_GE_CONFIG_VERSION and (
"checkpoint_store_name" in data
or any(
[
store_config["class_name"] == "CheckpointStore"
for store_config in data["stores"].values()
]
)
):
raise ge_exceptions.InvalidDataContextConfigError(
"You appear to be using a Checkpoint store with an invalid config version ({}).\n Your data context with this older configuration version specifies a Checkpoint store, which is a new feature. Please update your configuration to the new version number {} before adding a Checkpoint store.\n Visit https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html to learn more about the upgrade process.".format(
data["config_version"], float(CURRENT_GE_CONFIG_VERSION)
),
validation_error=ValidationError(
message="You appear to be using a Checkpoint store with an invalid config version ({}).\n Your data context with this older configuration version specifies a Checkpoint store, which is a new feature. Please update your configuration to the new version number {} before adding a Checkpoint store.\n Visit https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html to learn more about the upgrade process.".format(
data["config_version"], float(CURRENT_GE_CONFIG_VERSION)
)
),
)
if (
data["config_version"] >= FIRST_GE_CONFIG_VERSION_WITH_CHECKPOINT_STORE
and "validation_operators" in data
and data["validation_operators"] is not None
):
# TODO: <Alex>Add a URL to the migration guide with instructions for how to replace validation_operators with appropriate actions.</Alex>
logger.warning(
"You appear to be using a legacy capability with the latest config version ({}).\n Your data context with this configuration version uses validation_operators, which are being deprecated. Please update your configuration to be compatible with the version number {}.".format(
data["config_version"], CURRENT_GE_CONFIG_VERSION
),
)
class DataContextConfigDefaults(enum.Enum):
DEFAULT_CONFIG_VERSION = CURRENT_GE_CONFIG_VERSION
DEFAULT_EXPECTATIONS_STORE_NAME = "expectations_store"
EXPECTATIONS_BASE_DIRECTORY = "expectations"
DEFAULT_EXPECTATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME = (
f"{EXPECTATIONS_BASE_DIRECTORY}/"
)
DEFAULT_VALIDATIONS_STORE_NAME = "validations_store"
VALIDATIONS_BASE_DIRECTORY = "validations"
DEFAULT_VALIDATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME = (
f"uncommitted/{VALIDATIONS_BASE_DIRECTORY}/"
)
DEFAULT_EVALUATION_PARAMETER_STORE_NAME = "evaluation_parameter_store"
DEFAULT_EVALUATION_PARAMETER_STORE_BASE_DIRECTORY_RELATIVE_NAME = (
"evaluation_parameters/"
)
DEFAULT_CHECKPOINT_STORE_NAME = "checkpoint_store"
CHECKPOINTS_BASE_DIRECTORY = "checkpoints"
DEFAULT_CHECKPOINT_STORE_BASE_DIRECTORY_RELATIVE_NAME = (
f"{CHECKPOINTS_BASE_DIRECTORY}/"
)
DEFAULT_DATA_DOCS_SITE_NAME = "local_site"
DEFAULT_CONFIG_VARIABLES_FILEPATH = "uncommitted/config_variables.yml"
PLUGINS_BASE_DIRECTORY = "plugins"
DEFAULT_PLUGINS_DIRECTORY = f"{PLUGINS_BASE_DIRECTORY}/"
NOTEBOOKS_BASE_DIRECTORY = "notebooks"
DEFAULT_VALIDATION_OPERATORS = {
"action_list_operator": {
"class_name": "ActionListValidationOperator",
"action_list": [
{
"name": "store_validation_result",
"action": {"class_name": "StoreValidationResultAction"},
},
{
"name": "store_evaluation_params",
"action": {"class_name": "StoreEvaluationParametersAction"},
},
{
"name": "update_data_docs",
"action": {"class_name": "UpdateDataDocsAction"},
},
],
}
}
DEFAULT_STORES = {
DEFAULT_EXPECTATIONS_STORE_NAME: {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "TupleFilesystemStoreBackend",
"base_directory": DEFAULT_EXPECTATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME,
},
},
DEFAULT_VALIDATIONS_STORE_NAME: {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "TupleFilesystemStoreBackend",
"base_directory": DEFAULT_VALIDATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME,
},
},
DEFAULT_EVALUATION_PARAMETER_STORE_NAME: {
"class_name": "EvaluationParameterStore"
},
DEFAULT_CHECKPOINT_STORE_NAME: {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "TupleFilesystemStoreBackend",
"suppress_store_backend_id": True,
"base_directory": DEFAULT_CHECKPOINT_STORE_BASE_DIRECTORY_RELATIVE_NAME,
},
},
}
DEFAULT_DATA_DOCS_SITES = {
DEFAULT_DATA_DOCS_SITE_NAME: {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"store_backend": {
"class_name": "TupleFilesystemStoreBackend",
"base_directory": "uncommitted/data_docs/local_site/",
},
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
}
}
class CheckpointConfigDefaults(enum.Enum):
DEFAULT_CONFIG_VERSION = CURRENT_CHECKPOINT_CONFIG_VERSION
class BaseStoreBackendDefaults(DictDot):
"""
Define base defaults for platform specific StoreBackendDefaults.
StoreBackendDefaults define defaults for specific cases of often used configurations.
    For example, if you plan to store expectations, validations, and data_docs in S3, use S3StoreBackendDefaults and you may be able to specify fewer parameters.
"""
def __init__(
self,
expectations_store_name: str = DataContextConfigDefaults.DEFAULT_EXPECTATIONS_STORE_NAME.value,
validations_store_name: str = DataContextConfigDefaults.DEFAULT_VALIDATIONS_STORE_NAME.value,
evaluation_parameter_store_name: str = DataContextConfigDefaults.DEFAULT_EVALUATION_PARAMETER_STORE_NAME.value,
checkpoint_store_name: str = DataContextConfigDefaults.DEFAULT_CHECKPOINT_STORE_NAME.value,
data_docs_site_name: str = DataContextConfigDefaults.DEFAULT_DATA_DOCS_SITE_NAME.value,
validation_operators: dict = None,
stores: dict = None,
data_docs_sites: dict = None,
):
self.expectations_store_name = expectations_store_name
self.validations_store_name = validations_store_name
self.evaluation_parameter_store_name = evaluation_parameter_store_name
self.checkpoint_store_name = checkpoint_store_name
self.validation_operators = validation_operators
if stores is None:
stores = deepcopy(DataContextConfigDefaults.DEFAULT_STORES.value)
self.stores = stores
if data_docs_sites is None:
data_docs_sites = deepcopy(
DataContextConfigDefaults.DEFAULT_DATA_DOCS_SITES.value
)
self.data_docs_sites = data_docs_sites
self.data_docs_site_name = data_docs_site_name
class S3StoreBackendDefaults(BaseStoreBackendDefaults):
"""
Default store configs for s3 backends, with some accessible parameters
Args:
default_bucket_name: Use this bucket name for stores that do not have a bucket name provided
expectations_store_bucket_name: Overrides default_bucket_name if supplied
validations_store_bucket_name: Overrides default_bucket_name if supplied
data_docs_bucket_name: Overrides default_bucket_name if supplied
checkpoint_store_bucket_name: Overrides default_bucket_name if supplied
expectations_store_prefix: Overrides default if supplied
validations_store_prefix: Overrides default if supplied
data_docs_prefix: Overrides default if supplied
checkpoint_store_prefix: Overrides default if supplied
expectations_store_name: Overrides default if supplied
validations_store_name: Overrides default if supplied
evaluation_parameter_store_name: Overrides default if supplied
checkpoint_store_name: Overrides default if supplied
"""
def __init__(
self,
default_bucket_name: Optional[str] = None,
expectations_store_bucket_name: Optional[str] = None,
validations_store_bucket_name: Optional[str] = None,
data_docs_bucket_name: Optional[str] = None,
checkpoint_store_bucket_name: Optional[str] = None,
expectations_store_prefix: str = "expectations",
validations_store_prefix: str = "validations",
data_docs_prefix: str = "data_docs",
checkpoint_store_prefix: str = "checkpoints",
expectations_store_name: str = "expectations_S3_store",
validations_store_name: str = "validations_S3_store",
evaluation_parameter_store_name: str = "evaluation_parameter_store",
checkpoint_store_name: str = "checkpoint_S3_store",
):
# Initialize base defaults
super().__init__()
# Use default_bucket_name if separate store buckets are not provided
if expectations_store_bucket_name is None:
expectations_store_bucket_name = default_bucket_name
if validations_store_bucket_name is None:
validations_store_bucket_name = default_bucket_name
if data_docs_bucket_name is None:
data_docs_bucket_name = default_bucket_name
if checkpoint_store_bucket_name is None:
checkpoint_store_bucket_name = default_bucket_name
# Overwrite defaults
self.expectations_store_name = expectations_store_name
self.validations_store_name = validations_store_name
self.evaluation_parameter_store_name = evaluation_parameter_store_name
self.checkpoint_store_name = checkpoint_store_name
self.stores = {
expectations_store_name: {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": expectations_store_bucket_name,
"prefix": expectations_store_prefix,
},
},
validations_store_name: {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": validations_store_bucket_name,
"prefix": validations_store_prefix,
},
},
evaluation_parameter_store_name: {"class_name": "EvaluationParameterStore"},
checkpoint_store_name: {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": checkpoint_store_bucket_name,
"prefix": checkpoint_store_prefix,
},
},
}
self.data_docs_sites = {
"s3_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": data_docs_bucket_name,
"prefix": data_docs_prefix,
},
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
}
}
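# Illustrative sketch: S3StoreBackendDefaults is typically handed to
# DataContextConfig (defined further below) via store_backend_defaults, so that
# the expectations, validations and checkpoint stores plus the data docs site
# all point at the same bucket. The bucket name here is a placeholder.
def _example_s3_backed_data_context_config():
    store_backend_defaults = S3StoreBackendDefaults(default_bucket_name="my-ge-bucket")
    return DataContextConfig(store_backend_defaults=store_backend_defaults)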
class FilesystemStoreBackendDefaults(BaseStoreBackendDefaults):
"""
Default store configs for filesystem backends, with some accessible parameters
Args:
root_directory: Absolute directory prepended to the base_directory for each store
plugins_directory: Overrides default if supplied
"""
def __init__(
self,
root_directory: Optional[str] = None,
plugins_directory: Optional[str] = None,
):
# Initialize base defaults
super().__init__()
if plugins_directory is None:
plugins_directory = (
DataContextConfigDefaults.DEFAULT_PLUGINS_DIRECTORY.value
)
self.plugins_directory = plugins_directory
if root_directory is not None:
self.stores[self.expectations_store_name]["store_backend"][
"root_directory"
] = root_directory
self.stores[self.validations_store_name]["store_backend"][
"root_directory"
] = root_directory
self.stores[self.checkpoint_store_name]["store_backend"][
"root_directory"
] = root_directory
self.data_docs_sites[self.data_docs_site_name]["store_backend"][
"root_directory"
] = root_directory
class InMemoryStoreBackendDefaults(BaseStoreBackendDefaults):
"""
Default store configs for in memory backends.
This is useful for testing without persistence.
"""
def __init__(
self,
):
# Initialize base defaults
super().__init__()
self.stores = {
self.expectations_store_name: {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "InMemoryStoreBackend",
},
},
self.validations_store_name: {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "InMemoryStoreBackend",
},
},
self.evaluation_parameter_store_name: {
"class_name": "EvaluationParameterStore"
},
self.checkpoint_store_name: {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "InMemoryStoreBackend",
},
},
}
self.data_docs_sites = {}
class GCSStoreBackendDefaults(BaseStoreBackendDefaults):
"""
Default store configs for Google Cloud Storage (GCS) backends, with some accessible parameters
Args:
default_bucket_name: Use this bucket name for stores that do not have a bucket name provided
default_project_name: Use this project name for stores that do not have a project name provided
expectations_store_bucket_name: Overrides default_bucket_name if supplied
validations_store_bucket_name: Overrides default_bucket_name if supplied
data_docs_bucket_name: Overrides default_bucket_name if supplied
checkpoint_store_bucket_name: Overrides default_bucket_name if supplied
expectations_store_project_name: Overrides default_project_name if supplied
validations_store_project_name: Overrides default_project_name if supplied
data_docs_project_name: Overrides default_project_name if supplied
checkpoint_store_project_name: Overrides default_project_name if supplied
expectations_store_prefix: Overrides default if supplied
validations_store_prefix: Overrides default if supplied
data_docs_prefix: Overrides default if supplied
checkpoint_store_prefix: Overrides default if supplied
expectations_store_name: Overrides default if supplied
validations_store_name: Overrides default if supplied
evaluation_parameter_store_name: Overrides default if supplied
checkpoint_store_name: Overrides default if supplied
"""
def __init__(
self,
default_bucket_name: Optional[str] = None,
default_project_name: Optional[str] = None,
expectations_store_bucket_name: Optional[str] = None,
validations_store_bucket_name: Optional[str] = None,
data_docs_bucket_name: Optional[str] = None,
checkpoint_store_bucket_name: Optional[str] = None,
expectations_store_project_name: Optional[str] = None,
validations_store_project_name: Optional[str] = None,
data_docs_project_name: Optional[str] = None,
checkpoint_store_project_name: Optional[str] = None,
expectations_store_prefix: str = "expectations",
validations_store_prefix: str = "validations",
data_docs_prefix: str = "data_docs",
checkpoint_store_prefix: str = "checkpoints",
expectations_store_name: str = "expectations_GCS_store",
validations_store_name: str = "validations_GCS_store",
evaluation_parameter_store_name: str = "evaluation_parameter_store",
checkpoint_store_name: str = "checkpoint_GCS_store",
):
# Initialize base defaults
super().__init__()
# Use default_bucket_name if separate store buckets are not provided
if expectations_store_bucket_name is None:
expectations_store_bucket_name = default_bucket_name
if validations_store_bucket_name is None:
validations_store_bucket_name = default_bucket_name
if data_docs_bucket_name is None:
data_docs_bucket_name = default_bucket_name
if checkpoint_store_bucket_name is None:
checkpoint_store_bucket_name = default_bucket_name
# Use default_project_name if separate store projects are not provided
if expectations_store_project_name is None:
expectations_store_project_name = default_project_name
if validations_store_project_name is None:
validations_store_project_name = default_project_name
if data_docs_project_name is None:
data_docs_project_name = default_project_name
if checkpoint_store_project_name is None:
checkpoint_store_project_name = default_project_name
# Overwrite defaults
self.expectations_store_name = expectations_store_name
self.validations_store_name = validations_store_name
self.evaluation_parameter_store_name = evaluation_parameter_store_name
self.checkpoint_store_name = checkpoint_store_name
self.stores = {
expectations_store_name: {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "TupleGCSStoreBackend",
"project": expectations_store_project_name,
"bucket": expectations_store_bucket_name,
"prefix": expectations_store_prefix,
},
},
validations_store_name: {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "TupleGCSStoreBackend",
"project": validations_store_project_name,
"bucket": validations_store_bucket_name,
"prefix": validations_store_prefix,
},
},
evaluation_parameter_store_name: {"class_name": "EvaluationParameterStore"},
checkpoint_store_name: {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "TupleGCSStoreBackend",
"project": checkpoint_store_project_name,
"bucket": checkpoint_store_bucket_name,
"prefix": checkpoint_store_prefix,
},
},
}
self.data_docs_sites = {
"gcs_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"store_backend": {
"class_name": "TupleGCSStoreBackend",
"project": data_docs_project_name,
"bucket": data_docs_bucket_name,
"prefix": data_docs_prefix,
},
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
}
}
class DatabaseStoreBackendDefaults(BaseStoreBackendDefaults):
"""
Default store configs for database backends, with some accessible parameters
Args:
default_credentials: Use these credentials for all stores that do not have credentials provided
expectations_store_credentials: Overrides default_credentials if supplied
validations_store_credentials: Overrides default_credentials if supplied
checkpoint_store_credentials: Overrides default_credentials if supplied
expectations_store_name: Overrides default if supplied
validations_store_name: Overrides default if supplied
evaluation_parameter_store_name: Overrides default if supplied
checkpoint_store_name: Overrides default if supplied
"""
def __init__(
self,
default_credentials: Optional[Dict] = None,
expectations_store_credentials: Optional[Dict] = None,
validations_store_credentials: Optional[Dict] = None,
checkpoint_store_credentials: Optional[Dict] = None,
expectations_store_name: str = "expectations_database_store",
validations_store_name: str = "validations_database_store",
evaluation_parameter_store_name: str = "evaluation_parameter_store",
checkpoint_store_name: str = "checkpoint_database_store",
):
# Initialize base defaults
super().__init__()
# Use default credentials if separate credentials not supplied for expectations_store and validations_store
if expectations_store_credentials is None:
expectations_store_credentials = default_credentials
if validations_store_credentials is None:
validations_store_credentials = default_credentials
if checkpoint_store_credentials is None:
checkpoint_store_credentials = default_credentials
# Overwrite defaults
self.expectations_store_name = expectations_store_name
self.validations_store_name = validations_store_name
self.evaluation_parameter_store_name = evaluation_parameter_store_name
self.checkpoint_store_name = checkpoint_store_name
self.stores = {
expectations_store_name: {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "DatabaseStoreBackend",
"credentials": expectations_store_credentials,
},
},
validations_store_name: {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "DatabaseStoreBackend",
"credentials": validations_store_credentials,
},
},
evaluation_parameter_store_name: {"class_name": "EvaluationParameterStore"},
checkpoint_store_name: {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "DatabaseStoreBackend",
"credentials": checkpoint_store_credentials,
},
},
}
class DataContextConfig(BaseYamlConfig):
# TODO: <Alex>ALEX (does not work yet)</Alex>
# _config_schema_class = DataContextConfigSchema
def __init__(
self,
config_version: Optional[float] = None,
datasources: Optional[
Union[
Dict[str, DatasourceConfig],
Dict[str, Dict[str, Union[Dict[str, str], str, dict]]],
]
] = None,
expectations_store_name: Optional[str] = None,
validations_store_name: Optional[str] = None,
evaluation_parameter_store_name: Optional[str] = None,
checkpoint_store_name: Optional[str] = None,
plugins_directory: Optional[str] = None,
validation_operators=None,
stores: Optional[Dict] = None,
data_docs_sites: Optional[Dict] = None,
notebooks=None,
config_variables_file_path: Optional[str] = None,
anonymous_usage_statistics=None,
store_backend_defaults: Optional[BaseStoreBackendDefaults] = None,
commented_map: Optional[CommentedMap] = None,
concurrency: Optional[Union[ConcurrencyConfig, Dict]] = None,
):
# Set defaults
if config_version is None:
config_version = DataContextConfigDefaults.DEFAULT_CONFIG_VERSION.value
# Set defaults via store_backend_defaults if one is passed in
# Override attributes from store_backend_defaults with any items passed into the constructor:
if store_backend_defaults is not None:
if stores is None:
stores = store_backend_defaults.stores
if expectations_store_name is None:
expectations_store_name = store_backend_defaults.expectations_store_name
if validations_store_name is None:
validations_store_name = store_backend_defaults.validations_store_name
if evaluation_parameter_store_name is None:
evaluation_parameter_store_name = (
store_backend_defaults.evaluation_parameter_store_name
)
if data_docs_sites is None:
data_docs_sites = store_backend_defaults.data_docs_sites
if checkpoint_store_name is None:
checkpoint_store_name = store_backend_defaults.checkpoint_store_name
self._config_version = config_version
if datasources is None:
datasources = {}
self.datasources = datasources
self.expectations_store_name = expectations_store_name
self.validations_store_name = validations_store_name
self.evaluation_parameter_store_name = evaluation_parameter_store_name
if checkpoint_store_name is not None:
self.checkpoint_store_name = checkpoint_store_name
self.plugins_directory = plugins_directory
if validation_operators is not None:
self.validation_operators = validation_operators
self.stores = stores
self.notebooks = notebooks
self.data_docs_sites = data_docs_sites
self.config_variables_file_path = config_variables_file_path
if anonymous_usage_statistics is None:
anonymous_usage_statistics = AnonymizedUsageStatisticsConfig()
elif isinstance(anonymous_usage_statistics, dict):
anonymous_usage_statistics = AnonymizedUsageStatisticsConfig(
**anonymous_usage_statistics
)
self.anonymous_usage_statistics = anonymous_usage_statistics
if concurrency is None:
concurrency = ConcurrencyConfig()
elif isinstance(concurrency, dict):
concurrency = ConcurrencyConfig(**concurrency)
self.concurrency: ConcurrencyConfig = concurrency
super().__init__(commented_map=commented_map)
# TODO: <Alex>ALEX (we still need the next two properties)</Alex>
@classmethod
def get_config_class(cls):
return cls # DataContextConfig
@classmethod
def get_schema_class(cls):
return DataContextConfigSchema
@property
def config_version(self):
return self._config_version
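# Illustrative sketch: dict-valued anonymous_usage_statistics and concurrency
# arguments are coerced into their config classes by DataContextConfig.__init__
# above; InMemoryStoreBackendDefaults keeps the whole context non-persistent,
# which is convenient for tests.
def _example_in_memory_data_context_config():
    config = DataContextConfig(
        store_backend_defaults=InMemoryStoreBackendDefaults(),
        concurrency={"enabled": True},
    )
    assert isinstance(config.concurrency, ConcurrencyConfig)
    return config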
class CheckpointConfigSchema(Schema):
class Meta:
unknown = INCLUDE
fields = (
"name",
"config_version",
"template_name",
"module_name",
"class_name",
"run_name_template",
"expectation_suite_name",
"batch_request",
"action_list",
"evaluation_parameters",
"runtime_configuration",
"validations",
"profilers",
# Next two fields are for LegacyCheckpoint configuration
"validation_operator_name",
"batches",
# Next fields are used by configurators
"site_names",
"slack_webhook",
"notify_on",
"notify_with",
"ge_cloud_id",
"expectation_suite_ge_cloud_id",
)
ordered = True
# if keys have None value, remove in post_dump
REMOVE_KEYS_IF_NONE = [
"site_names",
"slack_webhook",
"notify_on",
"notify_with",
]
ge_cloud_id = fields.UUID(required=False, allow_none=True)
name = fields.String(required=False, allow_none=True)
config_version = fields.Number(
validate=lambda x: (0 < x < 100) or x is None,
error_messages={"invalid": "config version must " "be a number or None."},
required=False,
allow_none=True,
)
template_name = fields.String(required=False, allow_none=True)
module_name = fields.String(required=False, missing="great_expectations.checkpoint")
class_name = fields.Str(required=False, allow_none=True)
run_name_template = fields.String(required=False, allow_none=True)
expectation_suite_name = fields.String(required=False, allow_none=True)
expectation_suite_ge_cloud_id = fields.UUID(required=False, allow_none=True)
batch_request = fields.Dict(required=False, allow_none=True)
action_list = fields.List(
cls_or_instance=fields.Dict(), required=False, allow_none=True
)
evaluation_parameters = fields.Dict(required=False, allow_none=True)
runtime_configuration = fields.Dict(required=False, allow_none=True)
validations = fields.List(
cls_or_instance=fields.Dict(), required=False, allow_none=True
)
profilers = fields.List(
cls_or_instance=fields.Dict(), required=False, allow_none=True
)
# Next two fields are for LegacyCheckpoint configuration
validation_operator_name = fields.Str(required=False, allow_none=True)
batches = fields.List(
cls_or_instance=fields.Dict(
keys=fields.Str(
validate=OneOf(["batch_kwargs", "expectation_suite_names"]),
required=False,
allow_none=True,
)
),
required=False,
allow_none=True,
)
# Next fields are used by configurators
site_names = fields.Raw(required=False, allow_none=True)
slack_webhook = fields.String(required=False, allow_none=True)
notify_on = fields.String(required=False, allow_none=True)
notify_with = fields.String(required=False, allow_none=True)
@validates_schema
def validate_schema(self, data, **kwargs):
if not (
"name" in data or "validation_operator_name" in data or "batches" in data
):
raise ge_exceptions.InvalidConfigError(
f"""Your current Checkpoint configuration is incomplete. Please update your Checkpoint configuration to
continue.
"""
)
if data.get("config_version"):
if "name" not in data:
raise ge_exceptions.InvalidConfigError(
f"""Your Checkpoint configuration requires the "name" field. Please update your current Checkpoint
configuration to continue.
"""
)
@post_dump
def remove_keys_if_none(self, data, **kwargs):
data = deepcopy(data)
for key in self.REMOVE_KEYS_IF_NONE:
if key in data and data[key] is None:
data.pop(key)
return data
class CheckpointConfig(BaseYamlConfig):
# TODO: <Alex>ALEX (does not work yet)</Alex>
# _config_schema_class = CheckpointConfigSchema
def __init__(
self,
name: Optional[str] = None,
config_version: Optional[Union[int, float]] = None,
template_name: Optional[str] = None,
module_name: Optional[str] = None,
class_name: Optional[str] = None,
run_name_template: Optional[str] = None,
expectation_suite_name: Optional[str] = None,
batch_request: Optional[dict] = None,
action_list: Optional[List[dict]] = None,
evaluation_parameters: Optional[dict] = None,
runtime_configuration: Optional[dict] = None,
validations: Optional[List[dict]] = None,
profilers: Optional[List[dict]] = None,
validation_operator_name: Optional[str] = None,
batches: Optional[List[dict]] = None,
commented_map: Optional[CommentedMap] = None,
ge_cloud_id: Optional[str] = None,
# the following four args are used by SimpleCheckpoint
site_names: Optional[Union[list, str]] = None,
slack_webhook: Optional[str] = None,
notify_on: Optional[str] = None,
notify_with: Optional[str] = None,
expectation_suite_ge_cloud_id: Optional[str] = None,
):
self._name = name
self._config_version = config_version
if self.config_version is None:
class_name = class_name or "LegacyCheckpoint"
self.validation_operator_name = validation_operator_name
if batches is not None and isinstance(batches, list):
self.batches = batches
else:
class_name = class_name or "Checkpoint"
self._template_name = template_name
self._run_name_template = run_name_template
self._expectation_suite_name = expectation_suite_name
self._expectation_suite_ge_cloud_id = expectation_suite_ge_cloud_id
self._batch_request = batch_request
self._action_list = action_list or []
self._evaluation_parameters = evaluation_parameters or {}
self._runtime_configuration = runtime_configuration or {}
self._validations = validations or []
self._profilers = profilers or []
self._ge_cloud_id = ge_cloud_id
# the following attributes are used by SimpleCheckpoint
self._site_names = site_names
self._slack_webhook = slack_webhook
self._notify_on = notify_on
self._notify_with = notify_with
self._module_name = module_name or "great_expectations.checkpoint"
self._class_name = class_name
super().__init__(commented_map=commented_map)
def update(
self,
other_config: Optional["CheckpointConfig"] = None,
runtime_kwargs: Optional[dict] = None,
):
assert other_config is not None or runtime_kwargs is not None, (
"other_config and runtime_kwargs cannot both " "be None"
)
if other_config is not None:
# replace
if other_config.name is not None:
self.name = other_config.name
if other_config.module_name is not None:
self.module_name = other_config.module_name
if other_config.class_name is not None:
self.class_name = other_config.class_name
if other_config.run_name_template is not None:
self.run_name_template = other_config.run_name_template
if other_config.expectation_suite_name is not None:
self.expectation_suite_name = other_config.expectation_suite_name
if other_config.expectation_suite_ge_cloud_id is not None:
self.expectation_suite_ge_cloud_id = (
other_config.expectation_suite_ge_cloud_id
)
# update
if other_config.batch_request is not None:
if self.batch_request is None:
batch_request = {}
else:
batch_request = self.batch_request
other_batch_request = other_config.batch_request
updated_batch_request = nested_update(
batch_request,
other_batch_request,
)
self._batch_request = updated_batch_request
if other_config.action_list is not None:
self.action_list = self.get_updated_action_list(
base_action_list=self.action_list,
other_action_list=other_config.action_list,
)
if other_config.evaluation_parameters is not None:
nested_update(
self.evaluation_parameters,
other_config.evaluation_parameters,
)
if other_config.runtime_configuration is not None:
nested_update(
self.runtime_configuration,
other_config.runtime_configuration,
)
if other_config.validations is not None:
self.validations.extend(
filter(
lambda v: v not in self.validations, other_config.validations
)
)
if other_config.profilers is not None:
self.profilers.extend(other_config.profilers)
if runtime_kwargs is not None and any(runtime_kwargs.values()):
# replace
if runtime_kwargs.get("run_name_template") is not None:
self.run_name_template = runtime_kwargs.get("run_name_template")
if runtime_kwargs.get("expectation_suite_name") is not None:
self.expectation_suite_name = runtime_kwargs.get(
"expectation_suite_name"
)
if runtime_kwargs.get("expectation_suite_ge_cloud_id") is not None:
self.expectation_suite_ge_cloud_id = runtime_kwargs.get(
"expectation_suite_ge_cloud_id"
)
# update
if runtime_kwargs.get("batch_request") is not None:
batch_request = self.batch_request
batch_request = batch_request or {}
runtime_batch_request = runtime_kwargs.get("batch_request")
batch_request = nested_update(batch_request, runtime_batch_request)
self._batch_request = batch_request
if runtime_kwargs.get("action_list") is not None:
self.action_list = self.get_updated_action_list(
base_action_list=self.action_list,
other_action_list=runtime_kwargs.get("action_list"),
)
if runtime_kwargs.get("evaluation_parameters") is not None:
nested_update(
self.evaluation_parameters,
runtime_kwargs.get("evaluation_parameters"),
)
if runtime_kwargs.get("runtime_configuration") is not None:
nested_update(
self.runtime_configuration,
runtime_kwargs.get("runtime_configuration"),
)
if runtime_kwargs.get("validations") is not None:
self.validations.extend(
filter(
lambda v: v not in self.validations,
runtime_kwargs.get("validations"),
)
)
if runtime_kwargs.get("profilers") is not None:
self.profilers.extend(runtime_kwargs.get("profilers"))
# TODO: <Alex>ALEX (we still need the next two properties)</Alex>
@classmethod
def get_config_class(cls):
return cls # CheckpointConfig
@classmethod
def get_schema_class(cls):
return CheckpointConfigSchema
@property
def ge_cloud_id(self):
return self._ge_cloud_id
@ge_cloud_id.setter
def ge_cloud_id(self, value: str):
self._ge_cloud_id = value
@property
def expectation_suite_ge_cloud_id(self):
return self._expectation_suite_ge_cloud_id
@expectation_suite_ge_cloud_id.setter
def expectation_suite_ge_cloud_id(self, value: str):
self._expectation_suite_ge_cloud_id = value
@property
def name(self):
return self._name
@name.setter
def name(self, value: str):
self._name = value
@property
def template_name(self):
return self._template_name
@template_name.setter
def template_name(self, value: str):
self._template_name = value
@property
def config_version(self):
return self._config_version
@property
def validations(self):
return self._validations
@property
def profilers(self):
return self._profilers
@property
def module_name(self):
return self._module_name
@module_name.setter
def module_name(self, value: str):
self._module_name = value
@property
def class_name(self):
return self._class_name
@class_name.setter
def class_name(self, value: str):
self._class_name = value
@property
def run_name_template(self):
return self._run_name_template
@run_name_template.setter
def run_name_template(self, value: str):
self._run_name_template = value
@property
def batch_request(self):
return self._batch_request
@batch_request.setter
def batch_request(self, value: dict):
self._batch_request = value
@property
def expectation_suite_name(self):
return self._expectation_suite_name
@expectation_suite_name.setter
def expectation_suite_name(self, value: str):
self._expectation_suite_name = value
@property
def action_list(self):
return self._action_list
@action_list.setter
def action_list(self, value: List[dict]):
self._action_list = value
@property
def site_names(self):
return self._site_names
@property
def slack_webhook(self):
return self._slack_webhook
@property
def notify_on(self):
return self._notify_on
@property
def notify_with(self):
return self._notify_with
@classmethod
def get_updated_action_list(
cls,
base_action_list: list,
other_action_list: list,
) -> List[dict]:
base_action_list_dict = {action["name"]: action for action in base_action_list}
for other_action in other_action_list:
other_action_name = other_action["name"]
if other_action_name in base_action_list_dict:
if other_action["action"] is None:
base_action_list_dict.pop(other_action_name)
else:
nested_update(
base_action_list_dict[other_action_name],
other_action,
dedup=True,
)
else:
base_action_list_dict[other_action_name] = other_action
return list(base_action_list_dict.values())
@property
def evaluation_parameters(self):
return self._evaluation_parameters
@property
def runtime_configuration(self):
return self._runtime_configuration
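# Illustrative sketch of the merge semantics of get_updated_action_list() above:
# actions are keyed by "name"; an override whose "action" is None removes the
# matching base entry, while any other override is nested-updated into it.
def _example_updated_action_list():
    base = [
        {
            "name": "store_validation_result",
            "action": {"class_name": "StoreValidationResultAction"},
        },
        {"name": "update_data_docs", "action": {"class_name": "UpdateDataDocsAction"}},
    ]
    override = [{"name": "update_data_docs", "action": None}]
    # Only the store_validation_result entry survives the merge.
    return CheckpointConfig.get_updated_action_list(
        base_action_list=base, other_action_list=override
    )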
class CheckpointValidationConfig(DictDot):
pass
class CheckpointValidationConfigSchema(Schema):
pass
dataContextConfigSchema = DataContextConfigSchema()
datasourceConfigSchema = DatasourceConfigSchema()
dataConnectorConfigSchema = DataConnectorConfigSchema()
assetConfigSchema = AssetConfigSchema()
sorterConfigSchema = SorterConfigSchema()
anonymizedUsageStatisticsSchema = AnonymizedUsageStatisticsConfigSchema()
notebookConfigSchema = NotebookConfigSchema()
checkpointConfigSchema = CheckpointConfigSchema()
concurrencyConfigSchema = ConcurrencyConfigSchema()
"""
This is an end to end release test automation script used to kick off periodic
release tests, running on Anyscale.
The tool leverages app configs and compute templates.
Calling this script will run a single release test.
Example:
python e2e.py --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The following steps are then performed:
1. It will look up the test tune_small in the file xgboost_tests.yaml
2. It will fetch the specified app config and compute template and register
those with anyscale (if they don’t exist yet)
3. It waits until the app config is built
4. It then kicks off the script defined in the run block
5. When the script is finished, it will fetch the latest logs, the full log
output, and any artifacts specified in the artifacts block.
6. The full logs and artifacts will be stored in an S3 bucket
7. It will also fetch the json file specified in the run block as results.
This is the file where you should write your metrics to.
8. All results are then stored in a database.
Specifically it will store the following fields:
- Timestamp
- Test name
- Status (finished, error, timeout, invalid)
- Last logs (50 lines)
- results (see above)
- artifacts (links to s3 files)
Then the script exits. If an error occurs at any time, a fail result is
written to the database.
Writing a new release test
--------------------------
Each release test requires the following:
1. It has to be added in a release test yaml file, describing meta information
about the test (e.g. name, command to run, timeout)
2. You need an app config yaml
3. You need a compute template yaml
4. You need to define a command to run. This is usually a python script.
The command should accept (or ignore) a single optional
`--smoke-test` argument.
Usually the command should write its result metrics to a json file.
The json filename is available in the TEST_OUTPUT_JSON env variable.
5. Add your test in release/.buildkite/build_pipeline.py.
The script will have access to these environment variables:
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto")
"TEST_OUTPUT_JSON": results_json_filename
"IS_SMOKE_TEST": "1" if smoke_test else "0"
For an example, take a look at the XGBoost test suite:
https://github.com/ray-project/ray/blob/master/release/xgboost_tests/xgboost_tests.yaml
These all use the same app configs and similar compute templates. This means
that app configs can be re-used across runs and only have to be built once.
App configs and compute templates can interpret environment variables.
A notable one is the `RAY_WHEELS` variable which points to the wheels that
should be tested (e.g. latest master wheels). You might want to include
something like this in your `post_build_cmds`:
- pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }}
If you want to force rebuilds, consider using something like
- echo {{ env["TIMESTAMP"] }}
so that your app config changes each time the script is executed. If you
only want to trigger rebuilds once per day, use `DATESTAMP` instead:
- echo {{ env["DATESTAMP"] }}
Local testing
-------------
For local testing, make sure to authenticate with the ray-ossci AWS user
(e.g. by setting the respective environment variables obtained from go/aws),
or use the `--no-report` command line argument.
Also make sure to set these environment variables:
- ANYSCALE_CLI_TOKEN (should contain your anyscale credential token)
- ANYSCALE_PROJECT (should point to a project ID you have access to)
A test can then be run like this:
python e2e.py --no-report --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The `--no-report` option disables storing the results in the DB and
artifacts on S3. If you set this option, you do not need access to the
ray-ossci AWS user.
Using Compilation on Product + App Config Override
--------------------------------------------------
For quick iteration when debugging a release test, go/compile-on-product allows
you to easily modify and recompile Ray, such that the recompilation happens
within an app build step and can benefit from a warm Bazel cache. See
go/compile-on-product for more information.
After kicking off the app build, you can give the app config ID to this script
as an app config override, where the indicated app config will be used instead
of the app config given in the test config. E.g., running
python e2e.py --no-report --test-config ~/ray/benchmarks/benchmark_tests.yaml --test-name=single_node --app-config-id-override=apt_TBngEXXXrhipMXgexVcrpC9i
would run the single_node benchmark test with the apt_TBngEXXXrhipMXgexVcrpC9i
app config instead of the app config given in
~/ray/benchmarks/benchmark_tests.yaml. If the build for the app config is still
in progress, the script will wait until it completes, same as for a locally
defined app config.
Running on Head Node vs Running with Anyscale Connect
-----------------------------------------------------
By default release tests run their drivers on the head node. Support is being
added to run release tests that execute the driver as a subprocess and run
the workload on Anyscale product via Anyscale connect.
Note that when the driver in the test is a subprocess of releaser, releaser
cannot be terminated before the test finishes.
Other known feature gaps when running with Anyscale connect:
- Kicking off a test or checking progress is not supported.
- Downloading / uploading logs and artifacts are unsupported.
- Logs from remote may not have finished streaming before the driver exits.
Long running tests
------------------
Long running tests can be kicked off by adding the --kick-off-only
parameter to the e2e script. The status can then be checked with the
--check command.
Long running test sessions will be terminated after `timeout` seconds, after
which the latest result in the TEST_OUTPUT_JSON will be reported. Thus,
long running release tests should update this file periodically.
There are also two config options to configure behavior. The `time_key` is
needed to track the latest update of the TEST_OUTPUT_JSON and should contain
a floating point number (usually `time.time()`). The `max_update_delay` then
specifies the maximum time in seconds that may pass without an update
to the results json. If the output file hasn't been updated in e.g. 60 seconds,
this could indicate that the command is stale/frozen, and thus should fail.
Release test yaml example
-------------------------
- name: example
owner:
mail: "kai@anyscale.com" # Currently not used
slack: "@tune-team" # Currentl not used
cluster:
app_config: app_config.yaml # Relative to the release test yaml
compute_template: tpl_cpu.yaml
run:
timeout: 600 # in seconds
prepare: python wait_cluster.py 4 600 # prepare cmd to run before test
script: python workloads/train.py # actual release test command
# Only needed for long running test
time_key: last_update # Key in the results json indicating current time
max_update_delay: 30 # If state hasn't been updated in 30s, terminate
# This block is optional
artifacts:
# Artifact name: location on head node
- detailed_output: detailed_output.csv
# This block is optional. If present, the contents will be
# deep updated for smoke testing
smoke_test:
cluster:
compute_template: tpl_cpu_smoketest.yaml
""" # noqa: E501
import argparse
import boto3
import collections
import copy
import datetime
import hashlib
import jinja2
import json
import logging
import multiprocessing
import os
import requests
import shutil
import subprocess
import sys
import tempfile
import time
from queue import Empty
from typing import Any, Dict, Optional, Tuple, List
import yaml
import anyscale
import anyscale.conf
from anyscale.api import instantiate_api_client
from anyscale.controllers.session_controller import SessionController
from anyscale.sdk.anyscale_client.sdk import AnyscaleSDK
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def getenv_default(key: str, default: Optional[str] = None):
"""Return environment variable with default value"""
# If the environment variable is set but "", still return default
return os.environ.get(key, None) or default
GLOBAL_CONFIG = {
"ANYSCALE_USER": getenv_default("ANYSCALE_USER",
"release-automation@anyscale.com"),
"ANYSCALE_HOST": getenv_default("ANYSCALE_HOST",
"https://beta.anyscale.com"),
"ANYSCALE_CLI_TOKEN": getenv_default("ANYSCALE_CLI_TOKEN"),
"ANYSCALE_CLOUD_ID": getenv_default(
"ANYSCALE_CLOUD_ID",
"cld_4F7k8814aZzGG8TNUGPKnc"), # cld_4F7k8814aZzGG8TNUGPKnc
"ANYSCALE_PROJECT": getenv_default("ANYSCALE_PROJECT", ""),
"RAY_VERSION": getenv_default("RAY_VERSION", "2.0.0.dev0"),
"RAY_REPO": getenv_default("RAY_REPO",
"https://github.com/ray-project/ray.git"),
"RAY_BRANCH": getenv_default("RAY_BRANCH", "master"),
"RELEASE_AWS_BUCKET": getenv_default("RELEASE_AWS_BUCKET",
"ray-release-automation-results"),
"RELEASE_AWS_LOCATION": getenv_default("RELEASE_AWS_LOCATION", "dev"),
"RELEASE_AWS_DB_NAME": getenv_default("RELEASE_AWS_DB_NAME", "ray_ci"),
"RELEASE_AWS_DB_TABLE": getenv_default("RELEASE_AWS_DB_TABLE",
"release_test_result"),
"RELEASE_AWS_DB_SECRET_ARN": getenv_default(
"RELEASE_AWS_DB_SECRET_ARN",
"arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"rds-db-credentials/cluster-7RB7EYTTBK2EUC3MMTONYRBJLE/ray_ci-MQN2hh",
),
"RELEASE_AWS_DB_RESOURCE_ARN": getenv_default(
"RELEASE_AWS_DB_RESOURCE_ARN",
"arn:aws:rds:us-west-2:029272617770:cluster:ci-reporting",
),
"RELEASE_RESULTS_DIR": getenv_default("RELEASE_RESULTS_DIR",
"/tmp/ray_release_test_artifacts"),
"DATESTAMP": str(datetime.datetime.now().strftime("%Y%m%d")),
"TIMESTAMP": str(int(datetime.datetime.now().timestamp())),
"EXPIRATION_1D": str((datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")),
"EXPIRATION_2D": str((datetime.datetime.now() +
datetime.timedelta(days=2)).strftime("%Y-%m-%d")),
"EXPIRATION_3D": str((datetime.datetime.now() +
datetime.timedelta(days=3)).strftime("%Y-%m-%d")),
}
REPORT_S = 30
RETRY_MULTIPLIER = 2
def exponential_backoff_retry(f, retry_exceptions, initial_retry_delay_s,
max_retries):
retry_cnt = 0
retry_delay_s = initial_retry_delay_s
while True:
try:
return f()
except retry_exceptions as e:
retry_cnt += 1
if retry_cnt > max_retries:
raise
logger.info(f"Retry function call failed due to {e} "
f"in {retry_delay_s} seconds...")
time.sleep(retry_delay_s)
retry_delay_s *= RETRY_MULTIPLIER
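# Illustrative sketch (not used by the script): retry a flaky callable with
# exponentially growing delays. The lambda and exception type are placeholders
# for e.g. a boto3 / Anyscale SDK call and its transient error class.
def _example_exponential_backoff_retry():
    return exponential_backoff_retry(
        lambda: 42,
        retry_exceptions=(ConnectionError,),
        initial_retry_delay_s=1,
        max_retries=3,
    )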
def maybe_fetch_api_token():
if GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] is None:
logger.info(
"Missing ANYSCALE_CLI_TOKEN, retrieving from AWS secrets store")
# NOTE(simon) This should automatically retrieve
# release-automation@anyscale.com's anyscale token
GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] = boto3.client(
"secretsmanager", region_name="us-west-2"
).get_secret_value(
SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"release-automation/"
"anyscale-token20210505220406333800000001-BcUuKB")["SecretString"]
class PrepareCommandRuntimeError(RuntimeError):
pass
class ReleaseTestTimeoutError(RuntimeError):
pass
class SessionTimeoutError(ReleaseTestTimeoutError):
pass
class FileSyncTimeoutError(ReleaseTestTimeoutError):
pass
class CommandTimeoutError(ReleaseTestTimeoutError):
pass
class PrepareCommandTimeoutError(ReleaseTestTimeoutError):
pass
# e.g., App config failure.
class AppConfigBuildFailure(RuntimeError):
pass
class State:
def __init__(self, state: str, timestamp: float, data: Any):
self.state = state
self.timestamp = timestamp
self.data = data
sys.path.insert(0, anyscale.ANYSCALE_RAY_DIR)
def anyscale_project_url(project_id: str):
return f"{GLOBAL_CONFIG["ANYSCALE_HOST"]}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/?tab=session-list"
def anyscale_session_url(project_id: str, session_id: str):
return f"{GLOBAL_CONFIG["ANYSCALE_HOST"]}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/clusters/{session_id}"
def anyscale_compute_tpl_url(compute_tpl_id: str):
return f"{GLOBAL_CONFIG["ANYSCALE_HOST"]}" \
f"/o/anyscale-internal/configurations/cluster-computes" \
f"/{compute_tpl_id}"
def anyscale_app_config_build_url(build_id: str):
return f"{GLOBAL_CONFIG["ANYSCALE_HOST"]}" \
f"/o/anyscale-internal/configurations/app-config-details" \
f"/{build_id}"
def wheel_url(ray_version, git_branch, git_commit):
return f"https://s3-us-west-2.amazonaws.com/ray-wheels/" \
f"{git_branch}/{git_commit}/" \
f"ray-{ray_version}-cp37-cp37m-manylinux2014_x86_64.whl"
def wheel_exists(ray_version, git_branch, git_commit):
url = wheel_url(ray_version, git_branch, git_commit)
return requests.head(url).status_code == 200
def get_latest_commits(repo: str, branch: str = "master") -> List[str]:
cur = os.getcwd()
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
clone_cmd = [
"git",
"clone",
"--filter=tree:0",
"--no-checkout",
# "--single-branch",
# "--depth=10",
f"--branch={branch}",
repo,
tmpdir,
]
log_cmd = [
"git",
"log",
"-n",
"10",
"--pretty=format:%H",
]
subprocess.check_output(clone_cmd)
commits = subprocess.check_output(log_cmd).decode(
sys.stdout.encoding).split("\n")
os.chdir(cur)
return commits
def find_ray_wheels(repo: str, branch: str, version: str):
url = None
commits = get_latest_commits(repo, branch)
logger.info(f"Latest 10 commits for branch {branch}: {commits}")
for commit in commits:
if wheel_exists(version, branch, commit):
url = wheel_url(version, branch, commit)
os.environ["RAY_WHEELS"] = url
os.environ["RAY_COMMIT"] = commit
logger.info(
f"Found wheels URL for Ray {version}, branch {branch}: "
f"{url}")
break
return url
def populate_wheels_sanity_check(commit: Optional[str] = None):
if not commit:
cmd = ("python -c 'import ray; print("
"\"No commit sanity check available, but this is the "
"Ray wheel commit:\", ray.__commit__)'")
else:
cmd = (f"python -c 'import ray; "
f"assert ray.__commit__ == \"{commit}\", ray.__commit__'")
os.environ["RAY_WHEELS_SANITY_CHECK"] = cmd
def _check_stop(stop_event: multiprocessing.Event, timeout_type: str):
if stop_event.is_set():
if timeout_type == "prepare_command":
raise PrepareCommandTimeoutError(
"Process timed out in the prepare command stage.")
if timeout_type == "command":
raise CommandTimeoutError(
"Process timed out while running a command.")
elif timeout_type == "file_sync":
raise FileSyncTimeoutError(
"Process timed out while syncing files.")
elif timeout_type == "session":
raise SessionTimeoutError(
"Process timed out while starting a session.")
else:
assert False, "Unexpected timeout type."
def _deep_update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = _deep_update(d.get(k, {}), v)
else:
d[k] = v
return d
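# Illustrative sketch: _deep_update() merges nested mappings in place, which is
# how an optional smoke_test block overrides the base test config (see the
# "Release test yaml example" section in the module docstring).
def _example_deep_update_smoke_test():
    base = {"cluster": {"app_config": "app_config.yaml", "compute_template": "tpl_cpu.yaml"}}
    smoke = {"cluster": {"compute_template": "tpl_cpu_smoketest.yaml"}}
    # -> {"cluster": {"app_config": "app_config.yaml",
    #                 "compute_template": "tpl_cpu_smoketest.yaml"}}
    return _deep_update(base, smoke)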
def _dict_hash(dt: Dict[Any, Any]) -> str:
json_str = json.dumps(dt, sort_keys=True, ensure_ascii=True)
sha = hashlib.sha256()
sha.update(json_str.encode())
return sha.hexdigest()
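# Illustrative sketch: _dict_hash() yields a stable digest for a compute
# template dict; create_or_find_compute_template() below uses it to derive a
# deterministic, per-project template name so identical templates are re-used.
def _example_compute_template_name(project_id: str, compute_tpl: Dict[Any, Any]) -> str:
    return f"{project_id}/compute/{_dict_hash(compute_tpl)}"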
def _load_config(local_dir: str, config_file: Optional[str]) -> Optional[Dict]:
if not config_file:
return None
config_path = os.path.join(local_dir, config_file)
with open(config_path, "rt") as f:
# Todo: jinja2 render
content = f.read()
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
content = jinja2.Template(content).render(env=env)
return yaml.safe_load(content)
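# Illustrative sketch: for a single release test entry, the app config and
# compute template referenced in its "cluster" block are loaded relative to the
# directory of the test yaml and jinja-rendered against os.environ plus
# GLOBAL_CONFIG (this is how e.g. {{ env["RAY_WHEELS"] }} gets substituted).
# The argument names here are placeholders.
def _example_load_test_configs(local_dir: str, test_cfg: Dict[Any, Any]):
    app_config = _load_config(local_dir, test_cfg["cluster"]["app_config"])
    compute_tpl = _load_config(local_dir, test_cfg["cluster"]["compute_template"])
    return app_config, compute_tpl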
def has_errored(result: Dict[Any, Any]) -> bool:
return result.get("status", "invalid") != "finished"
def report_result(test_suite: str, test_name: str, status: str, last_logs: str,
results: Dict[Any, Any], artifacts: Dict[Any, Any],
category: str):
now = datetime.datetime.utcnow()
rds_data_client = boto3.client("rds-data", region_name="us-west-2")
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_TABLE"]
sql = (
f"INSERT INTO {schema} "
f"(created_on, test_suite, test_name, status, last_logs, "
f"results, artifacts, category) "
f"VALUES (:created_on, :test_suite, :test_name, :status, :last_logs, "
f":results, :artifacts, :category)")
parameters = [{
"name": "created_on",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": now.strftime("%Y-%m-%d %H:%M:%S")
},
}, {
"name": "test_suite",
"value": {
"stringValue": test_suite
}
}, {
"name": "test_name",
"value": {
"stringValue": test_name
}
}, {
"name": "status",
"value": {
"stringValue": status
}
}, {
"name": "last_logs",
"value": {
"stringValue": last_logs
}
}, {
"name": "results",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(results)
},
}, {
"name": "artifacts",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(artifacts)
},
}, {
"name": "category",
"value": {
"stringValue": category
}
}]
# Default boto3 call timeout is 45 seconds.
retry_delay_s = 64
MAX_RDS_RETRY = 3
exponential_backoff_retry(
lambda: rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
parameters=parameters,
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql),
retry_exceptions=rds_data_client.exceptions.StatementTimeoutException,
initial_retry_delay_s=retry_delay_s,
max_retries=MAX_RDS_RETRY)
logger.info("Result has been persisted to the databse")
def log_results_and_artifacts(result: Dict):
results = result.get("results", {})
if results:
msg = "Observed the following results:\n\n"
for key, val in results.items():
msg += f" {key} = {val}\n"
else:
msg = "Did not find any results."
logger.info(msg)
artifacts = result.get("artifacts", {})
if artifacts:
msg = "Saved the following artifacts:\n\n"
for key, val in artifacts.items():
msg += f" {key} = {val}\n"
else:
msg = "Did not find any artifacts."
logger.info(msg)
def _cleanup_session(sdk: AnyscaleSDK, session_id: str):
if session_id:
# Just trigger a request. No need to wait until session shutdown.
sdk.terminate_session(
session_id=session_id, terminate_session_options={})
def search_running_session(sdk: AnyscaleSDK, project_id: str,
session_name: str) -> Optional[str]:
session_id = None
logger.info(f"Looking for existing session with name {session_name}")
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(name=dict(equals=session_name)))
if len(result.results) > 0 and result.results[0].state == "Running":
logger.info("Found existing session.")
session_id = result.results[0].id
return session_id
def create_or_find_compute_template(
sdk: AnyscaleSDK,
project_id: str,
compute_tpl: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
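    """Find a compute template whose name encodes the config hash, creating
    it if necessary. Returns (compute_tpl_id, compute_tpl_name) and retries
    creation once on errors."""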
compute_tpl_id = None
compute_tpl_name = None
if compute_tpl:
# As of Anyscale 0.4.1, it is an error to use the same compute template
# name within the same organization, between different projects.
compute_tpl_name = f"{project_id}/compute/{_dict_hash(compute_tpl)}"
logger.info(f"Tests uses compute template "
f"with name {compute_tpl_name}. Looking up existing "
f"templates.")
paging_token = None
while not compute_tpl_id:
result = sdk.search_compute_templates(
dict(
project_id=project_id,
name=dict(equals=compute_tpl_name),
include_anonymous=True),
paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == compute_tpl_name:
compute_tpl_id = res.id
logger.info(
f"Template already exists with ID {compute_tpl_id}")
break
if not paging_token:
break
if not compute_tpl_id:
logger.info(f"Compute template not found. "
f"Creating with name {compute_tpl_name}.")
try:
result = sdk.create_compute_template(
dict(
name=compute_tpl_name,
project_id=project_id,
config=compute_tpl))
compute_tpl_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create compute "
f"template: {e}. Sleeping for 10 seconds and then "
f"try again once...")
time.sleep(10)
return create_or_find_compute_template(
sdk=sdk,
project_id=project_id,
compute_tpl=compute_tpl,
_repeat=False)
raise e
logger.info(f"Compute template created with ID {compute_tpl_id}")
return compute_tpl_id, compute_tpl_name
def create_or_find_app_config(
sdk: AnyscaleSDK,
project_id: str,
app_config: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
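    """Find an app config whose name encodes the config hash, creating it
    if necessary. Returns (app_config_id, app_config_name) and retries
    creation once on errors."""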
app_config_id = None
app_config_name = None
if app_config:
app_config_name = f"{project_id}-{_dict_hash(app_config)}"
logger.info(f"Test uses an app config with hash {app_config_name}. "
f"Looking up existing app configs with this name.")
paging_token = None
while not app_config_id:
result = sdk.list_app_configs(
project_id=project_id, count=50, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == app_config_name:
app_config_id = res.id
logger.info(
f"App config already exists with ID {app_config_id}")
break
if not paging_token or app_config_id:
break
if not app_config_id:
logger.info("App config not found. Creating new one.")
try:
result = sdk.create_app_config(
dict(
name=app_config_name,
project_id=project_id,
config_json=app_config))
app_config_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create app "
f"config: {e}. Sleeping for 10 seconds and then "
f"try again once...")
time.sleep(10)
return create_or_find_app_config(
sdk=sdk,
project_id=project_id,
app_config=app_config,
_repeat=False)
raise e
logger.info(f"App config created with ID {app_config_id}")
return app_config_id, app_config_name
def install_app_config_packages(app_config: Dict[Any, Any]):
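    """Export the app config's env vars into this process and pip-install
    its pip packages locally."""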
os.environ.update(app_config.get("env_vars", {}))
packages = app_config["python"]["pip_packages"]
for package in packages:
subprocess.check_output(["pip", "install", "-U", package], text=True)
def install_matching_ray():
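    """Install the Ray wheel referenced by RAY_WHEELS locally, rewriting
    the platform tag to match the current OS."""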
wheel = os.environ.get("RAY_WHEELS", None)
if not wheel:
return
assert "manylinux2014_x86_64" in wheel, wheel
if sys.platform == "darwin":
platform = "macosx_10_15_intel"
elif sys.platform == "win32":
platform = "win_amd64"
else:
platform = "manylinux2014_x86_64"
wheel = wheel.replace("manylinux2014_x86_64", platform)
subprocess.check_output(["pip", "uninstall", "-y", "ray"], text=True)
subprocess.check_output(["pip", "install", "-U", wheel], text=True)
def wait_for_build_or_raise(sdk: AnyscaleSDK,
app_config_id: Optional[str]) -> Optional[str]:
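    """Return the ID of a succeeded build for the app config, waiting for
    the latest build to finish if needed. Raises AppConfigBuildFailure if
    the build failed or ended in an unknown state."""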
if not app_config_id:
return None
# Fetch build
build_id = None
last_status = None
result = sdk.list_builds(app_config_id)
for build in sorted(result.results, key=lambda b: b.created_at):
build_id = build.id
last_status = build.status
if build.status == "failed":
continue
if build.status == "succeeded":
logger.info(f"Link to app config build: "
f"{anyscale_app_config_build_url(build_id)}")
return build_id
if last_status == "failed":
raise AppConfigBuildFailure("App config build failed.")
if not build_id:
raise AppConfigBuildFailure("No build found for app config.")
# Build found but not failed/finished yet
completed = False
start_wait = time.time()
next_report = start_wait + REPORT_S
logger.info(f"Waiting for build {build_id} to finish...")
logger.info(f"Track progress here: "
f"{anyscale_app_config_build_url(build_id)}")
while not completed:
now = time.time()
if now > next_report:
logger.info(f"... still waiting for build {build_id} to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
result = sdk.get_build(build_id)
build = result.result
if build.status == "failed":
raise AppConfigBuildFailure(
f"App config build failed. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
if build.status == "succeeded":
logger.info("Build succeeded.")
return build_id
completed = build.status not in ["in_progress", "pending"]
if completed:
raise AppConfigBuildFailure(
f"Unknown build status: {build.status}. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
time.sleep(1)
return build_id
def run_job(cluster_name: str, compute_tpl_name: str, cluster_env_name: str,
job_name: str, min_workers: str, script: str,
script_args: List[str], env_vars: Dict[str, str],
autosuspend: int) -> Tuple[int, str]:
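    """Run the test script as a local subprocess against the cluster via
    Anyscale connect, streaming and capturing its output. Returns
    (returncode, logs)."""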
# Start cluster and job
address = f"anyscale://{cluster_name}?autosuspend={autosuspend}"
logger.info(f"Starting job {job_name} with Ray address: {address}")
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
env.update(env_vars)
env["RAY_ADDRESS"] = address
env["RAY_JOB_NAME"] = job_name
env["RAY_RELEASE_MIN_WORKERS"] = str(min_workers)
proc = subprocess.Popen(
script.split(" ") + script_args,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True)
proc.stdout.reconfigure(line_buffering=True)
logs = ""
for line in proc.stdout:
logs += line
sys.stdout.write(line)
proc.wait()
return proc.returncode, logs
def create_and_wait_for_session(
sdk: AnyscaleSDK,
stop_event: multiprocessing.Event,
session_name: str,
session_options: Dict[Any, Any],
) -> str:
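    """Create and start an Anyscale session, polling until the start
    operation completes (and honoring the stop event). Returns the
    session ID."""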
# Create session
logger.info(f"Creating session {session_name}")
result = sdk.create_session(session_options)
session_id = result.result.id
# Trigger session start
logger.info(f"Starting session {session_name} ({session_id})")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result = sdk.start_session(session_id, start_session_options={})
sop_id = result.result.id
completed = result.result.completed
# Wait for session
logger.info(f"Waiting for session {session_name}...")
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
session_operation_response = sdk.get_session_operation(
sop_id, _request_timeout=30)
session_operation = session_operation_response.result
completed = session_operation.completed
_check_stop(stop_event, "session")
now = time.time()
if now > next_report:
logger.info(f"... still waiting for session {session_name} "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
return session_id
def run_session_command(sdk: AnyscaleSDK,
session_id: str,
cmd_to_run: str,
result_queue: multiprocessing.Queue,
env_vars: Dict[str, str],
state_str: str = "CMD_RUN") -> Tuple[str, int]:
full_cmd = " ".join(f"{k}={v}"
for k, v in env_vars.items()) + " " + cmd_to_run
logger.info(f"Running command in session {session_id}: \n" f"{full_cmd}")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result_queue.put(State(state_str, time.time(), None))
result = sdk.create_session_command(
dict(session_id=session_id, shell_command=full_cmd))
scd_id = result.result.id
return scd_id, result
def wait_for_session_command_to_complete(create_session_command_result,
sdk: AnyscaleSDK,
scd_id: str,
stop_event: multiprocessing.Event,
state_str: str = "CMD_RUN"):
result = create_session_command_result
completed = result.result.finished_at is not None
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
result = exponential_backoff_retry(
lambda: sdk.get_session_command(session_command_id=scd_id),
retry_exceptions=Exception,
initial_retry_delay_s=10,
max_retries=3)
completed = result.result.finished_at
if state_str == "CMD_RUN":
_check_stop(stop_event, "command")
elif state_str == "CMD_PREPARE":
_check_stop(stop_event, "prepare_command")
now = time.time()
if now > next_report:
logger.info(f"... still waiting for command to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
status_code = result.result.status_code
runtime = time.time() - start_wait
if status_code != 0:
if state_str == "CMD_RUN":
raise RuntimeError(
f"Command returned non-success status: {status_code}")
elif state_str == "CMD_PREPARE":
raise PrepareCommandRuntimeError(
f"Prepare command returned non-success status: {status_code}")
return status_code, runtime
def get_command_logs(session_controller: SessionController,
scd_id: str,
lines: int = 50):
result = exponential_backoff_retry(
lambda: session_controller.api_client.get_execution_logs_api_v2_session_commands_session_command_id_execution_logs_get( # noqa: E501
session_command_id=scd_id,
start_line=-1 * lines,
end_line=0),
retry_exceptions=Exception,
initial_retry_delay_s=10,
max_retries=3)
return result.result.lines
def get_remote_json_content(
temp_dir: str,
session_name: str,
remote_file: Optional[str],
session_controller: SessionController,
):
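    """Pull ``remote_file`` from the session into ``temp_dir`` and return
    its parsed JSON content (empty dict if no file is given)."""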
if not remote_file:
logger.warning("No remote file specified, returning empty dict")
return {}
local_target_file = os.path.join(temp_dir, ".tmp.json")
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
with open(local_target_file, "rt") as f:
return json.load(f)
def get_local_json_content(local_file: Optional[str]):
if not local_file:
logger.warning("No local file specified, returning empty dict")
return {}
with open(local_file, "rt") as f:
return json.load(f)
def pull_artifacts_and_store_in_cloud(
temp_dir: str,
logs: str,
session_name: str,
test_name: str,
artifacts: Optional[Dict[Any, Any]],
session_controller: SessionController,
):
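    """Upload the captured logs and any requested artifacts from the
    session to the release S3 bucket. Returns a mapping of artifact name
    to S3 URI."""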
output_log_file = os.path.join(temp_dir, "output.log")
with open(output_log_file, "wt") as f:
f.write(logs)
bucket = GLOBAL_CONFIG["RELEASE_AWS_BUCKET"]
location = f"{GLOBAL_CONFIG["RELEASE_AWS_LOCATION"]}" \
f"/{session_name}/{test_name}"
saved_artifacts = {}
s3_client = boto3.client("s3")
s3_client.upload_file(output_log_file, bucket, f"{location}/output.log")
saved_artifacts["output.log"] = f"s3://{bucket}/{location}/output.log"
# Download artifacts
if artifacts:
for name, remote_file in artifacts.items():
logger.info(f"Downloading artifact `{name}` from "
f"{remote_file}")
local_target_file = os.path.join(temp_dir, name)
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
# Upload artifacts to s3
s3_client.upload_file(local_target_file, bucket,
f"{location}/{name}")
saved_artifacts[name] = f"s3://{bucket}/{location}/{name}"
return saved_artifacts
def find_session_by_test_name(
sdk: AnyscaleSDK,
session_controller: SessionController,
temp_dir: str,
state_json: str,
project_id: str,
test_name: str,
) -> Optional[Tuple[str, str, Dict[Any, Any]]]:
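    """Search running sessions matching ``test_name`` and return
    (session_id, session_name, session_state) for the one whose remote
    state file belongs to this test, or None if there is none."""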
paging_token = None
while True: # Will break if paging_token is None after first search
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(
name=dict(contains=test_name),
state_filter=["Running"],
paging=dict(count=20, paging_token=paging_token)))
for session in result.results:
logger.info(f"Found sessions {session.name}")
if not session.name.startswith(test_name):
continue
try:
session_state = get_remote_json_content(
temp_dir=temp_dir,
session_name=session.name,
remote_file=state_json,
session_controller=session_controller)
except Exception as exc:
raise RuntimeError(f"Could not get remote json content "
f"for session {session.name}") from exc
if session_state.get("test_name") == test_name:
return session.id, session.name, session_state
        paging_token = result.metadata.next_paging_token
        if not paging_token:
            return None
def get_latest_running_command_id(sdk: AnyscaleSDK, session_id: str
) -> Tuple[Optional[str], Optional[bool]]:
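    """Return the latest session command ID and a success flag: None if a
    command is still running, otherwise True only if every finished
    command returned status code 0."""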
scd_id = None
paging_token = None
success = None
while not scd_id:
result = sdk.list_session_commands(
session_id=session_id, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for cmd in result.results:
if not scd_id:
scd_id = cmd.id
completed = cmd.finished_at is not None
if completed:
if success is None:
success = True
success = success and cmd.status_code == 0
if not completed:
return cmd.id, None
return scd_id, success or False
def run_test_config(
local_dir: str,
project_id: str,
test_name: str,
test_config: Dict[Any, Any],
commit_url: str,
        session_name: Optional[str] = None,
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
upload_artifacts: bool = True,
keep_results_dir: bool = False,
app_config_id_override: Optional[str] = None,
) -> Dict[Any, Any]:
"""
Returns:
Dict with the following entries:
status (str): One of [finished, error, timeout]
command_link (str): Link to command (Anyscale web UI)
last_logs (str): Last logs (excerpt) to send to owner
artifacts (dict): Dict of artifacts
Key: Name
Value: S3 URL
"""
# Todo (mid-term): Support other cluster definitions
# (not only cluster configs)
cluster_config_rel_path = test_config["cluster"].get(
"cluster_config", None)
cluster_config = _load_config(local_dir, cluster_config_rel_path)
app_config_rel_path = test_config["cluster"].get("app_config", None)
app_config = _load_config(local_dir, app_config_rel_path)
compute_tpl_rel_path = test_config["cluster"].get("compute_template", None)
compute_tpl = _load_config(local_dir, compute_tpl_rel_path)
stop_event = multiprocessing.Event()
result_queue = multiprocessing.Queue()
if not session_name:
session_name = f"{test_name}_{int(time.time())}"
temp_dir = tempfile.mkdtemp()
# Result and state files
results_json = test_config["run"].get("results", None)
if results_json is None:
results_json = "/tmp/release_test_out.json"
state_json = test_config["run"].get("state", None)
if state_json is None:
state_json = "/tmp/release_test_state.json"
env_vars = {
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto"),
"TEST_OUTPUT_JSON": results_json,
"TEST_STATE_JSON": state_json,
"IS_SMOKE_TEST": "1" if smoke_test else "0",
}
with open(os.path.join(local_dir, ".anyscale.yaml"), "wt") as f:
f.write(f"project_id: {project_id}")
os.chdir(local_dir)
# Setup interface
# Unfortunately, there currently seems to be no great way to
# transfer files with the Anyscale SDK.
# So we use the session controller instead.
sdk = AnyscaleSDK(auth_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"])
session_controller = SessionController(
api_client=instantiate_api_client(
cli_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"],
host=GLOBAL_CONFIG["ANYSCALE_HOST"],
),
anyscale_api_client=sdk.api_client,
)
timeout = test_config["run"].get("timeout", 1800)
if "RELEASE_OVERRIDE_TIMEOUT" in os.environ:
previous_timeout = timeout
timeout = int(os.environ.get("RELEASE_OVERRIDE_TIMEOUT", str(timeout)))
logger.warning(f"Release test timeout override: {timeout} "
f"(would have been {previous_timeout})")
# If a test is long running, timeout does not mean it failed
is_long_running = test_config["run"].get("long_running", False)
build_id_override = None
if test_config["run"].get("use_connect"):
autosuspend_mins = test_config["run"].get("autosuspend_mins", 5)
assert not kick_off_only, \
"Unsupported for running with Anyscale connect."
if app_config_id_override is not None:
logger.info(
"Using connect and an app config override, waiting until "
"build finishes so we can fetch the app config in order to "
"install its pip packages locally.")
build_id_override = wait_for_build_or_raise(
sdk, app_config_id_override)
response = sdk.get_cluster_environment_build(build_id_override)
app_config = response.result.config_json
install_app_config_packages(app_config)
install_matching_ray()
elif "autosuspend_mins" in test_config["run"]:
raise ValueError(
"'autosuspend_mins' is only supported if 'use_connect' is True.")
# Add information to results dict
def _update_results(results: Dict):
if "last_update" in results:
results["last_update_diff"] = time.time() - results["last_update"]
if smoke_test:
results["smoke_test"] = True
def _process_finished_command(session_controller: SessionController,
scd_id: str,
results: Optional[Dict] = None,
runtime: int = None,
commit_url: str = None,
session_url: str = None):
logger.info("Command finished successfully.")
if results_json:
results = results or get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
else:
results = {"passed": 1}
_update_results(results)
if scd_id:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
else:
logs = "No command found to fetch logs for"
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=test_config.get("artifacts", {}),
session_controller=session_controller,
)
logger.info("Fetched results and stored on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
# Add these metadata here to avoid changing SQL schema.
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
# When running the test script in client mode, the finish command is a
# completed local process.
def _process_finished_client_command(returncode: int, logs: str):
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=None,
session_controller=None,
)
logger.info("Stored results on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
if results_json:
results = get_local_json_content(local_file=results_json, )
else:
results = {
"passed": int(returncode == 0),
}
results["returncode"] = returncode
_update_results(results)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
def _run(logger):
# These values will be set as the test runs.
session_url = None
runtime = None
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
test_uses_ray_connect = test_config["run"].get("use_connect")
session_id = None
scd_id = None
try:
# First, look for running sessions
session_id = search_running_session(sdk, project_id, session_name)
compute_tpl_name = None
app_config_id = app_config_id_override
app_config_name = None
build_id = build_id_override
if not session_id:
logger.info("No session found.")
# Start session
session_options = dict(
name=session_name, project_id=project_id)
if cluster_config is not None:
logging.info("Starting session with cluster config")
cluster_config_str = json.dumps(cluster_config)
session_options["cluster_config"] = cluster_config_str
session_options["cloud_id"] = (
GLOBAL_CONFIG["ANYSCALE_CLOUD_ID"], )
session_options["uses_app_config"] = False
else:
logging.info("Starting session with app/compute config")
# Find/create compute template
compute_tpl_id, compute_tpl_name = \
create_or_find_compute_template(
sdk, project_id, compute_tpl)
logger.info(f"Link to compute template: "
f"{anyscale_compute_tpl_url(compute_tpl_id)}")
# Find/create app config
if app_config_id is None:
(
app_config_id,
app_config_name,
) = create_or_find_app_config(sdk, project_id,
app_config)
else:
logger.info(
f"Using override app config {app_config_id}")
app_config_name = sdk.get_app_config(
app_config_id).result.name
if build_id is None:
# We might have already retrieved the build ID when
# installing app config packages locally if using
# connect, so only get the build ID if it's not set.
build_id = wait_for_build_or_raise(sdk, app_config_id)
session_options["compute_template_id"] = compute_tpl_id
session_options["build_id"] = build_id
session_options["uses_app_config"] = True
# Start session
session_id = create_and_wait_for_session(
sdk=sdk,
stop_event=stop_event,
session_name=session_name,
session_options=session_options,
)
prepare_command = test_config["run"].get("prepare")
# Write test state json
test_state_file = os.path.join(local_dir, "test_state.json")
with open(test_state_file, "wt") as f:
json.dump({
"start_time": time.time(),
"test_name": test_name
}, f)
if prepare_command or not test_uses_ray_connect:
if test_uses_ray_connect:
logger.info("Found a prepare command, so pushing it "
"to the session.")
# Rsync up
logger.info("Syncing files to session...")
session_controller.push(
session_name=session_name,
source=None,
target=None,
config=None,
all_nodes=False,
)
logger.info("Syncing test state to session...")
session_controller.push(
session_name=session_name,
source=test_state_file,
target=state_json,
config=None,
all_nodes=False,
)
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
session_id=session_id)
_check_stop(stop_event, "file_sync")
# Optionally run preparation command
if prepare_command:
logger.info(
f"Running preparation command: {prepare_command}")
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=prepare_command,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_PREPARE")
_, _ = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_PREPARE")
if test_uses_ray_connect:
script_args = test_config["run"].get("args", [])
if smoke_test:
script_args += ["--smoke-test"]
min_workers = 0
for node_type in compute_tpl["worker_node_types"]:
min_workers += node_type["min_workers"]
# Build completed, use job timeout
result_queue.put(State("CMD_RUN", time.time(), None))
returncode, logs = run_job(
cluster_name=session_name,
compute_tpl_name=compute_tpl_name,
cluster_env_name=app_config_name,
job_name=session_name,
min_workers=min_workers,
script=test_config["run"]["script"],
script_args=script_args,
env_vars=env_vars,
autosuspend=autosuspend_mins)
_process_finished_client_command(returncode, logs)
return
# Run release test command
cmd_to_run = test_config["run"]["script"] + " "
args = test_config["run"].get("args", [])
if args:
cmd_to_run += " ".join(args) + " "
if smoke_test:
cmd_to_run += " --smoke-test"
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=cmd_to_run,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_RUN")
if not kick_off_only:
_, runtime = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_RUN")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
runtime=runtime,
session_url=session_url,
commit_url=commit_url)
else:
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": ""
}))
except (ReleaseTestTimeoutError, Exception) as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = logs + "; Command logs:" + get_command_logs(
session_controller, scd_id,
test_config.get("log_lines", 50))
except Exception as e2:
logger.error(e2, exc_info=True)
# Long running tests are "finished" successfully when
# timed out
if isinstance(e, ReleaseTestTimeoutError) and is_long_running:
_process_finished_command(
session_controller=session_controller, scd_id=scd_id)
else:
timeout_type = ""
runtime = None
if isinstance(e, CommandTimeoutError):
timeout_type = "timeout"
runtime = 0
elif (isinstance(e, PrepareCommandTimeoutError)
or isinstance(e, FileSyncTimeoutError)
or isinstance(e, SessionTimeoutError)
or isinstance(e, PrepareCommandRuntimeError)
or isinstance(e, AppConfigBuildFailure)):
timeout_type = "infra_timeout"
runtime = None
elif isinstance(e, RuntimeError):
timeout_type = "runtime_error"
runtime = 0
else:
timeout_type = "unknown timeout"
runtime = None
# Add these metadata here to avoid changing SQL schema.
results = {}
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END", time.time(), {
"status": timeout_type,
"last_logs": logs,
"results": results
}))
finally:
if no_terminate:
logger.warning(
"`no_terminate` is set to True, so the session will "
"*not* be terminated!")
else:
_cleanup_session(sdk, session_id)
def _check_progress(logger):
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
should_terminate = False
session_id = None
scd_id = None
try:
existing_session = find_session_by_test_name(
sdk=sdk,
session_controller=session_controller,
temp_dir=temp_dir,
state_json=state_json,
project_id=project_id,
test_name=test_name)
if existing_session is None:
logger.info(f"Found no existing session for {test_name}")
result_queue.put(
State("END", time.time(), {
"status": "nosession",
"last_logs": ""
}))
return
session_id, session_name, session_state = existing_session
logger.info(f"Found existing session for {test_name}: "
f"{session_name}")
scd_id, success = get_latest_running_command_id(
sdk=sdk, session_id=session_id)
latest_result = get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
# Fetch result json and check if it has been updated recently
result_time_key = test_config["run"].get("time_key", None)
maximum_update_delay = test_config["run"].get(
"max_update_delay", None)
if result_time_key and maximum_update_delay:
last_update = latest_result.get(result_time_key, None)
if not last_update:
result_queue.put(
State(
"END", time.time(), {
"status": "error",
"last_logs": f"Test did not store "
f"{result_time_key} in the "
f"results json."
}))
return
delay = time.time() - last_update
logger.info(f"Last update was at {last_update:.2f}. "
f"This was {delay:.2f} seconds ago "
f"(maximum allowed: {maximum_update_delay})")
if delay > maximum_update_delay:
raise RuntimeError(
f"Test did not update the results json within "
f"the last {maximum_update_delay} seconds.")
if time.time() - session_state["start_time"] > timeout:
# Long running test reached timeout
logger.info(
f"Test command reached timeout after {timeout} seconds")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
elif success:
logger.info("All commands finished.")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
else:
rest_time = timeout - time.time() + session_state["start_time"]
logger.info(f"Test command should continue running "
f"for {rest_time} seconds")
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": "Test is still running"
}))
except Exception as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
logs += f"\n{str(e)}"
except Exception as e2:
logger.error(e2, exc_info=True)
result_queue.put(
State("END", time.time(), {
"status": "error",
"last_logs": logs
}))
should_terminate = True
finally:
if should_terminate:
logger.warning("Terminating session")
_cleanup_session(sdk, session_id)
if not check_progress:
process = multiprocessing.Process(target=_run, args=(logger, ))
else:
process = multiprocessing.Process(
target=_check_progress, args=(logger, ))
build_timeout = test_config["run"].get("build_timeout", 1800)
project_url = anyscale_project_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"])
logger.info(f"Link to project: {project_url}")
msg = f"This will now run test {test_name}."
if smoke_test:
msg += " This is a smoke test."
if is_long_running:
msg += " This is a long running test."
logger.info(msg)
logger.info(f"Starting process with timeout {timeout} "
f"(build timeout {build_timeout})")
process.start()
# The timeout time will be updated after the build finished
# Build = App config + compute template build and session start
timeout_time = time.time() + build_timeout
result = {}
while process.is_alive():
try:
state: State = result_queue.get(timeout=1)
except (Empty, TimeoutError):
if time.time() > timeout_time:
stop_event.set()
logger.warning("Process timed out.")
if not is_long_running:
logger.warning("Terminating process in 10 seconds.")
time.sleep(10)
logger.warning("Terminating process now.")
process.terminate()
else:
logger.info("Process is long running. Give 2 minutes to "
"fetch result and terminate.")
start_terminate = time.time()
                while (time.time() < start_terminate + 120
                       and process.is_alive()):
time.sleep(1)
if process.is_alive():
logger.warning("Terminating forcefully now.")
process.terminate()
else:
logger.info("Long running results collected.")
break
continue
if not isinstance(state, State):
raise RuntimeError(f"Expected `State` object, got {result}")
if state.state == "CMD_PREPARE":
# Reset timeout after build finished
timeout_time = state.timestamp + timeout
if state.state == "CMD_RUN":
# Reset timeout after prepare command or build finished
timeout_time = state.timestamp + timeout
elif state.state == "END":
result = state.data
break
while not result_queue.empty():
state = result_queue.get_nowait()
result = state.data
logger.info("Final check if everything worked.")
try:
result.setdefault("status", "error (status not found)")
except (TimeoutError, Empty):
result = {"status": "timeout", "last_logs": "Test timed out."}
logger.info(f"Final results: {result}")
log_results_and_artifacts(result)
if not keep_results_dir:
logger.info(f"Removing results dir {temp_dir}")
shutil.rmtree(temp_dir)
else:
# Write results.json
with open(os.path.join(temp_dir, "results.json"), "wt") as fp:
json.dump(result, fp)
out_dir = os.path.expanduser(GLOBAL_CONFIG["RELEASE_RESULTS_DIR"])
logger.info(f"Moving results dir {temp_dir} to persistent location "
f"{out_dir}")
shutil.rmtree(out_dir, ignore_errors=True)
shutil.copytree(temp_dir, out_dir)
logger.info(f"Dir contents: {os.listdir(out_dir)}")
return result
def run_test(test_config_file: str,
test_name: str,
project_id: str,
commit_url: str,
category: str = "unspecified",
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
report: bool = True,
keep_results_dir: bool = False,
session_name: Optional[str] = None,
app_config_id_override=None) -> Dict[str, Any]:
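    """Load the test config file, resolve ``test_name`` (applying smoke
    test overrides), run it via run_test_config, and optionally report
    the outcome to the results database. Returns the report kwargs, or an
    empty dict for kickoff/no-session outcomes."""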
with open(test_config_file, "rt") as f:
test_configs = yaml.safe_load(f)
test_config_dict = {}
for test_config in test_configs:
name = test_config.pop("name")
test_config_dict[name] = test_config
if test_name not in test_config_dict:
raise ValueError(
f"Test with name `{test_name}` not found in test config file "
f"at `{test_config_file}`.")
test_config = test_config_dict[test_name]
if smoke_test and "smoke_test" in test_config:
smoke_test_config = test_config.pop("smoke_test")
test_config = _deep_update(test_config, smoke_test_config)
local_dir = os.path.dirname(test_config_file)
if "local_dir" in test_config:
# local_dir is relative to test_config_file
local_dir = os.path.join(local_dir, test_config["local_dir"])
if test_config["run"].get("use_connect"):
assert not kick_off_only, \
"--kick-off-only is unsupported when running with " \
"Anyscale connect."
assert not check_progress, \
"--check is unsupported when running with Anyscale connect."
if test_config.get("artifacts", {}):
logger.error(
"Saving artifacts are not yet supported when running with "
"Anyscale connect.")
result = run_test_config(
local_dir,
project_id,
test_name,
test_config,
commit_url,
session_name=session_name,
smoke_test=smoke_test,
no_terminate=no_terminate,
kick_off_only=kick_off_only,
check_progress=check_progress,
upload_artifacts=report,
keep_results_dir=keep_results_dir,
app_config_id_override=app_config_id_override)
status = result.get("status", "invalid")
if kick_off_only:
if status != "kickoff":
raise RuntimeError("Error kicking off test.")
logger.info("Kicked off test. It's now up to the `--check` "
"part of the script to track its process.")
return {}
else:
# `--check` or no kick off only
if status == "nosession":
logger.info(f"No running session found for test {test_name}, so "
f"assuming everything is fine.")
return {}
if status == "kickoff":
logger.info(f"Test {test_name} is still running.")
return {}
last_logs = result.get("last_logs", "No logs.")
test_suite = os.path.basename(test_config_file).replace(".yaml", "")
report_kwargs = dict(
test_suite=test_suite,
test_name=test_name,
status=status,
last_logs=last_logs,
results=result.get("results", {}),
artifacts=result.get("artifacts", {}),
category=category,
)
if report:
report_result(**report_kwargs)
else:
logger.info(f"Usually I would now report the following results:\n"
f"{report_kwargs}")
if has_errored(result):
raise RuntimeError(last_logs)
return report_kwargs
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--test-config", type=str, required=True, help="Test config file")
parser.add_argument("--test-name", type=str, help="Test name in config")
parser.add_argument(
"--ray-wheels", required=False, type=str, help="URL to ray wheels")
parser.add_argument(
"--no-terminate",
action="store_true",
default=False,
help="Don't terminate session after failure")
parser.add_argument(
"--no-report",
action="store_true",
default=False,
help="Do not report any results or upload to S3")
parser.add_argument(
"--kick-off-only",
action="store_true",
default=False,
help="Kick off only (don't wait for command to finish)")
parser.add_argument(
"--check",
action="store_true",
default=False,
help="Check (long running) status")
parser.add_argument(
"--keep-results-dir",
action="store_true",
default=False,
help="Keep results in directory (named RELEASE_RESULTS_DIR), e.g. "
"for Buildkite artifact upload.")
parser.add_argument(
"--category",
type=str,
default="unspecified",
help="Category name, e.g. `release-1.3.0` (will be saved in database)")
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--session-name",
required=False,
type=str,
help="Name of the session to run this test.")
parser.add_argument(
"--app-config-id-override",
required=False,
type=str,
help=("An app config ID, which will override the test config app "
"config."))
args, _ = parser.parse_known_args()
if not GLOBAL_CONFIG["ANYSCALE_PROJECT"]:
raise RuntimeError(
"You have to set the ANYSCALE_PROJECT environment variable!")
maybe_fetch_api_token()
if args.ray_wheels:
os.environ["RAY_WHEELS"] = str(args.ray_wheels)
url = str(args.ray_wheels)
elif not args.check and not os.environ.get("RAY_WHEELS"):
url = find_ray_wheels(
GLOBAL_CONFIG["RAY_REPO"],
GLOBAL_CONFIG["RAY_BRANCH"],
GLOBAL_CONFIG["RAY_VERSION"],
)
if not url:
raise RuntimeError(f"Could not find wheels for "
f"Ray {GLOBAL_CONFIG["RAY_VERSION"]}, "
f"branch {GLOBAL_CONFIG["RAY_BRANCH"]}")
# RAY_COMMIT is set by find_ray_wheels
elif os.environ.get("RAY_WHEELS"):
logger.info(f"Using Ray wheels provided from URL: "
f"{os.environ.get("RAY_WHEELS")}")
url = os.environ.get("RAY_WHEELS")
populate_wheels_sanity_check(os.environ.get("RAY_COMMIT", ""))
test_config_file = os.path.abspath(os.path.expanduser(args.test_config))
result_dict = run_test(
test_config_file=test_config_file,
test_name=args.test_name,
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
commit_url=url,
category=args.category,
smoke_test=args.smoke_test,
no_terminate=args.no_terminate or args.kick_off_only,
kick_off_only=args.kick_off_only,
check_progress=args.check,
report=not args.no_report,
session_name=args.session_name,
keep_results_dir=args.keep_results_dir,
app_config_id_override=args.app_config_id_override,
)
if result_dict:
# If we get a result dict, check if any alerts should be raised
from alert import SUITE_TO_FN, default_handle_result
logger.info("Checking if results are valid...")
handle_result_kwargs = result_dict.copy()
handle_result_kwargs["created_on"] = None
test_suite = handle_result_kwargs.get("test_suite", None)
test_name = handle_result_kwargs.get("test_name", None)
category = handle_result_kwargs.get("category", None)
handle_fn = SUITE_TO_FN.get(test_suite, None)
if not handle_fn:
logger.warning(f"No handle for suite {test_suite}")
alert = default_handle_result(**handle_result_kwargs)
else:
alert = handle_fn(**handle_result_kwargs)
if alert:
# If we get an alert, the test failed.
raise RuntimeError(alert)
else:
logger.info(f"No alert raised for test {test_suite}/{test_name} "
f"({category}) - the test successfully passed!")
| """
This is an end to end release test automation script used to kick off periodic
release tests, running on Anyscale.
The tool leverages app configs and compute templates.
Calling this script will run a single release test.
Example:
python e2e.py --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The following steps are then performed:
1. It will look up the test tune_small in the file xgboost_tests.yaml
2. It will fetch the specified app config and compute template and register
those with anyscale (if they don’t exist yet)
3. It waits until the app config is built
4. It then kicks off the script defined in the run block
5. When the script is finished, it will fetch the latest logs, the full log
output, and any artifacts specified in the artifacts block.
6. The full logs and artifacts will be stored in a s3 bucket
7. It will also fetch the json file specified in the run block as results.
This is the file where you should write your metrics to.
8. All results are then stored in a database.
Specifically it will store the following fields:
- Timestamp
- Test name
- Status (finished, error, timeout, invalid)
- Last logs (50 lines)
- results (see above)
- artifacts (links to s3 files)
Then the script exits. If an error occurs at any time, a fail result is
written to the database.
Writing a new release test
--------------------------
Each release test requires the following:
1. It has to be added in a release test yaml file, describing meta information
about the test (e.g. name, command to run, timeout)
2. You need an app config yaml
3. You need a compute template yaml
4. You need to define a command to run. This is usually a python script.
The command should accept (or ignore) a single optional
`--smoke-test` argument.
Usually the command should write its result metrics to a json file.
The json filename is available in the TEST_OUTPUT_JSON env variable.
5. Add your test in release/.buildkite/build_pipeline.py.
The script will have access to these environment variables:
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto")
"TEST_OUTPUT_JSON": results_json_filename
"IS_SMOKE_TEST": "1" if smoke_test else "0"
For an example, take a look at the XGBoost test suite:
https://github.com/ray-project/ray/blob/master/release/xgboost_tests/xgboost_tests.yaml
These all use the same app configs and similar compute templates. This means
that app configs can be re-used across runs and only have to be built ones.
App configs and compute templates can interpret environment variables.
A notable one is the `RAY_WHEELS` variable which points to the wheels that
should be tested (e.g. latest master wheels). You might want to include
something like this in your `post_build_cmds`:
- pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }}
If you want to force rebuilds, consider using something like
- echo {{ env["TIMESTAMP"] }}
so that your app configs changes each time the script is executed. If you
only want to trigger rebuilds once per day, use `DATESTAMP` instead:
- echo {{ env["DATESTAMP"] }}
Local testing
-------------
For local testing, make sure to authenticate with the ray-ossci AWS user
(e.g. by setting the respective environment variables obtained from go/aws),
or use the `--no-report` command line argument.
Also make sure to set these environment variables:
- ANYSCALE_CLI_TOKEN (should contain your anyscale credential token)
- ANYSCALE_PROJECT (should point to a project ID you have access to)
A test can then be run like this:
python e2e.py --no-report --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The `--no-report` option disables storing the results in the DB and
artifacts on S3. If you set this option, you do not need access to the
ray-ossci AWS user.
Using Compilation on Product + App Config Override
--------------------------------------------------
For quick iteration when debugging a release test, go/compile-on-product allows
you to easily modify and recompile Ray, such that the recompilation happens
within an app build step and can benefit from a warm Bazel cache. See
go/compile-on-product for more information.
After kicking off the app build, you can give the app config ID to this script
as an app config override, where the indicated app config will be used instead
of the app config given in the test config. E.g., running
python e2e.py --no-report --test-config ~/ray/benchmarks/benchmark_tests.yaml --test-name=single_node --app-config-id-override=apt_TBngEXXXrhipMXgexVcrpC9i
would run the single_node benchmark test with the apt_TBngEXXXrhipMXgexVcrpC9i
app config instead of the app config given in
~/ray/benchmarks/benchmark_tests.yaml. If the build for the app config is still
in progress, the script will wait until it completes, same as for a locally
defined app config.
Running on Head Node vs Running with Anyscale Connect
-----------------------------------------------------
By default release tests run their drivers on the head node. Support is being
added to run release tests that execute the driver as a subprocess and run
the workload on Anyscale product via Anyscale connect.
Note that when the driver in the test is a subprocess of releaser, releaser
cannot be terminated before the test finishes.
Other known feature gaps when running with Anyscale connect:
- Kicking off a test or checking progress is not supported.
- Downloading / uploading logs and artifacts are unsupported.
- Logs from remote may not have finished streaming, before the driver exits.
Long running tests
------------------
Long running tests can be kicked off with by adding the --kick-off-only
parameters to the e2e script. The status can then be checked with the
--check command.
Long running test sessions will be terminated after `timeout` seconds, after
which the latest result in the TEST_OUTPUT_JSON will be reported. Thus,
long running release tests should update this file periodically.
There are also two config options to configure behavior. The `time_key` is
needed to track the latest update of the TEST_OUTPUT_JSON and should contain
a floating point number (usually `time.time()`). The `max_update_delay` then
specified the maximum time in seconds that can be passed without an update
to the results json. If the output file hasn't been updated in e.g. 60 seconds,
this could indicate that the command is stale/frozen, and thus should fail.
Release test yaml example
-------------------------
- name: example
owner:
mail: "kai@anyscale.com" # Currently not used
slack: "@tune-team" # Currentl not used
cluster:
app_config: app_config.yaml # Relative to the release test yaml
compute_template: tpl_cpu.yaml
run:
timeout: 600 # in seconds
prepare: python wait_cluster.py 4 600 # prepare cmd to run before test
script: python workloads/train.py # actual release test command
# Only needed for long running test
time_key: last_update # Key in the results json indicating current time
max_update_delay: 30 # If state hasn't been updated in 30s, terminate
# This block is optional
artifacts:
# Artifact name: location on head node
- detailed_output: detailed_output.csv
# This block is optional. If present, the contents will be
# deep updated for smoke testing
smoke_test:
cluster:
compute_template: tpl_cpu_smoketest.yaml
""" # noqa: E501
import argparse
import boto3
import collections
import copy
import datetime
import hashlib
import jinja2
import json
import logging
import multiprocessing
import os
import requests
import shutil
import subprocess
import sys
import tempfile
import time
from queue import Empty
from typing import Any, Dict, Optional, Tuple, List
import yaml
import anyscale
import anyscale.conf
from anyscale.api import instantiate_api_client
from anyscale.controllers.session_controller import SessionController
from anyscale.sdk.anyscale_client.sdk import AnyscaleSDK
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def getenv_default(key: str, default: Optional[str] = None):
"""Return environment variable with default value"""
# If the environment variable is set but "", still return default
return os.environ.get(key, None) or default
GLOBAL_CONFIG = {
"ANYSCALE_USER": getenv_default("ANYSCALE_USER",
"release-automation@anyscale.com"),
"ANYSCALE_HOST": getenv_default("ANYSCALE_HOST",
"https://beta.anyscale.com"),
"ANYSCALE_CLI_TOKEN": getenv_default("ANYSCALE_CLI_TOKEN"),
"ANYSCALE_CLOUD_ID": getenv_default(
"ANYSCALE_CLOUD_ID",
"cld_4F7k8814aZzGG8TNUGPKnc"), # cld_4F7k8814aZzGG8TNUGPKnc
"ANYSCALE_PROJECT": getenv_default("ANYSCALE_PROJECT", ""),
"RAY_VERSION": getenv_default("RAY_VERSION", "2.0.0.dev0"),
"RAY_REPO": getenv_default("RAY_REPO",
"https://github.com/ray-project/ray.git"),
"RAY_BRANCH": getenv_default("RAY_BRANCH", "master"),
"RELEASE_AWS_BUCKET": getenv_default("RELEASE_AWS_BUCKET",
"ray-release-automation-results"),
"RELEASE_AWS_LOCATION": getenv_default("RELEASE_AWS_LOCATION", "dev"),
"RELEASE_AWS_DB_NAME": getenv_default("RELEASE_AWS_DB_NAME", "ray_ci"),
"RELEASE_AWS_DB_TABLE": getenv_default("RELEASE_AWS_DB_TABLE",
"release_test_result"),
"RELEASE_AWS_DB_SECRET_ARN": getenv_default(
"RELEASE_AWS_DB_SECRET_ARN",
"arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"rds-db-credentials/cluster-7RB7EYTTBK2EUC3MMTONYRBJLE/ray_ci-MQN2hh",
),
"RELEASE_AWS_DB_RESOURCE_ARN": getenv_default(
"RELEASE_AWS_DB_RESOURCE_ARN",
"arn:aws:rds:us-west-2:029272617770:cluster:ci-reporting",
),
"RELEASE_RESULTS_DIR": getenv_default("RELEASE_RESULTS_DIR",
"/tmp/ray_release_test_artifacts"),
"DATESTAMP": str(datetime.datetime.now().strftime("%Y%m%d")),
"TIMESTAMP": str(int(datetime.datetime.now().timestamp())),
"EXPIRATION_1D": str((datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")),
"EXPIRATION_2D": str((datetime.datetime.now() +
datetime.timedelta(days=2)).strftime("%Y-%m-%d")),
"EXPIRATION_3D": str((datetime.datetime.now() +
datetime.timedelta(days=3)).strftime("%Y-%m-%d")),
}
REPORT_S = 30
RETRY_MULTIPLIER = 2
def exponential_backoff_retry(f, retry_exceptions, initial_retry_delay_s,
max_retries):
retry_cnt = 0
retry_delay_s = initial_retry_delay_s
while True:
try:
return f()
except retry_exceptions as e:
retry_cnt += 1
if retry_cnt > max_retries:
raise
logger.info(f"Retry function call failed due to {e} "
f"in {retry_delay_s} seconds...")
time.sleep(retry_delay_s)
retry_delay_s *= RETRY_MULTIPLIER
def maybe_fetch_api_token():
if GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] is None:
logger.info(
"Missing ANYSCALE_CLI_TOKEN, retrieving from AWS secrets store")
# NOTE(simon) This should automatically retrieve
# release-automation@anyscale.com's anyscale token
GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] = boto3.client(
"secretsmanager", region_name="us-west-2"
).get_secret_value(
SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"release-automation/"
"anyscale-token20210505220406333800000001-BcUuKB")["SecretString"]
class PrepareCommandRuntimeError(RuntimeError):
pass
class ReleaseTestTimeoutError(RuntimeError):
pass
class SessionTimeoutError(ReleaseTestTimeoutError):
pass
class FileSyncTimeoutError(ReleaseTestTimeoutError):
pass
class CommandTimeoutError(ReleaseTestTimeoutError):
pass
class PrepareCommandTimeoutError(ReleaseTestTimeoutError):
pass
# e.g., App config failure.
class AppConfigBuildFailure(RuntimeError):
pass
class State:
def __init__(self, state: str, timestamp: float, data: Any):
self.state = state
self.timestamp = timestamp
self.data = data
sys.path.insert(0, anyscale.ANYSCALE_RAY_DIR)
def anyscale_project_url(project_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/?tab=session-list"
def anyscale_session_url(project_id: str, session_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/clusters/{session_id}"
def anyscale_compute_tpl_url(compute_tpl_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/configurations/cluster-computes" \
f"/{compute_tpl_id}"
def anyscale_app_config_build_url(build_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/configurations/app-config-details" \
f"/{build_id}"
def wheel_url(ray_version, git_branch, git_commit):
return f"https://s3-us-west-2.amazonaws.com/ray-wheels/" \
f"{git_branch}/{git_commit}/" \
f"ray-{ray_version}-cp37-cp37m-manylinux2014_x86_64.whl"
def wheel_exists(ray_version, git_branch, git_commit):
url = wheel_url(ray_version, git_branch, git_commit)
return requests.head(url).status_code == 200
def get_latest_commits(repo: str, branch: str = "master") -> List[str]:
cur = os.getcwd()
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
clone_cmd = [
"git",
"clone",
"--filter=tree:0",
"--no-checkout",
# "--single-branch",
# "--depth=10",
f"--branch={branch}",
repo,
tmpdir,
]
log_cmd = [
"git",
"log",
"-n",
"10",
"--pretty=format:%H",
]
subprocess.check_output(clone_cmd)
commits = subprocess.check_output(log_cmd).decode(
sys.stdout.encoding).split("\n")
os.chdir(cur)
return commits
def find_ray_wheels(repo: str, branch: str, version: str):
url = None
commits = get_latest_commits(repo, branch)
logger.info(f"Latest 10 commits for branch {branch}: {commits}")
for commit in commits:
if wheel_exists(version, branch, commit):
url = wheel_url(version, branch, commit)
os.environ["RAY_WHEELS"] = url
os.environ["RAY_COMMIT"] = commit
logger.info(
f"Found wheels URL for Ray {version}, branch {branch}: "
f"{url}")
break
return url
def populate_wheels_sanity_check(commit: Optional[str] = None):
if not commit:
cmd = ("python -c 'import ray; print("
"\"No commit sanity check available, but this is the "
"Ray wheel commit:\", ray.__commit__)'")
else:
cmd = (f"python -c 'import ray; "
f"assert ray.__commit__ == \"{commit}\", ray.__commit__'")
os.environ["RAY_WHEELS_SANITY_CHECK"] = cmd
def _check_stop(stop_event: multiprocessing.Event, timeout_type: str):
if stop_event.is_set():
if timeout_type == "prepare_command":
raise PrepareCommandTimeoutError(
"Process timed out in the prepare command stage.")
if timeout_type == "command":
raise CommandTimeoutError(
"Process timed out while running a command.")
elif timeout_type == "file_sync":
raise FileSyncTimeoutError(
"Process timed out while syncing files.")
elif timeout_type == "session":
raise SessionTimeoutError(
"Process timed out while starting a session.")
else:
assert False, "Unexpected timeout type."
def _deep_update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = _deep_update(d.get(k, {}), v)
else:
d[k] = v
return d
def _dict_hash(dt: Dict[Any, Any]) -> str:
json_str = json.dumps(dt, sort_keys=True, ensure_ascii=True)
sha = hashlib.sha256()
sha.update(json_str.encode())
return sha.hexdigest()
def _load_config(local_dir: str, config_file: Optional[str]) -> Optional[Dict]:
if not config_file:
return None
config_path = os.path.join(local_dir, config_file)
with open(config_path, "rt") as f:
# Todo: jinja2 render
content = f.read()
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
content = jinja2.Template(content).render(env=env)
return yaml.safe_load(content)
def has_errored(result: Dict[Any, Any]) -> bool:
return result.get("status", "invalid") != "finished"
def report_result(test_suite: str, test_name: str, status: str, last_logs: str,
results: Dict[Any, Any], artifacts: Dict[Any, Any],
category: str):
now = datetime.datetime.utcnow()
rds_data_client = boto3.client("rds-data", region_name="us-west-2")
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_TABLE"]
sql = (
f"INSERT INTO {schema} "
f"(created_on, test_suite, test_name, status, last_logs, "
f"results, artifacts, category) "
f"VALUES (:created_on, :test_suite, :test_name, :status, :last_logs, "
f":results, :artifacts, :category)")
parameters = [{
"name": "created_on",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": now.strftime("%Y-%m-%d %H:%M:%S")
},
}, {
"name": "test_suite",
"value": {
"stringValue": test_suite
}
}, {
"name": "test_name",
"value": {
"stringValue": test_name
}
}, {
"name": "status",
"value": {
"stringValue": status
}
}, {
"name": "last_logs",
"value": {
"stringValue": last_logs
}
}, {
"name": "results",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(results)
},
}, {
"name": "artifacts",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(artifacts)
},
}, {
"name": "category",
"value": {
"stringValue": category
}
}]
# Default boto3 call timeout is 45 seconds.
retry_delay_s = 64
MAX_RDS_RETRY = 3
exponential_backoff_retry(
lambda: rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
parameters=parameters,
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql),
retry_exceptions=rds_data_client.exceptions.StatementTimeoutException,
initial_retry_delay_s=retry_delay_s,
max_retries=MAX_RDS_RETRY)
logger.info("Result has been persisted to the databse")
def log_results_and_artifacts(result: Dict):
results = result.get("results", {})
if results:
msg = "Observed the following results:\n\n"
for key, val in results.items():
msg += f" {key} = {val}\n"
else:
msg = "Did not find any results."
logger.info(msg)
artifacts = result.get("artifacts", {})
if artifacts:
msg = "Saved the following artifacts:\n\n"
for key, val in artifacts.items():
msg += f" {key} = {val}\n"
else:
msg = "Did not find any artifacts."
logger.info(msg)
def _cleanup_session(sdk: AnyscaleSDK, session_id: str):
if session_id:
# Just trigger a request. No need to wait until session shutdown.
sdk.terminate_session(
session_id=session_id, terminate_session_options={})
def search_running_session(sdk: AnyscaleSDK, project_id: str,
session_name: str) -> Optional[str]:
session_id = None
logger.info(f"Looking for existing session with name {session_name}")
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(name=dict(equals=session_name)))
if len(result.results) > 0 and result.results[0].state == "Running":
logger.info("Found existing session.")
session_id = result.results[0].id
return session_id
def create_or_find_compute_template(
sdk: AnyscaleSDK,
project_id: str,
compute_tpl: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
compute_tpl_id = None
compute_tpl_name = None
if compute_tpl:
# As of Anyscale 0.4.1, it is an error to use the same compute template
# name within the same organization, between different projects.
compute_tpl_name = f"{project_id}/compute/{_dict_hash(compute_tpl)}"
logger.info(f"Tests uses compute template "
f"with name {compute_tpl_name}. Looking up existing "
f"templates.")
paging_token = None
while not compute_tpl_id:
result = sdk.search_compute_templates(
dict(
project_id=project_id,
name=dict(equals=compute_tpl_name),
include_anonymous=True),
paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == compute_tpl_name:
compute_tpl_id = res.id
logger.info(
f"Template already exists with ID {compute_tpl_id}")
break
if not paging_token:
break
if not compute_tpl_id:
logger.info(f"Compute template not found. "
f"Creating with name {compute_tpl_name}.")
try:
result = sdk.create_compute_template(
dict(
name=compute_tpl_name,
project_id=project_id,
config=compute_tpl))
compute_tpl_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create compute "
f"template: {e}. Sleeping for 10 seconds and then "
f"try again once...")
time.sleep(10)
return create_or_find_compute_template(
sdk=sdk,
project_id=project_id,
compute_tpl=compute_tpl,
_repeat=False)
raise e
logger.info(f"Compute template created with ID {compute_tpl_id}")
return compute_tpl_id, compute_tpl_name
def create_or_find_app_config(
sdk: AnyscaleSDK,
project_id: str,
app_config: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
app_config_id = None
app_config_name = None
if app_config:
app_config_name = f"{project_id}-{_dict_hash(app_config)}"
logger.info(f"Test uses an app config with hash {app_config_name}. "
f"Looking up existing app configs with this name.")
paging_token = None
while not app_config_id:
result = sdk.list_app_configs(
project_id=project_id, count=50, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == app_config_name:
app_config_id = res.id
logger.info(
f"App config already exists with ID {app_config_id}")
break
if not paging_token or app_config_id:
break
if not app_config_id:
logger.info("App config not found. Creating new one.")
try:
result = sdk.create_app_config(
dict(
name=app_config_name,
project_id=project_id,
config_json=app_config))
app_config_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create app "
f"config: {e}. Sleeping for 10 seconds and then "
f"try again once...")
time.sleep(10)
return create_or_find_app_config(
sdk=sdk,
project_id=project_id,
app_config=app_config,
_repeat=False)
raise e
logger.info(f"App config created with ID {app_config_id}")
return app_config_id, app_config_name
def install_app_config_packages(app_config: Dict[Any, Any]):
os.environ.update(app_config.get("env_vars", {}))
packages = app_config["python"]["pip_packages"]
for package in packages:
subprocess.check_output(["pip", "install", "-U", package], text=True)
def install_matching_ray():
wheel = os.environ.get("RAY_WHEELS", None)
if not wheel:
return
assert "manylinux2014_x86_64" in wheel, wheel
if sys.platform == "darwin":
platform = "macosx_10_15_intel"
elif sys.platform == "win32":
platform = "win_amd64"
else:
platform = "manylinux2014_x86_64"
wheel = wheel.replace("manylinux2014_x86_64", platform)
subprocess.check_output(["pip", "uninstall", "-y", "ray"], text=True)
subprocess.check_output(["pip", "install", "-U", wheel], text=True)
def wait_for_build_or_raise(sdk: AnyscaleSDK,
app_config_id: Optional[str]) -> Optional[str]:
if not app_config_id:
return None
# Fetch build
build_id = None
last_status = None
result = sdk.list_builds(app_config_id)
for build in sorted(result.results, key=lambda b: b.created_at):
build_id = build.id
last_status = build.status
if build.status == "failed":
continue
if build.status == "succeeded":
logger.info(f"Link to app config build: "
f"{anyscale_app_config_build_url(build_id)}")
return build_id
if last_status == "failed":
raise AppConfigBuildFailure("App config build failed.")
if not build_id:
raise AppConfigBuildFailure("No build found for app config.")
# Build found but not failed/finished yet
completed = False
start_wait = time.time()
next_report = start_wait + REPORT_S
logger.info(f"Waiting for build {build_id} to finish...")
logger.info(f"Track progress here: "
f"{anyscale_app_config_build_url(build_id)}")
while not completed:
now = time.time()
if now > next_report:
logger.info(f"... still waiting for build {build_id} to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
result = sdk.get_build(build_id)
build = result.result
if build.status == "failed":
raise AppConfigBuildFailure(
f"App config build failed. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
if build.status == "succeeded":
logger.info("Build succeeded.")
return build_id
completed = build.status not in ["in_progress", "pending"]
if completed:
raise AppConfigBuildFailure(
f"Unknown build status: {build.status}. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
time.sleep(1)
return build_id
def run_job(cluster_name: str, compute_tpl_name: str, cluster_env_name: str,
job_name: str, min_workers: str, script: str,
script_args: List[str], env_vars: Dict[str, str],
autosuspend: int) -> Tuple[int, str]:
# Start cluster and job
address = f"anyscale://{cluster_name}?autosuspend={autosuspend}"
logger.info(f"Starting job {job_name} with Ray address: {address}")
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
env.update(env_vars)
env["RAY_ADDRESS"] = address
env["RAY_JOB_NAME"] = job_name
env["RAY_RELEASE_MIN_WORKERS"] = str(min_workers)
proc = subprocess.Popen(
script.split(" ") + script_args,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True)
proc.stdout.reconfigure(line_buffering=True)
logs = ""
for line in proc.stdout:
logs += line
sys.stdout.write(line)
proc.wait()
return proc.returncode, logs
def create_and_wait_for_session(
sdk: AnyscaleSDK,
stop_event: multiprocessing.Event,
session_name: str,
session_options: Dict[Any, Any],
) -> str:
# Create session
logger.info(f"Creating session {session_name}")
result = sdk.create_session(session_options)
session_id = result.result.id
# Trigger session start
logger.info(f"Starting session {session_name} ({session_id})")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result = sdk.start_session(session_id, start_session_options={})
sop_id = result.result.id
completed = result.result.completed
# Wait for session
logger.info(f"Waiting for session {session_name}...")
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
session_operation_response = sdk.get_session_operation(
sop_id, _request_timeout=30)
session_operation = session_operation_response.result
completed = session_operation.completed
_check_stop(stop_event, "session")
now = time.time()
if now > next_report:
logger.info(f"... still waiting for session {session_name} "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
return session_id
def run_session_command(sdk: AnyscaleSDK,
session_id: str,
cmd_to_run: str,
result_queue: multiprocessing.Queue,
env_vars: Dict[str, str],
state_str: str = "CMD_RUN") -> Tuple[str, int]:
full_cmd = " ".join(f"{k}={v}"
for k, v in env_vars.items()) + " " + cmd_to_run
logger.info(f"Running command in session {session_id}: \n" f"{full_cmd}")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result_queue.put(State(state_str, time.time(), None))
result = sdk.create_session_command(
dict(session_id=session_id, shell_command=full_cmd))
scd_id = result.result.id
return scd_id, result
def wait_for_session_command_to_complete(create_session_command_result,
sdk: AnyscaleSDK,
scd_id: str,
stop_event: multiprocessing.Event,
state_str: str = "CMD_RUN"):
result = create_session_command_result
completed = result.result.finished_at is not None
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
result = exponential_backoff_retry(
lambda: sdk.get_session_command(session_command_id=scd_id),
retry_exceptions=Exception,
initial_retry_delay_s=10,
max_retries=3)
completed = result.result.finished_at
if state_str == "CMD_RUN":
_check_stop(stop_event, "command")
elif state_str == "CMD_PREPARE":
_check_stop(stop_event, "prepare_command")
now = time.time()
if now > next_report:
logger.info(f"... still waiting for command to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
status_code = result.result.status_code
runtime = time.time() - start_wait
if status_code != 0:
if state_str == "CMD_RUN":
raise RuntimeError(
f"Command returned non-success status: {status_code}")
elif state_str == "CMD_PREPARE":
raise PrepareCommandRuntimeError(
f"Prepare command returned non-success status: {status_code}")
return status_code, runtime
def get_command_logs(session_controller: SessionController,
scd_id: str,
lines: int = 50):
result = exponential_backoff_retry(
lambda: session_controller.api_client.get_execution_logs_api_v2_session_commands_session_command_id_execution_logs_get( # noqa: E501
session_command_id=scd_id,
start_line=-1 * lines,
end_line=0),
retry_exceptions=Exception,
initial_retry_delay_s=10,
max_retries=3)
return result.result.lines
def get_remote_json_content(
temp_dir: str,
session_name: str,
remote_file: Optional[str],
session_controller: SessionController,
):
if not remote_file:
logger.warning("No remote file specified, returning empty dict")
return {}
local_target_file = os.path.join(temp_dir, ".tmp.json")
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
with open(local_target_file, "rt") as f:
return json.load(f)
def get_local_json_content(local_file: Optional[str], ):
if not local_file:
logger.warning("No local file specified, returning empty dict")
return {}
with open(local_file, "rt") as f:
return json.load(f)
def pull_artifacts_and_store_in_cloud(
temp_dir: str,
logs: str,
session_name: str,
test_name: str,
artifacts: Optional[Dict[Any, Any]],
session_controller: SessionController,
):
output_log_file = os.path.join(temp_dir, "output.log")
with open(output_log_file, "wt") as f:
f.write(logs)
bucket = GLOBAL_CONFIG["RELEASE_AWS_BUCKET"]
location = f"{GLOBAL_CONFIG['RELEASE_AWS_LOCATION']}" \
f"/{session_name}/{test_name}"
saved_artifacts = {}
s3_client = boto3.client("s3")
s3_client.upload_file(output_log_file, bucket, f"{location}/output.log")
saved_artifacts["output.log"] = f"s3://{bucket}/{location}/output.log"
# Download artifacts
if artifacts:
for name, remote_file in artifacts.items():
logger.info(f"Downloading artifact `{name}` from "
f"{remote_file}")
local_target_file = os.path.join(temp_dir, name)
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
# Upload artifacts to s3
s3_client.upload_file(local_target_file, bucket,
f"{location}/{name}")
saved_artifacts[name] = f"s3://{bucket}/{location}/{name}"
return saved_artifacts
def find_session_by_test_name(
sdk: AnyscaleSDK,
session_controller: SessionController,
temp_dir: str,
state_json: str,
project_id: str,
test_name: str,
) -> Optional[Tuple[str, str, Dict[Any, Any]]]:
paging_token = None
while True: # Will break if paging_token is None after first search
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(
name=dict(contains=test_name),
state_filter=["Running"],
paging=dict(count=20, paging_token=paging_token)))
for session in result.results:
logger.info(f"Found sessions {session.name}")
if not session.name.startswith(test_name):
continue
try:
session_state = get_remote_json_content(
temp_dir=temp_dir,
session_name=session.name,
remote_file=state_json,
session_controller=session_controller)
except Exception as exc:
raise RuntimeError(f"Could not get remote json content "
f"for session {session.name}") from exc
if session_state.get("test_name") == test_name:
return session.id, session.name, session_state
session_token = result.metadata.next_paging_token
if not session_token:
return None
def get_latest_running_command_id(sdk: AnyscaleSDK, session_id: str
) -> Tuple[Optional[str], Optional[bool]]:
scd_id = None
paging_token = None
success = None
while not scd_id:
result = sdk.list_session_commands(
session_id=session_id, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for cmd in result.results:
if not scd_id:
scd_id = cmd.id
completed = cmd.finished_at is not None
if completed:
if success is None:
success = True
success = success and cmd.status_code == 0
if not completed:
return cmd.id, None
return scd_id, success or False
def run_test_config(
local_dir: str,
project_id: str,
test_name: str,
test_config: Dict[Any, Any],
commit_url: str,
session_name: str = None,
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
upload_artifacts: bool = True,
keep_results_dir: bool = False,
app_config_id_override: Optional[str] = None,
) -> Dict[Any, Any]:
"""
Returns:
Dict with the following entries:
status (str): One of [finished, error, timeout]
command_link (str): Link to command (Anyscale web UI)
last_logs (str): Last logs (excerpt) to send to owner
artifacts (dict): Dict of artifacts
Key: Name
Value: S3 URL
"""
# Todo (mid-term): Support other cluster definitions
# (not only cluster configs)
cluster_config_rel_path = test_config["cluster"].get(
"cluster_config", None)
cluster_config = _load_config(local_dir, cluster_config_rel_path)
app_config_rel_path = test_config["cluster"].get("app_config", None)
app_config = _load_config(local_dir, app_config_rel_path)
compute_tpl_rel_path = test_config["cluster"].get("compute_template", None)
compute_tpl = _load_config(local_dir, compute_tpl_rel_path)
stop_event = multiprocessing.Event()
result_queue = multiprocessing.Queue()
if not session_name:
session_name = f"{test_name}_{int(time.time())}"
temp_dir = tempfile.mkdtemp()
# Result and state files
results_json = test_config["run"].get("results", None)
if results_json is None:
results_json = "/tmp/release_test_out.json"
state_json = test_config["run"].get("state", None)
if state_json is None:
state_json = "/tmp/release_test_state.json"
env_vars = {
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto"),
"TEST_OUTPUT_JSON": results_json,
"TEST_STATE_JSON": state_json,
"IS_SMOKE_TEST": "1" if smoke_test else "0",
}
with open(os.path.join(local_dir, ".anyscale.yaml"), "wt") as f:
f.write(f"project_id: {project_id}")
os.chdir(local_dir)
# Setup interface
# Unfortunately, there currently seems to be no great way to
# transfer files with the Anyscale SDK.
# So we use the session controller instead.
sdk = AnyscaleSDK(auth_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"])
session_controller = SessionController(
api_client=instantiate_api_client(
cli_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"],
host=GLOBAL_CONFIG["ANYSCALE_HOST"],
),
anyscale_api_client=sdk.api_client,
)
timeout = test_config["run"].get("timeout", 1800)
if "RELEASE_OVERRIDE_TIMEOUT" in os.environ:
previous_timeout = timeout
timeout = int(os.environ.get("RELEASE_OVERRIDE_TIMEOUT", str(timeout)))
logger.warning(f"Release test timeout override: {timeout} "
f"(would have been {previous_timeout})")
# If a test is long running, timeout does not mean it failed
is_long_running = test_config["run"].get("long_running", False)
build_id_override = None
if test_config["run"].get("use_connect"):
autosuspend_mins = test_config["run"].get("autosuspend_mins", 5)
assert not kick_off_only, \
"Unsupported for running with Anyscale connect."
if app_config_id_override is not None:
logger.info(
"Using connect and an app config override, waiting until "
"build finishes so we can fetch the app config in order to "
"install its pip packages locally.")
build_id_override = wait_for_build_or_raise(
sdk, app_config_id_override)
response = sdk.get_cluster_environment_build(build_id_override)
app_config = response.result.config_json
install_app_config_packages(app_config)
install_matching_ray()
elif "autosuspend_mins" in test_config["run"]:
raise ValueError(
"'autosuspend_mins' is only supported if 'use_connect' is True.")
# Add information to results dict
def _update_results(results: Dict):
if "last_update" in results:
results["last_update_diff"] = time.time() - results["last_update"]
if smoke_test:
results["smoke_test"] = True
def _process_finished_command(session_controller: SessionController,
scd_id: str,
results: Optional[Dict] = None,
runtime: int = None,
commit_url: str = None,
session_url: str = None):
logger.info("Command finished successfully.")
if results_json:
results = results or get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
else:
results = {"passed": 1}
_update_results(results)
if scd_id:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
else:
logs = "No command found to fetch logs for"
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=test_config.get("artifacts", {}),
session_controller=session_controller,
)
logger.info("Fetched results and stored on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
# Add these metadata here to avoid changing SQL schema.
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
# When running the test script in client mode, the finish command is a
# completed local process.
def _process_finished_client_command(returncode: int, logs: str):
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=None,
session_controller=None,
)
logger.info("Stored results on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
if results_json:
results = get_local_json_content(local_file=results_json, )
else:
results = {
"passed": int(returncode == 0),
}
results["returncode"] = returncode
_update_results(results)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
def _run(logger):
# These values will be set as the test runs.
session_url = None
runtime = None
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
test_uses_ray_connect = test_config["run"].get("use_connect")
session_id = None
scd_id = None
try:
# First, look for running sessions
session_id = search_running_session(sdk, project_id, session_name)
compute_tpl_name = None
app_config_id = app_config_id_override
app_config_name = None
build_id = build_id_override
if not session_id:
logger.info("No session found.")
# Start session
session_options = dict(
name=session_name, project_id=project_id)
if cluster_config is not None:
logging.info("Starting session with cluster config")
cluster_config_str = json.dumps(cluster_config)
session_options["cluster_config"] = cluster_config_str
session_options["cloud_id"] = (
GLOBAL_CONFIG["ANYSCALE_CLOUD_ID"], )
session_options["uses_app_config"] = False
else:
logging.info("Starting session with app/compute config")
# Find/create compute template
compute_tpl_id, compute_tpl_name = \
create_or_find_compute_template(
sdk, project_id, compute_tpl)
logger.info(f"Link to compute template: "
f"{anyscale_compute_tpl_url(compute_tpl_id)}")
# Find/create app config
if app_config_id is None:
(
app_config_id,
app_config_name,
) = create_or_find_app_config(sdk, project_id,
app_config)
else:
logger.info(
f"Using override app config {app_config_id}")
app_config_name = sdk.get_app_config(
app_config_id).result.name
if build_id is None:
# We might have already retrieved the build ID when
# installing app config packages locally if using
# connect, so only get the build ID if it's not set.
build_id = wait_for_build_or_raise(sdk, app_config_id)
session_options["compute_template_id"] = compute_tpl_id
session_options["build_id"] = build_id
session_options["uses_app_config"] = True
# Start session
session_id = create_and_wait_for_session(
sdk=sdk,
stop_event=stop_event,
session_name=session_name,
session_options=session_options,
)
prepare_command = test_config["run"].get("prepare")
# Write test state json
test_state_file = os.path.join(local_dir, "test_state.json")
with open(test_state_file, "wt") as f:
json.dump({
"start_time": time.time(),
"test_name": test_name
}, f)
if prepare_command or not test_uses_ray_connect:
if test_uses_ray_connect:
logger.info("Found a prepare command, so pushing it "
"to the session.")
# Rsync up
logger.info("Syncing files to session...")
session_controller.push(
session_name=session_name,
source=None,
target=None,
config=None,
all_nodes=False,
)
logger.info("Syncing test state to session...")
session_controller.push(
session_name=session_name,
source=test_state_file,
target=state_json,
config=None,
all_nodes=False,
)
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
session_id=session_id)
_check_stop(stop_event, "file_sync")
# Optionally run preparation command
if prepare_command:
logger.info(
f"Running preparation command: {prepare_command}")
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=prepare_command,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_PREPARE")
_, _ = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_PREPARE")
if test_uses_ray_connect:
script_args = test_config["run"].get("args", [])
if smoke_test:
script_args += ["--smoke-test"]
min_workers = 0
for node_type in compute_tpl["worker_node_types"]:
min_workers += node_type["min_workers"]
# Build completed, use job timeout
result_queue.put(State("CMD_RUN", time.time(), None))
returncode, logs = run_job(
cluster_name=session_name,
compute_tpl_name=compute_tpl_name,
cluster_env_name=app_config_name,
job_name=session_name,
min_workers=min_workers,
script=test_config["run"]["script"],
script_args=script_args,
env_vars=env_vars,
autosuspend=autosuspend_mins)
_process_finished_client_command(returncode, logs)
return
# Run release test command
cmd_to_run = test_config["run"]["script"] + " "
args = test_config["run"].get("args", [])
if args:
cmd_to_run += " ".join(args) + " "
if smoke_test:
cmd_to_run += " --smoke-test"
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=cmd_to_run,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_RUN")
if not kick_off_only:
_, runtime = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_RUN")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
runtime=runtime,
session_url=session_url,
commit_url=commit_url)
else:
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": ""
}))
except (ReleaseTestTimeoutError, Exception) as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = logs + "; Command logs:" + get_command_logs(
session_controller, scd_id,
test_config.get("log_lines", 50))
except Exception as e2:
logger.error(e2, exc_info=True)
# Long running tests are "finished" successfully when
# timed out
if isinstance(e, ReleaseTestTimeoutError) and is_long_running:
_process_finished_command(
session_controller=session_controller, scd_id=scd_id)
else:
timeout_type = ""
runtime = None
if isinstance(e, CommandTimeoutError):
timeout_type = "timeout"
runtime = 0
elif (isinstance(e, PrepareCommandTimeoutError)
or isinstance(e, FileSyncTimeoutError)
or isinstance(e, SessionTimeoutError)
or isinstance(e, PrepareCommandRuntimeError)
or isinstance(e, AppConfigBuildFailure)):
timeout_type = "infra_timeout"
runtime = None
elif isinstance(e, RuntimeError):
timeout_type = "runtime_error"
runtime = 0
else:
timeout_type = "unknown timeout"
runtime = None
# Add these metadata here to avoid changing SQL schema.
results = {}
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END", time.time(), {
"status": timeout_type,
"last_logs": logs,
"results": results
}))
finally:
if no_terminate:
logger.warning(
"`no_terminate` is set to True, so the session will "
"*not* be terminated!")
else:
_cleanup_session(sdk, session_id)
def _check_progress(logger):
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
should_terminate = False
session_id = None
scd_id = None
try:
existing_session = find_session_by_test_name(
sdk=sdk,
session_controller=session_controller,
temp_dir=temp_dir,
state_json=state_json,
project_id=project_id,
test_name=test_name)
if existing_session is None:
logger.info(f"Found no existing session for {test_name}")
result_queue.put(
State("END", time.time(), {
"status": "nosession",
"last_logs": ""
}))
return
session_id, session_name, session_state = existing_session
logger.info(f"Found existing session for {test_name}: "
f"{session_name}")
scd_id, success = get_latest_running_command_id(
sdk=sdk, session_id=session_id)
latest_result = get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
# Fetch result json and check if it has been updated recently
result_time_key = test_config["run"].get("time_key", None)
maximum_update_delay = test_config["run"].get(
"max_update_delay", None)
if result_time_key and maximum_update_delay:
last_update = latest_result.get(result_time_key, None)
if not last_update:
result_queue.put(
State(
"END", time.time(), {
"status": "error",
"last_logs": f"Test did not store "
f"{result_time_key} in the "
f"results json."
}))
return
delay = time.time() - last_update
logger.info(f"Last update was at {last_update:.2f}. "
f"This was {delay:.2f} seconds ago "
f"(maximum allowed: {maximum_update_delay})")
if delay > maximum_update_delay:
raise RuntimeError(
f"Test did not update the results json within "
f"the last {maximum_update_delay} seconds.")
if time.time() - session_state["start_time"] > timeout:
# Long running test reached timeout
logger.info(
f"Test command reached timeout after {timeout} seconds")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
elif success:
logger.info("All commands finished.")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
else:
rest_time = timeout - time.time() + session_state["start_time"]
logger.info(f"Test command should continue running "
f"for {rest_time} seconds")
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": "Test is still running"
}))
except Exception as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
logs += f"\n{str(e)}"
except Exception as e2:
logger.error(e2, exc_info=True)
result_queue.put(
State("END", time.time(), {
"status": "error",
"last_logs": logs
}))
should_terminate = True
finally:
if should_terminate:
logger.warning("Terminating session")
_cleanup_session(sdk, session_id)
if not check_progress:
process = multiprocessing.Process(target=_run, args=(logger, ))
else:
process = multiprocessing.Process(
target=_check_progress, args=(logger, ))
build_timeout = test_config["run"].get("build_timeout", 1800)
project_url = anyscale_project_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"])
logger.info(f"Link to project: {project_url}")
msg = f"This will now run test {test_name}."
if smoke_test:
msg += " This is a smoke test."
if is_long_running:
msg += " This is a long running test."
logger.info(msg)
logger.info(f"Starting process with timeout {timeout} "
f"(build timeout {build_timeout})")
process.start()
# The timeout time will be updated after the build finished
# Build = App config + compute template build and session start
timeout_time = time.time() + build_timeout
result = {}
while process.is_alive():
try:
state: State = result_queue.get(timeout=1)
except (Empty, TimeoutError):
if time.time() > timeout_time:
stop_event.set()
logger.warning("Process timed out.")
if not is_long_running:
logger.warning("Terminating process in 10 seconds.")
time.sleep(10)
logger.warning("Terminating process now.")
process.terminate()
else:
logger.info("Process is long running. Give 2 minutes to "
"fetch result and terminate.")
start_terminate = time.time()
while time.time(
) < start_terminate + 120 and process.is_alive():
time.sleep(1)
if process.is_alive():
logger.warning("Terminating forcefully now.")
process.terminate()
else:
logger.info("Long running results collected.")
break
continue
if not isinstance(state, State):
raise RuntimeError(f"Expected `State` object, got {result}")
if state.state == "CMD_PREPARE":
# Reset timeout after build finished
timeout_time = state.timestamp + timeout
if state.state == "CMD_RUN":
# Reset timeout after prepare command or build finished
timeout_time = state.timestamp + timeout
elif state.state == "END":
result = state.data
break
while not result_queue.empty():
state = result_queue.get_nowait()
result = state.data
logger.info("Final check if everything worked.")
try:
result.setdefault("status", "error (status not found)")
except (TimeoutError, Empty):
result = {"status": "timeout", "last_logs": "Test timed out."}
logger.info(f"Final results: {result}")
log_results_and_artifacts(result)
if not keep_results_dir:
logger.info(f"Removing results dir {temp_dir}")
shutil.rmtree(temp_dir)
else:
# Write results.json
with open(os.path.join(temp_dir, "results.json"), "wt") as fp:
json.dump(result, fp)
out_dir = os.path.expanduser(GLOBAL_CONFIG["RELEASE_RESULTS_DIR"])
logger.info(f"Moving results dir {temp_dir} to persistent location "
f"{out_dir}")
shutil.rmtree(out_dir, ignore_errors=True)
shutil.copytree(temp_dir, out_dir)
logger.info(f"Dir contents: {os.listdir(out_dir)}")
return result
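# Hedged sketch of the dict returned by run_test_config (field values are examples,
# not real output):
#
#   {
#       "status": "finished",          # or e.g. "error", "timeout", "kickoff"
#       "last_logs": "...",            # excerpt of the command logs
#       "results": {"passed": 1},      # contents of the results json
#       "artifacts": {"output.log": "s3://<bucket>/<location>/output.log"},
#   }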
def run_test(test_config_file: str,
test_name: str,
project_id: str,
commit_url: str,
category: str = "unspecified",
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
report: bool = True,
keep_results_dir: bool = False,
session_name: Optional[str] = None,
app_config_id_override=None) -> Dict[str, Any]:
with open(test_config_file, "rt") as f:
test_configs = yaml.safe_load(f)
test_config_dict = {}
for test_config in test_configs:
name = test_config.pop("name")
test_config_dict[name] = test_config
if test_name not in test_config_dict:
raise ValueError(
f"Test with name `{test_name}` not found in test config file "
f"at `{test_config_file}`.")
test_config = test_config_dict[test_name]
if smoke_test and "smoke_test" in test_config:
smoke_test_config = test_config.pop("smoke_test")
test_config = _deep_update(test_config, smoke_test_config)
local_dir = os.path.dirname(test_config_file)
if "local_dir" in test_config:
# local_dir is relative to test_config_file
local_dir = os.path.join(local_dir, test_config["local_dir"])
if test_config["run"].get("use_connect"):
assert not kick_off_only, \
"--kick-off-only is unsupported when running with " \
"Anyscale connect."
assert not check_progress, \
"--check is unsupported when running with Anyscale connect."
if test_config.get("artifacts", {}):
logger.error(
"Saving artifacts are not yet supported when running with "
"Anyscale connect.")
result = run_test_config(
local_dir,
project_id,
test_name,
test_config,
commit_url,
session_name=session_name,
smoke_test=smoke_test,
no_terminate=no_terminate,
kick_off_only=kick_off_only,
check_progress=check_progress,
upload_artifacts=report,
keep_results_dir=keep_results_dir,
app_config_id_override=app_config_id_override)
status = result.get("status", "invalid")
if kick_off_only:
if status != "kickoff":
raise RuntimeError("Error kicking off test.")
logger.info("Kicked off test. It's now up to the `--check` "
"part of the script to track its process.")
return {}
else:
# `--check` or no kick off only
if status == "nosession":
logger.info(f"No running session found for test {test_name}, so "
f"assuming everything is fine.")
return {}
if status == "kickoff":
logger.info(f"Test {test_name} is still running.")
return {}
last_logs = result.get("last_logs", "No logs.")
test_suite = os.path.basename(test_config_file).replace(".yaml", "")
report_kwargs = dict(
test_suite=test_suite,
test_name=test_name,
status=status,
last_logs=last_logs,
results=result.get("results", {}),
artifacts=result.get("artifacts", {}),
category=category,
)
if report:
report_result(**report_kwargs)
else:
logger.info(f"Usually I would now report the following results:\n"
f"{report_kwargs}")
if has_errored(result):
raise RuntimeError(last_logs)
return report_kwargs
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--test-config", type=str, required=True, help="Test config file")
parser.add_argument("--test-name", type=str, help="Test name in config")
parser.add_argument(
"--ray-wheels", required=False, type=str, help="URL to ray wheels")
parser.add_argument(
"--no-terminate",
action="store_true",
default=False,
help="Don't terminate session after failure")
parser.add_argument(
"--no-report",
action="store_true",
default=False,
help="Do not report any results or upload to S3")
parser.add_argument(
"--kick-off-only",
action="store_true",
default=False,
help="Kick off only (don't wait for command to finish)")
parser.add_argument(
"--check",
action="store_true",
default=False,
help="Check (long running) status")
parser.add_argument(
"--keep-results-dir",
action="store_true",
default=False,
help="Keep results in directory (named RELEASE_RESULTS_DIR), e.g. "
"for Buildkite artifact upload.")
parser.add_argument(
"--category",
type=str,
default="unspecified",
help="Category name, e.g. `release-1.3.0` (will be saved in database)")
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--session-name",
required=False,
type=str,
help="Name of the session to run this test.")
parser.add_argument(
"--app-config-id-override",
required=False,
type=str,
help=("An app config ID, which will override the test config app "
"config."))
args, _ = parser.parse_known_args()
if not GLOBAL_CONFIG["ANYSCALE_PROJECT"]:
raise RuntimeError(
"You have to set the ANYSCALE_PROJECT environment variable!")
maybe_fetch_api_token()
    url = None
    if args.ray_wheels:
os.environ["RAY_WHEELS"] = str(args.ray_wheels)
url = str(args.ray_wheels)
elif not args.check and not os.environ.get("RAY_WHEELS"):
url = find_ray_wheels(
GLOBAL_CONFIG["RAY_REPO"],
GLOBAL_CONFIG["RAY_BRANCH"],
GLOBAL_CONFIG["RAY_VERSION"],
)
if not url:
raise RuntimeError(f"Could not find wheels for "
f"Ray {GLOBAL_CONFIG['RAY_VERSION']}, "
f"branch {GLOBAL_CONFIG['RAY_BRANCH']}")
# RAY_COMMIT is set by find_ray_wheels
elif os.environ.get("RAY_WHEELS"):
logger.info(f"Using Ray wheels provided from URL: "
f"{os.environ.get('RAY_WHEELS')}")
url = os.environ.get("RAY_WHEELS")
populate_wheels_sanity_check(os.environ.get("RAY_COMMIT", ""))
test_config_file = os.path.abspath(os.path.expanduser(args.test_config))
result_dict = run_test(
test_config_file=test_config_file,
test_name=args.test_name,
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
commit_url=url,
category=args.category,
smoke_test=args.smoke_test,
no_terminate=args.no_terminate or args.kick_off_only,
kick_off_only=args.kick_off_only,
check_progress=args.check,
report=not args.no_report,
session_name=args.session_name,
keep_results_dir=args.keep_results_dir,
app_config_id_override=args.app_config_id_override,
)
if result_dict:
# If we get a result dict, check if any alerts should be raised
from alert import SUITE_TO_FN, default_handle_result
logger.info("Checking if results are valid...")
handle_result_kwargs = result_dict.copy()
handle_result_kwargs["created_on"] = None
test_suite = handle_result_kwargs.get("test_suite", None)
test_name = handle_result_kwargs.get("test_name", None)
category = handle_result_kwargs.get("category", None)
handle_fn = SUITE_TO_FN.get(test_suite, None)
if not handle_fn:
logger.warning(f"No handle for suite {test_suite}")
alert = default_handle_result(**handle_result_kwargs)
else:
alert = handle_fn(**handle_result_kwargs)
if alert:
# If we get an alert, the test failed.
raise RuntimeError(alert)
else:
logger.info(f"No alert raised for test {test_suite}/{test_name} "
f"({category}) - the test successfully passed!")
"""Option helper functions"""
__docformat__ = "numpy"
import argparse
from typing import List
import pandas as pd
import numpy as np
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
check_non_negative,
)
# pylint: disable=R1710
def load(other_args: List[str]) -> str:
"""Load ticker into object
Parameters
----------
other_args: List[str]
        Argparse arguments
Returns
-------
str:
Ticker
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="opload",
description="Load a ticker into option menu",
)
parser.add_argument(
"-t",
"--ticker",
action="store",
dest="ticker",
required="-h" not in other_args,
help="Stock ticker",
)
try:
if other_args:
if "-t" not in other_args and "-h" not in other_args:
other_args.insert(0, "-t")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return ""
print("")
return ns_parser.ticker
except Exception as e:
print(e, "\n")
return ""
except SystemExit:
print("")
return ""
# pylint: disable=no-else-return
def select_option_date(avalaiable_dates: List[str], other_args: List[str]) -> str:
"""Select an option date out of a supplied list
Parameters
----------
avalaiable_dates: List[str]
Possible date options
other_args: List[str]
        Argparse arguments
Returns
-------
expiry_date: str
Selected expiry date
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="exp",
description="See and set expiration date",
)
parser.add_argument(
"-d",
"--date",
dest="n_date",
action="store",
type=int,
default=-1,
choices=range(len(avalaiable_dates)),
help="Select index for expiry date.",
)
parser.add_argument(
"-D",
dest="date",
type=str,
choices=avalaiable_dates + [""],
help="Select date (YYYY-MM-DD)",
default="",
)
try:
if other_args:
if "-" not in other_args[0]:
other_args.insert(0, "-d")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return ""
# Print possible expiry dates
if ns_parser.n_date == -1 and not ns_parser.date:
print("\nAvailable expiry dates:")
for i, d in enumerate(avalaiable_dates):
print(f" {(2 - len(str(i))) * " "}{i}. {d}")
print("")
return ""
# It means an expiry date was correctly selected
else:
if ns_parser.date:
if ns_parser.date in avalaiable_dates:
print(f"Expiraration set to {ns_parser.date} \n")
return ns_parser.date
else:
print("Expiration not an option")
return ""
else:
expiry_date = avalaiable_dates[ns_parser.n_date]
print(f"Expiraration set to {expiry_date} \n")
return expiry_date
except Exception as e:
print(e, "\n")
return ""
def get_loss_at_strike(strike: float, chain: pd.DataFrame) -> float:
"""Function to get the loss at the given expiry
Parameters
----------
strike: Union[int,float]
Value to calculate total loss at
    chain: pd.DataFrame
        Dataframe indexed by strike, containing OI_call and OI_put columns
Returns
-------
loss: Union[float,int]
Total loss
"""
itm_calls = chain[chain.index < strike][["OI_call"]]
itm_calls["loss"] = (strike - itm_calls.index) * itm_calls["OI_call"]
call_loss = itm_calls["loss"].sum()
itm_puts = chain[chain.index > strike][["OI_put"]]
itm_puts["loss"] = (itm_puts.index - strike) * itm_puts["OI_put"]
put_loss = itm_puts.loss.sum()
loss = call_loss + put_loss
return loss
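# Hedged usage sketch with toy numbers (not real market data). The chain must be
# indexed by strike and contain OI_call / OI_put columns:
#
#   chain = pd.DataFrame(
#       {"OI_call": [10, 20, 30], "OI_put": [30, 20, 10]}, index=[90, 100, 110]
#   )
#   get_loss_at_strike(100, chain)
#   # calls ITM below 100: (100 - 90) * 10 = 100
#   # puts ITM above 100:  (110 - 100) * 10 = 100
#   # -> 200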
def calculate_max_pain(chain: pd.DataFrame) -> int:
"""Returns the max pain for a given call/put dataframe
Parameters
----------
chain: DataFrame
Dataframe to calculate value from
Returns
-------
max_pain : int
Max pain value
"""
strikes = np.array(chain.index)
if ("OI_call" not in chain.columns) or ("OI_put" not in chain.columns):
print("Incorrect columns. Unable to parse max pain")
return np.nan
loss = []
for price_at_exp in strikes:
loss.append(get_loss_at_strike(price_at_exp, chain))
chain["loss"] = loss
max_pain = chain["loss"].idxmin()
return max_pain
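# Hedged usage sketch, continuing the toy chain above: the total loss is 400 at
# strikes 90 and 110 but only 200 at 100, so the strike minimizing the loss is:
#
#   calculate_max_pain(chain)   # -> 100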
def vol(other_args: List[str]):
"""Parse volume argparse
Parameters
----------
other_args: List[str]
Argparse arguments
Returns
-------
ns_parser: argparse.Namespace
Parsed namespace
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="vol",
description="Plot volume. Volume refers to the number of contracts traded today.",
)
parser.add_argument(
"-m",
"--min",
default=-1,
type=check_non_negative,
help="Min strike to plot",
dest="min",
)
parser.add_argument(
"-M",
"--max",
default=-1,
type=check_non_negative,
help="Max strike to plot",
dest="max",
)
parser.add_argument(
"--calls",
action="store_true",
default=False,
dest="calls",
help="Flag to plot call options only",
)
parser.add_argument(
"--puts",
action="store_true",
default=False,
dest="puts",
help="Flag to plot put options only",
)
parser.add_argument(
"--source",
type=str,
default="tr",
choices=["tr", "yf"],
dest="source",
help="Source to get data from",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
return ns_parser
except Exception as e:
print(e, "\n")
def voi(other_args: List[str]):
"""Parse Volume + open interest argparse
Parameters
----------
other_args: List[str]
Argparse arguments
Returns
-------
ns_parser: argparse.Namespace
Parsed namespace
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="voi",
description="""
Plots Volume + Open Interest of calls vs puts.
""",
)
parser.add_argument(
"-v",
"--minv",
dest="min_vol",
type=check_non_negative,
default=-1,
help="minimum volume (considering open interest) threshold of the plot.",
)
parser.add_argument(
"-m",
"--min",
dest="min_sp",
type=check_non_negative,
default=-1,
help="minimum strike price to consider in the plot.",
)
parser.add_argument(
"-M",
"--max",
dest="max_sp",
type=check_non_negative,
default=-1,
help="maximum strike price to consider in the plot.",
)
parser.add_argument(
"--source",
type=str,
default="tr",
choices=["tr", "yf"],
dest="source",
help="Source to get data from",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return None
return ns_parser
except Exception as e:
print(e, "\n")
return None
def oi(other_args: List[str]):
"""Parse Open Interest argparse
Parameters
----------
other_args: List[str]
Argparse arguments
Returns
-------
ns_parser: argparse.Namespace
Parsed namespace
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="oi",
description="Plot open interest. Open interest represents the number of contracts that exist.",
)
parser.add_argument(
"-m",
"--min",
default=-1,
type=check_non_negative,
help="Min strike to plot",
dest="min",
)
parser.add_argument(
"-M",
"--max",
default=-1,
type=check_non_negative,
help="Max strike to plot",
dest="max",
)
parser.add_argument(
"--calls",
action="store_true",
default=False,
dest="calls",
help="Flag to plot call options only",
)
parser.add_argument(
"--puts",
action="store_true",
default=False,
dest="puts",
help="Flag to plot put options only",
)
parser.add_argument(
"--source",
type=str,
default="tr",
choices=["tr", "yf"],
dest="source",
help="Source to get data from",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return None
return ns_parser
except Exception as e:
print(e, "\n")
return None
| """Option helper functions"""
__docformat__ = "numpy"
import argparse
from typing import List
import pandas as pd
import numpy as np
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
check_non_negative,
)
# pylint: disable=R1710
def load(other_args: List[str]) -> str:
"""Load ticker into object
Parameters
----------
other_args: List[str]
Agrparse arguments
Returns
-------
str:
Ticker
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="opload",
description="Load a ticker into option menu",
)
parser.add_argument(
"-t",
"--ticker",
action="store",
dest="ticker",
required="-h" not in other_args,
help="Stock ticker",
)
try:
if other_args:
if "-t" not in other_args and "-h" not in other_args:
other_args.insert(0, "-t")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return ""
print("")
return ns_parser.ticker
except Exception as e:
print(e, "\n")
return ""
except SystemExit:
print("")
return ""
# pylint: disable=no-else-return
def select_option_date(avalaiable_dates: List[str], other_args: List[str]) -> str:
"""Select an option date out of a supplied list
Parameters
----------
avalaiable_dates: List[str]
Possible date options
other_args: List[str]
Arparse arguments
Returns
-------
expiry_date: str
Selected expiry date
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="exp",
description="See and set expiration date",
)
parser.add_argument(
"-d",
"--date",
dest="n_date",
action="store",
type=int,
default=-1,
choices=range(len(avalaiable_dates)),
help="Select index for expiry date.",
)
parser.add_argument(
"-D",
dest="date",
type=str,
choices=avalaiable_dates + [""],
help="Select date (YYYY-MM-DD)",
default="",
)
try:
if other_args:
if "-" not in other_args[0]:
other_args.insert(0, "-d")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return ""
# Print possible expiry dates
if ns_parser.n_date == -1 and not ns_parser.date:
print("\nAvailable expiry dates:")
for i, d in enumerate(avalaiable_dates):
print(f" {(2 - len(str(i))) * ' '}{i}. {d}")
print("")
return ""
# It means an expiry date was correctly selected
else:
if ns_parser.date:
if ns_parser.date in avalaiable_dates:
print(f"Expiraration set to {ns_parser.date} \n")
return ns_parser.date
else:
print("Expiration not an option")
return ""
else:
expiry_date = avalaiable_dates[ns_parser.n_date]
print(f"Expiraration set to {expiry_date} \n")
return expiry_date
except Exception as e:
print(e, "\n")
return ""
def get_loss_at_strike(strike: float, chain: pd.DataFrame) -> float:
"""Function to get the loss at the given expiry
Parameters
----------
strike: Union[int,float]
Value to calculate total loss at
chain: Dataframe:
Dataframe containing at least strike and openInterest
Returns
-------
loss: Union[float,int]
Total loss
"""
itm_calls = chain[chain.index < strike][["OI_call"]]
itm_calls["loss"] = (strike - itm_calls.index) * itm_calls["OI_call"]
call_loss = itm_calls["loss"].sum()
itm_puts = chain[chain.index > strike][["OI_put"]]
itm_puts["loss"] = (itm_puts.index - strike) * itm_puts["OI_put"]
put_loss = itm_puts.loss.sum()
loss = call_loss + put_loss
return loss
def calculate_max_pain(chain: pd.DataFrame) -> int:
"""Returns the max pain for a given call/put dataframe
Parameters
----------
chain: DataFrame
Dataframe to calculate value from
Returns
-------
max_pain : int
Max pain value
"""
strikes = np.array(chain.index)
if ("OI_call" not in chain.columns) or ("OI_put" not in chain.columns):
print("Incorrect columns. Unable to parse max pain")
return np.nan
loss = []
for price_at_exp in strikes:
loss.append(get_loss_at_strike(price_at_exp, chain))
chain["loss"] = loss
max_pain = chain["loss"].idxmin()
return max_pain
def vol(other_args: List[str]):
"""Parse volume argparse
Parameters
----------
other_args: List[str]
Argparse arguments
Returns
-------
ns_parser: argparse.Namespace
Parsed namespace
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="vol",
description="Plot volume. Volume refers to the number of contracts traded today.",
)
parser.add_argument(
"-m",
"--min",
default=-1,
type=check_non_negative,
help="Min strike to plot",
dest="min",
)
parser.add_argument(
"-M",
"--max",
default=-1,
type=check_non_negative,
help="Max strike to plot",
dest="max",
)
parser.add_argument(
"--calls",
action="store_true",
default=False,
dest="calls",
help="Flag to plot call options only",
)
parser.add_argument(
"--puts",
action="store_true",
default=False,
dest="puts",
help="Flag to plot put options only",
)
parser.add_argument(
"--source",
type=str,
default="tr",
choices=["tr", "yf"],
dest="source",
help="Source to get data from",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
return ns_parser
except Exception as e:
print(e, "\n")
def voi(other_args: List[str]):
"""Parse Volume + open interest argparse
Parameters
----------
other_args: List[str]
Argparse arguments
Returns
-------
ns_parser: argparse.Namespace
Parsed namespace
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="voi",
description="""
Plots Volume + Open Interest of calls vs puts.
""",
)
parser.add_argument(
"-v",
"--minv",
dest="min_vol",
type=check_non_negative,
default=-1,
help="minimum volume (considering open interest) threshold of the plot.",
)
parser.add_argument(
"-m",
"--min",
dest="min_sp",
type=check_non_negative,
default=-1,
help="minimum strike price to consider in the plot.",
)
parser.add_argument(
"-M",
"--max",
dest="max_sp",
type=check_non_negative,
default=-1,
help="maximum strike price to consider in the plot.",
)
parser.add_argument(
"--source",
type=str,
default="tr",
choices=["tr", "yf"],
dest="source",
help="Source to get data from",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return None
return ns_parser
except Exception as e:
print(e, "\n")
return None
def oi(other_args: List[str]):
"""Parse Open Interest argparse
Parameters
----------
other_args: List[str]
Argparse arguments
Returns
-------
ns_parser: argparse.Namespace
Parsed namespace
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="oi",
description="Plot open interest. Open interest represents the number of contracts that exist.",
)
parser.add_argument(
"-m",
"--min",
default=-1,
type=check_non_negative,
help="Min strike to plot",
dest="min",
)
parser.add_argument(
"-M",
"--max",
default=-1,
type=check_non_negative,
help="Max strike to plot",
dest="max",
)
parser.add_argument(
"--calls",
action="store_true",
default=False,
dest="calls",
help="Flag to plot call options only",
)
parser.add_argument(
"--puts",
action="store_true",
default=False,
dest="puts",
help="Flag to plot put options only",
)
parser.add_argument(
"--source",
type=str,
default="tr",
choices=["tr", "yf"],
dest="source",
help="Source to get data from",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return None
return ns_parser
except Exception as e:
print(e, "\n")
return None
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import logging
import os
import shutil
import tempfile
import time
import traceback
from collections import OrderedDict
from urllib.parse import quote_plus
import ctk
import qt
import SampleData
import SimpleITK as sitk
import sitkUtils
import slicer
import vtk
import vtkSegmentationCore
from MONAILabelLib import GenericAnatomyColors, MONAILabelClient
from slicer.ScriptedLoadableModule import *
from slicer.util import VTKObservationMixin
class MONAILabel(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "MONAILabel"
self.parent.categories = ["Active Learning"]
self.parent.dependencies = []
self.parent.contributors = ["NVIDIA, KCL"]
self.parent.helpText = """
Active Learning solution.
See more information in <a href="https://github.com/Project-MONAI/MONAILabel">module documentation</a>.
"""
self.parent.acknowledgementText = """
Developed by NVIDIA, KCL
"""
# Additional initialization step after application startup is complete
slicer.app.connect("startupCompleted()", self.initializeAfterStartup)
def initializeAfterStartup(self):
if not slicer.app.commandOptions().noMainWindow:
self.settingsPanel = MONAILabelSettingsPanel()
slicer.app.settingsDialog().addPanel("MONAI Label", self.settingsPanel)
class _ui_MONAILabelSettingsPanel(object):
def __init__(self, parent):
vBoxLayout = qt.QVBoxLayout(parent)
# settings
groupBox = ctk.ctkCollapsibleGroupBox()
groupBox.title = "MONAI Label Server"
groupLayout = qt.QFormLayout(groupBox)
serverUrl = qt.QLineEdit()
groupLayout.addRow("Server address:", serverUrl)
parent.registerProperty("MONAILabel/serverUrl", serverUrl, "text", str(qt.SIGNAL("textChanged(QString)")))
serverUrlHistory = qt.QLineEdit()
groupLayout.addRow("Server address history:", serverUrlHistory)
parent.registerProperty(
"MONAILabel/serverUrlHistory", serverUrlHistory, "text", str(qt.SIGNAL("textChanged(QString)"))
)
fileExtension = qt.QLineEdit()
fileExtension.setText(".nii.gz")
fileExtension.toolTip = "Default extension for uploading images/labels"
groupLayout.addRow("File Extension:", fileExtension)
parent.registerProperty(
"MONAILabel/fileExtension", fileExtension, "text", str(qt.SIGNAL("textChanged(QString)"))
)
clientId = qt.QLineEdit()
clientId.setText("user-xyz")
clientId.toolTip = "Client/User ID that will be sent to MONAI Label server for reference"
groupLayout.addRow("Client/User-ID:", clientId)
parent.registerProperty("MONAILabel/clientId", clientId, "text", str(qt.SIGNAL("textChanged(QString)")))
autoRunSegmentationCheckBox = qt.QCheckBox()
autoRunSegmentationCheckBox.checked = False
autoRunSegmentationCheckBox.toolTip = (
"Enable this option to auto run segmentation if pre-trained model exists when Next Sample is fetched"
)
groupLayout.addRow("Auto-Run Pre-Trained Model:", autoRunSegmentationCheckBox)
parent.registerProperty(
"MONAILabel/autoRunSegmentationOnNextSample",
ctk.ctkBooleanMapper(autoRunSegmentationCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
autoFetchNextSampleCheckBox = qt.QCheckBox()
autoFetchNextSampleCheckBox.checked = False
autoFetchNextSampleCheckBox.toolTip = "Enable this option to fetch Next Sample after saving the label"
groupLayout.addRow("Auto-Fetch Next Sample:", autoFetchNextSampleCheckBox)
parent.registerProperty(
"MONAILabel/autoFetchNextSample",
ctk.ctkBooleanMapper(autoFetchNextSampleCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
autoUpdateModelCheckBox = qt.QCheckBox()
autoUpdateModelCheckBox.checked = True
autoUpdateModelCheckBox.toolTip = "Enable this option to auto update model after submitting the label"
groupLayout.addRow("Auto-Update Model:", autoUpdateModelCheckBox)
parent.registerProperty(
"MONAILabel/autoUpdateModel",
ctk.ctkBooleanMapper(autoUpdateModelCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
askForUserNameCheckBox = qt.QCheckBox()
askForUserNameCheckBox.checked = False
askForUserNameCheckBox.toolTip = "Enable this option to ask for the user name when the MONAILabel extension is loaded for the first time"
groupLayout.addRow("Ask For User Name:", askForUserNameCheckBox)
parent.registerProperty(
"MONAILabel/askForUserName",
ctk.ctkBooleanMapper(askForUserNameCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
allowOverlapCheckBox = qt.QCheckBox()
allowOverlapCheckBox.checked = False
allowOverlapCheckBox.toolTip = "Enable this option to allow overlapping segmentations"
groupLayout.addRow("Allow Overlapping Segmentations:", allowOverlapCheckBox)
parent.registerProperty(
"MONAILabel/allowOverlappingSegments",
ctk.ctkBooleanMapper(allowOverlapCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
allowOverlapCheckBox.connect("toggled(bool)", self.onUpdateAllowOverlap)
developerModeCheckBox = qt.QCheckBox()
developerModeCheckBox.checked = False
developerModeCheckBox.toolTip = "Enable this option to show the Options section and other developer features"
groupLayout.addRow("Developer Mode:", developerModeCheckBox)
parent.registerProperty(
"MONAILabel/developerMode",
ctk.ctkBooleanMapper(developerModeCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
vBoxLayout.addWidget(groupBox)
vBoxLayout.addStretch(1)
def onUpdateAllowOverlap(self):
if slicer.util.settingsValue("MONAILabel/allowOverlappingSegments", True, converter=slicer.util.toBool):
if slicer.util.settingsValue("MONAILabel/fileExtension", None) != ".seg.nrrd":
slicer.util.warningDisplay(
"Overlapping segmentations are only availabel with the '.seg.nrrd' file extension! Consider changing MONAILabel file extension."
)
class MONAILabelSettingsPanel(ctk.ctkSettingsPanel):
def __init__(self, *args, **kwargs):
ctk.ctkSettingsPanel.__init__(self, *args, **kwargs)
self.ui = _ui_MONAILabelSettingsPanel(self)
class MONAILabelWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):
def __init__(self, parent=None):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.__init__(self, parent)
VTKObservationMixin.__init__(self) # needed for parameter node observation
self.logic = None
self._parameterNode = None
self._volumeNode = None
self._segmentNode = None
self._volumeNodes = []
self._updatingGUIFromParameterNode = False
self._scribblesEditorWidget = None
self.info = {}
self.models = OrderedDict()
self.trainers = OrderedDict()
self.config = OrderedDict()
self.current_sample = None
self.samples = {}
self.state = {
"SegmentationModel": "",
"DeepgrowModel": "",
"ScribblesMethod": "",
"CurrentStrategy": "",
"CurrentTrainer": "",
}
self.file_ext = ".nii.gz"
self.dgPositiveFiducialNode = None
self.dgPositiveFiducialNodeObservers = []
self.dgNegativeFiducialNode = None
self.dgNegativeFiducialNodeObservers = []
self.ignoreFiducialNodeAddEvent = False
self.progressBar = None
self.tmpdir = None
self.timer = None
self.scribblesMode = None
self.multi_label = False
def setup(self):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.setup(self)
# Load widget from .ui file (created by Qt Designer).
# Additional widgets can be instantiated manually and added to self.layout.
uiWidget = slicer.util.loadUI(self.resourcePath("UI/MONAILabel.ui"))
self.layout.addWidget(uiWidget)
self.ui = slicer.util.childWidgetVariables(uiWidget)
# Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's
# "mrmlSceneChanged(vtkMRMLScene*)" signal in is connected to each MRML widget's.
# "setMRMLScene(vtkMRMLScene*)" slot.
uiWidget.setMRMLScene(slicer.mrmlScene)
# These connections ensure that we update parameter node when scene is closed
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose)
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.onSceneEndClose)
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.NodeAddedEvent, self.onSceneEndImport)
# Create logic class. Logic implements all computations that should be possible to run
# in batch mode, without a graphical user interface.
self.tmpdir = slicer.util.tempDirectory("slicer-monai-label")
self.logic = MONAILabelLogic(self.tmpdir)
# Set icons and tune widget properties
self.ui.serverComboBox.lineEdit().setPlaceholderText("enter server address or leave empty to use default")
self.ui.fetchServerInfoButton.setIcon(self.icon("refresh-icon.png"))
self.ui.segmentationButton.setIcon(self.icon("segment.png"))
self.ui.nextSampleButton.setIcon(self.icon("segment.png"))
self.ui.saveLabelButton.setIcon(self.icon("save.png"))
self.ui.trainingButton.setIcon(self.icon("training.png"))
self.ui.stopTrainingButton.setIcon(self.icon("stop.png"))
self.ui.uploadImageButton.setIcon(self.icon("upload.svg"))
self.ui.importLabelButton.setIcon(self.icon("download.png"))
self.ui.dgPositiveFiducialPlacementWidget.setMRMLScene(slicer.mrmlScene)
self.ui.dgPositiveFiducialPlacementWidget.placeButton().toolTip = "Select +ve points"
self.ui.dgPositiveFiducialPlacementWidget.buttonsVisible = False
self.ui.dgPositiveFiducialPlacementWidget.placeButton().show()
self.ui.dgPositiveFiducialPlacementWidget.deleteButton().show()
self.ui.dgNegativeFiducialPlacementWidget.setMRMLScene(slicer.mrmlScene)
self.ui.dgNegativeFiducialPlacementWidget.placeButton().toolTip = "Select -ve points"
self.ui.dgNegativeFiducialPlacementWidget.buttonsVisible = False
self.ui.dgNegativeFiducialPlacementWidget.placeButton().show()
self.ui.dgNegativeFiducialPlacementWidget.deleteButton().show()
self.ui.dgUpdateButton.setIcon(self.icon("segment.png"))
# Connections
self.ui.fetchServerInfoButton.connect("clicked(bool)", self.onClickFetchInfo)
self.ui.serverComboBox.connect("currentIndexChanged(int)", self.onClickFetchInfo)
self.ui.segmentationModelSelector.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI)
self.ui.segmentationButton.connect("clicked(bool)", self.onClickSegmentation)
self.ui.deepgrowModelSelector.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI)
self.ui.nextSampleButton.connect("clicked(bool)", self.onNextSampleButton)
self.ui.trainingButton.connect("clicked(bool)", self.onTraining)
self.ui.stopTrainingButton.connect("clicked(bool)", self.onStopTraining)
self.ui.saveLabelButton.connect("clicked(bool)", self.onSaveLabel)
self.ui.uploadImageButton.connect("clicked(bool)", self.onUploadImage)
self.ui.importLabelButton.connect("clicked(bool)", self.onImportLabel)
self.ui.labelComboBox.connect("currentIndexChanged(int)", self.onSelectLabel)
self.ui.dgUpdateButton.connect("clicked(bool)", self.onUpdateDeepgrow)
self.ui.dgUpdateCheckBox.setStyleSheet("padding-left: 10px;")
# Scribbles
# brush and eraser icon from: https://tablericons.com/
self.ui.scribblesMethodSelector.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI)
self.ui.paintScribblesButton.setIcon(self.icon("paint.png"))
self.ui.paintScribblesButton.setToolTip("Paint scribbles for selected scribble layer")
self.ui.eraseScribblesButton.setIcon(self.icon("eraser.png"))
self.ui.eraseScribblesButton.setToolTip("Erase scribbles for selected scribble layer")
self.ui.updateScribblesButton.setIcon(self.icon("segment.png"))
self.ui.updateScribblesButton.setToolTip(
"Update label by sending scribbles to server to apply selected post processing method"
)
self.ui.brushSizeSlider.connect("valueChanged(double)", self.updateBrushSize)
self.ui.brushSizeSlider.setToolTip("Change brush size for scribbles tool")
self.ui.brush3dCheckbox.stateChanged.connect(self.on3dBrushCheckbox)
self.ui.brush3dCheckbox.setToolTip("Use 3D brush to paint/erase in multiple slices in 3D")
self.ui.updateScribblesButton.clicked.connect(self.onUpdateScribbles)
self.ui.paintScribblesButton.clicked.connect(self.onPaintScribbles)
self.ui.eraseScribblesButton.clicked.connect(self.onEraseScribbles)
self.ui.scribblesLabelSelector.connect("currentIndexChanged(int)", self.onSelectScribblesLabel)
# creating editable combo box
self.ui.scribblesLabelSelector.addItem(self.icon("fg_green.png"), "Foreground")
self.ui.scribblesLabelSelector.addItem(self.icon("bg_red.png"), "Background")
self.ui.scribblesLabelSelector.setCurrentIndex(0)
# start with scribbles section disabled
self.ui.scribblesCollapsibleButton.setEnabled(False)
self.ui.scribblesCollapsibleButton.collapsed = True
# embedded segment editor
self.ui.embeddedSegmentEditorWidget.setMRMLScene(slicer.mrmlScene)
self.ui.embeddedSegmentEditorWidget.setSegmentationNodeSelectorVisible(False)
self.ui.embeddedSegmentEditorWidget.setMasterVolumeNodeSelectorVisible(False)
self.initializeParameterNode()
self.updateServerUrlGUIFromSettings()
# self.onClickFetchInfo()
if slicer.util.settingsValue("MONAILabel/askForUserName", False, converter=slicer.util.toBool):
text = qt.QInputDialog().getText(
self.parent,
"User Name",
"Please enter your name:",
qt.QLineEdit.Normal,
slicer.util.settingsValue("MONAILabel/clientId", None),
)
if text:
settings = qt.QSettings()
settings.setValue("MONAILabel/clientId", text)
def cleanup(self):
self.removeObservers()
shutil.rmtree(self.tmpdir, ignore_errors=True)
def enter(self):
self.initializeParameterNode()
if self._segmentNode:
self.updateGUIFromParameterNode()
def exit(self):
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
def onSceneStartClose(self, caller, event):
self.state = {
"SegmentationModel": self.ui.segmentationModelSelector.currentText,
"DeepgrowModel": self.ui.deepgrowModelSelector.currentText,
"ScribblesMethod": self.ui.scribblesMethodSelector.currentText,
"CurrentStrategy": self.ui.strategyBox.currentText,
"CurrentTrainer": self.ui.trainerBox.currentText,
}
self._volumeNode = None
self._segmentNode = None
self._volumeNodes.clear()
self.setParameterNode(None)
self.current_sample = None
self.samples.clear()
self.resetFiducial(
self.ui.dgPositiveFiducialPlacementWidget, self.dgPositiveFiducialNode, self.dgPositiveFiducialNodeObservers
)
self.dgPositiveFiducialNode = None
self.resetFiducial(
self.ui.dgNegativeFiducialPlacementWidget, self.dgNegativeFiducialNode, self.dgNegativeFiducialNodeObservers
)
self.dgNegativeFiducialNode = None
self.onClearScribbles()
def resetFiducial(self, fiducialWidget, fiducialNode, fiducialNodeObservers):
if fiducialWidget.placeModeEnabled:
fiducialWidget.setPlaceModeEnabled(False)
if fiducialNode:
slicer.mrmlScene.RemoveNode(fiducialNode)
self.removeFiducialNodeObservers(fiducialNode, fiducialNodeObservers)
def onSceneEndClose(self, caller, event):
if self.parent.isEntered:
self.initializeParameterNode()
def onSceneEndImport(self, caller, event):
if not self._volumeNode:
self.updateGUIFromParameterNode()
def initializeParameterNode(self):
self.setParameterNode(self.logic.getParameterNode())
# Select default input nodes if nothing is selected yet to save a few clicks for the user
if not self._parameterNode.GetNodeReference("InputVolume"):
firstVolumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
if firstVolumeNode:
self._parameterNode.SetNodeReferenceID("InputVolume", firstVolumeNode.GetID())
def setParameterNode(self, inputParameterNode):
if inputParameterNode:
self.logic.setDefaultParameters(inputParameterNode)
if self._parameterNode is not None:
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
self._parameterNode = inputParameterNode
if self._parameterNode is not None:
self.addObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
# Initial GUI update
self.updateGUIFromParameterNode()
def monitorTraining(self):
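# Polled by the QTimer started in updateGUIFromParameterNode: while training is RUNNING,
# refresh the training progress bar and the accuracy (Dice) bar from the server's
# train_stats; once training finishes, stop the timer, re-enable the training buttons
# and refresh the server info.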
status = self.isTrainingRunning(check_only=False)
if status and status.get("status") == "RUNNING":
info = self.logic.info()
train_stats = info.get("train_stats")
if not train_stats:
return
train_stats = next(iter(train_stats.values())) if train_stats else train_stats
current = 0 if train_stats.get("total_time") else train_stats.get("epoch", 1)
total = train_stats.get("total_epochs", 1)
percent = max(1, 100 * current / total)
if self.ui.trainingProgressBar.value != percent:
self.ui.trainingProgressBar.setValue(percent)
self.ui.trainingProgressBar.setToolTip(f"{current}/{total} epoch is completed")
dice = train_stats.get("best_metric", 0)
self.updateAccuracyBar(dice)
return
print("Training completed")
self.ui.trainingProgressBar.setValue(100)
self.timer.stop()
self.timer = None
self.ui.trainingProgressBar.setToolTip(f"Training: {status.get("status", "DONE")}")
self.ui.trainingButton.setEnabled(True)
self.ui.stopTrainingButton.setEnabled(False)
self.fetchInfo()
def updateGUIFromParameterNode(self, caller=None, event=None):
if self._parameterNode is None or self._updatingGUIFromParameterNode:
return
# Make sure GUI changes do not call updateParameterNodeFromGUI (it could cause infinite loop)
self._updatingGUIFromParameterNode = True
file_ext = slicer.util.settingsValue("MONAILabel/fileExtension", self.file_ext)
self.file_ext = file_ext if file_ext else self.file_ext
# Update node selectors and sliders
self.ui.inputSelector.clear()
for v in self._volumeNodes:
self.ui.inputSelector.addItem(v.GetName())
self.ui.inputSelector.setToolTip(self.current_sample.get("name", "") if self.current_sample else "")
if self._volumeNode:
self.ui.inputSelector.setCurrentIndex(self.ui.inputSelector.findText(self._volumeNode.GetName()))
self.ui.inputSelector.setEnabled(False) # Allow only one active scene
self.ui.uploadImageButton.setEnabled(False)
if self.info and slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode") and self._volumeNode is None:
self._volumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
self.initSample({"id": self._volumeNode.GetName(), "session": True}, autosegment=False)
self.ui.inputSelector.setEnabled(False)
self.ui.uploadImageButton.setEnabled(self.current_sample and self.current_sample.get("session"))
self.updateSelector(self.ui.segmentationModelSelector, ["segmentation"], "SegmentationModel", 0)
self.updateSelector(self.ui.deepgrowModelSelector, ["deepgrow", "deepedit"], "DeepgrowModel", 0)
self.updateSelector(self.ui.scribblesMethodSelector, ["scribbles"], "ScribblesMethod", 0)
if self.models and [k for k, v in self.models.items() if v["type"] == "segmentation"]:
self.ui.segmentationCollapsibleButton.collapsed = False
if self.models and [k for k, v in self.models.items() if v["type"] in ("deepgrow", "deepedit")]:
self.ui.deepgrowCollapsibleButton.collapsed = False
if self.models and [k for k, v in self.models.items() if v["type"] == "scribbles"]:
self.ui.scribblesCollapsibleButton.collapsed = False
self.ui.labelComboBox.clear()
if self._segmentNode:
segmentation = self._segmentNode.GetSegmentation()
totalSegments = segmentation.GetNumberOfSegments()
segmentIds = [segmentation.GetNthSegmentID(i) for i in range(totalSegments)]
for idx, segmentId in enumerate(segmentIds):
segment = segmentation.GetSegment(segmentId)
label = segment.GetName()
if label in ["foreground_scribbles", "background_scribbles"]:
continue
self.ui.labelComboBox.addItem(label)
else:
for label in self.info.get("labels", {}):
self.ui.labelComboBox.addItem(label)
currentLabel = self._parameterNode.GetParameter("CurrentLabel")
idx = self.ui.labelComboBox.findText(currentLabel) if currentLabel else 0
idx = 0 if idx < 0 < self.ui.labelComboBox.count else idx
self.ui.labelComboBox.setCurrentIndex(idx)
self.ui.appComboBox.clear()
self.ui.appComboBox.addItem(self.info.get("name", ""))
datastore_stats = self.info.get("datastore", {})
current = datastore_stats.get("completed", 0)
total = datastore_stats.get("total", 0)
self.ui.activeLearningProgressBar.setValue(current / max(total, 1) * 100)
self.ui.activeLearningProgressBar.setToolTip(f"{current}/{total} samples are labeled")
train_stats = self.info.get("train_stats", {})
train_stats = next(iter(train_stats.values())) if train_stats else train_stats
dice = train_stats.get("best_metric", 0)
self.updateAccuracyBar(dice)
self.ui.strategyBox.clear()
for strategy in self.info.get("strategies", {}):
self.ui.strategyBox.addItem(strategy)
currentStrategy = self._parameterNode.GetParameter("CurrentStrategy")
currentStrategy = currentStrategy if currentStrategy else self.state["CurrentStrategy"]
self.ui.strategyBox.setCurrentIndex(self.ui.strategyBox.findText(currentStrategy) if currentStrategy else 0)
self.ui.trainerBox.clear()
trainers = self.info.get("trainers", {})
if trainers:
self.ui.trainerBox.addItem("ALL")
for t in trainers:
self.ui.trainerBox.addItem(t)
currentTrainer = self._parameterNode.GetParameter("CurrentTrainer")
currentTrainer = currentTrainer if currentTrainer else self.state["CurrentTrainer"]
self.ui.trainerBox.setCurrentIndex(self.ui.trainerBox.findText(currentTrainer) if currentTrainer else 0)
developer_mode = slicer.util.settingsValue("MONAILabel/developerMode", True, converter=slicer.util.toBool)
self.ui.optionsCollapsibleButton.setVisible(developer_mode)
# Enable/Disable
self.ui.nextSampleButton.setEnabled(self.ui.strategyBox.count)
is_training_running = True if self.info and self.isTrainingRunning() else False
self.ui.trainingButton.setEnabled(self.info and not is_training_running and current)
self.ui.stopTrainingButton.setEnabled(is_training_running)
if is_training_running and self.timer is None:
self.timer = qt.QTimer()
self.timer.setInterval(5000)
self.timer.connect("timeout()", self.monitorTraining)
self.timer.start()
self.ui.segmentationButton.setEnabled(
self.ui.segmentationModelSelector.currentText and self._volumeNode is not None
)
self.ui.saveLabelButton.setEnabled(self._segmentNode is not None)
self.ui.importLabelButton.setEnabled(self._segmentNode is not None)
# Create empty markup fiducial node for deep grow +ve and -ve
if self._segmentNode:
if not self.dgPositiveFiducialNode:
self.dgPositiveFiducialNode, self.dgPositiveFiducialNodeObservers = self.createFiducialNode(
"P", self.onDeepGrowFiducialNodeModified, [0.5, 1, 0.5]
)
self.ui.dgPositiveFiducialPlacementWidget.setCurrentNode(self.dgPositiveFiducialNode)
self.ui.dgPositiveFiducialPlacementWidget.setPlaceModeEnabled(False)
if not self.dgNegativeFiducialNode:
self.dgNegativeFiducialNode, self.dgNegativeFiducialNodeObservers = self.createFiducialNode(
"N", self.onDeepGrowFiducialNodeModified, [0.5, 0.5, 1]
)
self.ui.dgNegativeFiducialPlacementWidget.setCurrentNode(self.dgNegativeFiducialNode)
self.ui.dgNegativeFiducialPlacementWidget.setPlaceModeEnabled(False)
self.ui.scribblesCollapsibleButton.setEnabled(self.ui.scribblesMethodSelector.count)
self.ui.scribblesCollapsibleButton.collapsed = False
self.ui.dgPositiveFiducialPlacementWidget.setEnabled(self.ui.deepgrowModelSelector.currentText)
self.ui.dgNegativeFiducialPlacementWidget.setEnabled(self.ui.deepgrowModelSelector.currentText)
self.multi_label = "background" in self.info.get("labels", [])
if self.multi_label:
self.ui.dgLabelBackground.hide()
self.ui.dgNegativeFiducialPlacementWidget.hide()
self.ui.freezeUpdateCheckBox.show()
self.ui.dgLabelForeground.setText("Landmarks:")
else:
self.ui.dgNegativeFiducialPlacementWidget.show()
self.ui.freezeUpdateCheckBox.hide()
self.ui.dgLabelForeground.setText("Foreground:")
self.ui.dgUpdateCheckBox.setEnabled(self.ui.deepgrowModelSelector.currentText and self._segmentNode)
self.ui.dgUpdateButton.setEnabled(self.ui.deepgrowModelSelector.currentText and self._segmentNode)
self.ui.embeddedSegmentEditorWidget.setMRMLSegmentEditorNode(
slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLSegmentEditorNode")
)
# All the GUI updates are done
self._updatingGUIFromParameterNode = False
def updateParameterNodeFromGUI(self, caller=None, event=None):
if self._parameterNode is None or self._updatingGUIFromParameterNode:
return
wasModified = self._parameterNode.StartModify() # Modify all properties in a single batch
segmentationModelIndex = self.ui.segmentationModelSelector.currentIndex
if segmentationModelIndex >= 0:
segmentationModel = self.ui.segmentationModelSelector.itemText(segmentationModelIndex)
self._parameterNode.SetParameter("SegmentationModel", segmentationModel)
deepgrowModelIndex = self.ui.deepgrowModelSelector.currentIndex
if deepgrowModelIndex >= 0:
deepgrowModel = self.ui.deepgrowModelSelector.itemText(deepgrowModelIndex)
self._parameterNode.SetParameter("DeepgrowModel", deepgrowModel)
scribblesMethodIndex = self.ui.scribblesMethodSelector.currentIndex
if scribblesMethodIndex >= 0:
scribblesMethod = self.ui.scribblesMethodSelector.itemText(scribblesMethodIndex)
self._parameterNode.SetParameter("ScribblesMethod", scribblesMethod)
currentLabelIndex = self.ui.labelComboBox.currentIndex
if currentLabelIndex >= 0:
currentLabel = self.ui.labelComboBox.itemText(currentLabelIndex)
self._parameterNode.SetParameter("CurrentLabel", currentLabel)
currentStrategyIndex = self.ui.strategyBox.currentIndex
if currentStrategyIndex >= 0:
currentStrategy = self.ui.strategyBox.itemText(currentStrategyIndex)
self._parameterNode.SetParameter("CurrentStrategy", currentStrategy)
currentTrainerIndex = self.ui.trainerBox.currentIndex
if currentTrainerIndex >= 0:
currentTrainer = self.ui.trainerBox.itemText(currentTrainerIndex)
self._parameterNode.SetParameter("CurrentTrainer", currentTrainer)
self._parameterNode.EndModify(wasModified)
def updateSelector(self, selector, model_types, param, defaultIndex=0):
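# Repopulate a model combo box with all models whose "type" is in model_types, then restore
# the previous selection from the parameter node (or the saved state), blocking signals so
# the refresh does not trigger updateParameterNodeFromGUI.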
wasSelectorBlocked = selector.blockSignals(True)
selector.clear()
for model_name, model in self.models.items():
if model["type"] in model_types:
selector.addItem(model_name)
selector.setItemData(selector.count - 1, model["description"], qt.Qt.ToolTipRole)
model = self._parameterNode.GetParameter(param)
model = model if model else self.state.get(param, "")
modelIndex = selector.findText(model)
modelIndex = defaultIndex if modelIndex < 0 < selector.count else modelIndex
selector.setCurrentIndex(modelIndex)
try:
modelInfo = self.models[model]
selector.setToolTip(modelInfo["description"])
except:
selector.setToolTip("")
selector.blockSignals(wasSelectorBlocked)
def updateConfigTable(self):
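# Flatten the server info (models/trainers/strategies/scoring) into the four-column config
# table (section, name, key, value); dict/list values are rendered as combo boxes, booleans
# as checkboxes and everything else as editable text items.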
table = self.ui.configTable
table.clear()
headers = ["section", "name", "key", "value"]
table.setColumnCount(len(headers))
table.setHorizontalHeaderLabels(headers)
table.setColumnWidth(0, 50)
config = copy.deepcopy(self.info)
infer = config.get("models", {})
train = config.get("trainers", {})
activelearning = config.get("strategies", {})
scoring = config.get("scoring", {})
row_count = 0
config = {"infer": infer, "train": train, "activelearning": activelearning, "scoring": scoring}
for c in config.values():
row_count += sum([len(c[k].get("config", {})) for k in c.keys()])
# print(f"Total rows: {row_count}")
table.setRowCount(row_count)
n = 0
for section in config:
if not config[section]:
continue
c_section = config[section]
l_section = sum([len(c_section[k].get("config", {})) for k in c_section.keys()])
if not l_section:
continue
# print(f"{n} => l_section = {l_section}")
if l_section:
table.setSpan(n, 0, l_section, 1)
for name in c_section:
c_name = c_section[name]
l_name = len(c_name.get("config", {}))
if not l_name:
continue
# print(f"{n} => l_name = {l_name}")
if l_name:
table.setSpan(n, 1, l_name, 1)
for key, val in c_name.get("config", {}).items():
item = qt.QTableWidgetItem(section)
item.setFlags(item.flags() & ~qt.Qt.ItemIsEditable)
table.setItem(n, 0, item)
item = qt.QTableWidgetItem(name)
table.setItem(n, 1, item)
item.setFlags(item.flags() & ~qt.Qt.ItemIsEditable)
item = qt.QTableWidgetItem(key)
table.setItem(n, 2, item)
item.setFlags(item.flags() & ~qt.Qt.ItemIsEditable)
if isinstance(val, dict) or isinstance(val, list):
combo = qt.QComboBox()
for m, v in enumerate(val):
combo.addItem(v)
combo.setCurrentIndex(0)
table.setCellWidget(n, 3, combo)
elif isinstance(val, bool):
checkbox = qt.QCheckBox()
checkbox.setChecked(val)
table.setCellWidget(n, 3, checkbox)
else:
table.setItem(n, 3, qt.QTableWidgetItem(str(val) if val else ""))
# print(f"{n} => {section} => {name} => {key} => {val}")
n = n + 1
def updateAccuracyBar(self, dice):
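# Show the best Dice metric as a percentage and style the progress bar chunk with a
# red-to-green gradient whose stops depend on the achieved score.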
self.ui.accuracyProgressBar.setValue(dice * 100)
css = ["stop: 0 red"]
if dice > 0.5:
css.append(f"stop: {0.5 / dice} orange")
if dice > 0.6:
css.append(f"stop: {0.6 / dice} yellow")
if dice > 0.7:
css.append(f"stop: {0.7 / dice} lightgreen")
if dice > 0.8:
css.append(f"stop: {0.8 / dice} green")
if dice > 0.9:
css.append(f"stop: {0.9 / dice} darkgreen")
self.ui.accuracyProgressBar.setStyleSheet(
"QProgressBar {text-align: center;} "
"QProgressBar::chunk {background-color: "
"qlineargradient(x0: 0, x2: 1, " + ",".join(css) + ")}"
)
self.ui.accuracyProgressBar.setToolTip(f"Accuracy: {dice:.4f}")
def getParamsFromConfig(self, filter, filter2=None):
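# Read the (possibly user-edited) values back out of the config table, cast numeric values
# back to the type advertised by the server info (int/float), and return the subsection
# selected by filter ("infer"/"train"/"activelearning"/"scoring") and the optional name.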
mapping = {"infer": "models", "train": "trainers", "activelearning": "strategies", "scoring": "scoring"}
config = {}
for row in range(self.ui.configTable.rowCount):
section = str(self.ui.configTable.item(row, 0).text())
name = str(self.ui.configTable.item(row, 1).text())
key = str(self.ui.configTable.item(row, 2).text())
value = self.ui.configTable.item(row, 3)
if value is None:
value = self.ui.configTable.cellWidget(row, 3)
value = value.checked if isinstance(value, qt.QCheckBox) else value.currentText
else:
value = str(value.text())
v = self.info.get(mapping.get(section, ""), {}).get(name, {}).get("config", {}).get(key, {})
if isinstance(v, int):
value = int(value) if value else 0
elif isinstance(v, float):
value = float(value) if value else 0.0
# print(f"{section} => {name} => {key} => {value}")
if config.get(section) is None:
config[section] = {}
if config[section].get(name) is None:
config[section][name] = {}
config[section][name][key] = value
# print(f"row: {row}, section: {section}, name: {name}, value: {value}, type: {type(v)}")
res = config.get(filter, {})
res = res.get(filter2, {}) if filter2 else res
return res
def onDeepGrowFiducialNodeModified(self, observer, eventid):
logging.debug("Deepgrow Point Event!!")
if self.ignoreFiducialNodeAddEvent:
return
markupsNode = observer
movingMarkupIndex = markupsNode.GetDisplayNode().GetActiveControlPoint()
logging.debug("Markup point added; point ID = {}".format(movingMarkupIndex))
current_point = self.getFiducialPointXYZ(markupsNode, movingMarkupIndex)
if not self.ui.dgUpdateCheckBox.checked:
self.onClickDeepgrow(current_point, skip_infer=True)
return
self.onClickDeepgrow(current_point)
self.ignoreFiducialNodeAddEvent = True
self.onEditFiducialPoints(self.dgPositiveFiducialNode, "MONAILabel.ForegroundPoints")
self.onEditFiducialPoints(self.dgNegativeFiducialNode, "MONAILabel.BackgroundPoints")
self.ignoreFiducialNodeAddEvent = False
def getFiducialPointsXYZ(self, fiducialNode, name):
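# Convert every fiducial point of the node from RAS coordinates into (rounded) IJK voxel
# indices of the current volume; these are the points sent to the server as
# foreground/background clicks for deepgrow/deepedit.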
v = self._volumeNode
RasToIjkMatrix = vtk.vtkMatrix4x4()
v.GetRASToIJKMatrix(RasToIjkMatrix)
point_set = []
n = fiducialNode.GetNumberOfFiducials()
for i in range(n):
coord = [0.0, 0.0, 0.0]
fiducialNode.GetNthFiducialPosition(i, coord)
world = [0, 0, 0, 0]
fiducialNode.GetNthFiducialWorldCoordinates(i, world)
p_Ras = [coord[0], coord[1], coord[2], 1.0]
p_Ijk = RasToIjkMatrix.MultiplyDoublePoint(p_Ras)
p_Ijk = [round(i) for i in p_Ijk]
logging.debug("RAS: {}; WORLD: {}; IJK: {}".format(coord, world, p_Ijk))
point_set.append(p_Ijk[0:3])
logging.info("{} => Current Fiducials-Points: {}".format(name, point_set))
return point_set
def getFiducialPointXYZ(self, fiducialNode, index):
v = self._volumeNode
RasToIjkMatrix = vtk.vtkMatrix4x4()
v.GetRASToIJKMatrix(RasToIjkMatrix)
coord = [0.0, 0.0, 0.0]
fiducialNode.GetNthFiducialPosition(index, coord)
world = [0, 0, 0, 0]
fiducialNode.GetNthFiducialWorldCoordinates(index, world)
p_Ras = [coord[0], coord[1], coord[2], 1.0]
p_Ijk = RasToIjkMatrix.MultiplyDoublePoint(p_Ras)
p_Ijk = [round(i) for i in p_Ijk]
logging.debug("RAS: {}; WORLD: {}; IJK: {}".format(coord, world, p_Ijk))
return p_Ijk[0:3]
def onEditFiducialPoints(self, fiducialNode, tagName):
if fiducialNode is None:
return
fiducialNode.RemoveAllMarkups()
segmentId, segment = self.currentSegment()
if segment and segmentId:
v = self._volumeNode
IjkToRasMatrix = vtk.vtkMatrix4x4()
v.GetIJKToRASMatrix(IjkToRasMatrix)
fPosStr = vtk.mutable("")
segment.GetTag(tagName, fPosStr)
pointset = str(fPosStr)
logging.debug("{} => {} Fiducial points are: {}".format(segmentId, segment.GetName(), pointset))
if fPosStr is not None and len(pointset) > 0:
points = json.loads(pointset)
for p in points:
p_Ijk = [p[0], p[1], p[2], 1.0]
p_Ras = IjkToRasMatrix.MultiplyDoublePoint(p_Ijk)
logging.debug("Add Fiducial: {} => {}".format(p_Ijk, p_Ras))
fiducialNode.AddFiducialFromArray(p_Ras[0:3])
def currentSegment(self):
segmentation = self._segmentNode.GetSegmentation()
segmentId = segmentation.GetSegmentIdBySegmentName(self.ui.labelComboBox.currentText)
segment = segmentation.GetSegment(segmentId)
logging.debug("Current SegmentID: {}; Segment: {}".format(segmentId, segment))
return segmentId, segment
def onSelectLabel(self, caller=None, event=None):
self.updateParameterNodeFromGUI(caller, event)
self.ignoreFiducialNodeAddEvent = True
self.onEditFiducialPoints(self.dgPositiveFiducialNode, "MONAILabel.ForegroundPoints")
self.onEditFiducialPoints(self.dgNegativeFiducialNode, "MONAILabel.BackgroundPoints")
self.ignoreFiducialNodeAddEvent = False
def icon(self, name="MONAILabel.png"):
# It should not be necessary to modify this method
iconPath = os.path.join(os.path.dirname(__file__), "Resources", "Icons", name)
if os.path.exists(iconPath):
return qt.QIcon(iconPath)
return qt.QIcon()
def updateServerSettings(self):
self.logic.setServer(self.serverUrl())
self.logic.setClientId(slicer.util.settingsValue("MONAILabel/clientId", "user-xyz"))
self.saveServerUrl()
def serverUrl(self):
serverUrl = self.ui.serverComboBox.currentText
if not serverUrl:
serverUrl = "http://127.0.0.1:8000"
return serverUrl.rstrip("/")
def saveServerUrl(self):
self.updateParameterNodeFromGUI()
# Save selected server URL
settings = qt.QSettings()
serverUrl = self.ui.serverComboBox.currentText
settings.setValue("MONAILabel/serverUrl", serverUrl)
# Save current server URL to the top of history
serverUrlHistory = settings.value("MONAILabel/serverUrlHistory")
if serverUrlHistory:
serverUrlHistory = serverUrlHistory.split(";")
else:
serverUrlHistory = []
try:
serverUrlHistory.remove(serverUrl)
except ValueError:
pass
serverUrlHistory.insert(0, serverUrl)
serverUrlHistory = serverUrlHistory[:10] # keep up to first 10 elements
settings.setValue("MONAILabel/serverUrlHistory", ";".join(serverUrlHistory))
self.updateServerUrlGUIFromSettings()
def onClickFetchInfo(self):
self.fetchInfo()
self.updateConfigTable()
def fetchInfo(self, showInfo=False):
if not self.logic:
return
start = time.time()
try:
self.updateServerSettings()
info = self.logic.info()
self.info = info
if self.info.get("config"):
slicer.util.errorDisplay(
"Please upgrade the monai server to latest version",
detailedText=traceback.format_exc(),
)
return
except:
slicer.util.errorDisplay(
"Failed to fetch models from remote server. "
"Make sure server address is correct and <server_uri>/info/ "
"is accessible in browser",
detailedText=traceback.format_exc(),
)
return
self.models.clear()
self.config = info.get("config", {})
model_count = {}
models = info.get("models", {})
for k, v in models.items():
model_type = v.get("type", "segmentation")
model_count[model_type] = model_count.get(model_type, 0) + 1
logging.debug("{} = {}".format(k, model_type))
self.models[k] = v
self.updateGUIFromParameterNode()
msg = ""
msg += "-----------------------------------------------------\t\n"
msg += "Total Models Available: \t" + str(len(models)) + "\t\n"
msg += "-----------------------------------------------------\t\n"
for model_type in model_count.keys():
msg += model_type.capitalize() + " Models: \t" + str(model_count[model_type]) + "\t\n"
msg += "-----------------------------------------------------\t\n"
if showInfo:
qt.QMessageBox.information(slicer.util.mainWindow(), "MONAI Label", msg)
logging.info(msg)
logging.info("Time consumed by fetch info: {0:3.1f}".format(time.time() - start))
def setProgressBarLabelText(self, label):
if not self.progressBar:
self.progressBar = slicer.util.createProgressDialog(windowTitle="Wait...", maximum=100)
self.progressBar.labelText = label
def reportProgress(self, progressPercentage):
if not self.progressBar:
self.progressBar = slicer.util.createProgressDialog(windowTitle="Wait...", maximum=100)
self.progressBar.show()
self.progressBar.activateWindow()
self.progressBar.setValue(progressPercentage)
slicer.app.processEvents()
def onTraining(self):
start = time.time()
status = None
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateServerSettings()
model = self.ui.trainerBox.currentText
model = model if model and model != "ALL" else None
params = self.getParamsFromConfig("train", model)
status = self.logic.train_start(model, params)
self.ui.trainingProgressBar.setValue(1)
self.ui.trainingProgressBar.setToolTip("Training: STARTED")
time.sleep(1)
self.updateGUIFromParameterNode()
except:
slicer.util.errorDisplay(
"Failed to run training in MONAI Label Server", detailedText=traceback.format_exc()
)
finally:
qt.QApplication.restoreOverrideCursor()
if status:
msg = "ID: {}\nStatus: {}\nStart Time: {}\n".format(
status.get("id"),
status.get("status"),
status.get("start_ts"),
)
# slicer.util.infoDisplay(msg, detailedText=json.dumps(status, indent=2))
logging.info(msg)
logging.info("Time consumed by training: {0:3.1f}".format(time.time() - start))
def onStopTraining(self):
start = time.time()
status = None
if not slicer.util.confirmOkCancelDisplay(
"This will kill/stop current Training task. Are you sure to continue?"
):
return
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateServerSettings()
status = self.logic.train_stop()
except:
slicer.util.errorDisplay("Failed to stop Training Task", detailedText=traceback.format_exc())
finally:
qt.QApplication.restoreOverrideCursor()
if status:
msg = "Status: {}\nStart Time: {}\nEnd Time: {}\nResult: {}".format(
status.get("status"),
status.get("start_ts"),
status.get("end_ts"),
status.get("result", status.get("details", [])[-1]),
)
# slicer.util.infoDisplay(msg, detailedText=json.dumps(status, indent=2))
logging.info(msg)
self.updateGUIFromParameterNode()
logging.info("Time consumed by stop training: {0:3.1f}".format(time.time() - start))
def isTrainingRunning(self, check_only=True):
if not self.logic:
return False
self.updateServerSettings()
return self.logic.train_status(check_only)
def onNextSampleButton(self):
if not self.logic:
return
if self._volumeNode or len(slicer.util.getNodesByClass("vtkMRMLScalarVolumeNode")):
if not slicer.util.confirmOkCancelDisplay(
"This will close current scene. Please make sure you have saved your current work.\n"
"Are you sure to continue?"
):
return
self.onClearScribbles()
slicer.mrmlScene.Clear(0)
start = time.time()
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateServerSettings()
strategy = self.ui.strategyBox.currentText
if not strategy:
slicer.util.errorDisplay("No Strategy Found/Selected\t")
return
sample = self.logic.next_sample(strategy, self.getParamsFromConfig("activelearning", strategy))
logging.debug(sample)
if not sample.get("id"):
slicer.util.warningDisplay(
"Unlabled Samples/Images Not Found at server. Instead you can load your own image."
)
return
if self.samples.get(sample["id"]) is not None:
self.current_sample = self.samples[sample["id"]]
name = self.current_sample["VolumeNodeName"]
index = self.ui.inputSelector.findText(name)
self.ui.inputSelector.setCurrentIndex(index)
return
logging.info(sample)
image_id = sample["id"]
image_file = sample.get("path")
image_name = sample.get("name", image_id)
node_name = sample.get("PatientID", sample.get("name", image_id))[-20:]
checksum = sample.get("checksum")
local_exists = image_file and os.path.exists(image_file)
logging.info(f"Check if file exists/shared locally: {image_file} => {local_exists}")
if local_exists:
self._volumeNode = slicer.util.loadVolume(image_file)
self._volumeNode.SetName(node_name)
else:
download_uri = f"{self.serverUrl()}/datastore/image?image={quote_plus(image_id)}"
logging.info(download_uri)
sampleDataLogic = SampleData.SampleDataLogic()
self._volumeNode = sampleDataLogic.downloadFromURL(
nodeNames=node_name, fileNames=image_name, uris=download_uri, checksums=checksum
)[0]
self.initSample(sample)
except:
slicer.util.errorDisplay(
"Failed to fetch Sample from MONAI Label Server", detailedText=traceback.format_exc()
)
finally:
qt.QApplication.restoreOverrideCursor()
self.updateGUIFromParameterNode()
logging.info("Time consumed by next_sample: {0:3.1f}".format(time.time() - start))
def initSample(self, sample, autosegment=True):
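# Register the fetched sample and its volume node, create empty segments for all labels known
# to the server, honour the "allow overlapping segments" setting, and optionally auto-run the
# first segmentation model that provides one of those labels.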
sample["VolumeNodeName"] = self._volumeNode.GetName()
self.current_sample = sample
self.samples[sample["id"]] = sample
self._volumeNodes.append(self._volumeNode)
# Create Empty Segments for all labels for this node
self.createSegmentNode()
segmentEditorWidget = slicer.modules.segmenteditor.widgetRepresentation().self().editor
segmentEditorWidget.setSegmentationNode(self._segmentNode)
segmentEditorWidget.setMasterVolumeNode(self._volumeNode)
# check if user allows overlapping segments
if slicer.util.settingsValue("MONAILabel/allowOverlappingSegments", False, converter=slicer.util.toBool):
# set segment editor to allow overlaps
slicer.util.getNodesByClass("vtkMRMLSegmentEditorNode")[0].SetOverwriteMode(2)
if self.info.get("labels"):
self.updateSegmentationMask(None, self.info.get("labels"))
# Check if user wants to run auto-segmentation on new sample
if autosegment and slicer.util.settingsValue(
"MONAILabel/autoRunSegmentationOnNextSample", True, converter=slicer.util.toBool
):
for label in self.info.get("labels", []):
for name, model in self.models.items():
if label in model.get("labels", []):
qt.QApplication.restoreOverrideCursor()
self.ui.segmentationModelSelector.currentText = name
self.onClickSegmentation()
return
def getPermissionForImageDataUpload(self):
return slicer.util.confirmOkCancelDisplay(
"Master volume - without any additional patient information -"
" will be sent to remote data processing server: {0}.\n\n"
"Click 'OK' to proceed with the segmentation.\n"
"Click 'Cancel' to not upload any data and cancel segmentation.\n".format(self.serverUrl()),
dontShowAgainSettingsKey="MONAILabel/showImageDataSendWarning",
)
def onUploadImage(self, init_sample=True, session=False):
volumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
image_id = volumeNode.GetName()
if not self.getPermissionForImageDataUpload():
return False
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
in_file = tempfile.NamedTemporaryFile(suffix=self.file_ext, dir=self.tmpdir).name
self.reportProgress(5)
start = time.time()
slicer.util.saveNode(volumeNode, in_file)
logging.info("Saved Input Node into {0} in {1:3.1f}s".format(in_file, time.time() - start))
self.reportProgress(30)
if session:
self.current_sample["session_id"] = self.logic.create_session(in_file)["session_id"]
else:
self.logic.upload_image(in_file, image_id)
self.current_sample["session"] = False
self.reportProgress(100)
self._volumeNode = volumeNode
if init_sample:
self.initSample({"id": image_id}, autosegment=False)
qt.QApplication.restoreOverrideCursor()
self.updateGUIFromParameterNode()
return True
except:
self.reportProgress(100)
qt.QApplication.restoreOverrideCursor()
if session:
slicer.util.errorDisplay(
"Server Error:: Session creation Failed\nPlease upgrade to latest monailable version (> 0.2.0)",
detailedText=traceback.format_exc(),
)
else:
slicer.util.errorDisplay("Failed to upload volume to Server", detailedText=traceback.format_exc())
return False
def onImportLabel(self):
if not self.ui.labelPathLineEdit.currentPath or not os.path.exists(self.ui.labelPathLineEdit.currentPath):
slicer.util.warningDisplay("Label File not selected")
return
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateSegmentationMask(self.ui.labelPathLineEdit.currentPath, self.info["labels"])
qt.QApplication.restoreOverrideCursor()
except:
qt.QApplication.restoreOverrideCursor()
slicer.util.errorDisplay("Failed to import label", detailedText=traceback.format_exc())
def onSaveLabel(self):
start = time.time()
labelmapVolumeNode = None
result = None
self.onClearScribbles()
if self.current_sample.get("session"):
if not self.onUploadImage(init_sample=False):
return
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
segmentationNode = self._segmentNode
labelmapVolumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode")
slicer.modules.segmentations.logic().ExportVisibleSegmentsToLabelmapNode(
segmentationNode, labelmapVolumeNode, self._volumeNode
)
segmentation = segmentationNode.GetSegmentation()
totalSegments = segmentation.GetNumberOfSegments()
segmentIds = [segmentation.GetNthSegmentID(i) for i in range(totalSegments)]
label_info = []
for idx, segmentId in enumerate(segmentIds):
segment = segmentation.GetSegment(segmentId)
if segment.GetName() in ["foreground_scribbles", "background_scribbles"]:
logging.info(f"Removing segment {segmentId}: {segment.GetName()}")
segmentationNode.RemoveSegment(segmentId)
continue
label_info.append({"name": segment.GetName(), "idx": idx + 1})
# label_info.append({"color": segment.GetColor()})
label_in = tempfile.NamedTemporaryFile(suffix=self.file_ext, dir=self.tmpdir).name
self.reportProgress(5)
if (
slicer.util.settingsValue("MONAILabel/allowOverlappingSegments", True, converter=slicer.util.toBool)
and slicer.util.settingsValue("MONAILabel/fileExtension", self.file_ext) == ".seg.nrrd"
):
slicer.util.saveNode(segmentationNode, label_in)
else:
slicer.util.saveNode(labelmapVolumeNode, label_in)
self.reportProgress(30)
self.updateServerSettings()
result = self.logic.save_label(self.current_sample["id"], label_in, {"label_info": label_info})
self.fetchInfo()
if slicer.util.settingsValue("MONAILabel/autoUpdateModel", True, converter=slicer.util.toBool):
try:
if self.isTrainingRunning(check_only=True):
self.logic.train_stop()
except:
logging.info("Failed to stop training; or already stopped")
self.onTraining()
except:
slicer.util.errorDisplay("Failed to save Label to MONAI Label Server", detailedText=traceback.format_exc())
finally:
qt.QApplication.restoreOverrideCursor()
self.reportProgress(100)
if labelmapVolumeNode:
slicer.mrmlScene.RemoveNode(labelmapVolumeNode)
if result:
slicer.util.infoDisplay(
"Label-Mask saved into MONAI Label Server\t\t", detailedText=json.dumps(result, indent=2)
)
if slicer.util.settingsValue("MONAILabel/autoFetchNextSample", False, converter=slicer.util.toBool):
slicer.mrmlScene.Clear(0)
self.onNextSampleButton()
logging.info("Time consumed by save label: {0:3.1f}".format(time.time() - start))
def getSessionId(self):
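# For samples loaded locally (session mode) return a valid server session id, uploading the
# current volume to create a new session if none exists or the previous one is no longer
# valid on the server.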
session_id = None
if self.current_sample.get("session", False):
session_id = self.current_sample.get("session_id")
if not session_id or not self.logic.get_session(session_id):
self.onUploadImage(init_sample=False, session=True)
session_id = self.current_sample["session_id"]
return session_id
def onClickSegmentation(self):
if not self.current_sample:
return
start = time.time()
result_file = None
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateServerSettings()
model = self.ui.segmentationModelSelector.currentText
image_file = self.current_sample["id"]
params = self.getParamsFromConfig("infer", model)
result_file, params = self.logic.infer(model, image_file, params, session_id=self.getSessionId())
print(f"Result Params for Segmentation: {params}")
labels = (
params.get("label_names") if params and params.get("label_names") else self.models[model].get("labels")
)
if labels and isinstance(labels, dict):
labels = [k for k, _ in sorted(labels.items(), key=lambda item: item[1])]
self.updateSegmentationMask(result_file, labels)
except:
slicer.util.errorDisplay(
"Failed to run inference in MONAI Label Server", detailedText=traceback.format_exc()
)
finally:
qt.QApplication.restoreOverrideCursor()
if result_file and os.path.exists(result_file):
os.unlink(result_file)
self.updateGUIFromParameterNode()
logging.info("Time consumed by segmentation: {0:3.1f}".format(time.time() - start))
def onUpdateDeepgrow(self):
self.onClickDeepgrow(None)
def onClickDeepgrow(self, current_point, skip_infer=False):
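# Persist the foreground/background fiducial points as tags on the current segment, build the
# request parameters (per-label landmark points for multi-label deepedit, or
# foreground/background points - optionally restricted to the current slice - for deepgrow),
# run inference on the server and merge the returned labelmap into the segmentation.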
model = self.ui.deepgrowModelSelector.currentText
if not model:
slicer.util.warningDisplay("Please select a deepgrow model")
return
_, segment = self.currentSegment()
if not segment:
slicer.util.warningDisplay("Please add the required label to run deepgrow")
return
foreground_all = self.getFiducialPointsXYZ(self.dgPositiveFiducialNode, "foreground")
background_all = self.getFiducialPointsXYZ(self.dgNegativeFiducialNode, "background")
segment.SetTag("MONAILabel.ForegroundPoints", json.dumps(foreground_all))
segment.SetTag("MONAILabel.BackgroundPoints", json.dumps(background_all))
if skip_infer:
return
# use model info "deepgrow" to determine
deepgrow_3d = False if self.models[model].get("dimension", 3) == 2 else True
start = time.time()
label = segment.GetName()
operationDescription = "Run Deepgrow for segment: {}; model: {}; 3d {}".format(label, model, deepgrow_3d)
logging.debug(operationDescription)
if not current_point:
if not foreground_all and not deepgrow_3d:
slicer.util.warningDisplay(operationDescription + " - points not added")
return
current_point = foreground_all[-1] if foreground_all else background_all[-1] if background_all else None
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
sliceIndex = None
if self.multi_label:
params = {}
segmentation = self._segmentNode.GetSegmentation()
for name in self.info.get("labels", []):
points = []
segmentId = segmentation.GetSegmentIdBySegmentName(name)
segment = segmentation.GetSegment(segmentId) if segmentId else None
if segment:
fPosStr = vtk.mutable("")
segment.GetTag("MONAILabel.ForegroundPoints", fPosStr)
pointset = str(fPosStr)
print("{} => {} Fiducial points are: {}".format(segmentId, name, pointset))
if fPosStr is not None and len(pointset) > 0:
points = json.loads(pointset)
params[name] = points
params["label"] = label
labels = None
else:
sliceIndex = current_point[2] if current_point else None
logging.debug("Slice Index: {}".format(sliceIndex))
if deepgrow_3d or not sliceIndex:
foreground = foreground_all
background = background_all
else:
foreground = [x for x in foreground_all if x[2] == sliceIndex]
background = [x for x in background_all if x[2] == sliceIndex]
logging.debug("Foreground: {}".format(foreground))
logging.debug("Background: {}".format(background))
logging.debug("Current point: {}".format(current_point))
params = {
"label": label,
"foreground": foreground,
"background": background,
}
labels = [label]
params["label"] = label
params.update(self.getParamsFromConfig("infer", model))
print(f"Request Params for Deepgrow/Deepedit: {params}")
image_file = self.current_sample["id"]
result_file, params = self.logic.infer(model, image_file, params, session_id=self.getSessionId())
print(f"Result Params for Deepgrow/Deepedit: {params}")
if labels is None:
labels = (
params.get("label_names")
if params and params.get("label_names")
else self.models[model].get("labels")
)
if labels and isinstance(labels, dict):
labels = [k for k, _ in sorted(labels.items(), key=lambda item: item[1])]
freeze = label if self.ui.freezeUpdateCheckBox.checked else None
self.updateSegmentationMask(result_file, labels, None if deepgrow_3d else sliceIndex, freeze=freeze)
except:
logging.exception("Unknown Exception")
slicer.util.errorDisplay(operationDescription + " - unexpected error.", detailedText=traceback.format_exc())
finally:
qt.QApplication.restoreOverrideCursor()
self.updateGUIFromParameterNode()
logging.info("Time consumed by Deepgrow: {0:3.1f}".format(time.time() - start))
def createCursor(self, widget):
return slicer.util.mainWindow().cursor
def createSegmentNode(self):
if self._volumeNode is None:
return
if self._segmentNode is None:
name = "segmentation_" + self._volumeNode.GetName()
self._segmentNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentationNode")
self._segmentNode.SetReferenceImageGeometryParameterFromVolumeNode(self._volumeNode)
self._segmentNode.SetName(name)
def getLabelColor(self, name):
color = GenericAnatomyColors.get(name.lower())
return [c / 255.0 for c in color] if color else None
def updateSegmentationMask(self, in_file, labels, sliceIndex=None, freeze=None):
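# Import the labelmap returned by the server into the segmentation node: each newly imported
# segment is merged into the existing segment with the matching label name (optionally only
# for a single slice in 2D deepgrow, or only for the "freeze" labels), and the temporary
# imported segments are removed afterwards. With in_file=None only empty segments are
# created for the given labels.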
# TODO:: Add ROI Node (for Bounding Box if provided in the result)
start = time.time()
logging.debug("Update Segmentation Mask from: {}".format(in_file))
if in_file and not os.path.exists(in_file):
return False
segmentationNode = self._segmentNode
segmentation = segmentationNode.GetSegmentation()
if in_file is None:
for label in labels:
if not segmentation.GetSegmentIdBySegmentName(label):
segmentation.AddEmptySegment(label, label, self.getLabelColor(label))
return True
labels = [l for l in labels if l != "background"]
print(f"Update Segmentation Mask using Labels: {labels}")
# segmentId, segment = self.currentSegment()
labelImage = sitk.ReadImage(in_file)
labelmapVolumeNode = sitkUtils.PushVolumeToSlicer(labelImage, None, className="vtkMRMLLabelMapVolumeNode")
existing_label_ids = {}
for label in labels:
id = segmentation.GetSegmentIdBySegmentName(label)
if id:
existing_label_ids[label] = id
freeze = [freeze] if freeze and isinstance(freeze, str) else freeze
print(f"Import only Freezed label: {freeze}")
numberOfExistingSegments = segmentation.GetNumberOfSegments()
slicer.modules.segmentations.logic().ImportLabelmapToSegmentationNode(labelmapVolumeNode, segmentationNode)
slicer.mrmlScene.RemoveNode(labelmapVolumeNode)
numberOfAddedSegments = segmentation.GetNumberOfSegments() - numberOfExistingSegments
logging.debug("Adding {} segments".format(numberOfAddedSegments))
addedSegmentIds = [
segmentation.GetNthSegmentID(numberOfExistingSegments + i) for i in range(numberOfAddedSegments)
]
for i, segmentId in enumerate(addedSegmentIds):
segment = segmentation.GetSegment(segmentId)
print("Setting new segmentation with id: {} => {}".format(segmentId, segment.GetName()))
label = labels[i] if i < len(labels) else "unknown {}".format(i)
# segment.SetName(label)
# segment.SetColor(self.getLabelColor(label))
if freeze and label not in freeze:
print(f"Discard label update for: {label}")
elif label in existing_label_ids:
segmentEditorWidget = slicer.modules.segmenteditor.widgetRepresentation().self().editor
segmentEditorWidget.setSegmentationNode(segmentationNode)
segmentEditorWidget.setMasterVolumeNode(self._volumeNode)
segmentEditorWidget.setCurrentSegmentID(existing_label_ids[label])
effect = segmentEditorWidget.effectByName("Logical operators")
labelmap = slicer.vtkOrientedImageData()
segmentationNode.GetBinaryLabelmapRepresentation(segmentId, labelmap)
if sliceIndex:
selectedSegmentLabelmap = effect.selectedSegmentLabelmap()
dims = selectedSegmentLabelmap.GetDimensions()
count = 0
for x in range(dims[0]):
for y in range(dims[1]):
if selectedSegmentLabelmap.GetScalarComponentAsDouble(x, y, sliceIndex, 0):
count = count + 1
selectedSegmentLabelmap.SetScalarComponentFromDouble(x, y, sliceIndex, 0, 0)
logging.debug("Total Non Zero: {}".format(count))
# Clear the Slice
if count:
effect.modifySelectedSegmentByLabelmap(
selectedSegmentLabelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeSet
)
# Union label map
effect.modifySelectedSegmentByLabelmap(
labelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeAdd
)
else:
# adding bypass masking to not overwrite other layers,
# needed for preserving scribbles during updates
# help from: https://github.com/Slicer/Slicer/blob/master/Modules/Loadable/Segmentations/EditorEffects/Python/SegmentEditorLogicalEffect.py
bypassMask = True
effect.modifySelectedSegmentByLabelmap(
labelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeSet, bypassMask
)
segmentationNode.RemoveSegment(segmentId)
self.showSegmentationsIn3D()
logging.info("Time consumed by updateSegmentationMask: {0:3.1f}".format(time.time() - start))
return True
def showSegmentationsIn3D(self):
# add closed surface representation
if self._segmentNode:
self._segmentNode.CreateClosedSurfaceRepresentation()
view = slicer.app.layoutManager().threeDWidget(0).threeDView()
view.resetFocalPoint()
def updateServerUrlGUIFromSettings(self):
# Save current server URL to the top of history
settings = qt.QSettings()
serverUrlHistory = settings.value("MONAILabel/serverUrlHistory")
wasBlocked = self.ui.serverComboBox.blockSignals(True)
self.ui.serverComboBox.clear()
if serverUrlHistory:
self.ui.serverComboBox.addItems(serverUrlHistory.split(";"))
self.ui.serverComboBox.setCurrentText(settings.value("MONAILabel/serverUrl"))
self.ui.serverComboBox.blockSignals(wasBlocked)
def createFiducialNode(self, name, onMarkupNodeModified, color):
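# Create a markups fiducial node with hidden text labels and the given selected color, and
# attach an observer that fires whenever a new control point is placed.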
displayNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLMarkupsDisplayNode")
displayNode.SetTextScale(0)
displayNode.SetSelectedColor(color)
fiducialNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLMarkupsFiducialNode")
fiducialNode.SetName(name)
fiducialNode.SetAndObserveDisplayNodeID(displayNode.GetID())
fiducialNodeObservers = self.addFiducialNodeObserver(fiducialNode, onMarkupNodeModified)
return fiducialNode, fiducialNodeObservers
def removeFiducialNodeObservers(self, fiducialNode, fiducialNodeObservers):
if fiducialNode and fiducialNodeObservers:
for observer in fiducialNodeObservers:
fiducialNode.RemoveObserver(observer)
def addFiducialNodeObserver(self, fiducialNode, onMarkupNodeModified):
fiducialNodeObservers = []
if fiducialNode:
eventIds = [slicer.vtkMRMLMarkupsNode.PointPositionDefinedEvent]
for eventId in eventIds:
fiducialNodeObservers.append(fiducialNode.AddObserver(eventId, onMarkupNodeModified))
return fiducialNodeObservers
def scribblesLayersPresent(self):
scribbles_exist = False
if self._segmentNode is not None:
segmentationNode = self._segmentNode
segmentation = segmentationNode.GetSegmentation()
numSegments = segmentation.GetNumberOfSegments()
segmentIds = [segmentation.GetNthSegmentID(i) for i in range(numSegments)]
scribbles_exist = sum([int("scribbles" in sid) for sid in segmentIds]) > 0
return scribbles_exist
def onStartScribbling(self):
if not self._segmentNode:
return
logging.debug("Scribbles start event")
if (not self.scribblesLayersPresent()) and (self._scribblesEditorWidget is None):
# add background, layer index = -2 [2], color = red
self._segmentNode.GetSegmentation().AddEmptySegment(
"background_scribbles", "background_scribbles", [1.0, 0.0, 0.0]
)
# add foreground, layer index = -1 [3], color = green
self._segmentNode.GetSegmentation().AddEmptySegment(
"foreground_scribbles", "foreground_scribbles", [0.0, 1.0, 0.0]
)
# change segmentation display properties to "see through" the scribbles
# further explanation at:
# https://apidocs.slicer.org/master/classvtkMRMLSegmentationDisplayNode.html
segmentationDisplayNode = self._segmentNode.GetDisplayNode()
# background
opacity = 0.2
segmentationDisplayNode.SetSegmentOpacity2DFill("background_scribbles", opacity)
segmentationDisplayNode.SetSegmentOpacity2DOutline("background_scribbles", opacity)
# foreground
segmentationDisplayNode.SetSegmentOpacity2DFill("foreground_scribbles", opacity)
segmentationDisplayNode.SetSegmentOpacity2DOutline("foreground_scribbles", opacity)
# create segmentEditorWidget to access "Paint" and "Erase" segmentation tools
# these will be used to draw scribbles
self._scribblesEditorWidget = slicer.qMRMLSegmentEditorWidget()
self._scribblesEditorWidget.setMRMLScene(slicer.mrmlScene)
segmentEditorNode = slicer.vtkMRMLSegmentEditorNode()
# by default, painting new scribbles would overwrite (and hence erase) any existing labels -
# this is not the desired behaviour, so we switch to overlay mode, which enables drawing
# scribbles without changing existing labels. Further explanation at:
# https://discourse.slicer.org/t/how-can-i-set-masking-settings-on-a-segment-editor-effect-in-python/4406/7
segmentEditorNode.SetOverwriteMode(slicer.vtkMRMLSegmentEditorNode.OverwriteNone)
# add all nodes to the widget
slicer.mrmlScene.AddNode(segmentEditorNode)
self._scribblesEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode)
self._scribblesEditorWidget.setSegmentationNode(self._segmentNode)
self._scribblesEditorWidget.setMasterVolumeNode(self._volumeNode)
def onUpdateScribbles(self):
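# Overview: export the current segmentation (scribbles + label) to a temporary labelmap,
# send it to the MONAI Label server together with the selected post-processing method and
# an optional ROI, then write the returned label back into the currently selected segment.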
logging.info("Scribbles update event")
scribblesMethod = self.ui.scribblesMethodSelector.currentText
scribbles_in = None
result_file = None
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
# get scribbles + label
segmentationNode = self._segmentNode
labelmapVolumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode")
slicer.modules.segmentations.logic().ExportVisibleSegmentsToLabelmapNode(
segmentationNode, labelmapVolumeNode, self._volumeNode
)
scribbles_in = tempfile.NamedTemporaryFile(suffix=self.file_ext, dir=self.tmpdir).name
self.reportProgress(5)
# save scribbles + label to file
slicer.util.saveNode(labelmapVolumeNode, scribbles_in)
self.reportProgress(30)
self.updateServerSettings()
self.reportProgress(60)
# try to first fetch vtkMRMLAnnotationROINode
roiNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLAnnotationROINode")
if roiNode is None: # if vtkMRMLAnnotationROINode is not present, then check for a vtkMRMLMarkupsROINode
roiNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLMarkupsROINode")
# if roi node found, then try to get roi
selected_roi = self.getROIPointsXYZ(roiNode)
# send scribbles + label to server along with selected scribbles method
params = self.getParamsFromConfig("infer", scribblesMethod)
params.update({"roi": selected_roi})
image_file = self.current_sample["id"]
result_file, params = self.logic.infer(
scribblesMethod, image_file, params, scribbles_in, session_id=self.getSessionId()
)
# display result from server
self.reportProgress(90)
_, segment = self.currentSegment()
label = segment.GetName()
self.updateSegmentationMask(result_file, [label])
except:
slicer.util.errorDisplay(
"Failed to post process label on MONAI Label Server using {}".format(scribblesMethod),
detailedText=traceback.format_exc(),
)
finally:
qt.QApplication.restoreOverrideCursor()
self.reportProgress(100)
# clear all temporary files
if scribbles_in and os.path.exists(scribbles_in):
os.unlink(scribbles_in)
if result_file and os.path.exists(result_file):
os.unlink(result_file)
def getROIPointsXYZ(self, roiNode):
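# Convert the ROI bounds from RAS space into IJK voxel indices of the current volume.
# Supports both vtkMRMLMarkupsROINode (center + size) and the older vtkMRMLAnnotationROINode
# (GetBounds); returns a flat list of per-axis (start, end) index pairs, or [] if no ROI exists.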
if roiNode is None:
return []
v = self._volumeNode
RasToIjkMatrix = vtk.vtkMatrix4x4()
v.GetRASToIJKMatrix(RasToIjkMatrix)
roi_points_ras = [0.0] * 6
if roiNode.__class__.__name__ == "vtkMRMLMarkupsROINode":
# for vtkMRMLMarkupsROINode
print(roiNode.__class__.__name__)
center = [0] * 3
roiNode.GetCenter(center)
roi_points_ras = [(x - s / 2, x + s / 2) for x, s in zip(center, roiNode.GetSize())]
roi_points_ras = [item for sublist in roi_points_ras for item in sublist]
elif roiNode.__class__.__name__ == "vtkMRMLAnnotationROINode":
# for vtkMRMLAnnotationROINode (old method)
print(roiNode.__class__.__name__)
roiNode.GetBounds(roi_points_ras)
else:
# if none found then best to return empty list
return []
min_points_ras = [roi_points_ras[0], roi_points_ras[2], roi_points_ras[4], 1.0]
max_points_ras = [roi_points_ras[1], roi_points_ras[3], roi_points_ras[5], 1.0]
min_points_ijk = RasToIjkMatrix.MultiplyDoublePoint(min_points_ras)
max_points_ijk = RasToIjkMatrix.MultiplyDoublePoint(max_points_ras)
min_points_ijk = [round(i) for i in min_points_ijk]
max_points_ijk = [round(i) for i in max_points_ijk]
roi_points_ijk = [val for pair in zip(min_points_ijk[0:3], max_points_ijk[0:3]) for val in pair]
logging.debug("RAS: {}; IJK: {}".format(roi_points_ras, roi_points_ijk))
# print("RAS: {}; IJK: {}".format(roi_points_ras, roi_points_ijk))
return roi_points_ijk
def onClearScribblesSegmentNodes(self):
# more explanation on this at:
# https://discourse.slicer.org/t/how-to-clear-segmentation/7433/4
# clear "scribbles" segment before saving the label
if not self._segmentNode:
return
segmentation = self._segmentNode
num_segments = segmentation.GetSegmentation().GetNumberOfSegments()
for i in range(num_segments):
segmentId = segmentation.GetSegmentation().GetNthSegmentID(i)
if "scribbles" in segmentId:
logging.info("clearning {}".format(segmentId))
labelMapRep = slicer.vtkOrientedImageData()
segmentation.GetBinaryLabelmapRepresentation(segmentId, labelMapRep)
vtkSegmentationCore.vtkOrientedImageDataResample.FillImage(labelMapRep, 0, labelMapRep.GetExtent())
slicer.vtkSlicerSegmentationsModuleLogic.SetBinaryLabelmapToSegment(
labelMapRep, segmentation, segmentId, slicer.vtkSlicerSegmentationsModuleLogic.MODE_REPLACE
)
def onClearScribbles(self):
# reset scribbles mode
self.scribblesMode = None
# clear scribbles editor widget
# dropping the reference is enough for the widget to be released
self._scribblesEditorWidget = None
# remove "scribbles" segments from label
self.onClearScribblesSegmentNodes()
# reset UI elements associated with scribbles
self.ui.scribblesCollapsibleButton.collapsed = True
self.ui.paintScribblesButton.setChecked(False)
self.ui.eraseScribblesButton.setChecked(False)
self.ui.scribblesLabelSelector.setCurrentIndex(0)
def checkAndInitialiseScribbles(self):
if not self._segmentNode:
return
if self._scribblesEditorWidget is None:
self.onStartScribbling()
if self.scribblesMode is None:
self.changeScribblesMode(tool="Paint", layer="foreground_scribbles")
self.updateScribToolLayerFromMode()
def updateScribToolLayerFromMode(self):
if not self._segmentNode:
return
logging.info("Scribbles mode {} ".format(self.scribblesMode))
self.checkAndInitialiseScribbles()
# update tool/layer select for scribblesEditorWidget
tool, layer = self.getToolAndLayerFromScribblesMode()
if self._scribblesEditorWidget:
self._scribblesEditorWidget.setActiveEffectByName(tool)
self._scribblesEditorWidget.setCurrentSegmentID(layer)
# update brush type from checkbox
if tool in ("Paint", "Erase"):
is3dbrush = self.ui.brush3dCheckbox.checkState()
self.on3dBrushCheckbox(state=is3dbrush)
# update brush size from slider
brushSize = self.ui.brushSizeSlider.value
self.updateBrushSize(value=brushSize)
def getToolAndLayerFromScribblesMode(self):
if self.scribblesMode is not None:
return self.scribblesMode.split("+")
else:
# default modes
return "Paint", "foreground_scribbles"
def changeScribblesMode(self, tool=None, layer=None):
ctool, clayer = self.getToolAndLayerFromScribblesMode()
ctool = tool if tool is not None else ctool
clayer = layer if layer is not None else clayer
self.scribblesMode = "+".join([ctool, clayer])
def onPaintScribbles(self):
if not self._segmentNode:
return
if self.ui.eraseScribblesButton.checked:
self.ui.eraseScribblesButton.setChecked(False)
self.changeScribblesMode(tool="Paint" if self.ui.paintScribblesButton.checked else "None")
self.updateScribToolLayerFromMode()
def onEraseScribbles(self):
if not self._segmentNode:
return
if self.ui.paintScribblesButton.checked:
self.ui.paintScribblesButton.setChecked(False)
self.changeScribblesMode(tool="Erase" if self.ui.eraseScribblesButton.checked else "None")
self.updateScribToolLayerFromMode()
def onSelectScribblesLabel(self):
if not self._segmentNode:
return
index = self.ui.scribblesLabelSelector.currentIndex
index = 0 if index < 0 else index
selected = self.ui.scribblesLabelSelector.itemText(index)
layer = "foreground_scribbles" if selected == "Foreground" else "background_scribbles"
self.changeScribblesMode(layer=layer)
self.updateScribToolLayerFromMode()
def on3dBrushCheckbox(self, state):
logging.info("3D brush update {}".format(state))
self.checkAndInitialiseScribbles()
effect = self._scribblesEditorWidget.activeEffect()
# enable scribbles in 3d using a sphere brush
effect.setParameter("BrushSphere", state)
def updateBrushSize(self, value):
logging.info("brush size update {}".format(value))
if self.ui.paintScribblesButton.checked or self.ui.eraseScribblesButton.checked:
self.checkAndInitialiseScribbles()
effect = self._scribblesEditorWidget.activeEffect()
effect.setParameter("BrushAbsoluteDiameter", value)
class MONAILabelLogic(ScriptedLoadableModuleLogic):
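# Thin wrapper around MONAILabelClient: every call below builds a fresh client from the
# configured server_url / client_id and forwards the request to the MONAI Label server.
# Illustrative usage (a sketch; the model/image ids are placeholders, not real assets):
#   logic = MONAILabelLogic()
#   logic.setServer("http://127.0.0.1:8000")
#   info = logic.info()
#   result_file, params = logic.infer("segmentation", "sample_image_id", params={})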
def __init__(self, tmpdir=None, server_url=None, progress_callback=None, client_id=None):
ScriptedLoadableModuleLogic.__init__(self)
self.server_url = server_url
self.tmpdir = slicer.util.tempDirectory("slicer-monai-label") if tmpdir is None else tmpdir
self.client_id = client_id
self.volumeToSessions = dict()
self.progress_callback = progress_callback
def setDefaultParameters(self, parameterNode):
if not parameterNode.GetParameter("SegmentationModel"):
parameterNode.SetParameter("SegmentationModel", "")
if not parameterNode.GetParameter("DeepgrowModel"):
parameterNode.SetParameter("DeepgrowModel", "")
if not parameterNode.GetParameter("ScribblesMethod"):
parameterNode.SetParameter("ScribblesMethod", "")
def __del__(self):
shutil.rmtree(self.tmpdir, ignore_errors=True)
def setServer(self, server_url=None):
self.server_url = server_url if server_url else "http://127.0.0.1:8000"
def setClientId(self, client_id):
self.client_id = client_id if client_id else "user-xyz"
def setProgressCallback(self, progress_callback=None):
self.progress_callback = progress_callback
def reportProgress(self, progress):
if self.progress_callback:
self.progress_callback(progress)
def info(self):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).info()
def next_sample(self, strategy, params={}):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).next_sample(strategy, params)
def create_session(self, image_in):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).create_session(image_in)
def get_session(self, session_id):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).get_session(session_id)
def remove_session(self, session_id):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).remove_session(session_id)
def upload_image(self, image_in, image_id=None):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).upload_image(image_in, image_id)
def save_label(self, image_in, label_in, params):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).save_label(
image_in, label_in, params=params
)
def infer(self, model, image_in, params={}, label_in=None, file=None, session_id=None):
logging.debug("Preparing input data for segmentation")
self.reportProgress(0)
client = MONAILabelClient(self.server_url, self.tmpdir, self.client_id)
result_file, params = client.infer(model, image_in, params, label_in, file, session_id)
logging.debug(f"Image Response: {result_file}")
logging.debug(f"JSON Response: {params}")
self.reportProgress(100)
return result_file, params
def train_start(self, model=None, params={}):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).train_start(model, params)
def train_status(self, check_if_running):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).train_status(check_if_running)
def train_stop(self):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).train_stop()
class MONAILabelTest(ScriptedLoadableModuleTest):
def setUp(self):
slicer.mrmlScene.Clear()
def runTest(self):
self.setUp()
self.test_MONAILabel1()
def test_MONAILabel1(self):
self.delayDisplay("Test passed")
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import logging
import os
import shutil
import tempfile
import time
import traceback
from collections import OrderedDict
from urllib.parse import quote_plus
import ctk
import qt
import SampleData
import SimpleITK as sitk
import sitkUtils
import slicer
import vtk
import vtkSegmentationCore
from MONAILabelLib import GenericAnatomyColors, MONAILabelClient
from slicer.ScriptedLoadableModule import *
from slicer.util import VTKObservationMixin
class MONAILabel(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "MONAILabel"
self.parent.categories = ["Active Learning"]
self.parent.dependencies = []
self.parent.contributors = ["NVIDIA, KCL"]
self.parent.helpText = """
Active Learning solution.
See more information in <a href="https://github.com/Project-MONAI/MONAILabel">module documentation</a>.
"""
self.parent.acknowledgementText = """
Developed by NVIDIA, KCL
"""
# Additional initialization step after application startup is complete
slicer.app.connect("startupCompleted()", self.initializeAfterStartup)
def initializeAfterStartup(self):
if not slicer.app.commandOptions().noMainWindow:
self.settingsPanel = MONAILabelSettingsPanel()
slicer.app.settingsDialog().addPanel("MONAI Label", self.settingsPanel)
class _ui_MONAILabelSettingsPanel(object):
def __init__(self, parent):
vBoxLayout = qt.QVBoxLayout(parent)
# settings
groupBox = ctk.ctkCollapsibleGroupBox()
groupBox.title = "MONAI Label Server"
groupLayout = qt.QFormLayout(groupBox)
serverUrl = qt.QLineEdit()
groupLayout.addRow("Server address:", serverUrl)
parent.registerProperty("MONAILabel/serverUrl", serverUrl, "text", str(qt.SIGNAL("textChanged(QString)")))
serverUrlHistory = qt.QLineEdit()
groupLayout.addRow("Server address history:", serverUrlHistory)
parent.registerProperty(
"MONAILabel/serverUrlHistory", serverUrlHistory, "text", str(qt.SIGNAL("textChanged(QString)"))
)
fileExtension = qt.QLineEdit()
fileExtension.setText(".nii.gz")
fileExtension.toolTip = "Default extension for uploading images/labels"
groupLayout.addRow("File Extension:", fileExtension)
parent.registerProperty(
"MONAILabel/fileExtension", fileExtension, "text", str(qt.SIGNAL("textChanged(QString)"))
)
clientId = qt.QLineEdit()
clientId.setText("user-xyz")
clientId.toolTip = "Client/User ID that will be sent to MONAI Label server for reference"
groupLayout.addRow("Client/User-ID:", clientId)
parent.registerProperty("MONAILabel/clientId", clientId, "text", str(qt.SIGNAL("textChanged(QString)")))
autoRunSegmentationCheckBox = qt.QCheckBox()
autoRunSegmentationCheckBox.checked = False
autoRunSegmentationCheckBox.toolTip = (
"Enable this option to auto run segmentation if pre-trained model exists when Next Sample is fetched"
)
groupLayout.addRow("Auto-Run Pre-Trained Model:", autoRunSegmentationCheckBox)
parent.registerProperty(
"MONAILabel/autoRunSegmentationOnNextSample",
ctk.ctkBooleanMapper(autoRunSegmentationCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
autoFetchNextSampleCheckBox = qt.QCheckBox()
autoFetchNextSampleCheckBox.checked = False
autoFetchNextSampleCheckBox.toolTip = "Enable this option to fetch Next Sample after saving the label"
groupLayout.addRow("Auto-Fetch Next Sample:", autoFetchNextSampleCheckBox)
parent.registerProperty(
"MONAILabel/autoFetchNextSample",
ctk.ctkBooleanMapper(autoFetchNextSampleCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
autoUpdateModelCheckBox = qt.QCheckBox()
autoUpdateModelCheckBox.checked = True
autoUpdateModelCheckBox.toolTip = "Enable this option to auto update model after submitting the label"
groupLayout.addRow("Auto-Update Model:", autoUpdateModelCheckBox)
parent.registerProperty(
"MONAILabel/autoUpdateModel",
ctk.ctkBooleanMapper(autoUpdateModelCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
askForUserNameCheckBox = qt.QCheckBox()
askForUserNameCheckBox.checked = False
askForUserNameCheckBox.toolTip = "Enable this option to ask for the user name when the MONAILabel extension is opened for the first time in a Slicer session"
groupLayout.addRow("Ask For User Name:", askForUserNameCheckBox)
parent.registerProperty(
"MONAILabel/askForUserName",
ctk.ctkBooleanMapper(askForUserNameCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
allowOverlapCheckBox = qt.QCheckBox()
allowOverlapCheckBox.checked = False
allowOverlapCheckBox.toolTip = "Enable this option to allow overlapping segmentations"
groupLayout.addRow("Allow Overlapping Segmentations:", allowOverlapCheckBox)
parent.registerProperty(
"MONAILabel/allowOverlappingSegments",
ctk.ctkBooleanMapper(allowOverlapCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
allowOverlapCheckBox.connect("toggled(bool)", self.onUpdateAllowOverlap)
developerModeCheckBox = qt.QCheckBox()
developerModeCheckBox.checked = False
developerModeCheckBox.toolTip = "Enable this option to show advanced developer features such as the options/config section"
groupLayout.addRow("Developer Mode:", developerModeCheckBox)
parent.registerProperty(
"MONAILabel/developerMode",
ctk.ctkBooleanMapper(developerModeCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
vBoxLayout.addWidget(groupBox)
vBoxLayout.addStretch(1)
def onUpdateAllowOverlap(self):
if slicer.util.settingsValue("MONAILabel/allowOverlappingSegments", True, converter=slicer.util.toBool):
if slicer.util.settingsValue("MONAILabel/fileExtension", None) != ".seg.nrrd":
slicer.util.warningDisplay(
"Overlapping segmentations are only availabel with the '.seg.nrrd' file extension! Consider changing MONAILabel file extension."
)
class MONAILabelSettingsPanel(ctk.ctkSettingsPanel):
def __init__(self, *args, **kwargs):
ctk.ctkSettingsPanel.__init__(self, *args, **kwargs)
self.ui = _ui_MONAILabelSettingsPanel(self)
class MONAILabelWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):
def __init__(self, parent=None):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.__init__(self, parent)
VTKObservationMixin.__init__(self) # needed for parameter node observation
self.logic = None
self._parameterNode = None
self._volumeNode = None
self._segmentNode = None
self._volumeNodes = []
self._updatingGUIFromParameterNode = False
self._scribblesEditorWidget = None
self.info = {}
self.models = OrderedDict()
self.trainers = OrderedDict()
self.config = OrderedDict()
self.current_sample = None
self.samples = {}
self.state = {
"SegmentationModel": "",
"DeepgrowModel": "",
"ScribblesMethod": "",
"CurrentStrategy": "",
"CurrentTrainer": "",
}
self.file_ext = ".nii.gz"
self.dgPositiveFiducialNode = None
self.dgPositiveFiducialNodeObservers = []
self.dgNegativeFiducialNode = None
self.dgNegativeFiducialNodeObservers = []
self.ignoreFiducialNodeAddEvent = False
self.progressBar = None
self.tmpdir = None
self.timer = None
self.scribblesMode = None
self.multi_label = False
def setup(self):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.setup(self)
# Load widget from .ui file (created by Qt Designer).
# Additional widgets can be instantiated manually and added to self.layout.
uiWidget = slicer.util.loadUI(self.resourcePath("UI/MONAILabel.ui"))
self.layout.addWidget(uiWidget)
self.ui = slicer.util.childWidgetVariables(uiWidget)
# Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's
# "mrmlSceneChanged(vtkMRMLScene*)" signal is connected to each MRML widget's
# "setMRMLScene(vtkMRMLScene*)" slot.
uiWidget.setMRMLScene(slicer.mrmlScene)
# These connections ensure that we update parameter node when scene is closed
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose)
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.onSceneEndClose)
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.NodeAddedEvent, self.onSceneEndImport)
# Create logic class. Logic implements all computations that should be possible to run
# in batch mode, without a graphical user interface.
self.tmpdir = slicer.util.tempDirectory("slicer-monai-label")
self.logic = MONAILabelLogic(self.tmpdir)
# Set icons and tune widget properties
self.ui.serverComboBox.lineEdit().setPlaceholderText("enter server address or leave empty to use default")
self.ui.fetchServerInfoButton.setIcon(self.icon("refresh-icon.png"))
self.ui.segmentationButton.setIcon(self.icon("segment.png"))
self.ui.nextSampleButton.setIcon(self.icon("segment.png"))
self.ui.saveLabelButton.setIcon(self.icon("save.png"))
self.ui.trainingButton.setIcon(self.icon("training.png"))
self.ui.stopTrainingButton.setIcon(self.icon("stop.png"))
self.ui.uploadImageButton.setIcon(self.icon("upload.svg"))
self.ui.importLabelButton.setIcon(self.icon("download.png"))
self.ui.dgPositiveFiducialPlacementWidget.setMRMLScene(slicer.mrmlScene)
self.ui.dgPositiveFiducialPlacementWidget.placeButton().toolTip = "Select +ve points"
self.ui.dgPositiveFiducialPlacementWidget.buttonsVisible = False
self.ui.dgPositiveFiducialPlacementWidget.placeButton().show()
self.ui.dgPositiveFiducialPlacementWidget.deleteButton().show()
self.ui.dgNegativeFiducialPlacementWidget.setMRMLScene(slicer.mrmlScene)
self.ui.dgNegativeFiducialPlacementWidget.placeButton().toolTip = "Select -ve points"
self.ui.dgNegativeFiducialPlacementWidget.buttonsVisible = False
self.ui.dgNegativeFiducialPlacementWidget.placeButton().show()
self.ui.dgNegativeFiducialPlacementWidget.deleteButton().show()
self.ui.dgUpdateButton.setIcon(self.icon("segment.png"))
# Connections
self.ui.fetchServerInfoButton.connect("clicked(bool)", self.onClickFetchInfo)
self.ui.serverComboBox.connect("currentIndexChanged(int)", self.onClickFetchInfo)
self.ui.segmentationModelSelector.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI)
self.ui.segmentationButton.connect("clicked(bool)", self.onClickSegmentation)
self.ui.deepgrowModelSelector.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI)
self.ui.nextSampleButton.connect("clicked(bool)", self.onNextSampleButton)
self.ui.trainingButton.connect("clicked(bool)", self.onTraining)
self.ui.stopTrainingButton.connect("clicked(bool)", self.onStopTraining)
self.ui.saveLabelButton.connect("clicked(bool)", self.onSaveLabel)
self.ui.uploadImageButton.connect("clicked(bool)", self.onUploadImage)
self.ui.importLabelButton.connect("clicked(bool)", self.onImportLabel)
self.ui.labelComboBox.connect("currentIndexChanged(int)", self.onSelectLabel)
self.ui.dgUpdateButton.connect("clicked(bool)", self.onUpdateDeepgrow)
self.ui.dgUpdateCheckBox.setStyleSheet("padding-left: 10px;")
# Scribbles
# brush and eraser icon from: https://tablericons.com/
self.ui.scribblesMethodSelector.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI)
self.ui.paintScribblesButton.setIcon(self.icon("paint.png"))
self.ui.paintScribblesButton.setToolTip("Paint scribbles for selected scribble layer")
self.ui.eraseScribblesButton.setIcon(self.icon("eraser.png"))
self.ui.eraseScribblesButton.setToolTip("Erase scribbles for selected scribble layer")
self.ui.updateScribblesButton.setIcon(self.icon("segment.png"))
self.ui.updateScribblesButton.setToolTip(
"Update label by sending scribbles to server to apply selected post processing method"
)
self.ui.brushSizeSlider.connect("valueChanged(double)", self.updateBrushSize)
self.ui.brushSizeSlider.setToolTip("Change brush size for scribbles tool")
self.ui.brush3dCheckbox.stateChanged.connect(self.on3dBrushCheckbox)
self.ui.brush3dCheckbox.setToolTip("Use 3D brush to paint/erase in multiple slices in 3D")
self.ui.updateScribblesButton.clicked.connect(self.onUpdateScribbles)
self.ui.paintScribblesButton.clicked.connect(self.onPaintScribbles)
self.ui.eraseScribblesButton.clicked.connect(self.onEraseScribbles)
self.ui.scribblesLabelSelector.connect("currentIndexChanged(int)", self.onSelectScribblesLabel)
# creating editable combo box
self.ui.scribblesLabelSelector.addItem(self.icon("fg_green.png"), "Foreground")
self.ui.scribblesLabelSelector.addItem(self.icon("bg_red.png"), "Background")
self.ui.scribblesLabelSelector.setCurrentIndex(0)
# start with scribbles section disabled
self.ui.scribblesCollapsibleButton.setEnabled(False)
self.ui.scribblesCollapsibleButton.collapsed = True
# embedded segment editor
self.ui.embeddedSegmentEditorWidget.setMRMLScene(slicer.mrmlScene)
self.ui.embeddedSegmentEditorWidget.setSegmentationNodeSelectorVisible(False)
self.ui.embeddedSegmentEditorWidget.setMasterVolumeNodeSelectorVisible(False)
self.initializeParameterNode()
self.updateServerUrlGUIFromSettings()
# self.onClickFetchInfo()
if slicer.util.settingsValue("MONAILabel/askForUserName", False, converter=slicer.util.toBool):
text = qt.QInputDialog().getText(
self.parent,
"User Name",
"Please enter your name:",
qt.QLineEdit.Normal,
slicer.util.settingsValue("MONAILabel/clientId", None),
)
if text:
settings = qt.QSettings()
settings.setValue("MONAILabel/clientId", text)
def cleanup(self):
self.removeObservers()
shutil.rmtree(self.tmpdir, ignore_errors=True)
def enter(self):
self.initializeParameterNode()
if self._segmentNode:
self.updateGUIFromParameterNode()
def exit(self):
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
def onSceneStartClose(self, caller, event):
self.state = {
"SegmentationModel": self.ui.segmentationModelSelector.currentText,
"DeepgrowModel": self.ui.deepgrowModelSelector.currentText,
"ScribblesMethod": self.ui.scribblesMethodSelector.currentText,
"CurrentStrategy": self.ui.strategyBox.currentText,
"CurrentTrainer": self.ui.trainerBox.currentText,
}
self._volumeNode = None
self._segmentNode = None
self._volumeNodes.clear()
self.setParameterNode(None)
self.current_sample = None
self.samples.clear()
self.resetFiducial(
self.ui.dgPositiveFiducialPlacementWidget, self.dgPositiveFiducialNode, self.dgPositiveFiducialNodeObservers
)
self.dgPositiveFiducialNode = None
self.resetFiducial(
self.ui.dgNegativeFiducialPlacementWidget, self.dgNegativeFiducialNode, self.dgNegativeFiducialNodeObservers
)
self.dgNegativeFiducialNode = None
self.onClearScribbles()
def resetFiducial(self, fiducialWidget, fiducialNode, fiducialNodeObservers):
if fiducialWidget.placeModeEnabled:
fiducialWidget.setPlaceModeEnabled(False)
if fiducialNode:
slicer.mrmlScene.RemoveNode(fiducialNode)
self.removeFiducialNodeObservers(fiducialNode, fiducialNodeObservers)
def onSceneEndClose(self, caller, event):
if self.parent.isEntered:
self.initializeParameterNode()
def onSceneEndImport(self, caller, event):
if not self._volumeNode:
self.updateGUIFromParameterNode()
def initializeParameterNode(self):
self.setParameterNode(self.logic.getParameterNode())
# Select default input nodes if nothing is selected yet to save a few clicks for the user
if not self._parameterNode.GetNodeReference("InputVolume"):
firstVolumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
if firstVolumeNode:
self._parameterNode.SetNodeReferenceID("InputVolume", firstVolumeNode.GetID())
def setParameterNode(self, inputParameterNode):
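# Swap the observed parameter node: set defaults on the new node, drop the ModifiedEvent
# observer from the previous node, observe the new one and refresh the GUI once.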
if inputParameterNode:
self.logic.setDefaultParameters(inputParameterNode)
if self._parameterNode is not None:
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
self._parameterNode = inputParameterNode
if self._parameterNode is not None:
self.addObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
# Initial GUI update
self.updateGUIFromParameterNode()
def monitorTraining(self):
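# Polled by a QTimer (started in updateGUIFromParameterNode) while a training task is running:
# updates the training progress / accuracy bars and stops the timer once training has finished.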
status = self.isTrainingRunning(check_only=False)
if status and status.get("status") == "RUNNING":
info = self.logic.info()
train_stats = info.get("train_stats")
if not train_stats:
return
train_stats = next(iter(train_stats.values())) if train_stats else train_stats
current = 0 if train_stats.get("total_time") else train_stats.get("epoch", 1)
total = train_stats.get("total_epochs", 1)
percent = max(1, 100 * current / total)
if self.ui.trainingProgressBar.value != percent:
self.ui.trainingProgressBar.setValue(percent)
self.ui.trainingProgressBar.setToolTip(f"{current}/{total} epoch is completed")
dice = train_stats.get("best_metric", 0)
self.updateAccuracyBar(dice)
return
print("Training completed")
self.ui.trainingProgressBar.setValue(100)
self.timer.stop()
self.timer = None
self.ui.trainingProgressBar.setToolTip(f"Training: {status.get('status', 'DONE')}")
self.ui.trainingButton.setEnabled(True)
self.ui.stopTrainingButton.setEnabled(False)
self.fetchInfo()
def updateGUIFromParameterNode(self, caller=None, event=None):
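# Sync every UI element (input/model selectors, label/strategy/trainer boxes, progress bars,
# button states and fiducial widgets) from the parameter node and the cached server info.
# Guarded by _updatingGUIFromParameterNode so it cannot re-trigger updateParameterNodeFromGUI.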
if self._parameterNode is None or self._updatingGUIFromParameterNode:
return
# Make sure GUI changes do not call updateParameterNodeFromGUI (it could cause infinite loop)
self._updatingGUIFromParameterNode = True
file_ext = slicer.util.settingsValue("MONAILabel/fileExtension", self.file_ext)
self.file_ext = file_ext if file_ext else self.file_ext
# Update node selectors and sliders
self.ui.inputSelector.clear()
for v in self._volumeNodes:
self.ui.inputSelector.addItem(v.GetName())
self.ui.inputSelector.setToolTip(self.current_sample.get("name", "") if self.current_sample else "")
if self._volumeNode:
self.ui.inputSelector.setCurrentIndex(self.ui.inputSelector.findText(self._volumeNode.GetName()))
self.ui.inputSelector.setEnabled(False) # Allow only one active scene
self.ui.uploadImageButton.setEnabled(False)
if self.info and slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode") and self._volumeNode is None:
self._volumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
self.initSample({"id": self._volumeNode.GetName(), "session": True}, autosegment=False)
self.ui.inputSelector.setEnabled(False)
self.ui.uploadImageButton.setEnabled(self.current_sample and self.current_sample.get("session"))
self.updateSelector(self.ui.segmentationModelSelector, ["segmentation"], "SegmentationModel", 0)
self.updateSelector(self.ui.deepgrowModelSelector, ["deepgrow", "deepedit"], "DeepgrowModel", 0)
self.updateSelector(self.ui.scribblesMethodSelector, ["scribbles"], "ScribblesMethod", 0)
if self.models and [k for k, v in self.models.items() if v["type"] == "segmentation"]:
self.ui.segmentationCollapsibleButton.collapsed = False
if self.models and [k for k, v in self.models.items() if v["type"] in ("deepgrow", "deepedit")]:
self.ui.deepgrowCollapsibleButton.collapsed = False
if self.models and [k for k, v in self.models.items() if v["type"] == "scribbles"]:
self.ui.scribblesCollapsibleButton.collapsed = False
self.ui.labelComboBox.clear()
if self._segmentNode:
segmentation = self._segmentNode.GetSegmentation()
totalSegments = segmentation.GetNumberOfSegments()
segmentIds = [segmentation.GetNthSegmentID(i) for i in range(totalSegments)]
for idx, segmentId in enumerate(segmentIds):
segment = segmentation.GetSegment(segmentId)
label = segment.GetName()
if label in ["foreground_scribbles", "background_scribbles"]:
continue
self.ui.labelComboBox.addItem(label)
else:
for label in self.info.get("labels", {}):
self.ui.labelComboBox.addItem(label)
currentLabel = self._parameterNode.GetParameter("CurrentLabel")
idx = self.ui.labelComboBox.findText(currentLabel) if currentLabel else 0
idx = 0 if idx < 0 < self.ui.labelComboBox.count else idx
self.ui.labelComboBox.setCurrentIndex(idx)
self.ui.appComboBox.clear()
self.ui.appComboBox.addItem(self.info.get("name", ""))
datastore_stats = self.info.get("datastore", {})
current = datastore_stats.get("completed", 0)
total = datastore_stats.get("total", 0)
self.ui.activeLearningProgressBar.setValue(current / max(total, 1) * 100)
self.ui.activeLearningProgressBar.setToolTip(f"{current}/{total} samples are labeled")
train_stats = self.info.get("train_stats", {})
train_stats = next(iter(train_stats.values())) if train_stats else train_stats
dice = train_stats.get("best_metric", 0)
self.updateAccuracyBar(dice)
self.ui.strategyBox.clear()
for strategy in self.info.get("strategies", {}):
self.ui.strategyBox.addItem(strategy)
currentStrategy = self._parameterNode.GetParameter("CurrentStrategy")
currentStrategy = currentStrategy if currentStrategy else self.state["CurrentStrategy"]
self.ui.strategyBox.setCurrentIndex(self.ui.strategyBox.findText(currentStrategy) if currentStrategy else 0)
self.ui.trainerBox.clear()
trainers = self.info.get("trainers", {})
if trainers:
self.ui.trainerBox.addItem("ALL")
for t in trainers:
self.ui.trainerBox.addItem(t)
currentTrainer = self._parameterNode.GetParameter("CurrentTrainer")
currentTrainer = currentTrainer if currentTrainer else self.state["CurrentTrainer"]
self.ui.trainerBox.setCurrentIndex(self.ui.trainerBox.findText(currentTrainer) if currentTrainer else 0)
developer_mode = slicer.util.settingsValue("MONAILabel/developerMode", True, converter=slicer.util.toBool)
self.ui.optionsCollapsibleButton.setVisible(developer_mode)
# Enable/Disable
self.ui.nextSampleButton.setEnabled(self.ui.strategyBox.count)
is_training_running = bool(self.info and self.isTrainingRunning())
self.ui.trainingButton.setEnabled(self.info and not is_training_running and current)
self.ui.stopTrainingButton.setEnabled(is_training_running)
if is_training_running and self.timer is None:
self.timer = qt.QTimer()
self.timer.setInterval(5000)
self.timer.connect("timeout()", self.monitorTraining)
self.timer.start()
self.ui.segmentationButton.setEnabled(
self.ui.segmentationModelSelector.currentText and self._volumeNode is not None
)
self.ui.saveLabelButton.setEnabled(self._segmentNode is not None)
self.ui.importLabelButton.setEnabled(self._segmentNode is not None)
# Create empty markup fiducial node for deep grow +ve and -ve
if self._segmentNode:
if not self.dgPositiveFiducialNode:
self.dgPositiveFiducialNode, self.dgPositiveFiducialNodeObservers = self.createFiducialNode(
"P", self.onDeepGrowFiducialNodeModified, [0.5, 1, 0.5]
)
self.ui.dgPositiveFiducialPlacementWidget.setCurrentNode(self.dgPositiveFiducialNode)
self.ui.dgPositiveFiducialPlacementWidget.setPlaceModeEnabled(False)
if not self.dgNegativeFiducialNode:
self.dgNegativeFiducialNode, self.dgNegativeFiducialNodeObservers = self.createFiducialNode(
"N", self.onDeepGrowFiducialNodeModified, [0.5, 0.5, 1]
)
self.ui.dgNegativeFiducialPlacementWidget.setCurrentNode(self.dgNegativeFiducialNode)
self.ui.dgNegativeFiducialPlacementWidget.setPlaceModeEnabled(False)
self.ui.scribblesCollapsibleButton.setEnabled(self.ui.scribblesMethodSelector.count)
self.ui.scribblesCollapsibleButton.collapsed = False
self.ui.dgPositiveFiducialPlacementWidget.setEnabled(self.ui.deepgrowModelSelector.currentText)
self.ui.dgNegativeFiducialPlacementWidget.setEnabled(self.ui.deepgrowModelSelector.currentText)
self.multi_label = "background" in self.info.get("labels", [])
if self.multi_label:
self.ui.dgLabelBackground.hide()
self.ui.dgNegativeFiducialPlacementWidget.hide()
self.ui.freezeUpdateCheckBox.show()
self.ui.dgLabelForeground.setText("Landmarks:")
else:
self.ui.dgNegativeFiducialPlacementWidget.show()
self.ui.freezeUpdateCheckBox.hide()
self.ui.dgLabelForeground.setText("Foreground:")
self.ui.dgUpdateCheckBox.setEnabled(self.ui.deepgrowModelSelector.currentText and self._segmentNode)
self.ui.dgUpdateButton.setEnabled(self.ui.deepgrowModelSelector.currentText and self._segmentNode)
self.ui.embeddedSegmentEditorWidget.setMRMLSegmentEditorNode(
slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLSegmentEditorNode")
)
# All the GUI updates are done
self._updatingGUIFromParameterNode = False
def updateParameterNodeFromGUI(self, caller=None, event=None):
if self._parameterNode is None or self._updatingGUIFromParameterNode:
return
wasModified = self._parameterNode.StartModify() # Modify all properties in a single batch
segmentationModelIndex = self.ui.segmentationModelSelector.currentIndex
if segmentationModelIndex >= 0:
segmentationModel = self.ui.segmentationModelSelector.itemText(segmentationModelIndex)
self._parameterNode.SetParameter("SegmentationModel", segmentationModel)
deepgrowModelIndex = self.ui.deepgrowModelSelector.currentIndex
if deepgrowModelIndex >= 0:
deepgrowModel = self.ui.deepgrowModelSelector.itemText(deepgrowModelIndex)
self._parameterNode.SetParameter("DeepgrowModel", deepgrowModel)
scribblesMethodIndex = self.ui.scribblesMethodSelector.currentIndex
if scribblesMethodIndex >= 0:
scribblesMethod = self.ui.scribblesMethodSelector.itemText(scribblesMethodIndex)
self._parameterNode.SetParameter("ScribblesMethod", scribblesMethod)
currentLabelIndex = self.ui.labelComboBox.currentIndex
if currentLabelIndex >= 0:
currentLabel = self.ui.labelComboBox.itemText(currentLabelIndex)
self._parameterNode.SetParameter("CurrentLabel", currentLabel)
currentStrategyIndex = self.ui.strategyBox.currentIndex
if currentStrategyIndex >= 0:
currentStrategy = self.ui.strategyBox.itemText(currentStrategyIndex)
self._parameterNode.SetParameter("CurrentStrategy", currentStrategy)
currentTrainerIndex = self.ui.trainerBox.currentIndex
if currentTrainerIndex >= 0:
currentTrainer = self.ui.trainerBox.itemText(currentTrainerIndex)
self._parameterNode.SetParameter("CurrentTrainer", currentTrainer)
self._parameterNode.EndModify(wasModified)
def updateSelector(self, selector, model_types, param, defaultIndex=0):
wasSelectorBlocked = selector.blockSignals(True)
selector.clear()
for model_name, model in self.models.items():
if model["type"] in model_types:
selector.addItem(model_name)
selector.setItemData(selector.count - 1, model["description"], qt.Qt.ToolTipRole)
model = self._parameterNode.GetParameter(param)
model = model if model else self.state.get(param, "")
modelIndex = selector.findText(model)
modelIndex = defaultIndex if modelIndex < 0 < selector.count else modelIndex
selector.setCurrentIndex(modelIndex)
try:
modelInfo = self.models[model]
selector.setToolTip(modelInfo["description"])
except:
selector.setToolTip("")
selector.blockSignals(wasSelectorBlocked)
def updateConfigTable(self):
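# Flatten the server config (models / trainers / strategies / scoring) into a 4-column table
# (section, name, key, value). Section and name cells span their child rows; bool and list
# values are rendered as checkboxes / combo boxes, everything else as editable text.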
table = self.ui.configTable
table.clear()
headers = ["section", "name", "key", "value"]
table.setColumnCount(len(headers))
table.setHorizontalHeaderLabels(headers)
table.setColumnWidth(0, 50)
config = copy.deepcopy(self.info)
infer = config.get("models", {})
train = config.get("trainers", {})
activelearning = config.get("strategies", {})
scoring = config.get("scoring", {})
row_count = 0
config = {"infer": infer, "train": train, "activelearning": activelearning, "scoring": scoring}
for c in config.values():
row_count += sum([len(c[k].get("config", {})) for k in c.keys()])
# print(f"Total rows: {row_count}")
table.setRowCount(row_count)
n = 0
for section in config:
if not config[section]:
continue
c_section = config[section]
l_section = sum([len(c_section[k].get("config", {})) for k in c_section.keys()])
if not l_section:
continue
# print(f"{n} => l_section = {l_section}")
if l_section:
table.setSpan(n, 0, l_section, 1)
for name in c_section:
c_name = c_section[name]
l_name = len(c_name.get("config", {}))
if not l_name:
continue
# print(f"{n} => l_name = {l_name}")
if l_name:
table.setSpan(n, 1, l_name, 1)
for key, val in c_name.get("config", {}).items():
item = qt.QTableWidgetItem(section)
item.setFlags(item.flags() & ~qt.Qt.ItemIsEditable)
table.setItem(n, 0, item)
item = qt.QTableWidgetItem(name)
table.setItem(n, 1, item)
item.setFlags(item.flags() & ~qt.Qt.ItemIsEditable)
item = qt.QTableWidgetItem(key)
table.setItem(n, 2, item)
item.setFlags(item.flags() & ~qt.Qt.ItemIsEditable)
if isinstance(val, (dict, list)):
combo = qt.QComboBox()
for v in val:
combo.addItem(v)
combo.setCurrentIndex(0)
table.setCellWidget(n, 3, combo)
elif isinstance(val, bool):
checkbox = qt.QCheckBox()
checkbox.setChecked(val)
table.setCellWidget(n, 3, checkbox)
else:
table.setItem(n, 3, qt.QTableWidgetItem(str(val) if val else ""))
# print(f"{n} => {section} => {name} => {key} => {val}")
n = n + 1
def updateAccuracyBar(self, dice):
self.ui.accuracyProgressBar.setValue(dice * 100)
css = ["stop: 0 red"]
if dice > 0.5:
css.append(f"stop: {0.5 / dice} orange")
if dice > 0.6:
css.append(f"stop: {0.6 / dice} yellow")
if dice > 0.7:
css.append(f"stop: {0.7 / dice} lightgreen")
if dice > 0.8:
css.append(f"stop: {0.8 / dice} green")
if dice > 0.9:
css.append(f"stop: {0.9 / dice} darkgreen")
self.ui.accuracyProgressBar.setStyleSheet(
"QProgressBar {text-align: center;} "
"QProgressBar::chunk {background-color: "
"qlineargradient(x0: 0, x2: 1, " + ",".join(css) + ")}"
)
self.ui.accuracyProgressBar.setToolTip(f"Accuracy: {dice:.4f}")
def getParamsFromConfig(self, filter, filter2=None):
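# Read the (possibly user-edited) values back from the config table, cast them to the type
# advertised by the server info, and return only the subtree for the given section and
# optional name, e.g. getParamsFromConfig("infer", model) or getParamsFromConfig("train", model).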
mapping = {"infer": "models", "train": "trainers", "activelearning": "strategies", "scoring": "scoring"}
config = {}
for row in range(self.ui.configTable.rowCount):
section = str(self.ui.configTable.item(row, 0).text())
name = str(self.ui.configTable.item(row, 1).text())
key = str(self.ui.configTable.item(row, 2).text())
value = self.ui.configTable.item(row, 3)
if value is None:
value = self.ui.configTable.cellWidget(row, 3)
value = value.checked if isinstance(value, qt.QCheckBox) else value.currentText
else:
value = str(value.text())
v = self.info.get(mapping.get(section, ""), {}).get(name, {}).get("config", {}).get(key, {})
if isinstance(v, int):
value = int(value) if value else 0
elif isinstance(v, float):
value = float(value) if value else 0.0
# print(f"{section} => {name} => {key} => {value}")
if config.get(section) is None:
config[section] = {}
if config[section].get(name) is None:
config[section][name] = {}
config[section][name][key] = value
# print(f"row: {row}, section: {section}, name: {name}, value: {value}, type: {type(v)}")
res = config.get(filter, {})
res = res.get(filter2, {}) if filter2 else res
return res
def onDeepGrowFiducialNodeModified(self, observer, eventid):
logging.debug("Deepgrow Point Event!!")
if self.ignoreFiducialNodeAddEvent:
return
markupsNode = observer
movingMarkupIndex = markupsNode.GetDisplayNode().GetActiveControlPoint()
logging.debug("Markup point added; point ID = {}".format(movingMarkupIndex))
current_point = self.getFiducialPointXYZ(markupsNode, movingMarkupIndex)
if not self.ui.dgUpdateCheckBox.checked:
self.onClickDeepgrow(current_point, skip_infer=True)
return
self.onClickDeepgrow(current_point)
self.ignoreFiducialNodeAddEvent = True
self.onEditFiducialPoints(self.dgPositiveFiducialNode, "MONAILabel.ForegroundPoints")
self.onEditFiducialPoints(self.dgNegativeFiducialNode, "MONAILabel.BackgroundPoints")
self.ignoreFiducialNodeAddEvent = False
def getFiducialPointsXYZ(self, fiducialNode, name):
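# Convert every fiducial point of the node from RAS coordinates into IJK voxel indices of the
# current volume and return them as a list of [i, j, k] triplets.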
v = self._volumeNode
RasToIjkMatrix = vtk.vtkMatrix4x4()
v.GetRASToIJKMatrix(RasToIjkMatrix)
point_set = []
n = fiducialNode.GetNumberOfFiducials()
for i in range(n):
coord = [0.0, 0.0, 0.0]
fiducialNode.GetNthFiducialPosition(i, coord)
world = [0, 0, 0, 0]
fiducialNode.GetNthFiducialWorldCoordinates(i, world)
p_Ras = [coord[0], coord[1], coord[2], 1.0]
p_Ijk = RasToIjkMatrix.MultiplyDoublePoint(p_Ras)
p_Ijk = [round(i) for i in p_Ijk]
logging.debug("RAS: {}; WORLD: {}; IJK: {}".format(coord, world, p_Ijk))
point_set.append(p_Ijk[0:3])
logging.info("{} => Current Fiducials-Points: {}".format(name, point_set))
return point_set
def getFiducialPointXYZ(self, fiducialNode, index):
v = self._volumeNode
RasToIjkMatrix = vtk.vtkMatrix4x4()
v.GetRASToIJKMatrix(RasToIjkMatrix)
coord = [0.0, 0.0, 0.0]
fiducialNode.GetNthFiducialPosition(index, coord)
world = [0, 0, 0, 0]
fiducialNode.GetNthFiducialWorldCoordinates(index, world)
p_Ras = [coord[0], coord[1], coord[2], 1.0]
p_Ijk = RasToIjkMatrix.MultiplyDoublePoint(p_Ras)
p_Ijk = [round(i) for i in p_Ijk]
logging.debug("RAS: {}; WORLD: {}; IJK: {}".format(coord, world, p_Ijk))
return p_Ijk[0:3]
def onEditFiducialPoints(self, fiducialNode, tagName):
if fiducialNode is None:
return
fiducialNode.RemoveAllMarkups()
segmentId, segment = self.currentSegment()
if segment and segmentId:
v = self._volumeNode
IjkToRasMatrix = vtk.vtkMatrix4x4()
v.GetIJKToRASMatrix(IjkToRasMatrix)
fPosStr = vtk.mutable("")
segment.GetTag(tagName, fPosStr)
pointset = str(fPosStr)
logging.debug("{} => {} Fiducial points are: {}".format(segmentId, segment.GetName(), pointset))
if fPosStr is not None and len(pointset) > 0:
points = json.loads(pointset)
for p in points:
p_Ijk = [p[0], p[1], p[2], 1.0]
p_Ras = IjkToRasMatrix.MultiplyDoublePoint(p_Ijk)
logging.debug("Add Fiducial: {} => {}".format(p_Ijk, p_Ras))
fiducialNode.AddFiducialFromArray(p_Ras[0:3])
def currentSegment(self):
segmentation = self._segmentNode.GetSegmentation()
segmentId = segmentation.GetSegmentIdBySegmentName(self.ui.labelComboBox.currentText)
segment = segmentation.GetSegment(segmentId)
logging.debug("Current SegmentID: {}; Segment: {}".format(segmentId, segment))
return segmentId, segment
def onSelectLabel(self, caller=None, event=None):
self.updateParameterNodeFromGUI(caller, event)
self.ignoreFiducialNodeAddEvent = True
self.onEditFiducialPoints(self.dgPositiveFiducialNode, "MONAILabel.ForegroundPoints")
self.onEditFiducialPoints(self.dgNegativeFiducialNode, "MONAILabel.BackgroundPoints")
self.ignoreFiducialNodeAddEvent = False
def icon(self, name="MONAILabel.png"):
# It should not be necessary to modify this method
iconPath = os.path.join(os.path.dirname(__file__), "Resources", "Icons", name)
if os.path.exists(iconPath):
return qt.QIcon(iconPath)
return qt.QIcon()
def updateServerSettings(self):
self.logic.setServer(self.serverUrl())
self.logic.setClientId(slicer.util.settingsValue("MONAILabel/clientId", "user-xyz"))
self.saveServerUrl()
def serverUrl(self):
serverUrl = self.ui.serverComboBox.currentText
if not serverUrl:
serverUrl = "http://127.0.0.1:8000"
return serverUrl.rstrip("/")
def saveServerUrl(self):
self.updateParameterNodeFromGUI()
# Save selected server URL
settings = qt.QSettings()
serverUrl = self.ui.serverComboBox.currentText
settings.setValue("MONAILabel/serverUrl", serverUrl)
# Save current server URL to the top of history
serverUrlHistory = settings.value("MONAILabel/serverUrlHistory")
if serverUrlHistory:
serverUrlHistory = serverUrlHistory.split(";")
else:
serverUrlHistory = []
try:
serverUrlHistory.remove(serverUrl)
except ValueError:
pass
serverUrlHistory.insert(0, serverUrl)
serverUrlHistory = serverUrlHistory[:10] # keep up to first 10 elements
settings.setValue("MONAILabel/serverUrlHistory", ";".join(serverUrlHistory))
self.updateServerUrlGUIFromSettings()
def onClickFetchInfo(self):
self.fetchInfo()
self.updateConfigTable()
def fetchInfo(self, showInfo=False):
if not self.logic:
return
start = time.time()
try:
self.updateServerSettings()
info = self.logic.info()
self.info = info
if self.info.get("config"):
slicer.util.errorDisplay(
"Please upgrade the monai server to latest version",
detailedText=traceback.format_exc(),
)
return
except:
slicer.util.errorDisplay(
"Failed to fetch models from remote server. "
"Make sure server address is correct and <server_uri>/info/ "
"is accessible in browser",
detailedText=traceback.format_exc(),
)
return
self.models.clear()
self.config = info.get("config", {})
model_count = {}
models = info.get("models", {})
for k, v in models.items():
model_type = v.get("type", "segmentation")
model_count[model_type] = model_count.get(model_type, 0) + 1
logging.debug("{} = {}".format(k, model_type))
self.models[k] = v
self.updateGUIFromParameterNode()
msg = ""
msg += "-----------------------------------------------------\t\n"
msg += "Total Models Available: \t" + str(len(models)) + "\t\n"
msg += "-----------------------------------------------------\t\n"
for model_type in model_count.keys():
msg += model_type.capitalize() + " Models: \t" + str(model_count[model_type]) + "\t\n"
msg += "-----------------------------------------------------\t\n"
if showInfo:
qt.QMessageBox.information(slicer.util.mainWindow(), "MONAI Label", msg)
logging.info(msg)
logging.info("Time consumed by fetch info: {0:3.1f}".format(time.time() - start))
def setProgressBarLabelText(self, label):
if not self.progressBar:
self.progressBar = slicer.util.createProgressDialog(windowTitle="Wait...", maximum=100)
self.progressBar.labelText = label
def reportProgress(self, progressPercentage):
if not self.progressBar:
self.progressBar = slicer.util.createProgressDialog(windowTitle="Wait...", maximum=100)
self.progressBar.show()
self.progressBar.activateWindow()
self.progressBar.setValue(progressPercentage)
slicer.app.processEvents()
def onTraining(self):
start = time.time()
status = None
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateServerSettings()
model = self.ui.trainerBox.currentText
model = model if model and model != "ALL" else None
params = self.getParamsFromConfig("train", model)
status = self.logic.train_start(model, params)
self.ui.trainingProgressBar.setValue(1)
self.ui.trainingProgressBar.setToolTip("Training: STARTED")
time.sleep(1)
self.updateGUIFromParameterNode()
except:
slicer.util.errorDisplay(
"Failed to run training in MONAI Label Server", detailedText=traceback.format_exc()
)
finally:
qt.QApplication.restoreOverrideCursor()
if status:
msg = "ID: {}\nStatus: {}\nStart Time: {}\n".format(
status.get("id"),
status.get("status"),
status.get("start_ts"),
)
# slicer.util.infoDisplay(msg, detailedText=json.dumps(status, indent=2))
logging.info(msg)
logging.info("Time consumed by training: {0:3.1f}".format(time.time() - start))
def onStopTraining(self):
start = time.time()
status = None
if not slicer.util.confirmOkCancelDisplay(
"This will kill/stop current Training task. Are you sure to continue?"
):
return
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateServerSettings()
status = self.logic.train_stop()
except:
slicer.util.errorDisplay("Failed to stop Training Task", detailedText=traceback.format_exc())
finally:
qt.QApplication.restoreOverrideCursor()
if status:
msg = "Status: {}\nStart Time: {}\nEnd Time: {}\nResult: {}".format(
status.get("status"),
status.get("start_ts"),
status.get("end_ts"),
status.get("result", status.get("details", [])[-1]),
)
# slicer.util.infoDisplay(msg, detailedText=json.dumps(status, indent=2))
logging.info(msg)
self.updateGUIFromParameterNode()
logging.info("Time consumed by stop training: {0:3.1f}".format(time.time() - start))
def isTrainingRunning(self, check_only=True):
if not self.logic:
return False
self.updateServerSettings()
return self.logic.train_status(check_only)
def onNextSampleButton(self):
if not self.logic:
return
if self._volumeNode or len(slicer.util.getNodesByClass("vtkMRMLScalarVolumeNode")):
if not slicer.util.confirmOkCancelDisplay(
"This will close current scene. Please make sure you have saved your current work.\n"
"Are you sure to continue?"
):
return
self.onClearScribbles()
slicer.mrmlScene.Clear(0)
start = time.time()
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateServerSettings()
strategy = self.ui.strategyBox.currentText
if not strategy:
slicer.util.errorDisplay("No Strategy Found/Selected\t")
return
sample = self.logic.next_sample(strategy, self.getParamsFromConfig("activelearning", strategy))
logging.debug(sample)
if not sample.get("id"):
slicer.util.warningDisplay(
"Unlabled Samples/Images Not Found at server. Instead you can load your own image."
)
return
if self.samples.get(sample["id"]) is not None:
self.current_sample = self.samples[sample["id"]]
name = self.current_sample["VolumeNodeName"]
index = self.ui.inputSelector.findText(name)
self.ui.inputSelector.setCurrentIndex(index)
return
logging.info(sample)
image_id = sample["id"]
image_file = sample.get("path")
image_name = sample.get("name", image_id)
node_name = sample.get("PatientID", sample.get("name", image_id))[-20:]
checksum = sample.get("checksum")
local_exists = image_file and os.path.exists(image_file)
logging.info(f"Check if file exists/shared locally: {image_file} => {local_exists}")
if local_exists:
self._volumeNode = slicer.util.loadVolume(image_file)
self._volumeNode.SetName(node_name)
else:
download_uri = f"{self.serverUrl()}/datastore/image?image={quote_plus(image_id)}"
logging.info(download_uri)
sampleDataLogic = SampleData.SampleDataLogic()
self._volumeNode = sampleDataLogic.downloadFromURL(
nodeNames=node_name, fileNames=image_name, uris=download_uri, checksums=checksum
)[0]
self.initSample(sample)
except:
slicer.util.errorDisplay(
"Failed to fetch Sample from MONAI Label Server", detailedText=traceback.format_exc()
)
finally:
qt.QApplication.restoreOverrideCursor()
self.updateGUIFromParameterNode()
logging.info("Time consumed by next_sample: {0:3.1f}".format(time.time() - start))
def initSample(self, sample, autosegment=True):
sample["VolumeNodeName"] = self._volumeNode.GetName()
self.current_sample = sample
self.samples[sample["id"]] = sample
self._volumeNodes.append(self._volumeNode)
# Create Empty Segments for all labels for this node
self.createSegmentNode()
segmentEditorWidget = slicer.modules.segmenteditor.widgetRepresentation().self().editor
segmentEditorWidget.setSegmentationNode(self._segmentNode)
segmentEditorWidget.setMasterVolumeNode(self._volumeNode)
# check if user allows overlapping segments
if slicer.util.settingsValue("MONAILabel/allowOverlappingSegments", False, converter=slicer.util.toBool):
# set segment editor to allow overlaps
slicer.util.getNodesByClass("vtkMRMLSegmentEditorNode")[0].SetOverwriteMode(2)
if self.info.get("labels"):
self.updateSegmentationMask(None, self.info.get("labels"))
# Check if user wants to run auto-segmentation on new sample
if autosegment and slicer.util.settingsValue(
"MONAILabel/autoRunSegmentationOnNextSample", True, converter=slicer.util.toBool
):
for label in self.info.get("labels", []):
for name, model in self.models.items():
if label in model.get("labels", []):
qt.QApplication.restoreOverrideCursor()
self.ui.segmentationModelSelector.currentText = name
self.onClickSegmentation()
return
def getPermissionForImageDataUpload(self):
return slicer.util.confirmOkCancelDisplay(
"Master volume - without any additional patient information -"
" will be sent to remote data processing server: {0}.\n\n"
"Click 'OK' to proceed with the segmentation.\n"
"Click 'Cancel' to not upload any data and cancel segmentation.\n".format(self.serverUrl()),
dontShowAgainSettingsKey="MONAILabel/showImageDataSendWarning",
)
def onUploadImage(self, init_sample=True, session=False):
volumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
image_id = volumeNode.GetName()
if not self.getPermissionForImageDataUpload():
return False
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
in_file = tempfile.NamedTemporaryFile(suffix=self.file_ext, dir=self.tmpdir).name
self.reportProgress(5)
start = time.time()
slicer.util.saveNode(volumeNode, in_file)
logging.info("Saved Input Node into {0} in {1:3.1f}s".format(in_file, time.time() - start))
self.reportProgress(30)
if session:
self.current_sample["session_id"] = self.logic.create_session(in_file)["session_id"]
else:
self.logic.upload_image(in_file, image_id)
self.current_sample["session"] = False
self.reportProgress(100)
self._volumeNode = volumeNode
if init_sample:
self.initSample({"id": image_id}, autosegment=False)
qt.QApplication.restoreOverrideCursor()
self.updateGUIFromParameterNode()
return True
except:
self.reportProgress(100)
qt.QApplication.restoreOverrideCursor()
if session:
slicer.util.errorDisplay(
"Server Error:: Session creation Failed\nPlease upgrade to latest monailable version (> 0.2.0)",
detailedText=traceback.format_exc(),
)
else:
slicer.util.errorDisplay("Failed to upload volume to Server", detailedText=traceback.format_exc())
return False
def onImportLabel(self):
if not self.ui.labelPathLineEdit.currentPath or not os.path.exists(self.ui.labelPathLineEdit.currentPath):
slicer.util.warningDisplay("Label File not selected")
return
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateSegmentationMask(self.ui.labelPathLineEdit.currentPath, self.info["labels"])
qt.QApplication.restoreOverrideCursor()
except:
qt.QApplication.restoreOverrideCursor()
slicer.util.errorDisplay("Failed to import label", detailedText=traceback.format_exc())
def onSaveLabel(self):
start = time.time()
labelmapVolumeNode = None
result = None
self.onClearScribbles()
if self.current_sample.get("session"):
if not self.onUploadImage(init_sample=False):
return
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
segmentationNode = self._segmentNode
labelmapVolumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode")
slicer.modules.segmentations.logic().ExportVisibleSegmentsToLabelmapNode(
segmentationNode, labelmapVolumeNode, self._volumeNode
)
segmentation = segmentationNode.GetSegmentation()
totalSegments = segmentation.GetNumberOfSegments()
segmentIds = [segmentation.GetNthSegmentID(i) for i in range(totalSegments)]
label_info = []
for idx, segmentId in enumerate(segmentIds):
segment = segmentation.GetSegment(segmentId)
if segment.GetName() in ["foreground_scribbles", "background_scribbles"]:
logging.info(f"Removing segment {segmentId}: {segment.GetName()}")
segmentationNode.RemoveSegment(segmentId)
continue
label_info.append({"name": segment.GetName(), "idx": idx + 1})
# label_info.append({"color": segment.GetColor()})
label_in = tempfile.NamedTemporaryFile(suffix=self.file_ext, dir=self.tmpdir).name
self.reportProgress(5)
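# If overlapping segments are allowed and the configured file extension is .seg.nrrd, save the
# full segmentation node (which preserves overlapping layers); otherwise export the flattened
# labelmap volume instead.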
if (
slicer.util.settingsValue("MONAILabel/allowOverlappingSegments", True, converter=slicer.util.toBool)
and slicer.util.settingsValue("MONAILabel/fileExtension", self.file_ext) == ".seg.nrrd"
):
slicer.util.saveNode(segmentationNode, label_in)
else:
slicer.util.saveNode(labelmapVolumeNode, label_in)
self.reportProgress(30)
self.updateServerSettings()
result = self.logic.save_label(self.current_sample["id"], label_in, {"label_info": label_info})
self.fetchInfo()
if slicer.util.settingsValue("MONAILabel/autoUpdateModel", True, converter=slicer.util.toBool):
try:
if self.isTrainingRunning(check_only=True):
self.logic.train_stop()
except:
logging.info("Failed to stop training; or already stopped")
self.onTraining()
except:
slicer.util.errorDisplay("Failed to save Label to MONAI Label Server", detailedText=traceback.format_exc())
finally:
qt.QApplication.restoreOverrideCursor()
self.reportProgress(100)
if labelmapVolumeNode:
slicer.mrmlScene.RemoveNode(labelmapVolumeNode)
if result:
slicer.util.infoDisplay(
"Label-Mask saved into MONAI Label Server\t\t", detailedText=json.dumps(result, indent=2)
)
if slicer.util.settingsValue("MONAILabel/autoFetchNextSample", False, converter=slicer.util.toBool):
slicer.mrmlScene.Clear(0)
self.onNextSampleButton()
logging.info("Time consumed by save label: {0:3.1f}".format(time.time() - start))
def getSessionId(self):
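# Session mode: if the current sample was uploaded as a server session, verify that the session
# still exists on the server and transparently re-upload the volume to create a new one when it
# has expired.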
session_id = None
if self.current_sample.get("session", False):
session_id = self.current_sample.get("session_id")
if not session_id or not self.logic.get_session(session_id):
self.onUploadImage(init_sample=False, session=True)
session_id = self.current_sample["session_id"]
return session_id
def onClickSegmentation(self):
if not self.current_sample:
return
start = time.time()
result_file = None
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateServerSettings()
model = self.ui.segmentationModelSelector.currentText
image_file = self.current_sample["id"]
params = self.getParamsFromConfig("infer", model)
result_file, params = self.logic.infer(model, image_file, params, session_id=self.getSessionId())
print(f"Result Params for Segmentation: {params}")
labels = (
params.get("label_names") if params and params.get("label_names") else self.models[model].get("labels")
)
if labels and isinstance(labels, dict):
labels = [k for k, _ in sorted(labels.items(), key=lambda item: item[1])]
self.updateSegmentationMask(result_file, labels)
except:
slicer.util.errorDisplay(
"Failed to run inference in MONAI Label Server", detailedText=traceback.format_exc()
)
finally:
qt.QApplication.restoreOverrideCursor()
if result_file and os.path.exists(result_file):
os.unlink(result_file)
self.updateGUIFromParameterNode()
logging.info("Time consumed by segmentation: {0:3.1f}".format(time.time() - start))
def onUpdateDeepgrow(self):
self.onClickDeepgrow(None)
def onClickDeepgrow(self, current_point, skip_infer=False):
model = self.ui.deepgrowModelSelector.currentText
if not model:
slicer.util.warningDisplay("Please select a deepgrow model")
return
_, segment = self.currentSegment()
if not segment:
slicer.util.warningDisplay("Please add the required label to run deepgrow")
return
foreground_all = self.getFiducialPointsXYZ(self.dgPositiveFiducialNode, "foreground")
background_all = self.getFiducialPointsXYZ(self.dgNegativeFiducialNode, "background")
segment.SetTag("MONAILabel.ForegroundPoints", json.dumps(foreground_all))
segment.SetTag("MONAILabel.BackgroundPoints", json.dumps(background_all))
if skip_infer:
return
# use model info "deepgrow" to determine
deepgrow_3d = self.models[model].get("dimension", 3) != 2
start = time.time()
label = segment.GetName()
operationDescription = "Run Deepgrow for segment: {}; model: {}; 3d {}".format(label, model, deepgrow_3d)
logging.debug(operationDescription)
if not current_point:
if not foreground_all and not deepgrow_3d:
slicer.util.warningDisplay(operationDescription + " - points not added")
return
current_point = foreground_all[-1] if foreground_all else background_all[-1] if background_all else None
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
sliceIndex = None
if self.multi_label:
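# Multi-label (DeepEdit-style) request: gather the foreground points stored as segment tags for
# every known label into a single params dict keyed by label name.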
params = {}
segmentation = self._segmentNode.GetSegmentation()
for name in self.info.get("labels", []):
points = []
segmentId = segmentation.GetSegmentIdBySegmentName(name)
segment = segmentation.GetSegment(segmentId) if segmentId else None
if segment:
fPosStr = vtk.mutable("")
segment.GetTag("MONAILabel.ForegroundPoints", fPosStr)
pointset = str(fPosStr)
print("{} => {} Fiducial points are: {}".format(segmentId, name, pointset))
if fPosStr is not None and len(pointset) > 0:
points = json.loads(pointset)
params[name] = points
params["label"] = label
labels = None
else:
sliceIndex = current_point[2] if current_point else None
logging.debug("Slice Index: {}".format(sliceIndex))
if deepgrow_3d or not sliceIndex:
foreground = foreground_all
background = background_all
else:
foreground = [x for x in foreground_all if x[2] == sliceIndex]
background = [x for x in background_all if x[2] == sliceIndex]
logging.debug("Foreground: {}".format(foreground))
logging.debug("Background: {}".format(background))
logging.debug("Current point: {}".format(current_point))
params = {
"label": label,
"foreground": foreground,
"background": background,
}
labels = [label]
params["label"] = label
params.update(self.getParamsFromConfig("infer", model))
print(f"Request Params for Deepgrow/Deepedit: {params}")
image_file = self.current_sample["id"]
result_file, params = self.logic.infer(model, image_file, params, session_id=self.getSessionId())
print(f"Result Params for Deepgrow/Deepedit: {params}")
if labels is None:
labels = (
params.get("label_names")
if params and params.get("label_names")
else self.models[model].get("labels")
)
if labels and isinstance(labels, dict):
labels = [k for k, _ in sorted(labels.items(), key=lambda item: item[1])]
freeze = label if self.ui.freezeUpdateCheckBox.checked else None
self.updateSegmentationMask(result_file, labels, None if deepgrow_3d else sliceIndex, freeze=freeze)
except:
logging.exception("Unknown Exception")
slicer.util.errorDisplay(operationDescription + " - unexpected error.", detailedText=traceback.format_exc())
finally:
qt.QApplication.restoreOverrideCursor()
self.updateGUIFromParameterNode()
logging.info("Time consumed by Deepgrow: {0:3.1f}".format(time.time() - start))
def createCursor(self, widget):
return slicer.util.mainWindow().cursor
def createSegmentNode(self):
if self._volumeNode is None:
return
if self._segmentNode is None:
name = "segmentation_" + self._volumeNode.GetName()
self._segmentNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentationNode")
self._segmentNode.SetReferenceImageGeometryParameterFromVolumeNode(self._volumeNode)
self._segmentNode.SetName(name)
def getLabelColor(self, name):
color = GenericAnatomyColors.get(name.lower())
return [c / 255.0 for c in color] if color else None
def updateSegmentationMask(self, in_file, labels, sliceIndex=None, freeze=None):
# TODO:: Add ROI Node (for Bounding Box if provided in the result)
start = time.time()
logging.debug("Update Segmentation Mask from: {}".format(in_file))
if in_file and not os.path.exists(in_file):
return False
segmentationNode = self._segmentNode
segmentation = segmentationNode.GetSegmentation()
if in_file is None:
for label in labels:
if not segmentation.GetSegmentIdBySegmentName(label):
segmentation.AddEmptySegment(label, label, self.getLabelColor(label))
return True
labels = [l for l in labels if l != "background"]
print(f"Update Segmentation Mask using Labels: {labels}")
# segmentId, segment = self.currentSegment()
labelImage = sitk.ReadImage(in_file)
labelmapVolumeNode = sitkUtils.PushVolumeToSlicer(labelImage, None, className="vtkMRMLLabelMapVolumeNode")
existing_label_ids = {}
for label in labels:
segment_id = segmentation.GetSegmentIdBySegmentName(label)
if segment_id:
existing_label_ids[label] = segment_id
freeze = [freeze] if freeze and isinstance(freeze, str) else freeze
print(f"Import only Freezed label: {freeze}")
numberOfExistingSegments = segmentation.GetNumberOfSegments()
slicer.modules.segmentations.logic().ImportLabelmapToSegmentationNode(labelmapVolumeNode, segmentationNode)
slicer.mrmlScene.RemoveNode(labelmapVolumeNode)
numberOfAddedSegments = segmentation.GetNumberOfSegments() - numberOfExistingSegments
logging.debug("Adding {} segments".format(numberOfAddedSegments))
addedSegmentIds = [
segmentation.GetNthSegmentID(numberOfExistingSegments + i) for i in range(numberOfAddedSegments)
]
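# Merge each segment imported from the result labelmap into the matching existing segment using
# the "Logical operators" effect, then remove the temporary imported segment. For slice-wise
# (2D Deepgrow) updates the affected slice is cleared first and the new result is unioned in.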
for i, segmentId in enumerate(addedSegmentIds):
segment = segmentation.GetSegment(segmentId)
print("Setting new segmentation with id: {} => {}".format(segmentId, segment.GetName()))
label = labels[i] if i < len(labels) else "unknown {}".format(i)
# segment.SetName(label)
# segment.SetColor(self.getLabelColor(label))
if freeze and label not in freeze:
print(f"Discard label update for: {label}")
elif label in existing_label_ids:
segmentEditorWidget = slicer.modules.segmenteditor.widgetRepresentation().self().editor
segmentEditorWidget.setSegmentationNode(segmentationNode)
segmentEditorWidget.setMasterVolumeNode(self._volumeNode)
segmentEditorWidget.setCurrentSegmentID(existing_label_ids[label])
effect = segmentEditorWidget.effectByName("Logical operators")
labelmap = slicer.vtkOrientedImageData()
segmentationNode.GetBinaryLabelmapRepresentation(segmentId, labelmap)
if sliceIndex:
selectedSegmentLabelmap = effect.selectedSegmentLabelmap()
dims = selectedSegmentLabelmap.GetDimensions()
count = 0
for x in range(dims[0]):
for y in range(dims[1]):
if selectedSegmentLabelmap.GetScalarComponentAsDouble(x, y, sliceIndex, 0):
count = count + 1
selectedSegmentLabelmap.SetScalarComponentFromDouble(x, y, sliceIndex, 0, 0)
logging.debug("Total Non Zero: {}".format(count))
# Clear the Slice
if count:
effect.modifySelectedSegmentByLabelmap(
selectedSegmentLabelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeSet
)
# Union label map
effect.modifySelectedSegmentByLabelmap(
labelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeAdd
)
else:
# adding bypass masking to not overwrite other layers,
# needed for preserving scribbles during updates
# help from: https://github.com/Slicer/Slicer/blob/master/Modules/Loadable/Segmentations/EditorEffects/Python/SegmentEditorLogicalEffect.py
bypassMask = True
effect.modifySelectedSegmentByLabelmap(
labelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeSet, bypassMask
)
segmentationNode.RemoveSegment(segmentId)
self.showSegmentationsIn3D()
logging.info("Time consumed by updateSegmentationMask: {0:3.1f}".format(time.time() - start))
return True
def showSegmentationsIn3D(self):
# add closed surface representation
if self._segmentNode:
self._segmentNode.CreateClosedSurfaceRepresentation()
view = slicer.app.layoutManager().threeDWidget(0).threeDView()
view.resetFocalPoint()
def updateServerUrlGUIFromSettings(self):
# Populate the server URL combo box from the saved URL history and the current setting
settings = qt.QSettings()
serverUrlHistory = settings.value("MONAILabel/serverUrlHistory")
wasBlocked = self.ui.serverComboBox.blockSignals(True)
self.ui.serverComboBox.clear()
if serverUrlHistory:
self.ui.serverComboBox.addItems(serverUrlHistory.split(";"))
self.ui.serverComboBox.setCurrentText(settings.value("MONAILabel/serverUrl"))
self.ui.serverComboBox.blockSignals(wasBlocked)
def createFiducialNode(self, name, onMarkupNodeModified, color):
displayNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLMarkupsDisplayNode")
displayNode.SetTextScale(0)
displayNode.SetSelectedColor(color)
fiducialNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLMarkupsFiducialNode")
fiducialNode.SetName(name)
fiducialNode.SetAndObserveDisplayNodeID(displayNode.GetID())
fiducialNodeObservers = []
self.addFiducialNodeObserver(fiducialNode, onMarkupNodeModified)
return fiducialNode, fiducialNodeObservers
def removeFiducialNodeObservers(self, fiducialNode, fiducialNodeObservers):
if fiducialNode and fiducialNodeObservers:
for observer in fiducialNodeObservers:
fiducialNode.RemoveObserver(observer)
def addFiducialNodeObserver(self, fiducialNode, onMarkupNodeModified):
fiducialNodeObservers = []
if fiducialNode:
eventIds = [slicer.vtkMRMLMarkupsNode.PointPositionDefinedEvent]
for eventId in eventIds:
fiducialNodeObservers.append(fiducialNode.AddObserver(eventId, onMarkupNodeModified))
return fiducialNodeObservers
def scribblesLayersPresent(self):
scribbles_exist = False
if self._segmentNode is not None:
segmentationNode = self._segmentNode
segmentation = segmentationNode.GetSegmentation()
numSegments = segmentation.GetNumberOfSegments()
segmentIds = [segmentation.GetNthSegmentID(i) for i in range(numSegments)]
scribbles_exist = any("scribbles" in sid for sid in segmentIds)
return scribbles_exist
def onStartScribbling(self):
if not self._segmentNode:
return
logging.debug("Scribbles start event")
if (not self.scribblesLayersPresent()) and (self._scribblesEditorWidget is None):
# add background, layer index = -2 [2], color = red
self._segmentNode.GetSegmentation().AddEmptySegment(
"background_scribbles", "background_scribbles", [1.0, 0.0, 0.0]
)
# add foreground, layer index = -1 [3], color = green
self._segmentNode.GetSegmentation().AddEmptySegment(
"foreground_scribbles", "foreground_scribbles", [0.0, 1.0, 0.0]
)
# change segmentation display properties to "see through" the scribbles
# further explanation at:
# https://apidocs.slicer.org/master/classvtkMRMLSegmentationDisplayNode.html
segmentationDisplayNode = self._segmentNode.GetDisplayNode()
# background
opacity = 0.2
segmentationDisplayNode.SetSegmentOpacity2DFill("background_scribbles", opacity)
segmentationDisplayNode.SetSegmentOpacity2DOutline("background_scribbles", opacity)
# foreground
segmentationDisplayNode.SetSegmentOpacity2DFill("foreground_scribbles", opacity)
segmentationDisplayNode.SetSegmentOpacity2DOutline("foreground_scribbles", opacity)
# create segmentEditorWidget to access "Paint" and "Erase" segmentation tools
# these will be used to draw scribbles
self._scribblesEditorWidget = slicer.qMRMLSegmentEditorWidget()
self._scribblesEditorWidget.setMRMLScene(slicer.mrmlScene)
segmentEditorNode = slicer.vtkMRMLSegmentEditorNode()
# by default, painting new scribbles would overwrite and erase any overlapping existing
# labels - this is not the desired behaviour, so we switch to overlay mode, which allows
# drawing scribbles without changing existing labels. Further explanation at:
# https://discourse.slicer.org/t/how-can-i-set-masking-settings-on-a-segment-editor-effect-in-python/4406/7
segmentEditorNode.SetOverwriteMode(slicer.vtkMRMLSegmentEditorNode.OverwriteNone)
# add all nodes to the widget
slicer.mrmlScene.AddNode(segmentEditorNode)
self._scribblesEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode)
self._scribblesEditorWidget.setSegmentationNode(self._segmentNode)
self._scribblesEditorWidget.setMasterVolumeNode(self._volumeNode)
def onUpdateScribbles(self):
logging.info("Scribbles update event")
scribblesMethod = self.ui.scribblesMethodSelector.currentText
scribbles_in = None
result_file = None
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
# get scribbles + label
segmentationNode = self._segmentNode
labelmapVolumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode")
slicer.modules.segmentations.logic().ExportVisibleSegmentsToLabelmapNode(
segmentationNode, labelmapVolumeNode, self._volumeNode
)
scribbles_in = tempfile.NamedTemporaryFile(suffix=self.file_ext, dir=self.tmpdir).name
self.reportProgress(5)
# save scribbles + label to file
slicer.util.saveNode(labelmapVolumeNode, scribbles_in)
self.reportProgress(30)
self.updateServerSettings()
self.reportProgress(60)
# try to first fetch vtkMRMLAnnotationROINode
roiNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLAnnotationROINode")
if roiNode is None:  # if no vtkMRMLAnnotationROINode is present, fall back to a vtkMRMLMarkupsROINode
roiNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLMarkupsROINode")
# if roi node found, then try to get roi
selected_roi = self.getROIPointsXYZ(roiNode)
# send scribbles + label to server along with selected scribbles method
params = self.getParamsFromConfig("infer", scribblesMethod)
params.update({"roi": selected_roi})
image_file = self.current_sample["id"]
result_file, params = self.logic.infer(
scribblesMethod, image_file, params, scribbles_in, session_id=self.getSessionId()
)
# display result from server
self.reportProgress(90)
_, segment = self.currentSegment()
label = segment.GetName()
self.updateSegmentationMask(result_file, [label])
except:
slicer.util.errorDisplay(
"Failed to post process label on MONAI Label Server using {}".format(scribblesMethod),
detailedText=traceback.format_exc(),
)
finally:
qt.QApplication.restoreOverrideCursor()
self.reportProgress(100)
# clear all temporary files
if scribbles_in and os.path.exists(scribbles_in):
os.unlink(scribbles_in)
if result_file and os.path.exists(result_file):
os.unlink(result_file)
def getROIPointsXYZ(self, roiNode):
if roiNode is None:
return []
v = self._volumeNode
RasToIjkMatrix = vtk.vtkMatrix4x4()
v.GetRASToIJKMatrix(RasToIjkMatrix)
roi_points_ras = [0.0] * 6
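# roi_points_ras holds the ROI bounds as [x_min, x_max, y_min, y_max, z_min, z_max] in RAS
# space; the min/max corners are converted to IJK voxel indices below and interleaved back into
# the same bounds layout before being returned.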
if roiNode.__class__.__name__ == "vtkMRMLMarkupsROINode":
# for vtkMRMLMarkupsROINode
print(roiNode.__class__.__name__)
center = [0] * 3
roiNode.GetCenter(center)
roi_points_ras = [(x - s / 2, x + s / 2) for x, s in zip(center, roiNode.GetSize())]
roi_points_ras = [item for sublist in roi_points_ras for item in sublist]
elif roiNode.__class__.__name__ == "vtkMRMLAnnotationROINode":
# for vtkMRMLAnnotationROINode (old method)
print(roiNode.__class__.__name__)
roiNode.GetBounds(roi_points_ras)
else:
# if none found then best to return empty list
return []
min_points_ras = [roi_points_ras[0], roi_points_ras[2], roi_points_ras[4], 1.0]
max_points_ras = [roi_points_ras[1], roi_points_ras[3], roi_points_ras[5], 1.0]
min_points_ijk = RasToIjkMatrix.MultiplyDoublePoint(min_points_ras)
max_points_ijk = RasToIjkMatrix.MultiplyDoublePoint(max_points_ras)
min_points_ijk = [round(i) for i in min_points_ijk]
max_points_ijk = [round(i) for i in max_points_ijk]
roi_points_ijk = [val for pair in zip(min_points_ijk[0:3], max_points_ijk[0:3]) for val in pair]
logging.debug("RAS: {}; IJK: {}".format(roi_points_ras, roi_points_ijk))
# print("RAS: {}; IJK: {}".format(roi_points_ras, roi_points_ijk))
return roi_points_ijk
def onClearScribblesSegmentNodes(self):
# more explanation on this at:
# https://discourse.slicer.org/t/how-to-clear-segmentation/7433/4
# clear "scribbles" segment before saving the label
if not self._segmentNode:
return
segmentation = self._segmentNode
num_segments = segmentation.GetSegmentation().GetNumberOfSegments()
for i in range(num_segments):
segmentId = segmentation.GetSegmentation().GetNthSegmentID(i)
if "scribbles" in segmentId:
logging.info("clearning {}".format(segmentId))
labelMapRep = slicer.vtkOrientedImageData()
segmentation.GetBinaryLabelmapRepresentation(segmentId, labelMapRep)
vtkSegmentationCore.vtkOrientedImageDataResample.FillImage(labelMapRep, 0, labelMapRep.GetExtent())
slicer.vtkSlicerSegmentationsModuleLogic.SetBinaryLabelmapToSegment(
labelMapRep, segmentation, segmentId, slicer.vtkSlicerSegmentationsModuleLogic.MODE_REPLACE
)
def onClearScribbles(self):
# reset scribbles mode
self.scribblesMode = None
# clear scribbles editor widget
if self._scribblesEditorWidget:
widget = self._scribblesEditorWidget
del widget
self._scribblesEditorWidget = None
# remove "scribbles" segments from label
self.onClearScribblesSegmentNodes()
# reset UI elements associated with scribbles
self.ui.scribblesCollapsibleButton.collapsed = True
self.ui.paintScribblesButton.setChecked(False)
self.ui.eraseScribblesButton.setChecked(False)
self.ui.scribblesLabelSelector.setCurrentIndex(0)
def checkAndInitialiseScribbles(self):
if not self._segmentNode:
return
if self._scribblesEditorWidget is None:
self.onStartScribbling()
if self.scribblesMode is None:
self.changeScribblesMode(tool="Paint", layer="foreground_scribbles")
self.updateScribToolLayerFromMode()
def updateScribToolLayerFromMode(self):
if not self._segmentNode:
return
logging.info("Scribbles mode {} ".format(self.scribblesMode))
self.checkAndInitialiseScribbles()
# update tool/layer select for scribblesEditorWidget
tool, layer = self.getToolAndLayerFromScribblesMode()
if self._scribblesEditorWidget:
self._scribblesEditorWidget.setActiveEffectByName(tool)
self._scribblesEditorWidget.setCurrentSegmentID(layer)
# update brush type from checkbox
if tool in ("Paint", "Erase"):
is3dbrush = self.ui.brush3dCheckbox.checkState()
self.on3dBrushCheckbox(state=is3dbrush)
# update brush size from slider
brushSize = self.ui.brushSizeSlider.value
self.updateBrushSize(value=brushSize)
def getToolAndLayerFromScribblesMode(self):
if self.scribblesMode is not None:
return self.scribblesMode.split("+")
else:
# default modes
return "Paint", "foreground_scribbles"
def changeScribblesMode(self, tool=None, layer=None):
ctool, clayer = self.getToolAndLayerFromScribblesMode()
ctool = tool if tool is not None else ctool
clayer = layer if layer is not None else clayer
self.scribblesMode = "+".join([ctool, clayer])
def onPaintScribbles(self):
if not self._segmentNode:
return
if self.ui.eraseScribblesButton.checked:
self.ui.eraseScribblesButton.setChecked(False)
self.changeScribblesMode(tool="Paint" if self.ui.paintScribblesButton.checked else "None")
self.updateScribToolLayerFromMode()
def onEraseScribbles(self):
if not self._segmentNode:
return
if self.ui.paintScribblesButton.checked:
self.ui.paintScribblesButton.setChecked(False)
self.changeScribblesMode(tool="Erase" if self.ui.eraseScribblesButton.checked else "None")
self.updateScribToolLayerFromMode()
def onSelectScribblesLabel(self):
if not self._segmentNode:
return
index = self.ui.scribblesLabelSelector.currentIndex
index = 0 if index < 0 else index
selected = self.ui.scribblesLabelSelector.itemText(index)
layer = "foreground_scribbles" if selected == "Foreground" else "background_scribbles"
self.changeScribblesMode(layer=layer)
self.updateScribToolLayerFromMode()
def on3dBrushCheckbox(self, state):
logging.info("3D brush update {}".format(state))
self.checkAndInitialiseScribbles()
effect = self._scribblesEditorWidget.activeEffect()
# enable scribbles in 3d using a sphere brush
effect.setParameter("BrushSphere", state)
def updateBrushSize(self, value):
logging.info("brush size update {}".format(value))
if self.ui.paintScribblesButton.checked or self.ui.eraseScribblesButton.checked:
self.checkAndInitialiseScribbles()
effect = self._scribblesEditorWidget.activeEffect()
effect.setParameter("BrushAbsoluteDiameter", value)
class MONAILabelLogic(ScriptedLoadableModuleLogic):
def __init__(self, tmpdir=None, server_url=None, progress_callback=None, client_id=None):
ScriptedLoadableModuleLogic.__init__(self)
self.server_url = server_url
self.tmpdir = slicer.util.tempDirectory("slicer-monai-label") if tmpdir is None else tmpdir
self.client_id = client_id
self.volumeToSessions = dict()
self.progress_callback = progress_callback
def setDefaultParameters(self, parameterNode):
if not parameterNode.GetParameter("SegmentationModel"):
parameterNode.SetParameter("SegmentationModel", "")
if not parameterNode.GetParameter("DeepgrowModel"):
parameterNode.SetParameter("DeepgrowModel", "")
if not parameterNode.GetParameter("ScribblesMethod"):
parameterNode.SetParameter("ScribblesMethod", "")
def __del__(self):
shutil.rmtree(self.tmpdir, ignore_errors=True)
def setServer(self, server_url=None):
self.server_url = server_url if server_url else "http://127.0.0.1:8000"
def setClientId(self, client_id):
self.client_id = client_id if client_id else "user-xyz"
def setProgressCallback(self, progress_callback=None):
self.progress_callback = progress_callback
def reportProgress(self, progress):
if self.progress_callback:
self.progress_callback(progress)
def info(self):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).info()
def next_sample(self, strategy, params={}):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).next_sample(strategy, params)
def create_session(self, image_in):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).create_session(image_in)
def get_session(self, session_id):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).get_session(session_id)
def remove_session(self, session_id):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).remove_session(session_id)
def upload_image(self, image_in, image_id=None):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).upload_image(image_in, image_id)
def save_label(self, image_in, label_in, params):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).save_label(
image_in, label_in, params=params
)
def infer(self, model, image_in, params={}, label_in=None, file=None, session_id=None):
logging.debug("Preparing input data for segmentation")
self.reportProgress(0)
client = MONAILabelClient(self.server_url, self.tmpdir, self.client_id)
result_file, params = client.infer(model, image_in, params, label_in, file, session_id)
logging.debug(f"Image Response: {result_file}")
logging.debug(f"JSON Response: {params}")
self.reportProgress(100)
return result_file, params
def train_start(self, model=None, params={}):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).train_start(model, params)
def train_status(self, check_if_running):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).train_status(check_if_running)
def train_stop(self):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).train_stop()
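# Illustrative sketch (hypothetical values, not part of the module): MONAILabelLogic can be
# driven from the Slicer Python console without the widget, assuming a MONAI Label server is
# reachable, e.g.:
#
#   logic = MONAILabelLogic()
#   logic.setServer("http://127.0.0.1:8000")
#   info = logic.info()                       # server metadata: models, labels, strategies
#   sample = logic.next_sample("random")      # strategy name depends on the server app
#   result_file, params = logic.infer("segmentation", sample["id"], {})
#
# The model name, strategy and response fields (e.g. sample["id"]) above are assumptions;
# use the values reported by logic.info() for your own server.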
class MONAILabelTest(ScriptedLoadableModuleTest):
def setUp(self):
slicer.mrmlScene.Clear()
def runTest(self):
self.setUp()
self.test_MONAILabel1()
def test_MONAILabel1(self):
self.delayDisplay("Test passed")
# mypy: allow-untyped-defs
from unittest import mock
import pytest
from tools.ci.tc import decision
@pytest.mark.parametrize("run_jobs,tasks,expected", [
([], {"task-no-schedule-if": {}}, ["task-no-schedule-if"]),
([], {"task-schedule-if-no-run-job": {"schedule-if": {}}}, []),
(["job"],
{"job-present": {"schedule-if": {"run-job": ["other-job", "job"]}}},
["job-present"]),
(["job"], {"job-missing": {"schedule-if": {"run-job": ["other-job"]}}}, []),
(["all"], {"job-all": {"schedule-if": {"run-job": ["other-job"]}}}, ["job-all"]),
(["job"],
{"job-1": {"schedule-if": {"run-job": ["job"]}},
"job-2": {"schedule-if": {"run-job": ["other-job"]}}},
["job-1"]),
])
def test_filter_schedule_if(run_jobs, tasks, expected):
with mock.patch("tools.ci.tc.decision.get_run_jobs",
return_value=run_jobs) as get_run_jobs:
assert (decision.filter_schedule_if({}, tasks) ==
{name: tasks[name] for name in expected})
assert get_run_jobs.call_count in (0, 1)
@pytest.mark.parametrize("msg,expected", [
("Some initial line\n\ntc-jobs:foo,bar", {"foo", "bar"}),
("Some initial line\n\ntc-jobs:foo, bar", {"foo", "bar"}),
("tc-jobs:foo, bar \nbaz", {"foo", "bar"}),
("tc-jobs:all", {"all"}),
("", set()),
("tc-jobs:foo\ntc-jobs:bar", {"foo"})])
@pytest.mark.parametrize("event", [
{"commits": [{"message": "<message>"}]},
{"pull_request": {"body": "<message>"}}
])
def test_extra_jobs_pr(msg, expected, event):
def sub(obj):
"""Copy obj, except if it's a string with the value <message>
replace it with the value of the msg argument"""
if isinstance(obj, dict):
return {key: sub(value) for (key, value) in obj.items()}
elif isinstance(obj, list):
return [sub(value) for value in obj]
elif obj == "<message>":
return msg
return obj
event = sub(event)
assert decision.get_extra_jobs(event) == expected
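# The cases above document the `tc-jobs:` directive that can appear in a commit message or PR
# body: a comma-separated list of extra jobs (whitespace after the comma is tolerated), the
# special value `all`, and - per the last case - only the first `tc-jobs:` line is honoured.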
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch CUTOFFBERT model. """
import math
import os
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss
from torch.distributions.beta import Beta
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
DualPassageEncoderModelOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_cutoffbert import CutoffBertConfig
from ..bert.modeling_bert import BertEmbeddings as CutoffBertEmbeddings
from ..bert.modeling_bert import BertEncoder as CutoffBertEncoder
from ..bert.modeling_bert import BertPooler as CutoffBertPooler
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "CutoffBertConfig"
_TOKENIZER_FOR_DOC = "CutoffBertTokenizer"
CUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_cutoffbert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {"/".join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {"/".join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class CutoffBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = CutoffBertConfig
load_tf_weights = load_tf_weights_in_cutoffbert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
CUTOFFBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
CUTOFFBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare CutoffBert Model transformer outputting raw hidden-states without any specific head on top.",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertModel(CutoffBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = CutoffBertEmbeddings(config)
self.encoder = CutoffBertEncoder(config)
self.pooler = CutoffBertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
CutoffBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled
output) + Cut-off data augmentation support.
""",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertForSequenceClassification(CutoffBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.cls_token_id = config.cls_token_id
self.sep_token_id = config.sep_token_id
self.mask_token_id = config.mask_token_id
self.masking_prob = config.cutoff_masking_prob
self.temperature = config.cutoff_temperature
self.mask_loss_wgt = config.cutoff_mask_loss_wgt
self.js_loss_wgt = config.cutoff_js_loss_wgt
self.config = config
self.bert = CutoffBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def _apply_cutoff(self, inputs):
masked_inputs = inputs.clone()
valid_masking_indices = (inputs != self.cls_token_id) & (inputs != self.sep_token_id)
random_masking_indices = torch.bernoulli(torch.full(inputs.shape, self.masking_prob, device=inputs.device)).bool()
masking_indices = random_masking_indices & valid_masking_indices
masked_inputs[masking_indices] = self.mask_token_id
return masked_inputs
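# Illustrative example of the token-cutoff augmentation above (positions are random, so the
# outcome differs between calls): with masking_prob = 0.5 an input such as
#   [CLS] the movie was great [SEP]
# may become
#   [CLS] the [MASK] was [MASK] [SEP]
# [CLS] and [SEP] positions are never masked; every other position is masked independently
# with probability masking_prob.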
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is None:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = self.dropout(outputs[1])
logits = self.classifier(pooled_output)
if not return_dict:
return (logits,) + outputs[2:]
return SequenceClassifierOutput(
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
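# Cut-off training path: pair every example with a token-cutoff copy, run both views through
# BERT as a single flattened batch of size 2*b, then split the logits back into the clean and
# masked predictions.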
b, l = input_ids.size()
masked_input_ids = self._apply_cutoff(input_ids.clone())
flatten_input_ids = torch.stack((input_ids, masked_input_ids), dim=1).reshape(-1, l)
flatten_attention_mask = attention_mask.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if attention_mask is not None else None
flatten_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if token_type_ids is not None else None
flatten_position_ids = position_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if position_ids is not None else None
flatten_inputs_embeds = inputs_embeds.unsqueeze(1).expand(-1, 2, -1, -1).reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None
flatten_outputs = self.bert(
flatten_input_ids,
attention_mask=flatten_attention_mask,
token_type_ids=flatten_token_type_ids,
position_ids=flatten_position_ids,
head_mask=head_mask,
inputs_embeds=flatten_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
flatten_pooled_output = self.dropout(flatten_outputs[1])
flatten_logits = self.classifier(flatten_pooled_output)
logits, masked_logits = flatten_logits.reshape(b, 2, self.config.num_labels).chunk(2, dim=1)
logits, masked_logits = logits.squeeze(dim=1).contiguous(), masked_logits.squeeze(dim=1).contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if self.mask_loss_wgt is not None and self.mask_loss_wgt > 0.0:
mask_loss = loss_fct(masked_logits.view(-1, self.num_labels), labels.view(-1))
loss += mask_loss * self.mask_loss_wgt
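# Optional consistency term: a temperature-scaled Jensen-Shannon divergence between the clean
# and cutoff predictions, computed as the average of the two KL divergences against their mean
# distribution.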
if self.js_loss_wgt is not None and self.js_loss_wgt > 0.0:
kl_loss_fct = KLDivLoss(reduction="batchmean")
src_logits, trg_logits = logits, masked_logits
mean_logits = (src_logits + trg_logits) * 0.5
src_loss = kl_loss_fct(
F.log_softmax(src_logits / self.temperature, dim=-1),
F.softmax(mean_logits / self.temperature, dim=-1)
) * (self.temperature ** 2)
trg_loss = kl_loss_fct(
F.log_softmax(trg_logits / self.temperature, dim=-1),
F.softmax(mean_logits / self.temperature, dim=-1)
) * (self.temperature ** 2)
js_loss = (src_loss + trg_loss) * 0.5
loss += js_loss * self.js_loss_wgt
if not return_dict:
return (loss, logits)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
)
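# Minimal usage sketch (illustrative only; the checkpoint name, token ids and cutoff
# hyper-parameters below are placeholder assumptions, not values shipped with this module):
#
#   config = CutoffBertConfig.from_pretrained(
#       "bert-base-uncased", num_labels=2,
#       cls_token_id=101, sep_token_id=102, mask_token_id=103,
#       cutoff_masking_prob=0.1, cutoff_temperature=1.0,
#       cutoff_mask_loss_wgt=1.0, cutoff_js_loss_wgt=1.0,
#   )
#   model = CutoffBertForSequenceClassification.from_pretrained("bert-base-uncased", config=config)
#
#   # With labels the forward pass returns the combined loss (cross-entropy plus the optional
#   # mask and JS terms); without labels it behaves like a plain BERT sequence classifier and
#   # returns only the logits.
#   outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
#   loss, logits = outputs.loss, outputs.logits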
| # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch CUTOFFBERT model. """
import math
import os
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss
from torch.distributions.beta import Beta
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
DualPassageEncoderModelOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_cutoffbert import CutoffBertConfig
from ..bert.modeling_bert import BertEmbeddings as CutoffBertEmbeddings
from ..bert.modeling_bert import BertEncoder as CutoffBertEncoder
from ..bert.modeling_bert import BertPooler as CutoffBertPooler
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "CutoffBertConfig"
_TOKENIZER_FOR_DOC = "CutoffBertTokenizer"
CUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_cutoffbert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class CutoffBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = CutoffBertConfig
load_tf_weights = load_tf_weights_in_cutoffbert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
CUTOFFBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
CUTOFFBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare CutoffBert Model transformer outputting raw hidden-states without any specific head on top.",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertModel(CutoffBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = CutoffBertEmbeddings(config)
self.encoder = CutoffBertEncoder(config)
self.pooler = CutoffBertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
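# Illustrative sketch (never called): how the bare model above can be invoked. The
# tokenizer class and checkpoint name are assumptions, and CutoffBertConfig is assumed
# to be default-constructible like BertConfig; the weights here are randomly initialized.
def _example_bare_model_forward():  # hypothetical helper, for illustration only
    from transformers import BertTokenizer  # assumed importable alongside this model
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = CutoffBertModel(CutoffBertConfig())
    inputs = tokenizer("Hello world", return_tensors="pt")
    outputs = model(**inputs)
    # Hidden states of shape (batch_size, seq_len, hidden_size) plus the pooled [CLS] vector
    return outputs.last_hidden_state, outputs.pooler_output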
@add_start_docstrings(
"""
CutoffBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled
output) + Cut-off data augmentation support.
""",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertForSequenceClassification(CutoffBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.cls_token_id = config.cls_token_id
self.sep_token_id = config.sep_token_id
self.mask_token_id = config.mask_token_id
self.masking_prob = config.cutoff_masking_prob
self.temperature = config.cutoff_temperature
self.mask_loss_wgt = config.cutoff_mask_loss_wgt
self.js_loss_wgt = config.cutoff_js_loss_wgt
self.config = config
self.bert = CutoffBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def _apply_cutoff(self, inputs):
masked_inputs = inputs.clone()
valid_masking_indices = (inputs != self.cls_token_id) & (inputs != self.sep_token_id)
random_masking_indices = torch.bernoulli(torch.full(inputs.shape, self.masking_prob, device=inputs.device)).bool()
masking_indices = random_masking_indices & valid_masking_indices
masked_inputs[masking_indices] = self.mask_token_id
return masked_inputs
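    # The masked copy produced by _apply_cutoff is stacked with the clean input in
    # forward() below: both views go through the same encoder, the clean logits drive
    # the main cross-entropy loss, and (optionally) the masked logits add a second
    # cross-entropy term plus a Jensen-Shannon consistency term between the two views.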
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is None:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = self.dropout(outputs[1])
logits = self.classifier(pooled_output)
if not return_dict:
return (logits,) + outputs[2:]
return SequenceClassifierOutput(
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
b, l = input_ids.size()
masked_input_ids = self._apply_cutoff(input_ids.clone())
flatten_input_ids = torch.stack((input_ids, masked_input_ids), dim=1).reshape(-1, l)
flatten_attention_mask = attention_mask.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if attention_mask is not None else None
flatten_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if token_type_ids is not None else None
flatten_position_ids = position_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if position_ids is not None else None
flatten_inputs_embeds = inputs_embeds.unsqueeze(1).expand(-1, 2, -1, -1).reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None
flatten_outputs = self.bert(
flatten_input_ids,
attention_mask=flatten_attention_mask,
token_type_ids=flatten_token_type_ids,
position_ids=flatten_position_ids,
head_mask=head_mask,
inputs_embeds=flatten_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
flatten_pooled_output = self.dropout(flatten_outputs[1])
flatten_logits = self.classifier(flatten_pooled_output)
logits, masked_logits = flatten_logits.reshape(b, 2, self.config.num_labels).chunk(2, dim=1)
logits, masked_logits = logits.squeeze(dim=1).contiguous(), masked_logits.squeeze(dim=1).contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if self.mask_loss_wgt is not None and self.mask_loss_wgt > 0.0:
mask_loss = loss_fct(masked_logits.view(-1, self.num_labels), labels.view(-1))
loss += mask_loss * self.mask_loss_wgt
if self.js_loss_wgt is not None and self.js_loss_wgt > 0.0:
kl_loss_fct = KLDivLoss(reduction="batchmean")
src_logits, trg_logits = logits, masked_logits
mean_logits = (src_logits + trg_logits) * 0.5
src_loss = kl_loss_fct(
F.log_softmax(src_logits / self.temperature, dim=-1),
F.softmax(mean_logits / self.temperature, dim=-1)
) * (self.temperature ** 2)
trg_loss = kl_loss_fct(
F.log_softmax(trg_logits / self.temperature, dim=-1),
F.softmax(mean_logits / self.temperature, dim=-1)
) * (self.temperature ** 2)
js_loss = (src_loss + trg_loss) * 0.5
loss += js_loss * self.js_loss_wgt
if not return_dict:
return (loss, logits)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
)
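# Illustrative training-step sketch (never called). The checkpoint name, label count and
# cutoff_* values are assumptions for the example; the real CutoffBertConfig signature
# lives in configuration_cutoffbert.py, and the keyword names below simply mirror the
# attributes read in __init__ above.
def _example_cutoff_training_step():  # hypothetical helper, for illustration only
    from transformers import BertTokenizer  # assumed importable alongside this model
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    config = CutoffBertConfig(
        num_labels=2,
        cls_token_id=tokenizer.cls_token_id,
        sep_token_id=tokenizer.sep_token_id,
        mask_token_id=tokenizer.mask_token_id,
        cutoff_masking_prob=0.1,
        cutoff_temperature=1.0,
        cutoff_mask_loss_wgt=1.0,
        cutoff_js_loss_wgt=1.0,
    )
    model = CutoffBertForSequenceClassification(config)
    batch = tokenizer(["a great movie", "a dull movie"], return_tensors="pt", padding=True)
    labels = torch.tensor([1, 0])
    outputs = model(**batch, labels=labels)  # CE + weighted masked-CE + weighted JS loss
    outputs.loss.backward()
    return outputs.logits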
|
import os
import pytest
import sys
import random
import tempfile
import requests
from pathlib import Path
import ray
from ray.test_utils import (run_string_as_driver,
run_string_as_driver_nonblocking)
from ray._private.utils import (get_wheel_filename, get_master_wheel_url,
get_release_wheel_url)
import ray.experimental.internal_kv as kv
from time import sleep
driver_script = """
from time import sleep
import sys
import logging
sys.path.insert(0, "{working_dir}")
import ray
import ray.util
import os
try:
import test_module
except:
pass
try:
job_config = ray.job_config.JobConfig(
runtime_env={runtime_env}
)
if not job_config.runtime_env:
job_config=None
if os.environ.get("USE_RAY_CLIENT"):
ray.client("{address}").env({runtime_env}).namespace("").connect()
else:
ray.init(address="{address}",
job_config=job_config,
logging_level=logging.DEBUG,
namespace=""
)
except ValueError:
print("ValueError")
sys.exit(0)
except TypeError:
print("TypeError")
sys.exit(0)
except:
print("ERROR")
sys.exit(0)
if os.environ.get("EXIT_AFTER_INIT"):
sys.exit(0)
@ray.remote
def run_test():
return test_module.one()
@ray.remote
def check_file(name):
try:
with open(name) as f:
return f.read()
except:
return "FAILED"
@ray.remote
class TestActor(object):
@ray.method(num_returns=1)
def one(self):
return test_module.one()
{execute_statement}
if os.environ.get("USE_RAY_CLIENT"):
ray.util.disconnect()
else:
ray.shutdown()
sleep(10)
"""
def create_file(p):
if not p.parent.exists():
p.parent.mkdir()
with p.open("w") as f:
f.write("Test")
@pytest.fixture(scope="function")
def working_dir():
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
module_path = path / "test_module"
module_path.mkdir(parents=True)
init_file = module_path / "__init__.py"
test_file = module_path / "test.py"
with test_file.open(mode="w") as f:
f.write("""
def one():
return 1
""")
with init_file.open(mode="w") as f:
f.write("""
from test_module.test import one
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
yield tmp_dir
os.chdir(old_dir)
def start_client_server(cluster, client_mode):
from ray._private.runtime_env import PKG_DIR
if not client_mode:
return (cluster.address, {}, PKG_DIR)
ray.worker._global_node._ray_params.ray_client_server_port = "10003"
ray.worker._global_node.start_ray_client_server()
return ("localhost:10003", {"USE_RAY_CLIENT": "1"}, PKG_DIR)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_travel():
import uuid
with tempfile.TemporaryDirectory() as tmp_dir:
dir_paths = set()
file_paths = set()
item_num = 0
excludes = []
root = Path(tmp_dir) / "test"
def construct(path, excluded=False, depth=0):
nonlocal item_num
path.mkdir(parents=True)
if not excluded:
dir_paths.add(str(path))
if depth > 8:
return
if item_num > 500:
return
dir_num = random.randint(0, 10)
file_num = random.randint(0, 10)
for _ in range(dir_num):
uid = str(uuid.uuid4()).split("-")[0]
dir_path = path / uid
exclud_sub = random.randint(0, 5) == 0
if not excluded and exclud_sub:
excludes.append(str(dir_path.relative_to(root)))
if not excluded:
construct(dir_path, exclud_sub or excluded, depth + 1)
item_num += 1
if item_num > 1000:
return
for _ in range(file_num):
uid = str(uuid.uuid4()).split("-")[0]
with (path / uid).open("w") as f:
v = random.randint(0, 1000)
f.write(str(v))
if not excluded:
if random.randint(0, 5) == 0:
excludes.append(
str((path / uid).relative_to(root)))
else:
file_paths.add((str(path / uid), str(v)))
item_num += 1
construct(root)
exclude_spec = ray._private.runtime_env._get_excludes(root, excludes)
visited_dir_paths = set()
visited_file_paths = set()
def handler(path):
if path.is_dir():
visited_dir_paths.add(str(path))
else:
with open(path) as f:
visited_file_paths.add((str(path), f.read()))
ray._private.runtime_env._dir_travel(root, [exclude_spec], handler)
assert file_paths == visited_file_paths
assert dir_paths == visited_dir_paths
"""
The following test cases are related to runtime_env. Each test follows these steps:
1) Create a temporary dir with the working_dir fixture
2) Fill in the driver_script template defined globally
3) Overwrite runtime_env and execute_statement in the template
4) Execute the result as a separate driver and check its output
"""
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_empty_working_dir(ray_start_cluster_head, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
with tempfile.TemporaryDirectory() as working_dir:
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"py_modules": [r"{working_dir}"]
}}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "sys.exit(0)"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out != "ERROR"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_invalid_working_dir(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
runtime_env = "{ 'working_dir': 10 }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = "{ 'py_modules': [10] }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
    runtime_env = f"{{ 'working_dir': os.path.join(r'{working_dir}', 'na') }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
    runtime_env = f"{{ 'py_modules': [os.path.join(r'{working_dir}', 'na')] }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_single_node(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Setup runtime env here
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Testing runtime env with working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_module(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
    # Test runtime_env with py_modules
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_local_file(two_node_cluster, working_dir, client_mode):
with open(os.path.join(working_dir, "test_file"), "w") as f:
f.write("1")
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
    # Test runtime_env with working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
vals = ray.get([check_file.remote('test_file')] * 1000)
print(sum([int(v) for v in vals]))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
    # Check that all files are visible before any exclusions are applied
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test"
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"excludes": [
# exclude by relative path
r"test2",
# exclude by dir
r"{str(Path("tmp_dir") / "sub_dir")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_1")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_2")}",
]
}}"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split("\n")[-1] == \
"Test,FAILED,Test,FAILED,FAILED,Test,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion_2(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
def create_file(p):
if not p.parent.exists():
p.parent.mkdir(parents=True)
with p.open("w") as f:
f.write("Test")
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
create_file(working_path / "cache" / "test_1")
create_file(working_path / "tmp_dir" / "cache" / "test_1")
create_file(working_path / "another_dir" / "cache" / "test_1")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
check_file.remote(os.path.join("cache", "test_1")),
check_file.remote(os.path.join("tmp_dir", "cache", "test_1")),
check_file.remote(os.path.join("another_dir", "cache", "test_1")),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
    # Check that all files are visible before adding the .gitignore
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test,Test,Test,Test"
with open(f"{working_dir}/.gitignore", "w") as f:
f.write("""
# Comment
test_[12]
/test1
!/tmp_dir/sub_dir/test_1
cache/
""")
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
    t = out.strip().split("\n")[-1]
    assert t == \
        "FAILED,Test,Test,FAILED,FAILED,Test,Test,FAILED,FAILED,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_runtime_env_getter(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
print(ray.get_runtime_context().runtime_env["working_dir"])
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == working_dir
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_uri(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
import ray._private.runtime_env as runtime_env
import tempfile
with tempfile.NamedTemporaryFile(suffix="zip") as tmp_file:
pkg_name = runtime_env.get_project_package_name(working_dir, [], [])
pkg_uri = runtime_env.Protocol.PIN_GCS.value + "://" + pkg_name
runtime_env.create_project_package(working_dir, [], [], tmp_file.name)
runtime_env.push_package(pkg_uri, tmp_file.name)
runtime_env = f"""{{ "uris": ["{pkg_uri}"] }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
# pinned uri will not be deleted
print(list(kv._internal_kv_list("")))
assert len(kv._internal_kv_list("pingcs://")) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_regular_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_detached_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor", lifetime="detached").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
    # It's a detached actor, so it should still be there
assert len(kv._internal_kv_list("gcs://")) == 1
assert len(list(Path(PKG_DIR).iterdir())) == 2
pkg_dir = [f for f in Path(PKG_DIR).glob("*") if f.is_dir()][0]
import sys
sys.path.insert(0, str(pkg_dir))
test_actor = ray.get_actor("test_actor")
assert sum(ray.get([test_actor.one.remote()] * 1000)) == 1000
ray.kill(test_actor)
from time import sleep
sleep(5)
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_1(ray_start_cluster_head, working_dir):
# start job_config=None
# start job_config=something
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = None
    # Make the first one hang there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
# Have one running with job config = None
proc = run_string_as_driver_nonblocking(script, env)
    # wait for it to be up
sleep(5)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
    # Execute the second one with a runtime_env; it should still work.
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_2(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=None
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
    # Make the first one hang there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = None
# Execute the following in the second one which should
# succeed
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "OK", out
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_3(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=something else
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
    # Make the first one hang there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = f"""
{{ "working_dir": test_module.__path__[0] }}""" # noqa: F541
# Execute the following cmd in the second one and ensure that
# it is able to run.
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
proc.kill()
proc.wait()
assert out.strip().split()[-1] == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_util_without_job_config(shutdown_only):
from ray.cluster_utils import Cluster
with tempfile.TemporaryDirectory() as tmp_dir:
with (Path(tmp_dir) / "lib.py").open("w") as f:
f.write("""
def one():
return 1
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
cluster = Cluster()
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
(address, env, PKG_DIR) = start_client_server(cluster, True)
script = f"""
import ray
import ray.util
import os
ray.util.connect("{address}", job_config=None)
@ray.remote
def run():
from lib import one
return one()
print(ray.get([run.remote()])[0])
"""
out = run_string_as_driver(script, env)
print(out)
os.chdir(old_dir)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_init(shutdown_only):
with tempfile.TemporaryDirectory() as tmp_dir:
old_dir = os.getcwd()
os.chdir(tmp_dir)
with open("hello", "w") as f:
f.write("world")
job_config = ray.job_config.JobConfig(runtime_env={"working_dir": "."})
ray.init(job_config=job_config)
@ray.remote
class Test:
def test(self):
with open("hello") as f:
return f.read()
t = Test.remote()
assert ray.get(t.test.remote()) == "world"
os.chdir(old_dir)
def test_get_wheel_filename():
ray_version = "2.0.0.dev0"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
filename = get_wheel_filename(sys_platform, ray_version,
py_version)
prefix = "https://s3-us-west-2.amazonaws.com/ray-wheels/latest/"
url = f"{prefix}{filename}"
assert requests.head(url).status_code == 200
def test_get_master_wheel_url():
ray_version = "2.0.0.dev0"
test_commit = "ba6cebe30fab6925e5b2d9e859ad064d53015246"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
url = get_master_wheel_url(test_commit, sys_platform, ray_version,
py_version)
assert requests.head(url).status_code == 200, url
def test_get_release_wheel_url():
test_commits = {
"1.4.0rc1": "e7c7f6371a69eb727fa469e4cd6f4fbefd143b4c",
"1.3.0": "0b4b444fadcdc23226e11fef066b982175804232",
"1.2.0": "1b1a2496ca51b745c07c79fb859946d3350d471b"
}
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
for version, commit in test_commits.items():
url = get_release_wheel_url(commit, sys_platform, version,
py_version)
assert requests.head(url).status_code == 200, url
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_task(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_actor(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_complex(shutdown_only):
ray.init(
job_config=ray.job_config.JobConfig(
runtime_env={"env_vars": {
"foo": "job"
}}))
@ray.remote
def env_from_job():
return os.environ.get("foo")
assert ray.get(env_from_job.remote()) == "job"
@ray.remote(runtime_env={"env_vars": {"foo": "task"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "task"
@ray.remote(runtime_env={"env_vars": {"foo": "actor"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "actor"
# Test that runtime_env can be overridden by specifying .options().
assert ray.get(
f.options(runtime_env={
"env_vars": {
"foo": "new"
}
}).remote()) == "new"
a = A.options(runtime_env={"env_vars": {"foo": "new2"}}).remote()
assert ray.get(a.g.remote()) == "new2"
def test_container_option_serialize():
runtime_env = {
"container": {
"image": "ray:latest",
"run_options": ["--name=test"]
}
}
job_config = ray.job_config.JobConfig(runtime_env=runtime_env)
job_config_serialized = job_config.serialize()
    # job_config_serialized is the serialized JobConfig protobuf string. Both
    # job_config.runtime_env.raw_json and job_config.serialized_runtime_env carry the
    # container_option info, so "image" appears twice in the serialized payload.
assert job_config_serialized.count(b"image") == 2
def test_working_dir_override_failure(shutdown_only):
ray.init()
@ray.remote(runtime_env={"working_dir": "."})
def f():
pass
with pytest.raises(NotImplementedError):
f.remote()
@ray.remote
def g():
pass
with pytest.raises(NotImplementedError):
g.options(runtime_env={"working_dir": "."}).remote()
@ray.remote(runtime_env={"working_dir": "."})
class A:
pass
with pytest.raises(NotImplementedError):
A.remote()
@ray.remote
class B:
pass
with pytest.raises(NotImplementedError):
B.options(runtime_env={"working_dir": "."}).remote()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-sv", __file__]))
| import os
import pytest
import sys
import random
import tempfile
import requests
from pathlib import Path
import ray
from ray.test_utils import (run_string_as_driver,
run_string_as_driver_nonblocking)
from ray._private.utils import (get_wheel_filename, get_master_wheel_url,
get_release_wheel_url)
import ray.experimental.internal_kv as kv
from time import sleep
driver_script = """
from time import sleep
import sys
import logging
sys.path.insert(0, "{working_dir}")
import ray
import ray.util
import os
try:
import test_module
except:
pass
try:
job_config = ray.job_config.JobConfig(
runtime_env={runtime_env}
)
if not job_config.runtime_env:
job_config=None
if os.environ.get("USE_RAY_CLIENT"):
ray.client("{address}").env({runtime_env}).namespace("").connect()
else:
ray.init(address="{address}",
job_config=job_config,
logging_level=logging.DEBUG,
namespace=""
)
except ValueError:
print("ValueError")
sys.exit(0)
except TypeError:
print("TypeError")
sys.exit(0)
except:
print("ERROR")
sys.exit(0)
if os.environ.get("EXIT_AFTER_INIT"):
sys.exit(0)
@ray.remote
def run_test():
return test_module.one()
@ray.remote
def check_file(name):
try:
with open(name) as f:
return f.read()
except:
return "FAILED"
@ray.remote
class TestActor(object):
@ray.method(num_returns=1)
def one(self):
return test_module.one()
{execute_statement}
if os.environ.get("USE_RAY_CLIENT"):
ray.util.disconnect()
else:
ray.shutdown()
sleep(10)
"""
def create_file(p):
if not p.parent.exists():
p.parent.mkdir()
with p.open("w") as f:
f.write("Test")
@pytest.fixture(scope="function")
def working_dir():
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
module_path = path / "test_module"
module_path.mkdir(parents=True)
init_file = module_path / "__init__.py"
test_file = module_path / "test.py"
with test_file.open(mode="w") as f:
f.write("""
def one():
return 1
""")
with init_file.open(mode="w") as f:
f.write("""
from test_module.test import one
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
yield tmp_dir
os.chdir(old_dir)
def start_client_server(cluster, client_mode):
from ray._private.runtime_env import PKG_DIR
if not client_mode:
return (cluster.address, {}, PKG_DIR)
ray.worker._global_node._ray_params.ray_client_server_port = "10003"
ray.worker._global_node.start_ray_client_server()
return ("localhost:10003", {"USE_RAY_CLIENT": "1"}, PKG_DIR)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_travel():
import uuid
with tempfile.TemporaryDirectory() as tmp_dir:
dir_paths = set()
file_paths = set()
item_num = 0
excludes = []
root = Path(tmp_dir) / "test"
def construct(path, excluded=False, depth=0):
nonlocal item_num
path.mkdir(parents=True)
if not excluded:
dir_paths.add(str(path))
if depth > 8:
return
if item_num > 500:
return
dir_num = random.randint(0, 10)
file_num = random.randint(0, 10)
for _ in range(dir_num):
uid = str(uuid.uuid4()).split("-")[0]
dir_path = path / uid
exclud_sub = random.randint(0, 5) == 0
if not excluded and exclud_sub:
excludes.append(str(dir_path.relative_to(root)))
if not excluded:
construct(dir_path, exclud_sub or excluded, depth + 1)
item_num += 1
if item_num > 1000:
return
for _ in range(file_num):
uid = str(uuid.uuid4()).split("-")[0]
with (path / uid).open("w") as f:
v = random.randint(0, 1000)
f.write(str(v))
if not excluded:
if random.randint(0, 5) == 0:
excludes.append(
str((path / uid).relative_to(root)))
else:
file_paths.add((str(path / uid), str(v)))
item_num += 1
construct(root)
exclude_spec = ray._private.runtime_env._get_excludes(root, excludes)
visited_dir_paths = set()
visited_file_paths = set()
def handler(path):
if path.is_dir():
visited_dir_paths.add(str(path))
else:
with open(path) as f:
visited_file_paths.add((str(path), f.read()))
ray._private.runtime_env._dir_travel(root, [exclude_spec], handler)
assert file_paths == visited_file_paths
assert dir_paths == visited_dir_paths
"""
The following test cases are related with runtime env. It following these steps
1) Creating a temporary dir with fixture working_dir
2) Using a template named driver_script defined globally
3) Overwrite runtime_env and execute_statement in the template
4) Execute it as a separate driver and return the result
"""
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_empty_working_dir(ray_start_cluster_head, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
with tempfile.TemporaryDirectory() as working_dir:
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"py_modules": [r"{working_dir}"]
}}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "sys.exit(0)"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out != "ERROR"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_invalid_working_dir(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
runtime_env = "{ 'working_dir': 10 }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = "{ 'py_modules': [10] }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = f"{{ 'working_dir': os.path.join(r'{working_dir}', 'na') }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
runtime_env = f"{{ 'py_modules': [os.path.join(r'{working_dir}', 'na')] }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_single_node(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Setup runtime env here
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Testing runtime env with working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_module(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# test runtime_env iwth py_modules
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_local_file(two_node_cluster, working_dir, client_mode):
with open(os.path.join(working_dir, "test_file"), "w") as f:
f.write("1")
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# test runtime_env iwth working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
vals = ray.get([check_file.remote('test_file')] * 1000)
print(sum([int(v) for v in vals]))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
# Test it works before
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test"
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"excludes": [
# exclude by relative path
r"test2",
# exclude by dir
r"{str(Path("tmp_dir") / "sub_dir")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_1")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_2")}",
]
}}"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split("\n")[-1] == \
"Test,FAILED,Test,FAILED,FAILED,Test,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion_2(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
def create_file(p):
if not p.parent.exists():
p.parent.mkdir(parents=True)
with p.open("w") as f:
f.write("Test")
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
create_file(working_path / "cache" / "test_1")
create_file(working_path / "tmp_dir" / "cache" / "test_1")
create_file(working_path / "another_dir" / "cache" / "test_1")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
check_file.remote(os.path.join("cache", "test_1")),
check_file.remote(os.path.join("tmp_dir", "cache", "test_1")),
check_file.remote(os.path.join("another_dir", "cache", "test_1")),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
# Test it works before
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test,Test,Test,Test"
with open(f"{working_dir}/.gitignore", "w") as f:
f.write("""
# Comment
test_[12]
/test1
!/tmp_dir/sub_dir/test_1
cache/
""")
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
t = out.strip().split("\n")[-1]
assert out.strip().split("\n")[-1] == \
"FAILED,Test,Test,FAILED,FAILED,Test,Test,FAILED,FAILED,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_runtime_env_getter(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
print(ray.get_runtime_context().runtime_env["working_dir"])
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == working_dir
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_uri(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
import ray._private.runtime_env as runtime_env
import tempfile
with tempfile.NamedTemporaryFile(suffix="zip") as tmp_file:
pkg_name = runtime_env.get_project_package_name(working_dir, [], [])
pkg_uri = runtime_env.Protocol.PIN_GCS.value + "://" + pkg_name
runtime_env.create_project_package(working_dir, [], [], tmp_file.name)
runtime_env.push_package(pkg_uri, tmp_file.name)
runtime_env = f"""{{ "uris": ["{pkg_uri}"] }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
# pinned uri will not be deleted
print(list(kv._internal_kv_list("")))
assert len(kv._internal_kv_list("pingcs://")) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_regular_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_detached_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor", lifetime="detached").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
# It's a detached actor, so it should still be there
assert len(kv._internal_kv_list("gcs://")) == 1
assert len(list(Path(PKG_DIR).iterdir())) == 2
pkg_dir = [f for f in Path(PKG_DIR).glob("*") if f.is_dir()][0]
import sys
sys.path.insert(0, str(pkg_dir))
test_actor = ray.get_actor("test_actor")
assert sum(ray.get([test_actor.one.remote()] * 1000)) == 1000
ray.kill(test_actor)
from time import sleep
sleep(5)
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_1(ray_start_cluster_head, working_dir):
# start job_config=None
# start job_config=something
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = None
# To make the first one hang there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
# Have one running with job config = None
proc = run_string_as_driver_nonblocking(script, env)
# wait for it to come up
sleep(5)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the second one, which should still work through the Ray Client server.
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_2(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=None
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# To make the first one hang there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = None
# Execute the following in the second one which should
# succeed
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "OK", out
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_3(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=something else
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# To make the first one hang there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = f"""
{{ "working_dir": test_module.__path__[0] }}""" # noqa: F541
# Execute the following cmd in the second one and ensure that
# it is able to run.
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
proc.kill()
proc.wait()
assert out.strip().split()[-1] == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_util_without_job_config(shutdown_only):
from ray.cluster_utils import Cluster
with tempfile.TemporaryDirectory() as tmp_dir:
with (Path(tmp_dir) / "lib.py").open("w") as f:
f.write("""
def one():
return 1
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
cluster = Cluster()
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
(address, env, PKG_DIR) = start_client_server(cluster, True)
script = f"""
import ray
import ray.util
import os
ray.util.connect("{address}", job_config=None)
@ray.remote
def run():
from lib import one
return one()
print(ray.get([run.remote()])[0])
"""
out = run_string_as_driver(script, env)
print(out)
os.chdir(old_dir)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_init(shutdown_only):
with tempfile.TemporaryDirectory() as tmp_dir:
old_dir = os.getcwd()
os.chdir(tmp_dir)
with open("hello", "w") as f:
f.write("world")
job_config = ray.job_config.JobConfig(runtime_env={"working_dir": "."})
ray.init(job_config=job_config)
@ray.remote
class Test:
def test(self):
with open("hello") as f:
return f.read()
t = Test.remote()
assert ray.get(t.test.remote()) == "world"
os.chdir(old_dir)
def test_get_wheel_filename():
ray_version = "2.0.0.dev0"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
filename = get_wheel_filename(sys_platform, ray_version,
py_version)
prefix = "https://s3-us-west-2.amazonaws.com/ray-wheels/latest/"
url = f"{prefix}{filename}"
assert requests.head(url).status_code == 200
def test_get_master_wheel_url():
ray_version = "2.0.0.dev0"
test_commit = "ba6cebe30fab6925e5b2d9e859ad064d53015246"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
url = get_master_wheel_url(test_commit, sys_platform, ray_version,
py_version)
assert requests.head(url).status_code == 200, url
def test_get_release_wheel_url():
test_commits = {
"1.4.0rc1": "e7c7f6371a69eb727fa469e4cd6f4fbefd143b4c",
"1.3.0": "0b4b444fadcdc23226e11fef066b982175804232",
"1.2.0": "1b1a2496ca51b745c07c79fb859946d3350d471b"
}
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
for version, commit in test_commits.items():
url = get_release_wheel_url(commit, sys_platform, version,
py_version)
assert requests.head(url).status_code == 200, url
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_task(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_actor(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_complex(shutdown_only):
ray.init(
job_config=ray.job_config.JobConfig(
runtime_env={"env_vars": {
"foo": "job"
}}))
@ray.remote
def env_from_job():
return os.environ.get("foo")
assert ray.get(env_from_job.remote()) == "job"
@ray.remote(runtime_env={"env_vars": {"foo": "task"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "task"
@ray.remote(runtime_env={"env_vars": {"foo": "actor"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "actor"
# Test that runtime_env can be overridden by specifying .options().
assert ray.get(
f.options(runtime_env={
"env_vars": {
"foo": "new"
}
}).remote()) == "new"
a = A.options(runtime_env={"env_vars": {"foo": "new2"}}).remote()
assert ray.get(a.g.remote()) == "new2"
def test_container_option_serialize():
runtime_env = {
"container": {
"image": "ray:latest",
"run_options": ["--name=test"]
}
}
job_config = ray.job_config.JobConfig(runtime_env=runtime_env)
job_config_serialized = job_config.serialize()
# job_config_serialized is JobConfig protobuf serialized string,
# job_config.runtime_env.raw_json has container_option info
# job_config.serialized_runtime_env also has container_option info
assert job_config_serialized.count(b"image") == 2
def test_working_dir_override_failure(shutdown_only):
ray.init()
@ray.remote(runtime_env={"working_dir": "."})
def f():
pass
with pytest.raises(NotImplementedError):
f.remote()
@ray.remote
def g():
pass
with pytest.raises(NotImplementedError):
g.options(runtime_env={"working_dir": "."}).remote()
@ray.remote(runtime_env={"working_dir": "."})
class A:
pass
with pytest.raises(NotImplementedError):
A.remote()
@ray.remote
class B:
pass
with pytest.raises(NotImplementedError):
B.options(runtime_env={"working_dir": "."}).remote()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-sv", __file__]))
|
# coding: utf-8
# @author octopoulo <polluxyz@gmail.com>
# @version 2020-05-01
"""
Sync
"""
import gzip
from logging import getLogger
import os
import re
import shutil
from subprocess import run
from time import time
from typing import Any
from PIL import Image, ImageFile
from common import makedirs_safe, read_text_safe, write_text_safe
from css_minify import css_minify
# folders, might want to edit these
BASE = os.path.dirname(os.path.dirname(__file__))
COMPILER = os.path.join(BASE, 'script/closure-compiler-v20200406.jar')
CSS_FOLDER = os.path.join(BASE, 'css')
JAVA = 'java'
JS_FOLDER = os.path.join(BASE, 'js')
LOCAL = BASE
# edit these files
CSS_FILES = [
'light',
]
JS_FILES = {
'4d': [
'libs/three',
'libs/stats',
'libs/GLTFLoader',
'libs/DRACOLoader',
'libs/camera-controls',
],
'all': [
'libs/socket.io',
':common',
'libs/chess-quick',
':engine',
':global',
':3d',
':xboard',
':graph',
':game',
':temp',
':network',
':startup',
':config',
'script',
],
'chart': [
'libs/chart-quick',
],
}
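# Note (added): entries prefixed with ':' (':common', ':engine', ...) are only used
# for freshness checks in create_index() -- their timestamps feed js_dates, but they
# are filtered out of js_names and therefore never concatenated into the combined
# <output>.js file.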
NEED_GZIPS = {
'4d_.js',
'ammo.wasm.js',
'ammo.wasm.wasm',
'chart_.js',
'chart.min.js',
'dark.css',
'dark-archive.css',
'draco_decoder.js',
'draco_decoder.wasm',
'draco_wasm_wrapper.js',
'fra.json',
'index.html',
'jpn.json',
'light-archive.css',
'manifest.json',
'pieces-draco.glb',
'rus.json',
'sea.css',
'sea-archive.css',
'ukr.json',
}
# don't gzip inside those folders
SKIP_GZIPS = {
'archive',
'doc',
'image',
'model',
'node_modules',
'script',
'sound',
'test',
'theme',
}
class Sync:
"""Sync
"""
#
def __init__(self, **kwargs):
self.kwargs = kwargs
self.clean = kwargs.get('clean') # type: bool
self.host = kwargs.get('host') # type: str
self.no_compress = kwargs.get('no_compress') # type: bool
self.no_debug = kwargs.get('no_debug') # type: bool
self.no_process = kwargs.get('no_process') # type: bool
self.zip = kwargs.get('zip') # type: bool
self.logger = getLogger(self.__class__.__name__)
def combine_pieces(self, folder: str):
"""Combine chess pieces png files into 1 file
"""
if 'metro' in folder:
height = 160
width = 160
else:
height = 80
width = 80
combined = Image.new('RGBA', (width * 12, height), (0, 255, 0, 0))
output = f'{folder}.png'
i = 0
pieces = 'bknpqr'
for color in 'bw':
for piece in pieces:
name = f'{color}{piece}'
image = Image.open(os.path.join(folder, f'{name}.png'))
offset = (i * width, 0)
combined.paste(image, offset)
i += 1
combined.save(output, format='png')
print('a', end='')
def combine_themes(self, folder: str):
"""Combine all pieces of each theme
"""
sources = os.listdir(folder)
for source in sources:
filename = os.path.join(folder, source)
if os.path.isdir(filename):
self.combine_pieces(filename)
def compress_3d(self, data: str) -> str:
"""Compress THREE javascript
"""
data = re.sub(r'\bTHREE\b', 'T', data)
data = re.sub(r'console\.(error|warn)\(.+?\);', '', data, flags=re.S)
return data
def compress_gzip(self, filename: str):
"""Gzip compress a file
"""
output = f'{filename}.gz'
with open(filename, 'rb') as f_in:
with gzip.open(output, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
# synchronise the date/time
if os.path.isfile(output):
info = os.stat(output)
os.utime(filename, (info.st_atime, info.st_mtime))
print('g', end='')
def compress_js(self, filename: str) -> str:
"""Compress javascript
"""
base, ext = os.path.splitext(filename)
output = f'{base}_{ext}'
if self.no_compress:
shutil.copy(filename, output)
return output
args = [
JAVA,
'-jar', COMPILER,
'--js', filename,
'--js_output_file', output,
'--language_in', 'ECMASCRIPT_2018',
'--language_out', 'ECMASCRIPT_2018',
]
if self.kwargs.get('advanced'):
args.extend(['--compilation_level', 'ADVANCED'])
run(args)
return output
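# Rough usage sketch for compress_js (assumes the closure-compiler jar exists at
# COMPILER and `java` is on the PATH):
#     sync = Sync(no_compress=False)
#     minified = sync.compress_js('js/all.js')   # writes js/all_.js and returns its path
# With no_compress=True the input is simply copied to the `<base>_<ext>` name instead.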
def gzip_files(self, folder: str, depth: int, delete: bool):
"""Gzip all wanted files, recursively
"""
queues = []
sources = os.listdir(folder)
for source in sources:
if source.startswith(('.', '_')):
continue
filename = os.path.join(folder, source)
if os.path.isdir(filename):
if source not in SKIP_GZIPS:
queues.append(filename)
continue
# file
if not os.path.isfile(filename):
continue
if source not in NEED_GZIPS:
continue
output = f'{filename}.gz'
source_time = os.path.getmtime(filename)
if os.path.isfile(output):
destin_time = os.path.getmtime(output)
if delete:
os.unlink(output)
print('d', end='')
else:
destin_time = 0
if not delete and source_time != destin_time:
self.compress_gzip(filename)
print(f"{" " * depth}{filename}")
for queue in queues:
self.gzip_files(queue, depth + 1, delete)
@staticmethod
def import_file(match: Any) -> str:
"""@import {common.js}
"""
source = match.group(1)
filename = os.path.join(JS_FOLDER, source)
data = read_text_safe(filename) or ''
if source.endswith('.js'):
data = re.sub(r'["\']use strict["\'];?', '', data)
return data
def normalise_folders(self):
"""Add the missing / (slash) at the end of the folder
"""
global CSS_FOLDER, JS_FOLDER, LOCAL
if CSS_FOLDER[-1] != '/':
CSS_FOLDER += '/'
if JS_FOLDER[-1] != '/':
JS_FOLDER += '/'
if LOCAL[-1] != '/':
LOCAL += '/'
def create_index(self):
"""Create the new index.html
"""
base = os.path.join(LOCAL, 'index_base.html')
base_time = os.path.getmtime(base)
index = os.path.join(LOCAL, 'index.html')
index_time = os.path.getmtime(index) if os.path.isfile(index) else 0
change = 0
if base_time >= index_time:
change += 1
# 1) minimise JS
for js_output, js_files in JS_FILES.items():
all_js = os.path.join(JS_FOLDER, f'{js_output}.js')
all_min_js = os.path.join(JS_FOLDER, f'{js_output}_.js')
# common/engine changed => need to update, even though we're not using those files
js_dates = [os.path.abspath(f"{JS_FOLDER}{js_file.strip(":")}.js") for js_file in js_files]
js_names = [os.path.abspath(f'{JS_FOLDER}{js_file}.js') for js_file in js_files if js_file[0] != ':']
if js_output == 'all':
# script_js = os.path.join(JS_FOLDER, 'script.js')
extras = []
else:
extras = []
# skip?
update = True
if os.path.isfile(all_min_js) and os.path.isfile(all_js):
all_time = os.path.getmtime(all_min_js)
update = False
for js_date in js_dates + extras:
update |= os.path.isfile(js_date) and os.path.getmtime(js_date) >= all_time
if not update:
print('J', end='')
continue
datas = []
for js_name in js_names:
print(js_name)
script_data = read_text_safe(js_name)
if not script_data:
continue
# process the script.js
if js_name.endswith('script.js'):
script_data = re.sub('@import {(.*?)}', self.import_file, script_data);
script_data = re.sub('// BEGIN.*?// END', '', script_data, flags=re.S)
if self.no_debug:
script_data = re.sub('// <<.*?// >>', '', script_data, flags=re.S)
# use HOST
print(f'host={self.host}')
if self.host != '/':
script_data = script_data.replace("HOST = '/',", f"HOST = '{self.host}',")
datas.append(script_data)
data = '\n'.join(datas)
if '4d' in js_output:
data = self.compress_3d(data)
write_text_safe(all_js, data)
self.compress_js(all_js)
print('j', end='')
change += 1
# 2) minimise CSS
all_css = os.path.join(CSS_FOLDER, 'all.css')
all_min_css = os.path.join(CSS_FOLDER, 'all_.css')
css_names = [os.path.abspath(f'{CSS_FOLDER}{css_file}.css') for css_file in CSS_FILES]
update = True
if os.path.isfile(all_min_css) and os.path.isfile(all_css):
all_time = os.path.getmtime(all_min_css)
update = False
for css_name in css_names:
update |= os.path.isfile(css_name) and os.path.getmtime(css_name) >= all_time
if update:
datas = []
for css_name in css_names:
datas.append(read_text_safe(css_name) or '')
data = '\n'.join(datas)
write_text_safe(all_css, data)
css_data = css_minify(data)
write_text_safe(all_min_css, css_data)
print('c', end='')
change += 1
else:
css_data = read_text_safe(all_min_css) or ''
print('C', end='')
if not change:
print('X', end='')
return
# 3) remove BEGIN ... END
html = read_text_safe(base)
html = re.sub('<!-- BEGIN -->.*?<!-- END -->', '', html, flags=re.S)
html = re.sub('// BEGIN.*?// END', '', html, flags=re.S)
# use the HOST
if self.host != '/':
replaces = {
'href="/': f'href="{self.host}',
'src="/': f'src="{self.host}',
}
for key, value in replaces.items():
html = html.replace(key, value)
# 4) create the new index.html
if not self.no_process:
all_min_js = os.path.join(JS_FOLDER, 'all_.js')
js_data = read_text_safe(all_min_js) or ''
replaces = {
'<!-- {SCRIPT} -->': f'<script>{js_data}</script>',
'<!-- {STYLE} -->': f'<style>{css_data}</style>',
}
for key, value in replaces.items():
html = html.replace(key, value)
html = re.sub('<!-- .*? -->', '', html, flags=re.S)
html = re.sub(r'\n\s+', '\n', html)
filename = os.path.join(LOCAL, 'index.html')
write_text_safe(filename, html)
def synchronise(self) -> bool:
"""Synchronise the files
"""
self.normalise_folders()
self.create_index()
if self.clean:
self.gzip_files(LOCAL, 0, True)
elif self.zip:
self.gzip_files(LOCAL, 0, False)
return True
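# Usage sketch (hypothetical kwargs, mirroring the attributes read in __init__):
#     sync = Sync(host='/', no_compress=True, no_debug=True, zip=True)
#     sync.synchronise()   # rebuilds index.html, then gzips the NEED_GZIPS files
# Running the module directly (below) passes no kwargs, so host is None and clean/zip
# are falsy, meaning no gzip pass is performed.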
if __name__ == '__main__':
start = time()
sync = Sync()
if 0:
sync.combine_themes(os.path.join(BASE, 'theme'))
else:
sync.synchronise()
end = time()
print(f'\nELAPSED: {end-start:.3f} seconds')
| # coding: utf-8
# @author octopoulo <polluxyz@gmail.com>
# @version 2020-05-01
"""
Sync
"""
import gzip
from logging import getLogger
import os
import re
import shutil
from subprocess import run
from time import time
from typing import Any
from PIL import Image, ImageFile
from common import makedirs_safe, read_text_safe, write_text_safe
from css_minify import css_minify
# folders, might want to edit these
BASE = os.path.dirname(os.path.dirname(__file__))
COMPILER = os.path.join(BASE, 'script/closure-compiler-v20200406.jar')
CSS_FOLDER = os.path.join(BASE, 'css')
JAVA = 'java'
JS_FOLDER = os.path.join(BASE, 'js')
LOCAL = BASE
# edit these files
CSS_FILES = [
'light',
]
JS_FILES = {
'4d': [
'libs/three',
'libs/stats',
'libs/GLTFLoader',
'libs/DRACOLoader',
'libs/camera-controls',
],
'all': [
'libs/socket.io',
':common',
'libs/chess-quick',
':engine',
':global',
':3d',
':xboard',
':graph',
':game',
':temp',
':network',
':startup',
':config',
'script',
],
'chart': [
'libs/chart-quick',
],
}
NEED_GZIPS = {
'4d_.js',
'ammo.wasm.js',
'ammo.wasm.wasm',
'chart_.js',
'chart.min.js',
'dark.css',
'dark-archive.css',
'draco_decoder.js',
'draco_decoder.wasm',
'draco_wasm_wrapper.js',
'fra.json',
'index.html',
'jpn.json',
'light-archive.css',
'manifest.json',
'pieces-draco.glb',
'rus.json',
'sea.css',
'sea-archive.css',
'ukr.json',
}
# don't gzip inside those folders
SKIP_GZIPS = {
'archive',
'doc',
'image',
'model',
'node_modules',
'script',
'sound',
'test',
'theme',
}
class Sync:
"""Sync
"""
#
def __init__(self, **kwargs):
self.kwargs = kwargs
self.clean = kwargs.get('clean') # type: bool
self.host = kwargs.get('host') # type: str
self.no_compress = kwargs.get('no_compress') # type: bool
self.no_debug = kwargs.get('no_debug') # type: bool
self.no_process = kwargs.get('no_process') # type: bool
self.zip = kwargs.get('zip') # type: bool
self.logger = getLogger(self.__class__.__name__)
def combine_pieces(self, folder: str):
"""Combine chess pieces png files into 1 file
"""
if 'metro' in folder:
height = 160
width = 160
else:
height = 80
width = 80
combined = Image.new('RGBA', (width * 12, height), (0, 255, 0, 0))
output = f'{folder}.png'
i = 0
pieces = 'bknpqr'
for color in 'bw':
for piece in pieces:
name = f'{color}{piece}'
image = Image.open(os.path.join(folder, f'{name}.png'))
offset = (i * width, 0)
combined.paste(image, offset)
i += 1
combined.save(output, format='png')
print('a', end='')
def combine_themes(self, folder: str):
"""Combine all pieces of each theme
"""
sources = os.listdir(folder)
for source in sources:
filename = os.path.join(folder, source)
if os.path.isdir(filename):
self.combine_pieces(filename)
def compress_3d(self, data: str) -> str:
"""Compress THREE javascript
"""
data = re.sub(r'\bTHREE\b', 'T', data)
data = re.sub(r'console\.(error|warn)\(.+?\);', '', data, flags=re.S)
return data
def compress_gzip(self, filename: str):
"""Gzip compress a file
"""
output = f'{filename}.gz'
with open(filename, 'rb') as f_in:
with gzip.open(output, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
# synchronise the date/time
if os.path.isfile(output):
info = os.stat(output)
os.utime(filename, (info.st_atime, info.st_mtime))
print('g', end='')
def compress_js(self, filename: str) -> str:
"""Compress javascript
"""
base, ext = os.path.splitext(filename)
output = f'{base}_{ext}'
if self.no_compress:
shutil.copy(filename, output)
return output
args = [
JAVA,
'-jar', COMPILER,
'--js', filename,
'--js_output_file', output,
'--language_in', 'ECMASCRIPT_2018',
'--language_out', 'ECMASCRIPT_2018',
]
if self.kwargs.get('advanced'):
args.extend(['--compilation_level', 'ADVANCED'])
run(args)
return output
def gzip_files(self, folder: str, depth: int, delete: bool):
"""Gzip all wanted files, recursively
"""
queues = []
sources = os.listdir(folder)
for source in sources:
if source.startswith(('.', '_')):
continue
filename = os.path.join(folder, source)
if os.path.isdir(filename):
if source not in SKIP_GZIPS:
queues.append(filename)
continue
# file
if not os.path.isfile(filename):
continue
if source not in NEED_GZIPS:
continue
output = f'{filename}.gz'
source_time = os.path.getmtime(filename)
if os.path.isfile(output):
destin_time = os.path.getmtime(output)
if delete:
os.unlink(output)
print('d', end='')
else:
destin_time = 0
if not delete and source_time != destin_time:
self.compress_gzip(filename)
print(f"{' ' * depth}{filename}")
for queue in queues:
self.gzip_files(queue, depth + 1, delete)
@staticmethod
def import_file(match: Any) -> str:
"""@import {common.js}
"""
source = match.group(1)
filename = os.path.join(JS_FOLDER, source)
data = read_text_safe(filename) or ''
if source.endswith('.js'):
data = re.sub(r'["\']use strict["\'];?', '', data)
return data
def normalise_folders(self):
"""Add the missing / (slash) at the end of the folder
"""
global CSS_FOLDER, JS_FOLDER, LOCAL
if CSS_FOLDER[-1] != '/':
CSS_FOLDER += '/'
if JS_FOLDER[-1] != '/':
JS_FOLDER += '/'
if LOCAL[-1] != '/':
LOCAL += '/'
def create_index(self):
"""Create the new index.html
"""
base = os.path.join(LOCAL, 'index_base.html')
base_time = os.path.getmtime(base)
index = os.path.join(LOCAL, 'index.html')
index_time = os.path.getmtime(index) if os.path.isfile(index) else 0
change = 0
if base_time >= index_time:
change += 1
# 1) minimise JS
for js_output, js_files in JS_FILES.items():
all_js = os.path.join(JS_FOLDER, f'{js_output}.js')
all_min_js = os.path.join(JS_FOLDER, f'{js_output}_.js')
# common/engine changed => need to update, even though we're not using those files
js_dates = [os.path.abspath(f"{JS_FOLDER}{js_file.strip(':')}.js") for js_file in js_files]
js_names = [os.path.abspath(f'{JS_FOLDER}{js_file}.js') for js_file in js_files if js_file[0] != ':']
if js_output == 'all':
# script_js = os.path.join(JS_FOLDER, 'script.js')
extras = []
else:
extras = []
# skip?
update = True
if os.path.isfile(all_min_js) and os.path.isfile(all_js):
all_time = os.path.getmtime(all_min_js)
update = False
for js_date in js_dates + extras:
update |= os.path.isfile(js_date) and os.path.getmtime(js_date) >= all_time
if not update:
print('J', end='')
continue
datas = []
for js_name in js_names:
print(js_name)
script_data = read_text_safe(js_name)
if not script_data:
continue
# process the script.js
if js_name.endswith('script.js'):
script_data = re.sub('@import {(.*?)}', self.import_file, script_data);
script_data = re.sub('// BEGIN.*?// END', '', script_data, flags=re.S)
if self.no_debug:
script_data = re.sub('// <<.*?// >>', '', script_data, flags=re.S)
# use HOST
print(f'host={self.host}')
if self.host != '/':
script_data = script_data.replace("HOST = '/',", f"HOST = '{self.host}',")
datas.append(script_data)
data = '\n'.join(datas)
if '4d' in js_output:
data = self.compress_3d(data)
write_text_safe(all_js, data)
self.compress_js(all_js)
print('j', end='')
change += 1
# 2) minimise CSS
all_css = os.path.join(CSS_FOLDER, 'all.css')
all_min_css = os.path.join(CSS_FOLDER, 'all_.css')
css_names = [os.path.abspath(f'{CSS_FOLDER}{css_file}.css') for css_file in CSS_FILES]
update = True
if os.path.isfile(all_min_css) and os.path.isfile(all_css):
all_time = os.path.getmtime(all_min_css)
update = False
for css_name in css_names:
update |= os.path.isfile(css_name) and os.path.getmtime(css_name) >= all_time
if update:
datas = []
for css_name in css_names:
datas.append(read_text_safe(css_name) or '')
data = '\n'.join(datas)
write_text_safe(all_css, data)
css_data = css_minify(data)
write_text_safe(all_min_css, css_data)
print('c', end='')
change += 1
else:
css_data = read_text_safe(all_min_css) or ''
print('C', end='')
if not change:
print('X', end='')
return
# 3) remove BEGIN ... END
html = read_text_safe(base)
html = re.sub('<!-- BEGIN -->.*?<!-- END -->', '', html, flags=re.S)
html = re.sub('// BEGIN.*?// END', '', html, flags=re.S)
# use the HOST
if self.host != '/':
replaces = {
'href="/': f'href="{self.host}',
'src="/': f'src="{self.host}',
}
for key, value in replaces.items():
html = html.replace(key, value)
# 4) create the new index.html
if not self.no_process:
all_min_js = os.path.join(JS_FOLDER, 'all_.js')
js_data = read_text_safe(all_min_js) or ''
replaces = {
'<!-- {SCRIPT} -->': f'<script>{js_data}</script>',
'<!-- {STYLE} -->': f'<style>{css_data}</style>',
}
for key, value in replaces.items():
html = html.replace(key, value)
html = re.sub('<!-- .*? -->', '', html, flags=re.S)
html = re.sub(r'\n\s+', '\n', html)
filename = os.path.join(LOCAL, 'index.html')
write_text_safe(filename, html)
def synchronise(self) -> bool:
"""Synchronise the files
"""
self.normalise_folders()
self.create_index()
if self.clean:
self.gzip_files(LOCAL, 0, True)
elif self.zip:
self.gzip_files(LOCAL, 0, False)
return True
if __name__ == '__main__':
start = time()
sync = Sync()
if 0:
sync.combine_themes(os.path.join(BASE, 'theme'))
else:
sync.synchronise()
end = time()
print(f'\nELAPSED: {end-start:.3f} seconds')
|
import logging
import logging.handlers
import sys
import os
import json
import sqlite3
import signal
import threading
import time
import difflib
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
import requests.exceptions
cwd = os.path.dirname(os.path.abspath(__file__))
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
stream=sys.stdout,
level=logging.WARNING
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.handlers.RotatingFileHandler(
os.path.join(cwd, 'log.txt'),
maxBytes=102400
)
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
logger.info("Запуск...")
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, requests.exceptions.RequestException):
return
elif issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.error("Непойманное исключение.", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
defaultConfig = {
"ACCESS_TOKEN": "",
"createIndex": False,
"maxCacheAge": 86400,
"preloadMessages": False,
"customActions": False,
"disableMessagesLogging": False,
'enableFlaskWebServer': False,
'useAuth': False,
'users': {
'admin':'password'
},
'port': 8080,
'https': False,
'httpsPort': 8443,
'cert': [
os.path.join(cwd, "cert.pem"),
os.path.join(cwd, "key.pem")
]
}
def grab_token_from_args():
if len(sys.argv) > 1:
defaultConfig['ACCESS_TOKEN'] = sys.argv[1]
elif defaultConfig['ACCESS_TOKEN'] == "":
raise Exception("Не задан ACCESS_TOKEN")
if not os.path.exists(os.path.join(cwd, "config.json")):
with open(os.path.join(cwd, "config.json"), 'w') as conf:
grab_token_from_args()
json.dump(defaultConfig, conf, indent=4)
config = defaultConfig
del defaultConfig
else:
with open(os.path.join(cwd, "config.json"), 'r') as conf:
config = json.load(conf)
for i in config:
if i in defaultConfig:
defaultConfig[i] = config[i]
grab_token_from_args()
if len(set(config)) - len(set(defaultConfig)) != 0:
with open(os.path.join(cwd, "config.json"), 'w') as conf:
json.dump(defaultConfig, conf, indent=4)
config = defaultConfig
del defaultConfig
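# Config handling summary (added): a missing config.json is created from defaultConfig
# plus the token taken from argv; otherwise the known keys from config.json are merged
# into defaultConfig, and the file is rewritten whenever the two key sets differ in
# size (e.g. after an update introduces new options).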
stop_mutex = threading.Lock()
def run_flask_server():
port = config['httpsPort'] if config['https'] else config['port']
import socket
ip = socket.gethostbyname(socket.gethostname())
del socket
while True:
try:
if config['https']:
logger.info("Trying to run on https://%s:%s/", ip, port)
app.run(
host='0.0.0.0',
port=port,
ssl_context=(
config['cert'][0],
config['cert'][1]
)
)
else:
logger.info("Trying to run on http://%s:%s/", ip, port)
app.run(host='0.0.0.0', port=port)
except OSError:
port += 1
if config['enableFlaskWebServer']:
from flaskWebServer import app
threading.Thread(target=run_flask_server).start()
if config['createIndex']:
from updateIndex import indexUpdater
indexUpdater()
def tryAgainIfFailed(func, *args, maxRetries=5, **kwargs):
c = maxRetries
delay = 1
while True:
try:
return func(*args, **kwargs)
except vk_api.exceptions.ApiError:
if str(sys.exc_info()[1]).find("User authorization failed") != -1:
logger.warning("Токен недействителен.")
interrupt_handler(0, None)
raise Warning
except requests.exceptions.RequestException:
if delay < 32:
delay*=2
time.sleep(delay)
continue
except BaseException:
if maxRetries == 0:
logger.exception("После %s попыток %s(%s%s) завершился с ошибкой.", c, func.__name__, args, kwargs)
raise Warning
logger.warning("Перезапуск %s(%s%s) через %s секунд...", func.__name__, args, kwargs, delay)
if delay < 32:
delay*=2
time.sleep(delay)
if maxRetries > 0:
maxRetries -= 1
continue
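# Behaviour of tryAgainIfFailed as implemented above:
# - requests.exceptions.RequestException is retried forever with an exponential delay
#   capped at 32 s and does not consume maxRetries;
# - an ApiError mentioning "User authorization failed" shuts the program down;
# - any other exception consumes one retry, and once maxRetries reaches 0 a Warning is
#   raised; maxRetries=-1 (used for main() at the bottom of the file) retries forever.
# Example (as used just below): account_id = tryAgainIfFailed(vk.users.get)[0]['id']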
vk_session = vk_api.VkApi(token=config['ACCESS_TOKEN'],api_version='5.130')
longpoll = VkLongPoll(vk_session, wait=60, mode=2)
vk = vk_session.get_api()
account_id = tryAgainIfFailed(vk.users.get)[0]['id']
if not config['disableMessagesLogging']:
if not os.path.exists(
os.path.join(
cwd,
"mesAct"
)
):
os.makedirs(
os.path.join(
cwd,
"mesAct"
)
)
f = open(
os.path.join(
cwd,
"mesAct",
"vkGetVideoLink.html"
),
'w',
encoding='utf-8'
)
f.write("""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style>
html,body,iframe{
width: 100%;
height: 100%;
}
</style>
</head>
<body>
<p>Если видео не проигрывается, прямую ссылку можно получить через api:</p>
<script>
function embedLink(id) {
var link = document.createElement('a');
link.href = "https://vk.com/dev/video.get?params[videos]=0_0," + id + "¶ms[count]=1¶ms[offset]=1";
link.innerText = id;
link.setAttribute('target', '_blank')
document.getElementsByTagName("body")[0].appendChild(link);
}
function embedPlayer(link) {
var frame = document.createElement('iframe');
frame.src = link;
frame.style = "width:100%;height:100%;";
frame.setAttribute('allowFullScreen', '')
document.getElementsByTagName("body")[0].appendChild(frame);
}
function splitArgs(){
var args = document.location.search;
var lastAmpersand = args.lastIndexOf('&');
return [args.slice(1, lastAmpersand), args.slice(lastAmpersand + 1)];
}
var args = splitArgs();
embedLink(args[1]);
embedPlayer(args[0]);
</script>
</body>
</html>""")
f.close()
if not os.path.exists(
os.path.join(
cwd,
"messages.db"
)
):
conn = sqlite3.connect(
os.path.join(
cwd,
"messages.db"
),
check_same_thread=False,
isolation_level=None,
timeout=15.0
)
cursor = conn.cursor()
cursor.execute("""CREATE TABLE "messages" (
"peer_id" INTEGER NOT NULL,
"user_id" INTEGER NOT NULL,
"message_id" INTEGER NOT NULL UNIQUE,
"message" TEXT,
"attachments" TEXT,
"timestamp" INTEGER NOT NULL,
"fwd_messages" TEXT
)""")
cursor.execute("""CREATE TABLE "chats_cache" (
"chat_id" INTEGER NOT NULL UNIQUE,
"chat_name" TEXT NOT NULL
)""")
cursor.execute("""CREATE TABLE "users_cache" (
"user_id" INTEGER NOT NULL UNIQUE,
"user_name" TEXT NOT NULL
)""")
account_name = tryAgainIfFailed(
vk.users.get,
user_id=account_id
)[0]
account_name = f"{account_name["first_name"]} {account_name["last_name"]}"
cursor.execute(
"""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""",
(account_id, account_name,)
)
conn.commit()
else:
conn = sqlite3.connect(
os.path.join(cwd, "messages.db"),
check_same_thread=False,
timeout=15.0
)
cursor = conn.cursor()
if not os.path.exists(
os.path.join(
cwd,
"mesAct",
"bootstrap.css"
)
):
f = open(
os.path.join(
cwd,
"mesAct",
"bootstrap.css"
),
'w',
encoding='utf-8'
)
f.write(':root{--blue:#007bff;--indigo:#6610f2;--purple:#6f42c1;--pink:#e83e8c;--red:#dc3545;--orange:#fd7e14;--yellow:#ffc107;--green:#28a745;--teal:#20c997;--cyan:#17a2b8;--white:#fff;--gray:#6c757d;--gray-dark:#343a40;--primary:#007bff;--secondary:#6c757d;--success:#28a745;--info:#17a2b8;--warning:#ffc107;--danger:#dc3545;--light:#f8f9fa;--dark:#343a40;--breakpoint-xs:0;--breakpoint-sm:576px;--breakpoint-md:768px;--breakpoint-lg:992px;--breakpoint-xl:1200px;--font-family-sans-serif:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}*,::after,::before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}dl,ol,ul{margin-top:0;margin-bottom:1rem}b,strong{font-weight:bolder}a{color:#007bff;text-decoration:none;background-color:transparent}img{vertical-align:middle;border-style:none}table{border-collapse:collapse}.table{width:100%;margin-bottom:1rem;color:#212529}.table td,.table th{padding:.75rem;vertical-align:top;border-top:1px solid #dee2e6}.table-sm td,.table-sm th{padding:.3rem}.table-bordered{border:1px solid #dee2e6}.table-bordered td,.table-bordered th{border:1px solid #dee2e6}.list-group{display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:.25rem}.list-group-item{position:relative;display:block;padding:.75rem 1.25rem;background-color:#fff;border:1px solid rgba(0,0,0,.125)}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item+.list-group-item{border-top-width:0}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;pointer-events:auto;content:"";background-color:rgba(0,0,0,0)}.mes{word-break:break-all}img,a,audio{display:block}img{max-width:100%}')
f.close()
if config['customActions']:
from customActions import customActions
cust = customActions(vk, conn, cursor)
def bgWatcher():
while True:
maxCacheAge = config['maxCacheAge']
with stop_mutex:
logger.info("Обслуживание БД...")
try:
showMessagesWithDeletedAttachments()
except BaseException:
logger.exception("Ошибка при поиске удаленных фото")
try:
if maxCacheAge != -1:
cursor.execute(
"""DELETE FROM messages WHERE timestamp < ?""",
(time.time() - maxCacheAge,)
)
conn.commit()
cursor.execute("VACUUM")
else:
maxCacheAge = 86400
except BaseException:
logger.exception("Ошибка при очистке базы данных")
logger.info("Обслуживание БД завершено.")
time.sleep(maxCacheAge)
def interrupt_handler(signum, frame):
conn.commit()
cursor.close()
try:
tableWatcher.cancel()
except AttributeError:
pass
logger.info("Завершение...")
os._exit(0)
signal.signal(signal.SIGINT, interrupt_handler)
signal.signal(signal.SIGTERM, interrupt_handler)
def eventWorker_predefinedDisabled():
global events
while True:
flag.wait()
event = events.pop(0)
with stop_mutex:
try:
cust.act(event)
except BaseException:
logger.exception("Ошибка в customActions. \n %s", vars(event))
if len(events) == 0:
flag.clear()
def eventWorker_customDisabled():
global events
while True:
flag.wait()
event = events.pop(0)
with stop_mutex:
predefinedActions(event)
if len(events) == 0:
flag.clear()
conn.commit()
def eventWorker():
global events
while True:
flag.wait()
event = events.pop(0)
with stop_mutex:
try:
cust.act(event)
except BaseException:
logger.exception("Ошибка в customActions. \n %s", vars(event))
predefinedActions(event)
if len(events) == 0:
flag.clear()
conn.commit()
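# All three worker variants above share the same loop: wait on `flag`, pop the oldest
# event from the shared `events` list, and handle it while holding stop_mutex so the
# periodic DB maintenance in bgWatcher() cannot run mid-update. Which variant gets
# started depends on the customActions / disableMessagesLogging flags (see the thread
# start-up near the end of the file).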
def predefinedActions(event):
try:
if event.type == VkEventType.MESSAGE_NEW:
cursor.execute(
"""INSERT INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""",
(event.peer_id, event.user_id, event.message_id, event.message, event.message_data[1], event.timestamp, event.message_data[2],)
)
conn.commit()
elif event.type == VkEventType.MESSAGE_EDIT:
if event.message_data[0]:
activityReport(event.message_id, event.peer_id, event.user_id, event.timestamp, True, event.message_data[1], event.message_data[2], event.text)
cursor.execute(
"""INSERT or REPLACE INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""",
(event.peer_id, event.user_id, event.message_id, event.message, event.message_data[1], event.timestamp, event.message_data[2],)
)
conn.commit()
elif event.type == VkEventType.MESSAGE_FLAGS_SET:
try:
activityReport(event.message_id)
cursor.execute(
"""DELETE FROM messages WHERE message_id = ?""",
(event.message_id,)
)
conn.commit()
except TypeError:
logger.info("Удаление невозможно, сообщение отсутствует в БД.")
except sqlite3.IntegrityError:
logger.warning("Запущено несколько копий программы, завершение...")
interrupt_handler(0, None)
except Warning:
pass
except BaseException:
logger.exception("Ошибка при сохранении сообщения. \n %s", vars(event))
def main():
logger.info("Запущен основной цикл.")
global events
for event in longpoll.listen():
try:
if event.raw[0] == 4 or event.raw[0] == 5:
if event.attachments != {}:
event.message_data = getAttachments(event)
else:
event.message_data = True, None, None
if event.from_user and event.raw[2] & 2:
event.user_id = account_id
elif event.from_group:
if event.from_me:
event.user_id = account_id
else:
event.user_id = event.peer_id
if not event.message:
event.message = None
events.append(event)
flag.set()
elif event.raw[0] == 2 and (event.raw[2] & 131072 or event.raw[2] & 128):
events.append(event)
flag.set()
except Warning:
pass
except BaseException:
logger.exception("Ошибка при добавлении события в очередь. \n %s", vars(event))
def showMessagesWithDeletedAttachments():
cursor.execute("""SELECT message_id, attachments FROM messages WHERE attachments IS NOT NULL""")
fetch_attachments = [[str(i[0]), json.loads(i[1])] for i in cursor.fetchall()]
cursor.execute("""SELECT message_id, fwd_messages FROM messages WHERE fwd_messages IS NOT NULL""")
fetch_fwd = [[str(i[0]), json.loads(i[1])] for i in cursor.fetchall()]
c = 0
for i in range(len(fetch_attachments)):
for j in fetch_attachments[i - c][1]:
if j['type'] == 'photo' or j['type'] == 'video' or j['type'] == 'doc':
break
else:
del fetch_attachments[i - c]
c += 1
messages_attachments = []
messages_fwd = []
for i in [[j[0] for j in fetch_attachments[i:i + 100]] for i in range(0, len(fetch_attachments), 100)]:
messages_attachments.extend(tryAgainIfFailed(
vk.messages.getById,
message_ids=','.join(i))['items']
)
for i in [[j[0] for j in fetch_fwd[i:i + 100]] for i in range(0, len(fetch_fwd), 100)]:
messages_fwd.extend(tryAgainIfFailed(
vk.messages.getById,
message_ids=','.join(i))['items']
)
c = 0
for i in range(len(fetch_attachments)):
if compareAttachments(messages_attachments[i - c]['attachments'], fetch_attachments[i - c][1]):
del fetch_attachments[i - c]
del messages_attachments[i - c]
c += 1
for i in range(len(fetch_attachments)):
activityReport(fetch_attachments[i][0])
if messages_attachments[i]['attachments'] == []:
cursor.execute(
"""UPDATE messages SET attachments = ? WHERE message_id = ?""",
(None, fetch_attachments[i][0],)
)
else:
cursor.execute(
"""UPDATE messages SET attachments = ? WHERE message_id = ?""",
(
json.dumps(messages_attachments[i]['attachments']),
fetch_attachments[i][0],
)
)
c = 0
for i in range(len(fetch_fwd)):
if compareFwd(
messages_fwd[i - c],
{
'fwd_messages': fetch_fwd[i - c][1]
}
):
del fetch_fwd[i - c]
del messages_fwd[i - c]
c += 1
for i in range(len(fetch_fwd)):
activityReport(fetch_fwd[i][0])
if messages_fwd[i]['fwd_messages'] == []:
cursor.execute(
"""UPDATE messages SET fwd_messages = ? WHERE message_id = ?""",
(None, fetch_fwd[i][0],)
)
else:
cursor.execute(
"""UPDATE messages SET fwd_messages = ? WHERE message_id = ?""",
(
json.dumps(messages_fwd[i]['fwd_messages']),
fetch_fwd[i][0],
)
)
conn.commit()
def compareFwd(new, old):
if 'reply_message' in new:
new['fwd_messages'] = [new['reply_message']]
if 'reply_message' in old:
old['fwd_messages'] = [old['reply_message']]
for i in range(len(old['fwd_messages'])):
if 'fwd_messages' in old['fwd_messages'][i] and 'fwd_messages' in new['fwd_messages'][i]:
if not compareFwd(
new['fwd_messages'][i],
old['fwd_messages'][i]
):
return False
if not compareAttachments(
new['fwd_messages'][i]['attachments'],
old['fwd_messages'][i]['attachments']
):
return False
return True
def compareAttachments(new, old):
if len(new) < len(old):
return False
return True
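# Note (added): compareAttachments only compares counts, so an attachment set is
# reported as changed only when items were removed; edits that keep (or increase) the
# number of attachments are treated as "unchanged" by showMessagesWithDeletedAttachments().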
def attachmentsParse(urls):
if urls is None:
return ""
html = """<div>
"""
for i in urls:
urlSplit = i.split(',')
if i.find('vk.com/sticker/') != -1:
html += """ <img src="{}" />
""".format(i)
elif i.find('.jpg') != -1 and i.find(',') == -1:
html += """ <img src="{}" />
""".format(i)
elif i.find('.mp3') != -1:
html += """ <audio src="{}" controls></audio>
""".format(i)
elif i.find('https://vk.com/audio') != -1:
html += """ <a href="{}" target="_blank">
{}
</a>
""".format(i, i[23:-11].replace('%20', ' '))
elif i.find('@') != -1:
i = i.rsplit('@', 1)
html += """ <a href="{}" target="_blank">
{}
</a>
""".format(i[1], i[0])
elif len(urlSplit) == 3:
html += """ <a href="{}" target="_blank">
Видео
<img src="{}"/>
</a>
""".format(f"./vkGetVideoLink.html?{urlSplit[1]}&{urlSplit[2]}", urlSplit[0])
else:
html += """ <a href="{0}" target="_blank">
{0}
</a>
""".format(i)
html += """</div>"""
return html
def getAttachments(event):
message_id = event.message_id
fullLoadUnNeeded = not (event.raw[0] == 5 or 'fwd' in event.attachments)
count = 0
if fullLoadUnNeeded:
for i in range(1,11):
if f'attach{i}_type' in event.attachments:
if event.attachments[f'attach{i}_type'] not in ('sticker', 'link'):
fullLoadUnNeeded = False
else:
count = i
break
if fullLoadUnNeeded:
attachments = []
for i in range(1,count):
if event.attachments[f'attach{i}_type'] == 'sticker':
attachments.append({'type':'sticker','sticker':{'images':[{'height':64,'url':f'https://vk.com/sticker/1-{event.attachments[f'attach{i}']}-64'}]}})
else:
if f'attach{i}_title' in event.attachments:
title = event.attachments[f'attach{i}_title']
else:
title = event.attachments[f'attach{i}_url']
attachments.append({'type':'link','link':{'title':title,'url':event.attachments[f'attach{i}_url']}})
return False, json.dumps(attachments, ensure_ascii=False,), None
mes = tryAgainIfFailed(
vk.messages.getById,
message_ids=message_id
)['items']
if not len(mes):
logger.info("Не удалось запросить вложения для сообщения, message_id = %i.", event.message_id)
return False, "[]", "[]"
else:
mes = mes[0]
hasUpdateTime = 'update_time' in mes
fwd_messages = None
if 'reply_message' in mes:
fwd_messages = json.dumps([mes['reply_message']], ensure_ascii=False,)
elif mes['fwd_messages'] != []:
fwd_messages = json.dumps(mes['fwd_messages'], ensure_ascii=False,)
if mes['attachments'] == []:
attachments = None
else:
attachments = json.dumps(mes['attachments'], ensure_ascii=False,)
return hasUpdateTime, attachments, fwd_messages
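# getAttachments() returns a 3-tuple (hasUpdateTime, attachments_json, fwd_json):
# the first element is True only when messages.getById reports an 'update_time'
# (i.e. the message had already been edited), and the other two are JSON strings
# (or None) that predefinedActions() stores verbatim in the messages table.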
def parseUrls(attachments):
urls = []
for i in attachments:
if i['type'] == 'photo':
maxHeight = 0
maxUrl = ""
for j in i['photo']['sizes']:
if j['height'] > maxHeight:
maxHeight = j['height']
maxUrl = j['url']
urls.append(maxUrl)
elif i['type'] == 'audio_message':
urls.append(i['audio_message']['link_mp3'])
elif i['type'] == 'sticker':
urls.append(i['sticker']['images'][0]['url'])
elif i['type'] == 'gift':
urls.append(i['gift']['thumb_48'])
elif i['type'] == 'link':
urls.append(f"Ссылка: {i["link"]["title"]}@{i["link"]["url"]}")
elif i['type'] == 'video':
urls.append(f"{i["video"]["image"][0]["url"]},{i["video"]["player"]},{i["video"]["owner_id"]}_{i["video"]["id"]}_{i["video"]["access_key"]}")
elif i['type'] == 'wall':
urls.append(f"Пост: {i["wall"]["text"][:25]}@https://vk.com/wall{i["wall"]["from_id"]}_{i["wall"]["id"]}")
elif i['type'] == 'wall_reply':
urls.append(f"Комментарий: {i["wall_reply"]["text"][:25]}@https://vk.com/wall{i["wall_reply"]["owner_id"]}_{i["wall_reply"]["post_id"]}?reply={i["wall_reply"]["id"]}")
elif i['type'] == 'audio':
urls.append(f"https://vk.com/audio?q={i["audio"]["artist"].replace(" ", "%20")}%20-%20{i["audio"]["title"].replace(" ", "%20")}&tab=global")
elif i['type'] == 'audio_playlist':
urls.append(f"Плейлист: {i["audio_playlist"]["title"]}@https://vk.com/music?z=audio_playlist{i["audio_playlist"]["owner_id"]}_{i["audio_playlist"]["id"]}/{i["audio_playlist"]["access_key"]}")
elif i['type'] == 'market':
urls.append(f"https://vk.com/market?w=product{i["market"]["owner_id"]}_{i["market"]["id"]}")
elif i['type'] == 'poll':
urls.append(f"Голосование: {i["poll"]["question"][:25]}@https://vk.com/poll{i["poll"]["owner_id"]}_{i["poll"]["id"]}")
elif i['type'] == 'doc':
urls.append(f"Документ: {i["doc"]["title"]}@{i["doc"]["url"]}")
else:
if 'url' in i[i['type']]:
urls.append(i[i['type']]['url'])
if urls == []:
return None
return urls
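# parseUrls() flattens attachments into display strings that attachmentsParse() later
# turns into HTML: plain image/audio URLs, "Title@URL" pairs for links, posts, docs,
# polls and playlists, and a comma-separated "thumbnail,player_url,video_id" triple
# for videos (rendered via vkGetVideoLink.html). It returns None when nothing matched.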
def getPeerName(id):
if id > 2000000000:
cursor.execute("""SELECT chat_name FROM chats_cache WHERE chat_id = ?""", (id,))
fetch = cursor.fetchone()
if fetch is None:
try:
name = tryAgainIfFailed(
vk.messages.getChat,
chat_id=id-2000000000
)['title']
cursor.execute("""INSERT INTO chats_cache (chat_id,chat_name) VALUES (?,?)""", (id, name,))
conn.commit()
except Warning:
name = "Секретный чат, используйте токен другого приложения"
else:
name = fetch[0]
elif id < 0:
cursor.execute("""SELECT user_name FROM users_cache WHERE user_id = ?""", (id,))
fetch = cursor.fetchone()
if fetch is None:
name = tryAgainIfFailed(
vk.groups.getById,
group_id=-id
)[0]['name']
cursor.execute("""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (id, name,))
conn.commit()
else:
name = fetch[0]
else:
cursor.execute("""SELECT user_name FROM users_cache WHERE user_id = ?""", (id,))
fetch = cursor.fetchone()
if fetch is None:
name = tryAgainIfFailed(
vk.users.get,
user_id=id
)[0]
name = f"{name["first_name"]} {name["last_name"]}"
cursor.execute("""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (id, name,))
conn.commit()
else:
name = fetch[0]
return name
def fwdParse(fwd):
html = """<table class="table table-sm table-bordered">
"""
for i in fwd:
user_name = getPeerName(i['from_id'])
if i['from_id'] < 0:
html += """ <tr>
<td>
<a href='https://vk.com/public{}' target="_blank">
{}
</a>
</td>
</tr>
""".format(-i['from_id'], user_name)
else:
html += """ <tr>
<td>
<a href='https://vk.com/id{}' target="_blank">
{}
</a>
</td>
</tr>
""".format(i['from_id'], user_name)
if i['text'] != "":
html += """ <tr>
<td>
<div class='mes'>
{}
</div>
""".format(xssFilter(i['text']))
else:
html += """ <tr>
<td>
"""
if i['attachments'] != []:
html += attachmentsParse(parseUrls(i['attachments']))
if 'fwd_messages' in i:
html += fwdParse(i['fwd_messages'])
elif 'reply_message' in i:
html += fwdParse([i['reply_message']])
html += """ </td>
</tr>
<tr>
<td>
{}
</td>
</tr>
""".format(time.strftime('%H:%M:%S %d.%m.%y', time.localtime(i['date'])))
html += "</table>"
return html
def xssFilter(s):
return s\
.replace('<', '&lt;')\
.replace('>', '&gt;')\
.replace('\n', '<br />')
def compareStrings(a, b):
aCounter = 0
bCounter = 0
for i in difflib.SequenceMatcher(None, a, b).get_opcodes():
if i[0] == 'insert':
b = f"{b[: i[3]+bCounter]}<ins>{b[i[3]+bCounter : i[4]+bCounter]}</ins>{b[i[4]+bCounter:]}"
bCounter += 11
elif i[0] == 'delete':
a = f"{a[: i[1]+aCounter]}<ins>{a[i[1]+aCounter : i[2]+aCounter]}</ins>{a[i[2]+aCounter:]}"
aCounter += 11
elif i[0] == 'replace':
a = f"{a[: i[1]+aCounter]}<ins>{a[i[1]+aCounter : i[2]+aCounter]}</ins>{a[i[2]+aCounter:]}"
b = f"{b[: i[3]+bCounter]}<ins>{b[i[3]+bCounter : i[4]+bCounter]}</ins>{b[i[4]+bCounter:]}"
aCounter += 11
bCounter += 11
return a, b
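# compareStrings() wraps every inserted/deleted/replaced span in <ins>...</ins> so the
# HTML report can highlight what changed between the old and new text. The running
# aCounter/bCounter offsets (+11 per wrap) account for the 11 characters added by each
# "<ins>" + "</ins>" pair when later opcodes index into the already-modified strings.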
def activityReport(message_id, peer_id=None, user_id=None, timestamp=None, isEdited=False, attachments=None, fwd=None, message=None):
try:
peer_name = user_name = oldMessage = oldAttachments = date = oldFwd = None
cursor.execute("""SELECT * FROM messages WHERE message_id = ?""", (message_id,))
fetch = cursor.fetchone()
if attachments is not None:
attachments = parseUrls(json.loads(attachments))
if fwd is not None:
fwd = json.loads(fwd)
if fetch is None:
if isEdited:
logger.info("Изменение сообщения, отсутствующего в БД, message_id = %i.", message_id)
fetch = [0]*7
peer_name = getPeerName(peer_id)
user_name = getPeerName(user_id)
oldMessage = f"⚠️ {message}"
oldAttachments = attachments
oldFwd = fwd
date = f"<b>Доб:</b> {time.strftime("%H:%M:%S %d.%m", time.localtime(timestamp))}<br /><b>Изм:</b> {time.strftime("%H:%M:%S %d.%m", time.localtime())}"
else:
raise TypeError
else:
if fetch[3] is not None:
oldMessage = str(fetch[3])
if fetch[4] is not None:
oldAttachments = parseUrls(json.loads(fetch[4]))
if fetch[6] is not None:
oldFwd = json.loads(fetch[6])
peer_name = getPeerName(fetch[0])
user_name = getPeerName(fetch[1])
date = f"<b>Доб:</b> {time.strftime("%H:%M:%S %d.%m", time.localtime(fetch[5]))}<br /><b>Изм:</b> {time.strftime("%H:%M:%S %d.%m", time.localtime())}"
peer_id = fetch[0]
user_id = fetch[1]
del fetch
row = """ <tr><!-- {} -->
<td>{}
</td>
<td>{}
</td>
{}
<td>
{}
</td>
</tr>
"""
messageBlock = """
<div class='mes'>
{}
</div>"""
attachmentsBlock = """
<div>
<b>Вложения</b><br />
{}
</div>"""
fwdBlock = """
<div>
<b>Пересланное</b><br />
{}
</div>"""
if peer_id > 2000000000:
peer_id = """
<a href='https://vk.com/im?sel=c{}' target='_blank'>
{}
</a>""".format(str(peer_id-2000000000), peer_name)
elif peer_id < 0:
peer_id = """
<a href='https://vk.com/public{}' target='_blank'>
{}
</a>""".format(str(-peer_id), peer_name)
else:
peer_id = """
<a href='https://vk.com/id{}' target='_blank'>
{}
</a>""".format(str(peer_id), peer_name)
if user_id < 0:
user_id = """
<a href='https://vk.com/public{}' target='_blank'>
{}
</a>""".format(str(-user_id), user_name)
else:
user_id = """
<a href='https://vk.com/id{}' target='_blank'>
{}
</a>""".format(str(user_id), user_name)
if isEdited:
if not (oldMessage is None or message is None):
message = xssFilter(message)
oldMessage = xssFilter(oldMessage)
message, oldMessage = compareStrings(message, oldMessage)
oldMessage = messageBlock.format(oldMessage)
message = messageBlock.format(message)
elif oldMessage is None:
oldMessage = ""
message = messageBlock.format(xssFilter(message))
else:
oldMessage = messageBlock.format(xssFilter(oldMessage))
message = ""
if oldAttachments is not None:
oldAttachments = attachmentsBlock.format(attachmentsParse(oldAttachments))
else:
oldAttachments = ""
if oldFwd is not None:
oldFwd = fwdBlock.format(fwdParse(oldFwd))
else:
oldFwd = ""
if attachments is not None:
attachments = attachmentsBlock.format(attachmentsParse(attachments))
else:
attachments = ""
if fwd is not None:
fwd = fwdBlock.format(fwdParse(fwd))
else:
fwd = ""
messageBlock = """<td width='50%'>
<b>Старое</b><br />{}
</td>
<td width='50%'>
<b>Новое</b><br />{}
</td>""".format(oldMessage+oldAttachments+oldFwd, message+attachments+fwd)
else:
if oldMessage is not None:
oldMessage = messageBlock.format(xssFilter(oldMessage))
else:
oldMessage = ""
if oldAttachments is not None:
oldAttachments = attachmentsBlock.format(attachmentsParse(oldAttachments))
else:
oldAttachments = ""
if oldFwd is not None:
oldFwd = fwdBlock.format(fwdParse(oldFwd))
else:
oldFwd = ""
messageBlock = """<td width='100%' colspan='2'>
<b>Удалено</b><br />{}
</td>""".format(oldMessage+oldAttachments+oldFwd)
row = row.format(message_id, peer_id, user_id, messageBlock, date)
if os.path.exists(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime("%d%m%y", time.localtime())}.html"
)
):
messagesActivities = open(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime("%d%m%y",time.localtime())}.html"
),
'r',
encoding='utf-8'
)
messagesDump = messagesActivities.read()
messagesActivities.close()
messagesActivities = open(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime("%d%m%y",time.localtime())}.html"
),
'w',
encoding='utf-8'
)
else:
messagesDump = template
messagesActivities = open(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime("%d%m%y",time.localtime())}.html"
),
'w',
encoding='utf-8'
)
messagesDump = messagesDump[:offset]+row+messagesDump[offset:]
messagesActivities.write(messagesDump)
messagesActivities.close()
except TypeError:
raise TypeError
except BaseException:
logger.exception("Ошибка при логгировании изменений.")
if not config['disableMessagesLogging']:
tableWatcher = threading.Thread(target=bgWatcher)
tableWatcher.start()
template = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link rel="stylesheet" href="./bootstrap.css">
</head>
<body>
<table class="table table-sm">
</table>
</body>
</html>"""
offset = template.index(""" </table>""")
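# Report rows are spliced in at this fixed `offset` (the position of the closing
# </table> in the page template), so the newest entries end up at the top of each
# day's messages_<ddmmyy>.html file.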
events = []
flag = threading.Event()
def preloadMessages():
logger.info("Предзагрузка сообщений...")
offset = 0
peer_ids = []
messages = []
shouldContinue = True
try:
while shouldContinue:
shouldContinue = False
dialogs = tryAgainIfFailed(vk.messages.getConversations, offset=offset, count=20)
for i in range(0,len(dialogs['items'])):
if dialogs['items'][i]['last_message']['date'] >= time.time() - config['maxCacheAge']:
peer_ids.append(dialogs['items'][i]['conversation']['peer']['id'])
if i == len(dialogs['items']) - 1:
shouldContinue = True
offset+=20
for i in peer_ids:
offset = 0
if i > 2000000000:
count = 200
else:
count = 50
shouldContinue = True
while shouldContinue:
shouldContinue = False
mes = vk.messages.getHistory(offset=offset, count=count, peer_id=i)['items']
if mes[-1]['date']>= time.time() - config['maxCacheAge']:
shouldContinue = True
offset+=count
for j in mes:
if j['date'] >= time.time() - config['maxCacheAge']:
messages.append(j)
for i in messages:
message_id = i['id']
with stop_mutex:
cursor.execute("""SELECT message_id FROM messages WHERE message_id = ?""", (message_id,))
if cursor.fetchone() is not None:
continue
peer_id = i['peer_id']
user_id = i['from_id']
message = i['text']
timestamp = i['date']
fwd_messages = None
if 'reply_message' in i:
fwd_messages = json.dumps([i['reply_message']], ensure_ascii=False,)
elif i['fwd_messages'] != []:
fwd_messages = json.dumps(i['fwd_messages'], ensure_ascii=False,)
if i['attachments'] == []:
attachments = None
else:
attachments = json.dumps(i['attachments'], ensure_ascii=False,)
with stop_mutex:
cursor.execute(
"""INSERT INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""",
(peer_id, user_id, message_id, message, attachments, timestamp, fwd_messages,)
)
conn.commit()
except BaseException:
logger.exception("Ошибка во время предзагрузки сообщений")
logger.info("Предзагрузка сообщений завершена.")
if config['customActions'] and config['disableMessagesLogging']:
threading.Thread(target=eventWorker_predefinedDisabled).start()
elif not config['disableMessagesLogging'] and not config['customActions']:
threading.Thread(target=eventWorker_customDisabled).start()
else:
threading.Thread(target=eventWorker).start()
if config['preloadMessages']:
threading.Thread(target=preloadMessages).start()
try:
tryAgainIfFailed(
main,
maxRetries=-1
)
except Warning:
pass
| import logging
import logging.handlers
import sys
import os
import json
import sqlite3
import signal
import threading
import time
import difflib
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
import requests.exceptions
cwd = os.path.dirname(os.path.abspath(__file__))
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
stream=sys.stdout,
level=logging.WARNING
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.handlers.RotatingFileHandler(
os.path.join(cwd, 'log.txt'),
maxBytes=102400
)
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
logger.info("Запуск...")
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, requests.exceptions.RequestException):
return
elif issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.error("Непойманное исключение.", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
defaultConfig = {
"ACCESS_TOKEN": "",
"createIndex": False,
"maxCacheAge": 86400,
"preloadMessages": False,
"customActions": False,
"disableMessagesLogging": False,
'enableFlaskWebServer': False,
'useAuth': False,
'users': {
'admin':'password'
},
'port': 8080,
'https': False,
'httpsPort': 8443,
'cert': [
os.path.join(cwd, "cert.pem"),
os.path.join(cwd, "key.pem")
]
}
def grab_token_from_args():
if len(sys.argv) > 1:
defaultConfig['ACCESS_TOKEN'] = sys.argv[1]
elif defaultConfig['ACCESS_TOKEN'] == "":
raise Exception("Не задан ACCESS_TOKEN")
if not os.path.exists(os.path.join(cwd, "config.json")):
with open(os.path.join(cwd, "config.json"), 'w') as conf:
grab_token_from_args()
json.dump(defaultConfig, conf, indent=4)
config = defaultConfig
del defaultConfig
else:
with open(os.path.join(cwd, "config.json"), 'r') as conf:
config = json.load(conf)
for i in config:
if i in defaultConfig:
defaultConfig[i] = config[i]
grab_token_from_args()
if len(set(config)) - len(set(defaultConfig)) != 0:
with open(os.path.join(cwd, "config.json"), 'w') as conf:
json.dump(defaultConfig, conf, indent=4)
config = defaultConfig
del defaultConfig
stop_mutex = threading.Lock()
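# run_flask_server: serves the optional Flask viewer on 0.0.0.0, using the HTTPS port and certificate
# pair when config['https'] is set; on OSError (port already in use) it bumps the port and retries.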
def run_flask_server():
port = config['httpsPort'] if config['https'] else config['port']
import socket
ip = socket.gethostbyname(socket.gethostname())
del socket
while True:
try:
if config['https']:
logger.info("Trying to run on https://%s:%s/", ip, port)
app.run(
host='0.0.0.0',
port=port,
ssl_context=(
config['cert'][0],
config['cert'][1]
)
)
else:
logger.info("Trying to run on http://%s:%s/", ip, port)
app.run(host='0.0.0.0', port=port)
except OSError:
port += 1
if config['enableFlaskWebServer']:
from flaskWebServer import app
threading.Thread(target=run_flask_server).start()
if config['createIndex']:
from updateIndex import indexUpdater
indexUpdater()
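# tryAgainIfFailed: generic retry wrapper. Network errors back off exponentially (capped at 32 s),
# other exceptions retry up to maxRetries times (-1 means retry forever); an invalid token or
# exhausted retries is surfaced as a Warning. Example use below: tryAgainIfFailed(vk.users.get)[0]['id'].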
def tryAgainIfFailed(func, *args, maxRetries=5, **kwargs):
c = maxRetries
delay = 1
while True:
try:
return func(*args, **kwargs)
except vk_api.exceptions.ApiError:
if str(sys.exc_info()[1]).find("User authorization failed") != -1:
logger.warning("Токен недействителен.")
interrupt_handler(0, None)
raise Warning
except requests.exceptions.RequestException:
if delay < 32:
delay*=2
time.sleep(delay)
continue
except BaseException:
if maxRetries == 0:
logger.exception("После %s попыток %s(%s%s) завершился с ошибкой.", c, func.__name__, args, kwargs)
raise Warning
logger.warning("Перезапуск %s(%s%s) через %s секунд...", func.__name__, args, kwargs, delay)
if delay < 32:
delay*=2
time.sleep(delay)
if maxRetries > 0:
maxRetries -= 1
continue
vk_session = vk_api.VkApi(token=config['ACCESS_TOKEN'],api_version='5.130')
longpoll = VkLongPoll(vk_session, wait=60, mode=2)
vk = vk_session.get_api()
account_id = tryAgainIfFailed(vk.users.get)[0]['id']
if not config['disableMessagesLogging']:
if not os.path.exists(
os.path.join(
cwd,
"mesAct"
)
):
os.makedirs(
os.path.join(
cwd,
"mesAct"
)
)
f = open(
os.path.join(
cwd,
"mesAct",
"vkGetVideoLink.html"
),
'w',
encoding='utf-8'
)
f.write("""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style>
html,body,iframe{
width: 100%;
height: 100%;
}
</style>
</head>
<body>
<p>Если видео не проигрывается, прямую ссылку можно получить через api:</p>
<script>
function embedLink(id) {
var link = document.createElement('a');
link.href = "https://vk.com/dev/video.get?params[videos]=0_0," + id + "¶ms[count]=1¶ms[offset]=1";
link.innerText = id;
link.setAttribute('target', '_blank')
document.getElementsByTagName("body")[0].appendChild(link);
}
function embedPlayer(link) {
var frame = document.createElement('iframe');
frame.src = link;
frame.style = "width:100%;height:100%;";
frame.setAttribute('allowFullScreen', '')
document.getElementsByTagName("body")[0].appendChild(frame);
}
function splitArgs(){
var args = document.location.search;
var lastAmpersand = args.lastIndexOf('&');
return [args.slice(1, lastAmpersand), args.slice(lastAmpersand + 1)];
}
var args = splitArgs();
embedLink(args[1]);
embedPlayer(args[0]);
</script>
</body>
</html>""")
f.close()
if not os.path.exists(
os.path.join(
cwd,
"messages.db"
)
):
conn = sqlite3.connect(
os.path.join(
cwd,
"messages.db"
),
check_same_thread=False,
isolation_level=None,
timeout=15.0
)
cursor = conn.cursor()
cursor.execute("""CREATE TABLE "messages" (
"peer_id" INTEGER NOT NULL,
"user_id" INTEGER NOT NULL,
"message_id" INTEGER NOT NULL UNIQUE,
"message" TEXT,
"attachments" TEXT,
"timestamp" INTEGER NOT NULL,
"fwd_messages" TEXT
)""")
cursor.execute("""CREATE TABLE "chats_cache" (
"chat_id" INTEGER NOT NULL UNIQUE,
"chat_name" TEXT NOT NULL
)""")
cursor.execute("""CREATE TABLE "users_cache" (
"user_id" INTEGER NOT NULL UNIQUE,
"user_name" TEXT NOT NULL
)""")
account_name = tryAgainIfFailed(
vk.users.get,
user_id=account_id
)[0]
account_name = f"{account_name['first_name']} {account_name['last_name']}"
cursor.execute(
"""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""",
(account_id, account_name,)
)
conn.commit()
else:
conn = sqlite3.connect(
os.path.join(cwd, "messages.db"),
check_same_thread=False,
timeout=15.0
)
cursor = conn.cursor()
if not os.path.exists(
os.path.join(
cwd,
"mesAct",
"bootstrap.css"
)
):
f = open(
os.path.join(
cwd,
"mesAct",
"bootstrap.css"
),
'w',
encoding='utf-8'
)
f.write(':root{--blue:#007bff;--indigo:#6610f2;--purple:#6f42c1;--pink:#e83e8c;--red:#dc3545;--orange:#fd7e14;--yellow:#ffc107;--green:#28a745;--teal:#20c997;--cyan:#17a2b8;--white:#fff;--gray:#6c757d;--gray-dark:#343a40;--primary:#007bff;--secondary:#6c757d;--success:#28a745;--info:#17a2b8;--warning:#ffc107;--danger:#dc3545;--light:#f8f9fa;--dark:#343a40;--breakpoint-xs:0;--breakpoint-sm:576px;--breakpoint-md:768px;--breakpoint-lg:992px;--breakpoint-xl:1200px;--font-family-sans-serif:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}*,::after,::before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}dl,ol,ul{margin-top:0;margin-bottom:1rem}b,strong{font-weight:bolder}a{color:#007bff;text-decoration:none;background-color:transparent}img{vertical-align:middle;border-style:none}table{border-collapse:collapse}.table{width:100%;margin-bottom:1rem;color:#212529}.table td,.table th{padding:.75rem;vertical-align:top;border-top:1px solid #dee2e6}.table-sm td,.table-sm th{padding:.3rem}.table-bordered{border:1px solid #dee2e6}.table-bordered td,.table-bordered th{border:1px solid #dee2e6}.list-group{display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:.25rem}.list-group-item{position:relative;display:block;padding:.75rem 1.25rem;background-color:#fff;border:1px solid rgba(0,0,0,.125)}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item+.list-group-item{border-top-width:0}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;pointer-events:auto;content:"";background-color:rgba(0,0,0,0)}.mes{word-break:break-all}img,a,audio{display:block}img{max-width:100%}')
f.close()
if config['customActions']:
from customActions import customActions
cust = customActions(vk, conn, cursor)
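# bgWatcher: background maintenance thread. Every maxCacheAge seconds it re-checks stored
# attachments/forwards for deletions, drops messages older than maxCacheAge (unless it is -1)
# and VACUUMs the SQLite database.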
def bgWatcher():
while True:
maxCacheAge = config['maxCacheAge']
with stop_mutex:
logger.info("Обслуживание БД...")
try:
showMessagesWithDeletedAttachments()
except BaseException:
logger.exception("Ошибка при поиске удаленных фото")
try:
if maxCacheAge != -1:
cursor.execute(
"""DELETE FROM messages WHERE timestamp < ?""",
(time.time() - maxCacheAge,)
)
conn.commit()
cursor.execute("VACUUM")
else:
maxCacheAge = 86400
except BaseException:
logger.exception("Ошибка при очистке базы данных")
logger.info("Обслуживание БД завершено.")
time.sleep(maxCacheAge)
def interrupt_handler(signum, frame):
conn.commit()
cursor.close()
try:
tableWatcher.cancel()
except AttributeError:
pass
logger.info("Завершение...")
os._exit(0)
signal.signal(signal.SIGINT, interrupt_handler)
signal.signal(signal.SIGTERM, interrupt_handler)
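# Event queue workers: eventWorker_predefinedDisabled runs only customActions,
# eventWorker_customDisabled runs only the built-in logging (predefinedActions), and eventWorker
# runs both. Each drains the shared `events` list under stop_mutex and waits on `flag` once empty.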
def eventWorker_predefinedDisabled():
global events
while True:
flag.wait()
event = events.pop(0)
with stop_mutex:
try:
cust.act(event)
except BaseException:
logger.exception("Ошибка в customActions. \n %s", vars(event))
if len(events) == 0:
flag.clear()
def eventWorker_customDisabled():
global events
while True:
flag.wait()
event = events.pop(0)
with stop_mutex:
predefinedActions(event)
if len(events) == 0:
flag.clear()
conn.commit()
def eventWorker():
global events
while True:
flag.wait()
event = events.pop(0)
with stop_mutex:
try:
cust.act(event)
except BaseException:
logger.exception("Ошибка в customActions. \n %s", vars(event))
predefinedActions(event)
if len(events) == 0:
flag.clear()
conn.commit()
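# predefinedActions: built-in handling of long-poll events — inserts new messages into the DB,
# logs edits (via activityReport with the old row) and deletions, and shuts down if a second
# running copy of the program causes an sqlite3.IntegrityError.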
def predefinedActions(event):
try:
if event.type == VkEventType.MESSAGE_NEW:
cursor.execute(
"""INSERT INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""",
(event.peer_id, event.user_id, event.message_id, event.message, event.message_data[1], event.timestamp, event.message_data[2],)
)
conn.commit()
elif event.type == VkEventType.MESSAGE_EDIT:
if event.message_data[0]:
activityReport(event.message_id, event.peer_id, event.user_id, event.timestamp, True, event.message_data[1], event.message_data[2], event.text)
cursor.execute(
"""INSERT or REPLACE INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""",
(event.peer_id, event.user_id, event.message_id, event.message, event.message_data[1], event.timestamp, event.message_data[2],)
)
conn.commit()
elif event.type == VkEventType.MESSAGE_FLAGS_SET:
try:
activityReport(event.message_id)
cursor.execute(
"""DELETE FROM messages WHERE message_id = ?""",
(event.message_id,)
)
conn.commit()
except TypeError:
logger.info("Удаление невозможно, сообщение отсутствует в БД.")
except sqlite3.IntegrityError:
logger.warning("Запущено несколько копий программы, завершение...")
interrupt_handler(0, None)
except Warning:
pass
except BaseException:
logger.exception("Ошибка при сохранении сообщения. \n %s", vars(event))
def main():
logger.info("Запущен основной цикл.")
global events
for event in longpoll.listen():
try:
if event.raw[0] == 4 or event.raw[0] == 5:
if event.attachments != {}:
event.message_data = getAttachments(event)
else:
event.message_data = True, None, None
if event.from_user and event.raw[2] & 2:
event.user_id = account_id
elif event.from_group:
if event.from_me:
event.user_id = account_id
else:
event.user_id = event.peer_id
if not event.message:
event.message = None
events.append(event)
flag.set()
elif event.raw[0] == 2 and (event.raw[2] & 131072 or event.raw[2] & 128):
events.append(event)
flag.set()
except Warning:
pass
except BaseException:
logger.exception("Ошибка при добавлении события в очередь. \n %s", vars(event))
def showMessagesWithDeletedAttachments():
cursor.execute("""SELECT message_id, attachments FROM messages WHERE attachments IS NOT NULL""")
fetch_attachments = [[str(i[0]), json.loads(i[1])] for i in cursor.fetchall()]
cursor.execute("""SELECT message_id, fwd_messages FROM messages WHERE fwd_messages IS NOT NULL""")
fetch_fwd = [[str(i[0]), json.loads(i[1])] for i in cursor.fetchall()]
c = 0
for i in range(len(fetch_attachments)):
for j in fetch_attachments[i - c][1]:
if j['type'] == 'photo' or j['type'] == 'video' or j['type'] == 'doc':
break
else:
del fetch_attachments[i - c]
c += 1
messages_attachments = []
messages_fwd = []
for i in [[j[0] for j in fetch_attachments[i:i + 100]] for i in range(0, len(fetch_attachments), 100)]:
messages_attachments.extend(tryAgainIfFailed(
vk.messages.getById,
message_ids=','.join(i))['items']
)
for i in [[j[0] for j in fetch_fwd[i:i + 100]] for i in range(0, len(fetch_fwd), 100)]:
messages_fwd.extend(tryAgainIfFailed(
vk.messages.getById,
message_ids=','.join(i))['items']
)
c = 0
for i in range(len(fetch_attachments)):
if compareAttachments(messages_attachments[i - c]['attachments'], fetch_attachments[i - c][1]):
del fetch_attachments[i - c]
del messages_attachments[i - c]
c += 1
for i in range(len(fetch_attachments)):
activityReport(fetch_attachments[i][0])
if messages_attachments[i]['attachments'] == []:
cursor.execute(
"""UPDATE messages SET attachments = ? WHERE message_id = ?""",
(None, fetch_attachments[i][0],)
)
else:
cursor.execute(
"""UPDATE messages SET attachments = ? WHERE message_id = ?""",
(
json.dumps(messages_attachments[i]['attachments']),
fetch_attachments[i][0],
)
)
c = 0
for i in range(len(fetch_fwd)):
if compareFwd(
messages_fwd[i - c],
{
'fwd_messages': fetch_fwd[i - c][1]
}
):
del fetch_fwd[i - c]
del messages_fwd[i - c]
c += 1
for i in range(len(fetch_fwd)):
activityReport(fetch_fwd[i][0])
if messages_fwd[i]['fwd_messages'] == []:
cursor.execute(
"""UPDATE messages SET fwd_messages = ? WHERE message_id = ?""",
(None, fetch_fwd[i][0],)
)
else:
cursor.execute(
"""UPDATE messages SET fwd_messages = ? WHERE message_id = ?""",
(
json.dumps(messages_fwd[i]['fwd_messages']),
fetch_fwd[i][0],
)
)
conn.commit()
def compareFwd(new, old):
if 'reply_message' in new:
new['fwd_messages'] = [new['reply_message']]
if 'reply_message' in old:
old['fwd_messages'] = [old['reply_message']]
for i in range(len(old['fwd_messages'])):
if 'fwd_messages' in old['fwd_messages'][i] and 'fwd_messages' in new['fwd_messages'][i]:
if not compareFwd(
new['fwd_messages'][i],
old['fwd_messages'][i]
):
return False
if not compareAttachments(
new['fwd_messages'][i]['attachments'],
old['fwd_messages'][i]['attachments']
):
return False
return True
def compareAttachments(new, old):
if len(new) < len(old):
return False
return True
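# attachmentsParse: turns the URL strings produced by parseUrls into HTML snippets — <img> for
# photos/stickers, <audio> for voice-message mp3s, links for everything else. The "title@url"
# convention from parseUrls is split back into link text and href here.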
def attachmentsParse(urls):
if urls is None:
return ""
html = """<div>
"""
for i in urls:
urlSplit = i.split(',')
if i.find('vk.com/sticker/') != -1:
html += """ <img src="{}" />
""".format(i)
elif i.find('.jpg') != -1 and i.find(',') == -1:
html += """ <img src="{}" />
""".format(i)
elif i.find('.mp3') != -1:
html += """ <audio src="{}" controls></audio>
""".format(i)
elif i.find('https://vk.com/audio') != -1:
html += """ <a href="{}" target="_blank">
{}
</a>
""".format(i, i[23:-11].replace('%20', ' '))
elif i.find('@') != -1:
i = i.rsplit('@', 1)
html += """ <a href="{}" target="_blank">
{}
</a>
""".format(i[1], i[0])
elif len(urlSplit) == 3:
html += """ <a href="{}" target="_blank">
Видео
<img src="{}"/>
</a>
""".format(f"./vkGetVideoLink.html?{urlSplit[1]}&{urlSplit[2]}", urlSplit[0])
else:
html += """ <a href="{0}" target="_blank">
{0}
</a>
""".format(i)
html += """</div>"""
return html
def getAttachments(event):
message_id = event.message_id
fullLoadUnNeeded = not (event.raw[0] == 5 or 'fwd' in event.attachments)
count = 0
if fullLoadUnNeeded:
for i in range(1,11):
if f'attach{i}_type' in event.attachments:
if event.attachments[f'attach{i}_type'] not in ('sticker', 'link'):
fullLoadUnNeeded = False
else:
count = i
break
if fullLoadUnNeeded:
attachments = []
for i in range(1,count):
if event.attachments[f'attach{i}_type'] == 'sticker':
attachments.append({'type':'sticker','sticker':{'images':[{'height':64,'url':f'https://vk.com/sticker/1-{event.attachments[f"attach{i}"]}-64'}]}})
else:
if f'attach{i}_title' in event.attachments:
title = event.attachments[f'attach{i}_title']
else:
title = event.attachments[f'attach{i}_url']
attachments.append({'type':'link','link':{'title':title,'url':event.attachments[f'attach{i}_url']}})
return False, json.dumps(attachments, ensure_ascii=False,), None
mes = tryAgainIfFailed(
vk.messages.getById,
message_ids=message_id
)['items']
if not len(mes):
logger.info("Не удалось запросить вложения для сообщения, message_id = %i.", event.message_id)
return False, "[]", "[]"
else:
mes = mes[0]
hasUpdateTime = 'update_time' in mes
fwd_messages = None
if 'reply_message' in mes:
fwd_messages = json.dumps([mes['reply_message']], ensure_ascii=False,)
elif mes['fwd_messages'] != []:
fwd_messages = json.dumps(mes['fwd_messages'], ensure_ascii=False,)
if mes['attachments'] == []:
attachments = None
else:
attachments = json.dumps(mes['attachments'], ensure_ascii=False,)
return hasUpdateTime, attachments, fwd_messages
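# parseUrls: flattens VK attachment objects into plain strings — the largest photo size, the direct
# mp3 of a voice message, "title@url" pairs for links/docs/posts, and a "preview,player,owner_id_id_key"
# triple for videos. Returns None when nothing is displayable.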
def parseUrls(attachments):
urls = []
for i in attachments:
if i['type'] == 'photo':
maxHeight = 0
maxUrl = ""
for j in i['photo']['sizes']:
if j['height'] > maxHeight:
maxHeight = j['height']
maxUrl = j['url']
urls.append(maxUrl)
elif i['type'] == 'audio_message':
urls.append(i['audio_message']['link_mp3'])
elif i['type'] == 'sticker':
urls.append(i['sticker']['images'][0]['url'])
elif i['type'] == 'gift':
urls.append(i['gift']['thumb_48'])
elif i['type'] == 'link':
urls.append(f"Ссылка: {i['link']['title']}@{i['link']['url']}")
elif i['type'] == 'video':
urls.append(f"{i['video']['image'][0]['url']},{i['video']['player']},{i['video']['owner_id']}_{i['video']['id']}_{i['video']['access_key']}")
elif i['type'] == 'wall':
urls.append(f"Пост: {i['wall']['text'][:25]}@https://vk.com/wall{i['wall']['from_id']}_{i['wall']['id']}")
elif i['type'] == 'wall_reply':
urls.append(f"Комментарий: {i['wall_reply']['text'][:25]}@https://vk.com/wall{i['wall_reply']['owner_id']}_{i['wall_reply']['post_id']}?reply={i['wall_reply']['id']}")
elif i['type'] == 'audio':
urls.append(f"https://vk.com/audio?q={i['audio']['artist'].replace(' ', '%20')}%20-%20{i['audio']['title'].replace(' ', '%20')}&tab=global")
elif i['type'] == 'audio_playlist':
urls.append(f"Плейлист: {i['audio_playlist']['title']}@https://vk.com/music?z=audio_playlist{i['audio_playlist']['owner_id']}_{i['audio_playlist']['id']}/{i['audio_playlist']['access_key']}")
elif i['type'] == 'market':
urls.append(f"https://vk.com/market?w=product{i['market']['owner_id']}_{i['market']['id']}")
elif i['type'] == 'poll':
urls.append(f"Голосование: {i['poll']['question'][:25]}@https://vk.com/poll{i['poll']['owner_id']}_{i['poll']['id']}")
elif i['type'] == 'doc':
urls.append(f"Документ: {i['doc']['title']}@{i['doc']['url']}")
else:
if 'url' in i[i['type']]:
urls.append(i[i['type']]['url'])
if urls == []:
return None
return urls
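# getPeerName: resolves a display name for a peer and caches it in SQLite. Ids above 2000000000
# are chat conversations, negative ids are communities, anything else is a user
# (e.g. getPeerName(2000000001) returns the title of chat 1).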
def getPeerName(id):
if id > 2000000000:
cursor.execute("""SELECT chat_name FROM chats_cache WHERE chat_id = ?""", (id,))
fetch = cursor.fetchone()
if fetch is None:
try:
name = tryAgainIfFailed(
vk.messages.getChat,
chat_id=id-2000000000
)['title']
cursor.execute("""INSERT INTO chats_cache (chat_id,chat_name) VALUES (?,?)""", (id, name,))
conn.commit()
except Warning:
name = "Секретный чат, используйте токен другого приложения"
else:
name = fetch[0]
elif id < 0:
cursor.execute("""SELECT user_name FROM users_cache WHERE user_id = ?""", (id,))
fetch = cursor.fetchone()
if fetch is None:
name = tryAgainIfFailed(
vk.groups.getById,
group_id=-id
)[0]['name']
cursor.execute("""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (id, name,))
conn.commit()
else:
name = fetch[0]
else:
cursor.execute("""SELECT user_name FROM users_cache WHERE user_id = ?""", (id,))
fetch = cursor.fetchone()
if fetch is None:
name = tryAgainIfFailed(
vk.users.get,
user_id=id
)[0]
name = f"{name['first_name']} {name['last_name']}"
cursor.execute("""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (id, name,))
conn.commit()
else:
name = fetch[0]
return name
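# fwdParse: recursively renders forwarded/replied messages as nested Bootstrap tables with the
# sender link, escaped text, parsed attachments and timestamp.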
def fwdParse(fwd):
html = """<table class="table table-sm table-bordered">
"""
for i in fwd:
user_name = getPeerName(i['from_id'])
if i['from_id'] < 0:
html += """ <tr>
<td>
<a href='https://vk.com/public{}' target="_blank">
{}
</a>
</td>
</tr>
""".format(-i['from_id'], user_name)
else:
html += """ <tr>
<td>
<a href='https://vk.com/id{}' target="_blank">
{}
</a>
</td>
</tr>
""".format(i['from_id'], user_name)
if i['text'] != "":
html += """ <tr>
<td>
<div class='mes'>
{}
</div>
""".format(xssFilter(i['text']))
else:
html += """ <tr>
<td>
"""
if i['attachments'] != []:
html += attachmentsParse(parseUrls(i['attachments']))
if 'fwd_messages' in i:
html += fwdParse(i['fwd_messages'])
elif 'reply_message' in i:
html += fwdParse([i['reply_message']])
html += """ </td>
</tr>
<tr>
<td>
{}
</td>
</tr>
""".format(time.strftime('%H:%M:%S %d.%m.%y', time.localtime(i['date'])))
html += "</table>"
return html
def xssFilter(s):
return s\
        .replace('<', '&lt;')\
        .replace('>', '&gt;')\
.replace('\n', '<br />')
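# compareStrings: wraps the differing spans of the old/new message text in <ins> tags using
# difflib.SequenceMatcher; the running +11 offsets account for the length of "<ins></ins>".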
def compareStrings(a, b):
aCounter = 0
bCounter = 0
for i in difflib.SequenceMatcher(None, a, b).get_opcodes():
if i[0] == 'insert':
b = f"{b[: i[3]+bCounter]}<ins>{b[i[3]+bCounter : i[4]+bCounter]}</ins>{b[i[4]+bCounter:]}"
bCounter += 11
elif i[0] == 'delete':
a = f"{a[: i[1]+aCounter]}<ins>{a[i[1]+aCounter : i[2]+aCounter]}</ins>{a[i[2]+aCounter:]}"
aCounter += 11
elif i[0] == 'replace':
a = f"{a[: i[1]+aCounter]}<ins>{a[i[1]+aCounter : i[2]+aCounter]}</ins>{a[i[2]+aCounter:]}"
b = f"{b[: i[3]+bCounter]}<ins>{b[i[3]+bCounter : i[4]+bCounter]}</ins>{b[i[4]+bCounter:]}"
aCounter += 11
bCounter += 11
return a, b
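# activityReport: builds one HTML table row describing an edited or deleted message (old vs. new
# text, attachments and forwards) and splices it into today's mesAct/messages_DDMMYY.html dump,
# creating the file from `template` when it does not exist yet.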
def activityReport(message_id, peer_id=None, user_id=None, timestamp=None, isEdited=False, attachments=None, fwd=None, message=None):
try:
peer_name = user_name = oldMessage = oldAttachments = date = oldFwd = None
cursor.execute("""SELECT * FROM messages WHERE message_id = ?""", (message_id,))
fetch = cursor.fetchone()
if attachments is not None:
attachments = parseUrls(json.loads(attachments))
if fwd is not None:
fwd = json.loads(fwd)
if fetch is None:
if isEdited:
logger.info("Изменение сообщения, отсутствующего в БД, message_id = %i.", message_id)
fetch = [0]*7
peer_name = getPeerName(peer_id)
user_name = getPeerName(user_id)
oldMessage = f"⚠️ {message}"
oldAttachments = attachments
oldFwd = fwd
date = f"<b>Доб:</b> {time.strftime('%H:%M:%S %d.%m', time.localtime(timestamp))}<br /><b>Изм:</b> {time.strftime('%H:%M:%S %d.%m', time.localtime())}"
else:
raise TypeError
else:
if fetch[3] is not None:
oldMessage = str(fetch[3])
if fetch[4] is not None:
oldAttachments = parseUrls(json.loads(fetch[4]))
if fetch[6] is not None:
oldFwd = json.loads(fetch[6])
peer_name = getPeerName(fetch[0])
user_name = getPeerName(fetch[1])
date = f"<b>Доб:</b> {time.strftime('%H:%M:%S %d.%m', time.localtime(fetch[5]))}<br /><b>Изм:</b> {time.strftime('%H:%M:%S %d.%m', time.localtime())}"
peer_id = fetch[0]
user_id = fetch[1]
del fetch
row = """ <tr><!-- {} -->
<td>{}
</td>
<td>{}
</td>
{}
<td>
{}
</td>
</tr>
"""
messageBlock = """
<div class='mes'>
{}
</div>"""
attachmentsBlock = """
<div>
<b>Вложения</b><br />
{}
</div>"""
fwdBlock = """
<div>
<b>Пересланное</b><br />
{}
</div>"""
if peer_id > 2000000000:
peer_id = """
<a href='https://vk.com/im?sel=c{}' target='_blank'>
{}
</a>""".format(str(peer_id-2000000000), peer_name)
elif peer_id < 0:
peer_id = """
<a href='https://vk.com/public{}' target='_blank'>
{}
</a>""".format(str(-peer_id), peer_name)
else:
peer_id = """
<a href='https://vk.com/id{}' target='_blank'>
{}
</a>""".format(str(peer_id), peer_name)
if user_id < 0:
user_id = """
<a href='https://vk.com/public{}' target='_blank'>
{}
</a>""".format(str(-user_id), user_name)
else:
user_id = """
<a href='https://vk.com/id{}' target='_blank'>
{}
</a>""".format(str(user_id), user_name)
if isEdited:
if not (oldMessage is None or message is None):
message = xssFilter(message)
oldMessage = xssFilter(oldMessage)
message, oldMessage = compareStrings(message, oldMessage)
oldMessage = messageBlock.format(oldMessage)
message = messageBlock.format(message)
elif oldMessage is None:
oldMessage = ""
message = messageBlock.format(xssFilter(message))
else:
oldMessage = messageBlock.format(xssFilter(oldMessage))
message = ""
if oldAttachments is not None:
oldAttachments = attachmentsBlock.format(attachmentsParse(oldAttachments))
else:
oldAttachments = ""
if oldFwd is not None:
oldFwd = fwdBlock.format(fwdParse(oldFwd))
else:
oldFwd = ""
if attachments is not None:
attachments = attachmentsBlock.format(attachmentsParse(attachments))
else:
attachments = ""
if fwd is not None:
fwd = fwdBlock.format(fwdParse(fwd))
else:
fwd = ""
messageBlock = """<td width='50%'>
<b>Старое</b><br />{}
</td>
<td width='50%'>
<b>Новое</b><br />{}
</td>""".format(oldMessage+oldAttachments+oldFwd, message+attachments+fwd)
else:
if oldMessage is not None:
oldMessage = messageBlock.format(xssFilter(oldMessage))
else:
oldMessage = ""
if oldAttachments is not None:
oldAttachments = attachmentsBlock.format(attachmentsParse(oldAttachments))
else:
oldAttachments = ""
if oldFwd is not None:
oldFwd = fwdBlock.format(fwdParse(oldFwd))
else:
oldFwd = ""
messageBlock = """<td width='100%' colspan='2'>
<b>Удалено</b><br />{}
</td>""".format(oldMessage+oldAttachments+oldFwd)
row = row.format(message_id, peer_id, user_id, messageBlock, date)
if os.path.exists(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime('%d%m%y', time.localtime())}.html"
)
):
messagesActivities = open(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime('%d%m%y',time.localtime())}.html"
),
'r',
encoding='utf-8'
)
messagesDump = messagesActivities.read()
messagesActivities.close()
messagesActivities = open(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime('%d%m%y',time.localtime())}.html"
),
'w',
encoding='utf-8'
)
else:
messagesDump = template
messagesActivities = open(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime('%d%m%y',time.localtime())}.html"
),
'w',
encoding='utf-8'
)
messagesDump = messagesDump[:offset]+row+messagesDump[offset:]
messagesActivities.write(messagesDump)
messagesActivities.close()
except TypeError:
raise TypeError
except BaseException:
logger.exception("Ошибка при логгировании изменений.")
if not config['disableMessagesLogging']:
tableWatcher = threading.Thread(target=bgWatcher)
tableWatcher.start()
template = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link rel="stylesheet" href="./bootstrap.css">
</head>
<body>
<table class="table table-sm">
</table>
</body>
</html>"""
offset = template.index(""" </table>""")
events = []
flag = threading.Event()
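# preloadMessages: optional warm-up that walks recent conversations and caches every message newer
# than maxCacheAge into the messages table, skipping ids that are already stored.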
def preloadMessages():
logger.info("Предзагрузка сообщений...")
offset = 0
peer_ids = []
messages = []
shouldContinue = True
try:
while shouldContinue:
shouldContinue = False
dialogs = tryAgainIfFailed(vk.messages.getConversations, offset=offset, count=20)
for i in range(0,len(dialogs['items'])):
if dialogs['items'][i]['last_message']['date'] >= time.time() - config['maxCacheAge']:
peer_ids.append(dialogs['items'][i]['conversation']['peer']['id'])
if i == len(dialogs['items']) - 1:
shouldContinue = True
offset+=20
for i in peer_ids:
offset = 0
if i > 2000000000:
count = 200
else:
count = 50
shouldContinue = True
while shouldContinue:
shouldContinue = False
mes = vk.messages.getHistory(offset=offset, count=count, peer_id=i)['items']
if mes[-1]['date']>= time.time() - config['maxCacheAge']:
shouldContinue = True
offset+=count
for j in mes:
if j['date'] >= time.time() - config['maxCacheAge']:
messages.append(j)
for i in messages:
message_id = i['id']
with stop_mutex:
cursor.execute("""SELECT message_id FROM messages WHERE message_id = ?""", (message_id,))
if cursor.fetchone() is not None:
continue
peer_id = i['peer_id']
user_id = i['from_id']
message = i['text']
timestamp = i['date']
fwd_messages = None
if 'reply_message' in i:
fwd_messages = json.dumps([i['reply_message']], ensure_ascii=False,)
elif i['fwd_messages'] != []:
fwd_messages = json.dumps(i['fwd_messages'], ensure_ascii=False,)
if i['attachments'] == []:
attachments = None
else:
attachments = json.dumps(i['attachments'], ensure_ascii=False,)
with stop_mutex:
cursor.execute(
"""INSERT INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""",
(peer_id, user_id, message_id, message, attachments, timestamp, fwd_messages,)
)
conn.commit()
except BaseException:
logger.exception("Ошибка во время предзагрузки сообщений")
logger.info("Предзагрузка сообщений завершена.")
if config['customActions'] and config['disableMessagesLogging']:
threading.Thread(target=eventWorker_predefinedDisabled).start()
elif not config['disableMessagesLogging'] and not config['customActions']:
threading.Thread(target=eventWorker_customDisabled).start()
else:
threading.Thread(target=eventWorker).start()
if config['preloadMessages']:
threading.Thread(target=preloadMessages).start()
try:
tryAgainIfFailed(
main,
maxRetries=-1
)
except Warning:
pass
|
import numpy as np
import pyqtgraph as pg
from datetime import datetime, timedelta
from vnpy.trader.constant import Interval, Direction, Offset
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtCore, QtWidgets, QtGui
from vnpy.trader.ui.widget import BaseMonitor, BaseCell, DirectionCell, EnumCell
from vnpy.trader.ui.editor import CodeEditor
from vnpy.event import Event, EventEngine
from vnpy.chart import ChartWidget, CandleItem, VolumeItem
from vnpy.trader.utility import load_json, save_json
from ..engine import (
APP_NAME,
EVENT_BACKTESTER_LOG,
EVENT_BACKTESTER_BACKTESTING_FINISHED,
EVENT_BACKTESTER_OPTIMIZATION_FINISHED,
OptimizationSetting
)
class BacktesterManager(QtWidgets.QWidget):
""""""
setting_filename = "cta_backtester_setting.json"
signal_log = QtCore.pyqtSignal(Event)
signal_backtesting_finished = QtCore.pyqtSignal(Event)
signal_optimization_finished = QtCore.pyqtSignal(Event)
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__()
self.main_engine = main_engine
self.event_engine = event_engine
self.backtester_engine = main_engine.get_engine(APP_NAME)
self.class_names = []
self.settings = {}
self.target_display = ""
self.init_ui()
self.register_event()
self.backtester_engine.init_engine()
self.init_strategy_settings()
def init_strategy_settings(self):
""""""
self.class_names = self.backtester_engine.get_strategy_class_names()
for class_name in self.class_names:
setting = self.backtester_engine.get_default_setting(class_name)
self.settings[class_name] = setting
self.class_combo.addItems(self.class_names)
def init_ui(self):
""""""
self.setWindowTitle("CTA回测")
# Setting Part
self.class_combo = QtWidgets.QComboBox()
self.symbol_line = QtWidgets.QLineEdit("IF88.CFFEX")
self.interval_combo = QtWidgets.QComboBox()
        for interval in Interval:
            self.interval_combo.addItem(interval.value)
end_dt = datetime.now()
start_dt = end_dt - timedelta(days=3 * 365)
self.start_date_edit = QtWidgets.QDateEdit(
QtCore.QDate(
start_dt.year,
start_dt.month,
start_dt.day
)
)
self.end_date_edit = QtWidgets.QDateEdit(
QtCore.QDate.currentDate()
)
self.rate_line = QtWidgets.QLineEdit("0.000025")
self.slippage_line = QtWidgets.QLineEdit("0.2")
self.size_line = QtWidgets.QLineEdit("300")
self.pricetick_line = QtWidgets.QLineEdit("0.2")
self.capital_line = QtWidgets.QLineEdit("1000000")
self.inverse_combo = QtWidgets.QComboBox()
self.inverse_combo.addItems(["正向", "反向"])
backtesting_button = QtWidgets.QPushButton("开始回测")
backtesting_button.clicked.connect(self.start_backtesting)
optimization_button = QtWidgets.QPushButton("参数优化")
optimization_button.clicked.connect(self.start_optimization)
self.result_button = QtWidgets.QPushButton("优化结果")
self.result_button.clicked.connect(self.show_optimization_result)
self.result_button.setEnabled(False)
downloading_button = QtWidgets.QPushButton("下载数据")
downloading_button.clicked.connect(self.start_downloading)
self.order_button = QtWidgets.QPushButton("委托记录")
self.order_button.clicked.connect(self.show_backtesting_orders)
self.order_button.setEnabled(False)
self.trade_button = QtWidgets.QPushButton("成交记录")
self.trade_button.clicked.connect(self.show_backtesting_trades)
self.trade_button.setEnabled(False)
self.daily_button = QtWidgets.QPushButton("每日盈亏")
self.daily_button.clicked.connect(self.show_daily_results)
self.daily_button.setEnabled(False)
self.candle_button = QtWidgets.QPushButton("K线图表")
self.candle_button.clicked.connect(self.show_candle_chart)
self.candle_button.setEnabled(False)
edit_button = QtWidgets.QPushButton("代码编辑")
edit_button.clicked.connect(self.edit_strategy_code)
reload_button = QtWidgets.QPushButton("策略重载")
reload_button.clicked.connect(self.reload_strategy_class)
for button in [
backtesting_button,
optimization_button,
downloading_button,
self.result_button,
self.order_button,
self.trade_button,
self.daily_button,
self.candle_button,
edit_button,
reload_button
]:
button.setFixedHeight(button.sizeHint().height() * 2)
form = QtWidgets.QFormLayout()
form.addRow("交易策略", self.class_combo)
form.addRow("本地代码", self.symbol_line)
form.addRow("K线周期", self.interval_combo)
form.addRow("开始日期", self.start_date_edit)
form.addRow("结束日期", self.end_date_edit)
form.addRow("手续费率", self.rate_line)
form.addRow("交易滑点", self.slippage_line)
form.addRow("合约乘数", self.size_line)
form.addRow("价格跳动", self.pricetick_line)
form.addRow("回测资金", self.capital_line)
form.addRow("合约模式", self.inverse_combo)
result_grid = QtWidgets.QGridLayout()
result_grid.addWidget(self.trade_button, 0, 0)
result_grid.addWidget(self.order_button, 0, 1)
result_grid.addWidget(self.daily_button, 1, 0)
result_grid.addWidget(self.candle_button, 1, 1)
left_vbox = QtWidgets.QVBoxLayout()
left_vbox.addLayout(form)
left_vbox.addWidget(backtesting_button)
left_vbox.addWidget(downloading_button)
left_vbox.addStretch()
left_vbox.addLayout(result_grid)
left_vbox.addStretch()
left_vbox.addWidget(optimization_button)
left_vbox.addWidget(self.result_button)
left_vbox.addStretch()
left_vbox.addWidget(edit_button)
left_vbox.addWidget(reload_button)
# Result part
self.statistics_monitor = StatisticsMonitor()
self.log_monitor = QtWidgets.QTextEdit()
self.log_monitor.setMaximumHeight(400)
self.chart = BacktesterChart()
self.chart.setMinimumWidth(1000)
self.trade_dialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测成交记录",
BacktestingTradeMonitor
)
self.order_dialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测委托记录",
BacktestingOrderMonitor
)
self.daily_dialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测每日盈亏",
DailyResultMonitor
)
# Candle Chart
self.candle_dialog = CandleChartDialog()
# Layout
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.statistics_monitor)
vbox.addWidget(self.log_monitor)
hbox = QtWidgets.QHBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(vbox)
hbox.addWidget(self.chart)
self.setLayout(hbox)
# Code Editor
self.editor = CodeEditor(self.main_engine, self.event_engine)
# Load setting
setting = load_json(self.setting_filename)
if not setting:
return
self.class_combo.setCurrentIndex(
self.class_combo.findText(setting["class_name"])
)
self.symbol_line.setText(setting["vt_symbol"])
self.interval_combo.setCurrentIndex(
self.interval_combo.findText(setting["interval"])
)
self.rate_line.setText(str(setting["rate"]))
self.slippage_line.setText(str(setting["slippage"]))
self.size_line.setText(str(setting["size"]))
self.pricetick_line.setText(str(setting["pricetick"]))
self.capital_line.setText(str(setting["capital"]))
if not setting["inverse"]:
self.inverse_combo.setCurrentIndex(0)
else:
self.inverse_combo.setCurrentIndex(1)
def register_event(self):
""""""
self.signal_log.connect(self.process_log_event)
self.signal_backtesting_finished.connect(
self.process_backtesting_finished_event)
self.signal_optimization_finished.connect(
self.process_optimization_finished_event)
self.event_engine.register(EVENT_BACKTESTER_LOG, self.signal_log.emit)
self.event_engine.register(
EVENT_BACKTESTER_BACKTESTING_FINISHED, self.signal_backtesting_finished.emit)
self.event_engine.register(
EVENT_BACKTESTER_OPTIMIZATION_FINISHED, self.signal_optimization_finished.emit)
def process_log_event(self, event: Event):
""""""
msg = event.data
self.write_log(msg)
def write_log(self, msg):
""""""
timestamp = datetime.now().strftime("%H:%M:%S")
msg = f"{timestamp}\t{msg}"
self.log_monitor.append(msg)
def process_backtesting_finished_event(self, event: Event):
""""""
statistics = self.backtester_engine.get_result_statistics()
self.statistics_monitor.set_data(statistics)
df = self.backtester_engine.get_result_df()
self.chart.set_data(df)
self.trade_button.setEnabled(True)
self.order_button.setEnabled(True)
self.daily_button.setEnabled(True)
self.candle_button.setEnabled(True)
def process_optimization_finished_event(self, event: Event):
""""""
self.write_log("请点击[优化结果]按钮查看")
self.result_button.setEnabled(True)
def start_backtesting(self):
""""""
class_name = self.class_combo.currentText()
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
if self.inverse_combo.currentText() == "正向":
inverse = False
else:
inverse = True
# Save backtesting parameters
backtesting_setting = {
"class_name": class_name,
"vt_symbol": vt_symbol,
"interval": interval,
"rate": rate,
"slippage": slippage,
"size": size,
"pricetick": pricetick,
"capital": capital,
"inverse": inverse,
}
save_json(self.setting_filename, backtesting_setting)
# Get strategy setting
old_setting = self.settings[class_name]
dialog = BacktestingSettingEditor(class_name, old_setting)
i = dialog.exec()
if i != dialog.Accepted:
return
new_setting = dialog.get_setting()
self.settings[class_name] = new_setting
result = self.backtester_engine.start_backtesting(
class_name,
vt_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
inverse,
new_setting
)
if result:
self.statistics_monitor.clear_data()
self.chart.clear_data()
self.trade_button.setEnabled(False)
self.order_button.setEnabled(False)
self.daily_button.setEnabled(False)
self.candle_button.setEnabled(False)
self.trade_dialog.clear_data()
self.order_dialog.clear_data()
self.daily_dialog.clear_data()
self.candle_dialog.clear_data()
def start_optimization(self):
""""""
class_name = self.class_combo.currentText()
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
if self.inverse_combo.currentText() == "正向":
inverse = False
else:
inverse = True
parameters = self.settings[class_name]
dialog = OptimizationSettingEditor(class_name, parameters)
i = dialog.exec()
if i != dialog.Accepted:
return
optimization_setting, use_ga = dialog.get_setting()
self.target_display = dialog.target_display
self.backtester_engine.start_optimization(
class_name,
vt_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
inverse,
optimization_setting,
use_ga
)
self.result_button.setEnabled(False)
def start_downloading(self):
""""""
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start_date = self.start_date_edit.date()
end_date = self.end_date_edit.date()
start = datetime(start_date.year(), start_date.month(), start_date.day())
end = datetime(end_date.year(), end_date.month(), end_date.day(), 23, 59, 59)
self.backtester_engine.start_downloading(
vt_symbol,
interval,
start,
end
)
def show_optimization_result(self):
""""""
result_values = self.backtester_engine.get_result_values()
dialog = OptimizationResultMonitor(
result_values,
self.target_display
)
dialog.exec_()
def show_backtesting_trades(self):
""""""
if not self.trade_dialog.is_updated():
trades = self.backtester_engine.get_all_trades()
self.trade_dialog.update_data(trades)
self.trade_dialog.exec_()
def show_backtesting_orders(self):
""""""
if not self.order_dialog.is_updated():
orders = self.backtester_engine.get_all_orders()
self.order_dialog.update_data(orders)
self.order_dialog.exec_()
def show_daily_results(self):
""""""
if not self.daily_dialog.is_updated():
results = self.backtester_engine.get_all_daily_results()
self.daily_dialog.update_data(results)
self.daily_dialog.exec_()
def show_candle_chart(self):
""""""
if not self.candle_dialog.is_updated():
history = self.backtester_engine.get_history_data()
self.candle_dialog.update_history(history)
trades = self.backtester_engine.get_all_trades()
self.candle_dialog.update_trades(trades)
self.candle_dialog.exec_()
def edit_strategy_code(self):
""""""
class_name = self.class_combo.currentText()
file_path = self.backtester_engine.get_strategy_class_file(class_name)
self.editor.open_editor(file_path)
self.editor.show()
def reload_strategy_class(self):
""""""
self.backtester_engine.reload_strategy_class()
self.class_combo.clear()
self.init_strategy_settings()
def show(self):
""""""
self.showMaximized()
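# StatisticsMonitor: one-column table that maps backtest statistic keys to their Chinese labels;
# set_data() formats the numbers (thousand separators, percentages) before display.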
class StatisticsMonitor(QtWidgets.QTableWidget):
""""""
KEY_NAME_MAP = {
"start_date": "首个交易日",
"end_date": "最后交易日",
"total_days": "总交易日",
"profit_days": "盈利交易日",
"loss_days": "亏损交易日",
"capital": "起始资金",
"end_balance": "结束资金",
"total_return": "总收益率",
"annual_return": "年化收益",
"max_drawdown": "最大回撤",
"max_ddpercent": "百分比最大回撤",
"total_net_pnl": "总盈亏",
"total_commission": "总手续费",
"total_slippage": "总滑点",
"total_turnover": "总成交额",
"total_trade_count": "总成交笔数",
"daily_net_pnl": "日均盈亏",
"daily_commission": "日均手续费",
"daily_slippage": "日均滑点",
"daily_turnover": "日均成交额",
"daily_trade_count": "日均成交笔数",
"daily_return": "日均收益率",
"return_std": "收益标准差",
"sharpe_ratio": "夏普比率",
"return_drawdown_ratio": "收益回撤比"
}
def __init__(self):
""""""
super().__init__()
self.cells = {}
self.init_ui()
def init_ui(self):
""""""
self.setRowCount(len(self.KEY_NAME_MAP))
self.setVerticalHeaderLabels(list(self.KEY_NAME_MAP.values()))
self.setColumnCount(1)
self.horizontalHeader().setVisible(False)
self.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.Stretch
)
self.setEditTriggers(self.NoEditTriggers)
for row, key in enumerate(self.KEY_NAME_MAP.keys()):
cell = QtWidgets.QTableWidgetItem()
self.setItem(row, 0, cell)
self.cells[key] = cell
def clear_data(self):
""""""
for cell in self.cells.values():
cell.setText("")
def set_data(self, data: dict):
""""""
data["capital"] = f"{data["capital"]:,.2f}"
data["end_balance"] = f"{data["end_balance"]:,.2f}"
data["total_return"] = f"{data["total_return"]:,.2f}%"
data["annual_return"] = f"{data["annual_return"]:,.2f}%"
data["max_drawdown"] = f"{data["max_drawdown"]:,.2f}"
data["max_ddpercent"] = f"{data["max_ddpercent"]:,.2f}%"
data["total_net_pnl"] = f"{data["total_net_pnl"]:,.2f}"
data["total_commission"] = f"{data["total_commission"]:,.2f}"
data["total_slippage"] = f"{data["total_slippage"]:,.2f}"
data["total_turnover"] = f"{data["total_turnover"]:,.2f}"
data["daily_net_pnl"] = f"{data["daily_net_pnl"]:,.2f}"
data["daily_commission"] = f"{data["daily_commission"]:,.2f}"
data["daily_slippage"] = f"{data["daily_slippage"]:,.2f}"
data["daily_turnover"] = f"{data["daily_turnover"]:,.2f}"
data["daily_return"] = f"{data["daily_return"]:,.2f}%"
data["return_std"] = f"{data["return_std"]:,.2f}%"
data["sharpe_ratio"] = f"{data["sharpe_ratio"]:,.2f}"
data["return_drawdown_ratio"] = f"{data["return_drawdown_ratio"]:,.2f}"
for key, cell in self.cells.items():
value = data.get(key, "")
cell.setText(str(value))
class BacktestingSettingEditor(QtWidgets.QDialog):
"""
For creating new strategy and editing strategy parameters.
"""
def __init__(
self, class_name: str, parameters: dict
):
""""""
super(BacktestingSettingEditor, self).__init__()
self.class_name = class_name
self.parameters = parameters
self.edits = {}
self.init_ui()
def init_ui(self):
""""""
form = QtWidgets.QFormLayout()
# Add vt_symbol and name edit if add new strategy
self.setWindowTitle(f"策略参数配置:{self.class_name}")
button_text = "确定"
parameters = self.parameters
for name, value in parameters.items():
type_ = type(value)
edit = QtWidgets.QLineEdit(str(value))
if type_ is int:
validator = QtGui.QIntValidator()
edit.setValidator(validator)
elif type_ is float:
validator = QtGui.QDoubleValidator()
edit.setValidator(validator)
form.addRow(f"{name} {type_}", edit)
self.edits[name] = (edit, type_)
button = QtWidgets.QPushButton(button_text)
button.clicked.connect(self.accept)
form.addRow(button)
self.setLayout(form)
def get_setting(self):
""""""
setting = {}
for name, tp in self.edits.items():
edit, type_ = tp
value_text = edit.text()
if type_ == bool:
if value_text == "True":
value = True
else:
value = False
else:
value = type_(value_text)
setting[name] = value
return setting
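# BacktesterChart: pyqtgraph window with four plots — balance, drawdown, daily pnl bars and a pnl
# histogram. Note that pg.GraphicsWindow is deprecated in recent pyqtgraph releases (GraphicsLayoutWidget
# is the usual replacement), so this widget assumes an older pyqtgraph version.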
class BacktesterChart(pg.GraphicsWindow):
""""""
def __init__(self):
""""""
super().__init__(title="Backtester Chart")
self.dates = {}
self.init_ui()
def init_ui(self):
""""""
pg.setConfigOptions(antialias=True)
# Create plot widgets
self.balance_plot = self.addPlot(
title="账户净值",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.drawdown_plot = self.addPlot(
title="净值回撤",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.pnl_plot = self.addPlot(
title="每日盈亏",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.distribution_plot = self.addPlot(title="盈亏分布")
# Add curves and bars on plot widgets
self.balance_curve = self.balance_plot.plot(
pen=pg.mkPen("#ffc107", width=3)
)
dd_color = "#303f9f"
self.drawdown_curve = self.drawdown_plot.plot(
fillLevel=-0.3, brush=dd_color, pen=dd_color
)
profit_color = 'r'
loss_color = 'g'
self.profit_pnl_bar = pg.BarGraphItem(
x=[], height=[], width=0.3, brush=profit_color, pen=profit_color
)
self.loss_pnl_bar = pg.BarGraphItem(
x=[], height=[], width=0.3, brush=loss_color, pen=loss_color
)
self.pnl_plot.addItem(self.profit_pnl_bar)
self.pnl_plot.addItem(self.loss_pnl_bar)
distribution_color = "#6d4c41"
self.distribution_curve = self.distribution_plot.plot(
fillLevel=-0.3, brush=distribution_color, pen=distribution_color
)
def clear_data(self):
""""""
self.balance_curve.setData([], [])
self.drawdown_curve.setData([], [])
self.profit_pnl_bar.setOpts(x=[], height=[])
self.loss_pnl_bar.setOpts(x=[], height=[])
self.distribution_curve.setData([], [])
def set_data(self, df):
""""""
if df is None:
return
count = len(df)
self.dates.clear()
for n, date in enumerate(df.index):
self.dates[n] = date
# Set data for curve of balance and drawdown
self.balance_curve.setData(df["balance"])
self.drawdown_curve.setData(df["drawdown"])
# Set data for daily pnl bar
profit_pnl_x = []
profit_pnl_height = []
loss_pnl_x = []
loss_pnl_height = []
for count, pnl in enumerate(df["net_pnl"]):
if pnl >= 0:
profit_pnl_height.append(pnl)
profit_pnl_x.append(count)
else:
loss_pnl_height.append(pnl)
loss_pnl_x.append(count)
self.profit_pnl_bar.setOpts(x=profit_pnl_x, height=profit_pnl_height)
self.loss_pnl_bar.setOpts(x=loss_pnl_x, height=loss_pnl_height)
# Set data for pnl distribution
hist, x = np.histogram(df["net_pnl"], bins="auto")
x = x[:-1]
self.distribution_curve.setData(x, hist)
class DateAxis(pg.AxisItem):
"""Axis for showing date data"""
def __init__(self, dates: dict, *args, **kwargs):
""""""
super().__init__(*args, **kwargs)
self.dates = dates
def tickStrings(self, values, scale, spacing):
""""""
strings = []
for v in values:
dt = self.dates.get(v, "")
strings.append(str(dt))
return strings
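# OptimizationSettingEditor: dialog that collects a start/step/end range for each numeric strategy
# parameter and builds an OptimizationSetting (a fixed value when start == end), choosing between
# multiprocess brute force and the genetic algorithm via the two buttons.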
class OptimizationSettingEditor(QtWidgets.QDialog):
"""
For setting up parameters for optimization.
"""
DISPLAY_NAME_MAP = {
"总收益率": "total_return",
"夏普比率": "sharpe_ratio",
"收益回撤比": "return_drawdown_ratio",
"日均盈亏": "daily_net_pnl"
}
def __init__(
self, class_name: str, parameters: dict
):
""""""
super().__init__()
self.class_name = class_name
self.parameters = parameters
self.edits = {}
self.optimization_setting = None
self.use_ga = False
self.init_ui()
def init_ui(self):
""""""
QLabel = QtWidgets.QLabel
self.target_combo = QtWidgets.QComboBox()
self.target_combo.addItems(list(self.DISPLAY_NAME_MAP.keys()))
grid = QtWidgets.QGridLayout()
grid.addWidget(QLabel("目标"), 0, 0)
grid.addWidget(self.target_combo, 0, 1, 1, 3)
grid.addWidget(QLabel("参数"), 1, 0)
grid.addWidget(QLabel("开始"), 1, 1)
grid.addWidget(QLabel("步进"), 1, 2)
grid.addWidget(QLabel("结束"), 1, 3)
# Add vt_symbol and name edit if add new strategy
self.setWindowTitle(f"优化参数配置:{self.class_name}")
validator = QtGui.QDoubleValidator()
row = 2
for name, value in self.parameters.items():
type_ = type(value)
if type_ not in [int, float]:
continue
start_edit = QtWidgets.QLineEdit(str(value))
step_edit = QtWidgets.QLineEdit(str(1))
end_edit = QtWidgets.QLineEdit(str(value))
for edit in [start_edit, step_edit, end_edit]:
edit.setValidator(validator)
grid.addWidget(QLabel(name), row, 0)
grid.addWidget(start_edit, row, 1)
grid.addWidget(step_edit, row, 2)
grid.addWidget(end_edit, row, 3)
self.edits[name] = {
"type": type_,
"start": start_edit,
"step": step_edit,
"end": end_edit
}
row += 1
parallel_button = QtWidgets.QPushButton("多进程优化")
parallel_button.clicked.connect(self.generate_parallel_setting)
grid.addWidget(parallel_button, row, 0, 1, 4)
row += 1
ga_button = QtWidgets.QPushButton("遗传算法优化")
ga_button.clicked.connect(self.generate_ga_setting)
grid.addWidget(ga_button, row, 0, 1, 4)
self.setLayout(grid)
def generate_ga_setting(self):
""""""
self.use_ga = True
self.generate_setting()
def generate_parallel_setting(self):
""""""
self.use_ga = False
self.generate_setting()
def generate_setting(self):
""""""
self.optimization_setting = OptimizationSetting()
self.target_display = self.target_combo.currentText()
target_name = self.DISPLAY_NAME_MAP[self.target_display]
self.optimization_setting.set_target(target_name)
for name, d in self.edits.items():
type_ = d["type"]
start_value = type_(d["start"].text())
step_value = type_(d["step"].text())
end_value = type_(d["end"].text())
if start_value == end_value:
self.optimization_setting.add_parameter(name, start_value)
else:
self.optimization_setting.add_parameter(
name,
start_value,
end_value,
step_value
)
self.accept()
def get_setting(self):
""""""
return self.optimization_setting, self.use_ga
class OptimizationResultMonitor(QtWidgets.QDialog):
"""
For viewing optimization result.
"""
def __init__(
self, result_values: list, target_display: str
):
""""""
super().__init__()
self.result_values = result_values
self.target_display = target_display
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle("参数优化结果")
self.resize(1100, 500)
table = QtWidgets.QTableWidget()
table.setColumnCount(2)
table.setRowCount(len(self.result_values))
table.setHorizontalHeaderLabels(["参数", self.target_display])
table.setEditTriggers(table.NoEditTriggers)
table.verticalHeader().setVisible(False)
table.horizontalHeader().setSectionResizeMode(
0, QtWidgets.QHeaderView.ResizeToContents
)
table.horizontalHeader().setSectionResizeMode(
1, QtWidgets.QHeaderView.Stretch
)
for n, tp in enumerate(self.result_values):
setting, target_value, _ = tp
setting_cell = QtWidgets.QTableWidgetItem(str(setting))
target_cell = QtWidgets.QTableWidgetItem(str(target_value))
setting_cell.setTextAlignment(QtCore.Qt.AlignCenter)
target_cell.setTextAlignment(QtCore.Qt.AlignCenter)
table.setItem(n, 0, setting_cell)
table.setItem(n, 1, target_cell)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(table)
self.setLayout(vbox)
class BacktestingTradeMonitor(BaseMonitor):
"""
Monitor for backtesting trade data.
"""
headers = {
"tradeid": {"display": "成交号 ", "cell": BaseCell, "update": False},
"orderid": {"display": "委托号", "cell": BaseCell, "update": False},
"symbol": {"display": "代码", "cell": BaseCell, "update": False},
"exchange": {"display": "交易所", "cell": EnumCell, "update": False},
"direction": {"display": "方向", "cell": DirectionCell, "update": False},
"offset": {"display": "开平", "cell": EnumCell, "update": False},
"price": {"display": "价格", "cell": BaseCell, "update": False},
"volume": {"display": "数量", "cell": BaseCell, "update": False},
"datetime": {"display": "时间", "cell": BaseCell, "update": False},
"gateway_name": {"display": "接口", "cell": BaseCell, "update": False},
}
class BacktestingOrderMonitor(BaseMonitor):
"""
Monitor for backtesting order data.
"""
headers = {
"orderid": {"display": "委托号", "cell": BaseCell, "update": False},
"symbol": {"display": "代码", "cell": BaseCell, "update": False},
"exchange": {"display": "交易所", "cell": EnumCell, "update": False},
"type": {"display": "类型", "cell": EnumCell, "update": False},
"direction": {"display": "方向", "cell": DirectionCell, "update": False},
"offset": {"display": "开平", "cell": EnumCell, "update": False},
"price": {"display": "价格", "cell": BaseCell, "update": False},
"volume": {"display": "总数量", "cell": BaseCell, "update": False},
"traded": {"display": "已成交", "cell": BaseCell, "update": False},
"status": {"display": "状态", "cell": EnumCell, "update": False},
"datetime": {"display": "时间", "cell": BaseCell, "update": False},
"gateway_name": {"display": "接口", "cell": BaseCell, "update": False},
}
class DailyResultMonitor(BaseMonitor):
"""
Monitor for backtesting daily result.
"""
headers = {
"date": {"display": "日期", "cell": BaseCell, "update": False},
"trade_count": {"display": "成交笔数", "cell": BaseCell, "update": False},
"start_pos": {"display": "开盘持仓", "cell": BaseCell, "update": False},
"end_pos": {"display": "收盘持仓", "cell": BaseCell, "update": False},
"turnover": {"display": "成交额", "cell": BaseCell, "update": False},
"commission": {"display": "手续费", "cell": BaseCell, "update": False},
"slippage": {"display": "滑点", "cell": BaseCell, "update": False},
"trading_pnl": {"display": "交易盈亏", "cell": BaseCell, "update": False},
"holding_pnl": {"display": "持仓盈亏", "cell": BaseCell, "update": False},
"total_pnl": {"display": "总盈亏", "cell": BaseCell, "update": False},
"net_pnl": {"display": "净盈亏", "cell": BaseCell, "update": False},
}
class BacktestingResultDialog(QtWidgets.QDialog):
"""
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
title: str,
table_class: QtWidgets.QTableWidget
):
""""""
super().__init__()
self.main_engine = main_engine
self.event_engine = event_engine
self.title = title
self.table_class = table_class
self.updated = False
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle(self.title)
self.resize(1100, 600)
self.table = self.table_class(self.main_engine, self.event_engine)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.table)
self.setLayout(vbox)
def clear_data(self):
""""""
self.updated = False
self.table.setRowCount(0)
def update_data(self, data: list):
""""""
self.updated = True
data.reverse()
for obj in data:
self.table.insert_new_row(obj)
def is_updated(self):
""""""
return self.updated
class CandleChartDialog(QtWidgets.QDialog):
"""
"""
def __init__(self):
""""""
super().__init__()
self.dt_ix_map = {}
self.updated = False
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle("回测K线图表")
self.resize(1400, 800)
# Create chart widget
self.chart = ChartWidget()
self.chart.add_plot("candle", hide_x_axis=True)
self.chart.add_plot("volume", maximum_height=200)
self.chart.add_item(CandleItem, "candle", "candle")
self.chart.add_item(VolumeItem, "volume", "volume")
self.chart.add_cursor()
# Add scatter item for showing trades
self.trade_scatter = pg.ScatterPlotItem()
candle_plot = self.chart.get_plot("candle")
candle_plot.addItem(self.trade_scatter)
# Set layout
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.chart)
self.setLayout(vbox)
def update_history(self, history: list):
""""""
self.updated = True
self.chart.update_history(history)
for ix, bar in enumerate(history):
self.dt_ix_map[bar.datetime] = ix
def update_trades(self, trades: list):
""""""
trade_data = []
for trade in trades:
ix = self.dt_ix_map[trade.datetime]
scatter = {
"pos": (ix, trade.price),
"data": 1,
"size": 14,
"pen": pg.mkPen((255, 255, 255))
}
if trade.direction == Direction.LONG:
scatter_symbol = "t1" # Up arrow
else:
scatter_symbol = "t" # Down arrow
if trade.offset == Offset.OPEN:
scatter_brush = pg.mkBrush((255, 255, 0)) # Yellow
else:
scatter_brush = pg.mkBrush((0, 0, 255)) # Blue
scatter["symbol"] = scatter_symbol
scatter["brush"] = scatter_brush
trade_data.append(scatter)
self.trade_scatter.setData(trade_data)
def clear_data(self):
""""""
self.updated = False
self.chart.clear_all()
self.dt_ix_map.clear()
self.trade_scatter.clear()
def is_updated(self):
""""""
return self.updated
| import numpy as np
import pyqtgraph as pg
from datetime import datetime, timedelta
from vnpy.trader.constant import Interval, Direction, Offset
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtCore, QtWidgets, QtGui
from vnpy.trader.ui.widget import BaseMonitor, BaseCell, DirectionCell, EnumCell
from vnpy.trader.ui.editor import CodeEditor
from vnpy.event import Event, EventEngine
from vnpy.chart import ChartWidget, CandleItem, VolumeItem
from vnpy.trader.utility import load_json, save_json
from ..engine import (
APP_NAME,
EVENT_BACKTESTER_LOG,
EVENT_BACKTESTER_BACKTESTING_FINISHED,
EVENT_BACKTESTER_OPTIMIZATION_FINISHED,
OptimizationSetting
)
class BacktesterManager(QtWidgets.QWidget):
""""""
setting_filename = "cta_backtester_setting.json"
signal_log = QtCore.pyqtSignal(Event)
signal_backtesting_finished = QtCore.pyqtSignal(Event)
signal_optimization_finished = QtCore.pyqtSignal(Event)
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__()
self.main_engine = main_engine
self.event_engine = event_engine
self.backtester_engine = main_engine.get_engine(APP_NAME)
self.class_names = []
self.settings = {}
self.target_display = ""
self.init_ui()
self.register_event()
self.backtester_engine.init_engine()
self.init_strategy_settings()
def init_strategy_settings(self):
""""""
self.class_names = self.backtester_engine.get_strategy_class_names()
for class_name in self.class_names:
setting = self.backtester_engine.get_default_setting(class_name)
self.settings[class_name] = setting
self.class_combo.addItems(self.class_names)
def init_ui(self):
""""""
self.setWindowTitle("CTA回测")
# Setting Part
self.class_combo = QtWidgets.QComboBox()
self.symbol_line = QtWidgets.QLineEdit("IF88.CFFEX")
self.interval_combo = QtWidgets.QComboBox()
for interval in Interval:
self.interval_combo.addItem(interval.value)
end_dt = datetime.now()
start_dt = end_dt - timedelta(days=3 * 365)
self.start_date_edit = QtWidgets.QDateEdit(
QtCore.QDate(
start_dt.year,
start_dt.month,
start_dt.day
)
)
self.end_date_edit = QtWidgets.QDateEdit(
QtCore.QDate.currentDate()
)
self.rate_line = QtWidgets.QLineEdit("0.000025")
self.slippage_line = QtWidgets.QLineEdit("0.2")
self.size_line = QtWidgets.QLineEdit("300")
self.pricetick_line = QtWidgets.QLineEdit("0.2")
self.capital_line = QtWidgets.QLineEdit("1000000")
self.inverse_combo = QtWidgets.QComboBox()
self.inverse_combo.addItems(["正向", "反向"])
backtesting_button = QtWidgets.QPushButton("开始回测")
backtesting_button.clicked.connect(self.start_backtesting)
optimization_button = QtWidgets.QPushButton("参数优化")
optimization_button.clicked.connect(self.start_optimization)
self.result_button = QtWidgets.QPushButton("优化结果")
self.result_button.clicked.connect(self.show_optimization_result)
self.result_button.setEnabled(False)
downloading_button = QtWidgets.QPushButton("下载数据")
downloading_button.clicked.connect(self.start_downloading)
self.order_button = QtWidgets.QPushButton("委托记录")
self.order_button.clicked.connect(self.show_backtesting_orders)
self.order_button.setEnabled(False)
self.trade_button = QtWidgets.QPushButton("成交记录")
self.trade_button.clicked.connect(self.show_backtesting_trades)
self.trade_button.setEnabled(False)
self.daily_button = QtWidgets.QPushButton("每日盈亏")
self.daily_button.clicked.connect(self.show_daily_results)
self.daily_button.setEnabled(False)
self.candle_button = QtWidgets.QPushButton("K线图表")
self.candle_button.clicked.connect(self.show_candle_chart)
self.candle_button.setEnabled(False)
edit_button = QtWidgets.QPushButton("代码编辑")
edit_button.clicked.connect(self.edit_strategy_code)
reload_button = QtWidgets.QPushButton("策略重载")
reload_button.clicked.connect(self.reload_strategy_class)
for button in [
backtesting_button,
optimization_button,
downloading_button,
self.result_button,
self.order_button,
self.trade_button,
self.daily_button,
self.candle_button,
edit_button,
reload_button
]:
button.setFixedHeight(button.sizeHint().height() * 2)
form = QtWidgets.QFormLayout()
form.addRow("交易策略", self.class_combo)
form.addRow("本地代码", self.symbol_line)
form.addRow("K线周期", self.interval_combo)
form.addRow("开始日期", self.start_date_edit)
form.addRow("结束日期", self.end_date_edit)
form.addRow("手续费率", self.rate_line)
form.addRow("交易滑点", self.slippage_line)
form.addRow("合约乘数", self.size_line)
form.addRow("价格跳动", self.pricetick_line)
form.addRow("回测资金", self.capital_line)
form.addRow("合约模式", self.inverse_combo)
result_grid = QtWidgets.QGridLayout()
result_grid.addWidget(self.trade_button, 0, 0)
result_grid.addWidget(self.order_button, 0, 1)
result_grid.addWidget(self.daily_button, 1, 0)
result_grid.addWidget(self.candle_button, 1, 1)
left_vbox = QtWidgets.QVBoxLayout()
left_vbox.addLayout(form)
left_vbox.addWidget(backtesting_button)
left_vbox.addWidget(downloading_button)
left_vbox.addStretch()
left_vbox.addLayout(result_grid)
left_vbox.addStretch()
left_vbox.addWidget(optimization_button)
left_vbox.addWidget(self.result_button)
left_vbox.addStretch()
left_vbox.addWidget(edit_button)
left_vbox.addWidget(reload_button)
# Result part
self.statistics_monitor = StatisticsMonitor()
self.log_monitor = QtWidgets.QTextEdit()
self.log_monitor.setMaximumHeight(400)
self.chart = BacktesterChart()
self.chart.setMinimumWidth(1000)
self.trade_dialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测成交记录",
BacktestingTradeMonitor
)
self.order_dialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测委托记录",
BacktestingOrderMonitor
)
self.daily_dialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测每日盈亏",
DailyResultMonitor
)
# Candle Chart
self.candle_dialog = CandleChartDialog()
# Layout
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.statistics_monitor)
vbox.addWidget(self.log_monitor)
hbox = QtWidgets.QHBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(vbox)
hbox.addWidget(self.chart)
self.setLayout(hbox)
# Code Editor
self.editor = CodeEditor(self.main_engine, self.event_engine)
# Load setting
setting = load_json(self.setting_filename)
if not setting:
return
self.class_combo.setCurrentIndex(
self.class_combo.findText(setting["class_name"])
)
self.symbol_line.setText(setting["vt_symbol"])
self.interval_combo.setCurrentIndex(
self.interval_combo.findText(setting["interval"])
)
self.rate_line.setText(str(setting["rate"]))
self.slippage_line.setText(str(setting["slippage"]))
self.size_line.setText(str(setting["size"]))
self.pricetick_line.setText(str(setting["pricetick"]))
self.capital_line.setText(str(setting["capital"]))
if not setting["inverse"]:
self.inverse_combo.setCurrentIndex(0)
else:
self.inverse_combo.setCurrentIndex(1)
def register_event(self):
""""""
self.signal_log.connect(self.process_log_event)
self.signal_backtesting_finished.connect(
self.process_backtesting_finished_event)
self.signal_optimization_finished.connect(
self.process_optimization_finished_event)
self.event_engine.register(EVENT_BACKTESTER_LOG, self.signal_log.emit)
self.event_engine.register(
EVENT_BACKTESTER_BACKTESTING_FINISHED, self.signal_backtesting_finished.emit)
self.event_engine.register(
EVENT_BACKTESTER_OPTIMIZATION_FINISHED, self.signal_optimization_finished.emit)
def process_log_event(self, event: Event):
""""""
msg = event.data
self.write_log(msg)
def write_log(self, msg):
""""""
timestamp = datetime.now().strftime("%H:%M:%S")
msg = f"{timestamp}\t{msg}"
self.log_monitor.append(msg)
def process_backtesting_finished_event(self, event: Event):
""""""
statistics = self.backtester_engine.get_result_statistics()
self.statistics_monitor.set_data(statistics)
df = self.backtester_engine.get_result_df()
self.chart.set_data(df)
self.trade_button.setEnabled(True)
self.order_button.setEnabled(True)
self.daily_button.setEnabled(True)
self.candle_button.setEnabled(True)
def process_optimization_finished_event(self, event: Event):
""""""
self.write_log("请点击[优化结果]按钮查看")
self.result_button.setEnabled(True)
def start_backtesting(self):
""""""
class_name = self.class_combo.currentText()
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
if self.inverse_combo.currentText() == "正向":
inverse = False
else:
inverse = True
# Save backtesting parameters
backtesting_setting = {
"class_name": class_name,
"vt_symbol": vt_symbol,
"interval": interval,
"rate": rate,
"slippage": slippage,
"size": size,
"pricetick": pricetick,
"capital": capital,
"inverse": inverse,
}
save_json(self.setting_filename, backtesting_setting)
# Get strategy setting
old_setting = self.settings[class_name]
dialog = BacktestingSettingEditor(class_name, old_setting)
i = dialog.exec()
if i != dialog.Accepted:
return
new_setting = dialog.get_setting()
self.settings[class_name] = new_setting
result = self.backtester_engine.start_backtesting(
class_name,
vt_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
inverse,
new_setting
)
if result:
self.statistics_monitor.clear_data()
self.chart.clear_data()
self.trade_button.setEnabled(False)
self.order_button.setEnabled(False)
self.daily_button.setEnabled(False)
self.candle_button.setEnabled(False)
self.trade_dialog.clear_data()
self.order_dialog.clear_data()
self.daily_dialog.clear_data()
self.candle_dialog.clear_data()
def start_optimization(self):
""""""
class_name = self.class_combo.currentText()
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
if self.inverse_combo.currentText() == "正向":
inverse = False
else:
inverse = True
parameters = self.settings[class_name]
dialog = OptimizationSettingEditor(class_name, parameters)
i = dialog.exec()
if i != dialog.Accepted:
return
optimization_setting, use_ga = dialog.get_setting()
self.target_display = dialog.target_display
self.backtester_engine.start_optimization(
class_name,
vt_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
inverse,
optimization_setting,
use_ga
)
self.result_button.setEnabled(False)
def start_downloading(self):
""""""
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start_date = self.start_date_edit.date()
end_date = self.end_date_edit.date()
start = datetime(start_date.year(), start_date.month(), start_date.day())
end = datetime(end_date.year(), end_date.month(), end_date.day(), 23, 59, 59)
self.backtester_engine.start_downloading(
vt_symbol,
interval,
start,
end
)
def show_optimization_result(self):
""""""
result_values = self.backtester_engine.get_result_values()
dialog = OptimizationResultMonitor(
result_values,
self.target_display
)
dialog.exec_()
def show_backtesting_trades(self):
""""""
if not self.trade_dialog.is_updated():
trades = self.backtester_engine.get_all_trades()
self.trade_dialog.update_data(trades)
self.trade_dialog.exec_()
def show_backtesting_orders(self):
""""""
if not self.order_dialog.is_updated():
orders = self.backtester_engine.get_all_orders()
self.order_dialog.update_data(orders)
self.order_dialog.exec_()
def show_daily_results(self):
""""""
if not self.daily_dialog.is_updated():
results = self.backtester_engine.get_all_daily_results()
self.daily_dialog.update_data(results)
self.daily_dialog.exec_()
def show_candle_chart(self):
""""""
if not self.candle_dialog.is_updated():
history = self.backtester_engine.get_history_data()
self.candle_dialog.update_history(history)
trades = self.backtester_engine.get_all_trades()
self.candle_dialog.update_trades(trades)
self.candle_dialog.exec_()
def edit_strategy_code(self):
""""""
class_name = self.class_combo.currentText()
file_path = self.backtester_engine.get_strategy_class_file(class_name)
self.editor.open_editor(file_path)
self.editor.show()
def reload_strategy_class(self):
""""""
self.backtester_engine.reload_strategy_class()
self.class_combo.clear()
self.init_strategy_settings()
def show(self):
""""""
self.showMaximized()
class StatisticsMonitor(QtWidgets.QTableWidget):
""""""
KEY_NAME_MAP = {
"start_date": "首个交易日",
"end_date": "最后交易日",
"total_days": "总交易日",
"profit_days": "盈利交易日",
"loss_days": "亏损交易日",
"capital": "起始资金",
"end_balance": "结束资金",
"total_return": "总收益率",
"annual_return": "年化收益",
"max_drawdown": "最大回撤",
"max_ddpercent": "百分比最大回撤",
"total_net_pnl": "总盈亏",
"total_commission": "总手续费",
"total_slippage": "总滑点",
"total_turnover": "总成交额",
"total_trade_count": "总成交笔数",
"daily_net_pnl": "日均盈亏",
"daily_commission": "日均手续费",
"daily_slippage": "日均滑点",
"daily_turnover": "日均成交额",
"daily_trade_count": "日均成交笔数",
"daily_return": "日均收益率",
"return_std": "收益标准差",
"sharpe_ratio": "夏普比率",
"return_drawdown_ratio": "收益回撤比"
}
def __init__(self):
""""""
super().__init__()
self.cells = {}
self.init_ui()
def init_ui(self):
""""""
self.setRowCount(len(self.KEY_NAME_MAP))
self.setVerticalHeaderLabels(list(self.KEY_NAME_MAP.values()))
self.setColumnCount(1)
self.horizontalHeader().setVisible(False)
self.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.Stretch
)
self.setEditTriggers(self.NoEditTriggers)
for row, key in enumerate(self.KEY_NAME_MAP.keys()):
cell = QtWidgets.QTableWidgetItem()
self.setItem(row, 0, cell)
self.cells[key] = cell
def clear_data(self):
""""""
for cell in self.cells.values():
cell.setText("")
def set_data(self, data: dict):
""""""
data["capital"] = f"{data['capital']:,.2f}"
data["end_balance"] = f"{data['end_balance']:,.2f}"
data["total_return"] = f"{data['total_return']:,.2f}%"
data["annual_return"] = f"{data['annual_return']:,.2f}%"
data["max_drawdown"] = f"{data['max_drawdown']:,.2f}"
data["max_ddpercent"] = f"{data['max_ddpercent']:,.2f}%"
data["total_net_pnl"] = f"{data['total_net_pnl']:,.2f}"
data["total_commission"] = f"{data['total_commission']:,.2f}"
data["total_slippage"] = f"{data['total_slippage']:,.2f}"
data["total_turnover"] = f"{data['total_turnover']:,.2f}"
data["daily_net_pnl"] = f"{data['daily_net_pnl']:,.2f}"
data["daily_commission"] = f"{data['daily_commission']:,.2f}"
data["daily_slippage"] = f"{data['daily_slippage']:,.2f}"
data["daily_turnover"] = f"{data['daily_turnover']:,.2f}"
data["daily_return"] = f"{data['daily_return']:,.2f}%"
data["return_std"] = f"{data['return_std']:,.2f}%"
data["sharpe_ratio"] = f"{data['sharpe_ratio']:,.2f}"
data["return_drawdown_ratio"] = f"{data['return_drawdown_ratio']:,.2f}"
for key, cell in self.cells.items():
value = data.get(key, "")
cell.setText(str(value))
class BacktestingSettingEditor(QtWidgets.QDialog):
"""
For creating new strategy and editing strategy parameters.
"""
def __init__(
self, class_name: str, parameters: dict
):
""""""
super(BacktestingSettingEditor, self).__init__()
self.class_name = class_name
self.parameters = parameters
self.edits = {}
self.init_ui()
def init_ui(self):
""""""
form = QtWidgets.QFormLayout()
# Add vt_symbol and name edits when adding a new strategy
self.setWindowTitle(f"策略参数配置:{self.class_name}")
button_text = "确定"
parameters = self.parameters
for name, value in parameters.items():
type_ = type(value)
edit = QtWidgets.QLineEdit(str(value))
if type_ is int:
validator = QtGui.QIntValidator()
edit.setValidator(validator)
elif type_ is float:
validator = QtGui.QDoubleValidator()
edit.setValidator(validator)
form.addRow(f"{name} {type_}", edit)
self.edits[name] = (edit, type_)
button = QtWidgets.QPushButton(button_text)
button.clicked.connect(self.accept)
form.addRow(button)
self.setLayout(form)
def get_setting(self):
""""""
setting = {}
for name, tp in self.edits.items():
edit, type_ = tp
value_text = edit.text()
if type_ == bool:
if value_text == "True":
value = True
else:
value = False
else:
value = type_(value_text)
setting[name] = value
return setting
class BacktesterChart(pg.GraphicsWindow):
""""""
def __init__(self):
""""""
super().__init__(title="Backtester Chart")
self.dates = {}
self.init_ui()
def init_ui(self):
""""""
pg.setConfigOptions(antialias=True)
# Create plot widgets
self.balance_plot = self.addPlot(
title="账户净值",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.drawdown_plot = self.addPlot(
title="净值回撤",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.pnl_plot = self.addPlot(
title="每日盈亏",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.distribution_plot = self.addPlot(title="盈亏分布")
# Add curves and bars on plot widgets
self.balance_curve = self.balance_plot.plot(
pen=pg.mkPen("#ffc107", width=3)
)
dd_color = "#303f9f"
self.drawdown_curve = self.drawdown_plot.plot(
fillLevel=-0.3, brush=dd_color, pen=dd_color
)
profit_color = 'r'
loss_color = 'g'
self.profit_pnl_bar = pg.BarGraphItem(
x=[], height=[], width=0.3, brush=profit_color, pen=profit_color
)
self.loss_pnl_bar = pg.BarGraphItem(
x=[], height=[], width=0.3, brush=loss_color, pen=loss_color
)
self.pnl_plot.addItem(self.profit_pnl_bar)
self.pnl_plot.addItem(self.loss_pnl_bar)
distribution_color = "#6d4c41"
self.distribution_curve = self.distribution_plot.plot(
fillLevel=-0.3, brush=distribution_color, pen=distribution_color
)
def clear_data(self):
""""""
self.balance_curve.setData([], [])
self.drawdown_curve.setData([], [])
self.profit_pnl_bar.setOpts(x=[], height=[])
self.loss_pnl_bar.setOpts(x=[], height=[])
self.distribution_curve.setData([], [])
def set_data(self, df):
""""""
if df is None:
return
count = len(df)
self.dates.clear()
for n, date in enumerate(df.index):
self.dates[n] = date
# Set data for curve of balance and drawdown
self.balance_curve.setData(df["balance"])
self.drawdown_curve.setData(df["drawdown"])
# Set data for daily pnl bar
profit_pnl_x = []
profit_pnl_height = []
loss_pnl_x = []
loss_pnl_height = []
for count, pnl in enumerate(df["net_pnl"]):
if pnl >= 0:
profit_pnl_height.append(pnl)
profit_pnl_x.append(count)
else:
loss_pnl_height.append(pnl)
loss_pnl_x.append(count)
self.profit_pnl_bar.setOpts(x=profit_pnl_x, height=profit_pnl_height)
self.loss_pnl_bar.setOpts(x=loss_pnl_x, height=loss_pnl_height)
# Set data for pnl distribution
hist, x = np.histogram(df["net_pnl"], bins="auto")
x = x[:-1]
self.distribution_curve.setData(x, hist)
class DateAxis(pg.AxisItem):
"""Axis for showing date data"""
def __init__(self, dates: dict, *args, **kwargs):
""""""
super().__init__(*args, **kwargs)
self.dates = dates
def tickStrings(self, values, scale, spacing):
""""""
strings = []
for v in values:
dt = self.dates.get(v, "")
strings.append(str(dt))
return strings
class OptimizationSettingEditor(QtWidgets.QDialog):
"""
For setting up parameters for optimization.
"""
DISPLAY_NAME_MAP = {
"总收益率": "total_return",
"夏普比率": "sharpe_ratio",
"收益回撤比": "return_drawdown_ratio",
"日均盈亏": "daily_net_pnl"
}
def __init__(
self, class_name: str, parameters: dict
):
""""""
super().__init__()
self.class_name = class_name
self.parameters = parameters
self.edits = {}
self.optimization_setting = None
self.use_ga = False
self.init_ui()
def init_ui(self):
""""""
QLabel = QtWidgets.QLabel
self.target_combo = QtWidgets.QComboBox()
self.target_combo.addItems(list(self.DISPLAY_NAME_MAP.keys()))
grid = QtWidgets.QGridLayout()
grid.addWidget(QLabel("目标"), 0, 0)
grid.addWidget(self.target_combo, 0, 1, 1, 3)
grid.addWidget(QLabel("参数"), 1, 0)
grid.addWidget(QLabel("开始"), 1, 1)
grid.addWidget(QLabel("步进"), 1, 2)
grid.addWidget(QLabel("结束"), 1, 3)
# Add vt_symbol and name edits when adding a new strategy
self.setWindowTitle(f"优化参数配置:{self.class_name}")
validator = QtGui.QDoubleValidator()
row = 2
for name, value in self.parameters.items():
type_ = type(value)
if type_ not in [int, float]:
continue
start_edit = QtWidgets.QLineEdit(str(value))
step_edit = QtWidgets.QLineEdit(str(1))
end_edit = QtWidgets.QLineEdit(str(value))
for edit in [start_edit, step_edit, end_edit]:
edit.setValidator(validator)
grid.addWidget(QLabel(name), row, 0)
grid.addWidget(start_edit, row, 1)
grid.addWidget(step_edit, row, 2)
grid.addWidget(end_edit, row, 3)
self.edits[name] = {
"type": type_,
"start": start_edit,
"step": step_edit,
"end": end_edit
}
row += 1
parallel_button = QtWidgets.QPushButton("多进程优化")
parallel_button.clicked.connect(self.generate_parallel_setting)
grid.addWidget(parallel_button, row, 0, 1, 4)
row += 1
ga_button = QtWidgets.QPushButton("遗传算法优化")
ga_button.clicked.connect(self.generate_ga_setting)
grid.addWidget(ga_button, row, 0, 1, 4)
self.setLayout(grid)
def generate_ga_setting(self):
""""""
self.use_ga = True
self.generate_setting()
def generate_parallel_setting(self):
""""""
self.use_ga = False
self.generate_setting()
def generate_setting(self):
""""""
self.optimization_setting = OptimizationSetting()
self.target_display = self.target_combo.currentText()
target_name = self.DISPLAY_NAME_MAP[self.target_display]
self.optimization_setting.set_target(target_name)
for name, d in self.edits.items():
type_ = d["type"]
start_value = type_(d["start"].text())
step_value = type_(d["step"].text())
end_value = type_(d["end"].text())
if start_value == end_value:
self.optimization_setting.add_parameter(name, start_value)
else:
self.optimization_setting.add_parameter(
name,
start_value,
end_value,
step_value
)
self.accept()
def get_setting(self):
""""""
return self.optimization_setting, self.use_ga
class OptimizationResultMonitor(QtWidgets.QDialog):
"""
For viewing optimization result.
"""
def __init__(
self, result_values: list, target_display: str
):
""""""
super().__init__()
self.result_values = result_values
self.target_display = target_display
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle("参数优化结果")
self.resize(1100, 500)
table = QtWidgets.QTableWidget()
table.setColumnCount(2)
table.setRowCount(len(self.result_values))
table.setHorizontalHeaderLabels(["参数", self.target_display])
table.setEditTriggers(table.NoEditTriggers)
table.verticalHeader().setVisible(False)
table.horizontalHeader().setSectionResizeMode(
0, QtWidgets.QHeaderView.ResizeToContents
)
table.horizontalHeader().setSectionResizeMode(
1, QtWidgets.QHeaderView.Stretch
)
for n, tp in enumerate(self.result_values):
setting, target_value, _ = tp
setting_cell = QtWidgets.QTableWidgetItem(str(setting))
target_cell = QtWidgets.QTableWidgetItem(str(target_value))
setting_cell.setTextAlignment(QtCore.Qt.AlignCenter)
target_cell.setTextAlignment(QtCore.Qt.AlignCenter)
table.setItem(n, 0, setting_cell)
table.setItem(n, 1, target_cell)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(table)
self.setLayout(vbox)
class BacktestingTradeMonitor(BaseMonitor):
"""
Monitor for backtesting trade data.
"""
headers = {
"tradeid": {"display": "成交号 ", "cell": BaseCell, "update": False},
"orderid": {"display": "委托号", "cell": BaseCell, "update": False},
"symbol": {"display": "代码", "cell": BaseCell, "update": False},
"exchange": {"display": "交易所", "cell": EnumCell, "update": False},
"direction": {"display": "方向", "cell": DirectionCell, "update": False},
"offset": {"display": "开平", "cell": EnumCell, "update": False},
"price": {"display": "价格", "cell": BaseCell, "update": False},
"volume": {"display": "数量", "cell": BaseCell, "update": False},
"datetime": {"display": "时间", "cell": BaseCell, "update": False},
"gateway_name": {"display": "接口", "cell": BaseCell, "update": False},
}
class BacktestingOrderMonitor(BaseMonitor):
"""
Monitor for backtesting order data.
"""
headers = {
"orderid": {"display": "委托号", "cell": BaseCell, "update": False},
"symbol": {"display": "代码", "cell": BaseCell, "update": False},
"exchange": {"display": "交易所", "cell": EnumCell, "update": False},
"type": {"display": "类型", "cell": EnumCell, "update": False},
"direction": {"display": "方向", "cell": DirectionCell, "update": False},
"offset": {"display": "开平", "cell": EnumCell, "update": False},
"price": {"display": "价格", "cell": BaseCell, "update": False},
"volume": {"display": "总数量", "cell": BaseCell, "update": False},
"traded": {"display": "已成交", "cell": BaseCell, "update": False},
"status": {"display": "状态", "cell": EnumCell, "update": False},
"datetime": {"display": "时间", "cell": BaseCell, "update": False},
"gateway_name": {"display": "接口", "cell": BaseCell, "update": False},
}
class DailyResultMonitor(BaseMonitor):
"""
Monitor for backtesting daily result.
"""
headers = {
"date": {"display": "日期", "cell": BaseCell, "update": False},
"trade_count": {"display": "成交笔数", "cell": BaseCell, "update": False},
"start_pos": {"display": "开盘持仓", "cell": BaseCell, "update": False},
"end_pos": {"display": "收盘持仓", "cell": BaseCell, "update": False},
"turnover": {"display": "成交额", "cell": BaseCell, "update": False},
"commission": {"display": "手续费", "cell": BaseCell, "update": False},
"slippage": {"display": "滑点", "cell": BaseCell, "update": False},
"trading_pnl": {"display": "交易盈亏", "cell": BaseCell, "update": False},
"holding_pnl": {"display": "持仓盈亏", "cell": BaseCell, "update": False},
"total_pnl": {"display": "总盈亏", "cell": BaseCell, "update": False},
"net_pnl": {"display": "净盈亏", "cell": BaseCell, "update": False},
}
class BacktestingResultDialog(QtWidgets.QDialog):
"""
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
title: str,
table_class: QtWidgets.QTableWidget
):
""""""
super().__init__()
self.main_engine = main_engine
self.event_engine = event_engine
self.title = title
self.table_class = table_class
self.updated = False
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle(self.title)
self.resize(1100, 600)
self.table = self.table_class(self.main_engine, self.event_engine)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.table)
self.setLayout(vbox)
def clear_data(self):
""""""
self.updated = False
self.table.setRowCount(0)
def update_data(self, data: list):
""""""
self.updated = True
data.reverse()
for obj in data:
self.table.insert_new_row(obj)
def is_updated(self):
""""""
return self.updated
class CandleChartDialog(QtWidgets.QDialog):
"""
"""
def __init__(self):
""""""
super().__init__()
self.dt_ix_map = {}
self.updated = False
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle("回测K线图表")
self.resize(1400, 800)
# Create chart widget
self.chart = ChartWidget()
self.chart.add_plot("candle", hide_x_axis=True)
self.chart.add_plot("volume", maximum_height=200)
self.chart.add_item(CandleItem, "candle", "candle")
self.chart.add_item(VolumeItem, "volume", "volume")
self.chart.add_cursor()
# Add scatter item for showing trades
self.trade_scatter = pg.ScatterPlotItem()
candle_plot = self.chart.get_plot("candle")
candle_plot.addItem(self.trade_scatter)
# Set layout
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.chart)
self.setLayout(vbox)
def update_history(self, history: list):
""""""
self.updated = True
self.chart.update_history(history)
for ix, bar in enumerate(history):
self.dt_ix_map[bar.datetime] = ix
def update_trades(self, trades: list):
""""""
trade_data = []
for trade in trades:
ix = self.dt_ix_map[trade.datetime]
scatter = {
"pos": (ix, trade.price),
"data": 1,
"size": 14,
"pen": pg.mkPen((255, 255, 255))
}
if trade.direction == Direction.LONG:
scatter_symbol = "t1" # Up arrow
else:
scatter_symbol = "t" # Down arrow
if trade.offset == Offset.OPEN:
scatter_brush = pg.mkBrush((255, 255, 0)) # Yellow
else:
scatter_brush = pg.mkBrush((0, 0, 255)) # Blue
scatter["symbol"] = scatter_symbol
scatter["brush"] = scatter_brush
trade_data.append(scatter)
self.trade_scatter.setData(trade_data)
def clear_data(self):
""""""
self.updated = False
self.chart.clear_all()
self.dt_ix_map.clear()
self.trade_scatter.clear()
def is_updated(self):
""""""
return self.updated
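# Illustrative usage sketch (not part of the original module; the wiring below is
# an assumption about typical vnpy usage): the widget is normally created inside a
# Qt application together with vnpy's engines, roughly as
#   app = QtWidgets.QApplication([])
#   event_engine = EventEngine()
#   main_engine = MainEngine(event_engine)
#   manager = BacktesterManager(main_engine, event_engine)
#   manager.show()
#   app.exec_()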
|
#!/usr/bin/env python
"""
Predefined bluesky scan plans
"""
import numpy as np
import bluesky.plans as bp
import bluesky.preprocessors as bpp
import bluesky.plan_stubs as bps
from .utility import load_config
#@bpp.run_decorator()
def collect_white_field(experiment, cfg_tomo, atfront=True):
"""
Collect white/flat field images by moving the sample out of the FOV
"""
# unpack devices
det = experiment.det
tomostage = experiment.tomostage
# move sample out of the way
_x = cfg_tomo['fronte_white_ksamX'] if atfront else cfg_tomo['back_white_ksamX']
_z = cfg_tomo['fronte_white_ksamZ'] if atfront else cfg_tomo['back_white_ksamZ']
yield from bps.mv(tomostage.ksamX, _x)
yield from bps.mv(tomostage.ksamZ, _z)
# setup detector
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
yield from bps.mv(det.cam.trigger_mode, "Internal")
yield from bps.mv(det.cam.image_mode, "Multiple")
yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_white'])
yield from bps.trigger_and_read([det])
# move sample back to FOV
# NOTE:
# not sure if this will work or not...
yield from bps.mv(tomostage.ksamX, cfg_tomo['initial_ksamX'])
yield from bps.mv(tomostage.ksamZ, cfg_tomo['initial_ksamZ'])
#@bpp.run_decorator()
def collect_dark_field(experiment, cfg_tomo):
"""
Collect dark field images by closing the shutter
"""
det = experiment.det
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
yield from bps.mv(det.cam.trigger_mode, "Internal")
yield from bps.mv(det.cam.image_mode, "Multiple")
yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_dark'])
yield from bps.trigger_and_read([det])
#@bpp.run_decorator()
def step_scan(experiment, cfg_tomo):
"""
Collect projections with step motion
"""
# unpack devices
det = experiment.det
tomostage = experiment.tomostage
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
angs = np.arange(
cfg_tomo['omega_start'],
cfg_tomo['omega_end']+cfg_tomo['omega_step']/2,
cfg_tomo['omega_step'],
)
for ang in angs:
yield from bps.checkpoint()
yield from bps.mv(tomostage.preci, ang)
yield from bps.trigger_and_read([det])
#@bpp.run_decorator()
def fly_scan(experiment, cfg_tomo):
"""
Collect projections with fly motion
"""
det = experiment.det
psofly = experiment.psofly
yield from bps.mv(det.hdf1.nd_array_port, 'PG1')
yield from bps.mv(det.tiff1.nd_array_port, 'PG1')
# we are assuming that the global psofly is available
yield from bps.mv(
psofly.start, cfg_tomo['omega_start'],
psofly.end, cfg_tomo['omega_end'],
psofly.scan_delta, abs(cfg_tomo['omega_step']),
psofly.slew_speed, cfg_tomo['slew_speed'],
)
# taxi
yield from bps.mv(psofly.taxi, "Taxi")
yield from bps.mv(
det.cam.num_images, cfg_tomo['n_projections'],
det.cam.trigger_mode, "Overlapped",
)
# start the fly scan
yield from bps.trigger(det, group='fly')
yield from bps.abs_set(psofly.fly, "Fly", group='fly')
yield from bps.wait(group='fly')
def tomo_scan(experiment, cfg):
"""
Tomography scan plan based on given configuration
"""
# unpack devices
det = experiment.det
tomostage = experiment.tomostage
shutter = experiment.shutter
shutter_suspender = experiment.suspend_shutter
cfg = load_config(cfg) if not isinstance(cfg, dict) else cfg
# update the cached motor position in the dict in case exp goes wrong
_cached_position = experiment.cache_motor_position()
# step 0: preparation
acquire_time = cfg['tomo']['acquire_time']
n_white = cfg['tomo']['n_white']
n_dark = cfg['tomo']['n_dark']
angs = np.arange(
cfg['tomo']['omega_start'],
cfg['tomo']['omega_end']+cfg['tomo']['omega_step']/2,
cfg['tomo']['omega_step'],
)
n_projections = len(angs)
cfg['tomo']['n_projections'] = n_projections
total_images = n_white + n_projections + n_white + n_dark
fp = cfg['output']['filepath']
fn = cfg['output']['fileprefix']
# calculate slew speed for fly scan
# https://github.com/decarlof/tomo2bm/blob/master/flir/libs/aps2bm_lib.py
# TODO: considering blue pixels, use 2BM code as ref
if cfg['tomo']['type'].lower() == 'fly':
scan_time = (acquire_time+cfg['tomo']['readout_time'])*n_projections
slew_speed = (angs.max() - angs.min())/scan_time
cfg['tomo']['slew_speed'] = slew_speed
# need to make sure that the sample out position is the same for both front and back
x0, z0 = tomostage.ksamX.position, tomostage.ksamZ.position
dfx, dfz = cfg['tomo']['sample_out_position']['samX'], cfg['tomo']['sample_out_position']['samZ']
rotang = np.radians(cfg['tomo']['omega_end']-cfg['tomo']['omega_start'])
rotm = np.array([[ np.cos(rotang), np.sin(rotang)],
[-np.sin(rotang), np.cos(rotang)]])
dbxz = np.dot(rotm, np.array([dfx, dfz]))
dbx = dbxz[0] if abs(dbxz[0]) > 1e-8 else 0.0
dbz = dbxz[1] if abs(dbxz[1]) > 1e-8 else 0.0
# now put the value to dict
cfg['tomo']['initial_ksamX'] = x0
cfg['tomo']['initial_ksamZ'] = z0
cfg['tomo']['fronte_white_ksamX'] = x0 + dfx
cfg['tomo']['fronte_white_ksamZ'] = z0 + dfz
cfg['tomo']['back_white_ksamX'] = x0 + dbx
cfg['tomo']['back_white_ksamZ'] = z0 + dbz
@bpp.run_decorator()
@bpp.stage_decorator([det])
def scan_closure():
# open shutter for beam
yield from bps.mv(shutter, 'open')
yield from bps.install_suspender(shutter_suspender)
# config output
for me in [det.tiff1, det.hdf1]:
yield from bps.mv(me.file_path, fp)
yield from bps.mv(me.file_name, fn)
yield from bps.mv(me.file_write_mode, 2)
yield from bps.mv(me.num_capture, total_images)
yield from bps.mv(me.file_template, ".".join([r"%s%s_%06d",cfg['output']['type'].lower()]))
if cfg['output']['type'] in ['tif', 'tiff']:
yield from bps.mv(det.tiff1.enable, 1)
yield from bps.mv(det.tiff1.capture, 1)
yield from bps.mv(det.hdf1.enable, 0)
elif cfg['output']['type'] in ['hdf', 'hdf1', 'hdf5']:
yield from bps.mv(det.tiff1.enable, 0)
yield from bps.mv(det.hdf1.enable, 1)
yield from bps.mv(det.hdf1.capture, 1)
else:
raise ValueError(f"Unsupported output type {cfg['output']['type']}")
# collect front white field
yield from bps.mv(det.cam.frame_type, 0) # for HDF5 dxchange data structure
yield from collect_white_field(experiment, cfg['tomo'], atfront=True)
# collect projections
yield from bps.mv(det.cam.frame_type, 1) # for HDF5 dxchange data structure
if cfg['tomo']['type'].lower() == 'step':
yield from step_scan(experiment, cfg['tomo'])
elif cfg['tomo']['type'].lower() == 'fly':
yield from fly_scan(experiment, cfg['tomo'])
else:
raise ValueError(f"Unsupported scan type: {cfg['tomo']['type']}")
# collect back white field
yield from bps.mv(det.cam.frame_type, 2) # for HDF5 dxchange data structure
yield from collect_white_field(experiment, cfg['tomo'], atfront=False)
# collect back dark field
yield from bps.mv(det.cam.frame_type, 3) # for HDF5 dxchange data structure
yield from bps.remove_suspender(shutter_suspender)
yield from bps.mv(shutter, "close")
yield from collect_dark_field(experiment, cfg['tomo'])
return (yield from scan_closure())
| #!/usr/bin/env python
"""
Predefined bluesky scan plans
"""
import numpy as np
import bluesky.plans as bp
import bluesky.preprocessors as bpp
import bluesky.plan_stubs as bps
from .utility import load_config
#@bpp.run_decorator()
def collect_white_field(experiment, cfg_tomo, atfront=True):
"""
Collect white/flat field images by moving the sample out of the FOV
"""
# unpack devices
det = experiment.det
tomostage = experiment.tomostage
# move sample out of the way
_x = cfg_tomo['fronte_white_ksamX'] if atfront else cfg_tomo['back_white_ksamX']
_z = cfg_tomo['fronte_white_ksamZ'] if atfront else cfg_tomo['back_white_ksamZ']
yield from bps.mv(tomostage.ksamX, _x)
yield from bps.mv(tomostage.ksamZ, _z)
# setup detector
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
yield from bps.mv(det.cam.trigger_mode, "Internal")
yield from bps.mv(det.cam.image_mode, "Multiple")
yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_white'])
yield from bps.trigger_and_read([det])
# move sample back to FOV
# NOTE:
# not sure if this will work or not...
yield from bps.mv(tomostage.ksamX, cfg_tomo['initial_ksamX'])
yield from bps.mv(tomostage.ksamZ, cfg_tomo['initial_ksamZ'])
#@bpp.run_decorator()
def collect_dark_field(experiment, cfg_tomo):
"""
Collect dark field images by closing the shutter
"""
det = experiment.det
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
yield from bps.mv(det.cam.trigger_mode, "Internal")
yield from bps.mv(det.cam.image_mode, "Multiple")
yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_dark'])
yield from bps.trigger_and_read([det])
#@bpp.run_decorator()
def step_scan(experiment, cfg_tomo):
"""
Collect projections with step motion
"""
# unpack devices
det = experiment.det
tomostage = experiment.tomostage
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
angs = np.arange(
cfg_tomo['omega_start'],
cfg_tomo['omega_end']+cfg_tomo['omega_step']/2,
cfg_tomo['omega_step'],
)
for ang in angs:
yield from bps.checkpoint()
yield from bps.mv(tomostage.preci, ang)
yield from bps.trigger_and_read([det])
#@bpp.run_decorator()
def fly_scan(experiment, cfg_tomo):
"""
Collect projections with fly motion
"""
det = experiment.det
psofly = experiment.psofly
yield from bps.mv(det.hdf1.nd_array_port, 'PG1')
yield from bps.mv(det.tiff1.nd_array_port, 'PG1')
# we are assuming that the global psofly is available
yield from bps.mv(
psofly.start, cfg_tomo['omega_start'],
psofly.end, cfg_tomo['omega_end'],
psofly.scan_delta, abs(cfg_tomo['omega_step']),
psofly.slew_speed, cfg_tomo['slew_speed'],
)
# taxi
yield from bps.mv(psofly.taxi, "Taxi")
yield from bps.mv(
det.cam.num_images, cfg_tomo['n_projections'],
det.cam.trigger_mode, "Overlapped",
)
# start the fly scan
yield from bps.trigger(det, group='fly')
yield from bps.abs_set(psofly.fly, "Fly", group='fly')
yield from bps.wait(group='fly')
def tomo_scan(experiment, cfg):
"""
Tomography scan plan based on given configuration
"""
# unpack devices
det = experiment.det
tomostage = experiment.tomostage
shutter = experiment.shutter
shutter_suspender = experiment.suspend_shutter
cfg = load_config(cfg) if not isinstance(cfg, dict) else cfg
# update the cached motor position in the dict in case exp goes wrong
_cached_position = experiment.cache_motor_position()
# step 0: preparation
acquire_time = cfg['tomo']['acquire_time']
n_white = cfg['tomo']['n_white']
n_dark = cfg['tomo']['n_dark']
angs = np.arange(
cfg['tomo']['omega_start'],
cfg['tomo']['omega_end']+cfg['tomo']['omega_step']/2,
cfg['tomo']['omega_step'],
)
n_projections = len(angs)
cfg['tomo']['n_projections'] = n_projections
total_images = n_white + n_projections + n_white + n_dark
fp = cfg['output']['filepath']
fn = cfg['output']['fileprefix']
# calculate slew speed for fly scan
# https://github.com/decarlof/tomo2bm/blob/master/flir/libs/aps2bm_lib.py
# TODO: considering blue pixels, use 2BM code as ref
if cfg['tomo']['type'].lower() == 'fly':
scan_time = (acquire_time+cfg['tomo']['readout_time'])*n_projections
slew_speed = (angs.max() - angs.min())/scan_time
cfg['tomo']['slew_speed'] = slew_speed
# need to make sure that the sample out position is the same for both front and back
x0, z0 = tomostage.ksamX.position, tomostage.ksamZ.position
dfx, dfz = cfg['tomo']['sample_out_position']['samX'], cfg['tomo']['sample_out_position']['samZ']
rotang = np.radians(cfg['tomo']['omega_end']-cfg['tomo']['omega_start'])
rotm = np.array([[ np.cos(rotang), np.sin(rotang)],
[-np.sin(rotang), np.cos(rotang)]])
dbxz = np.dot(rotm, np.array([dfx, dfz]))
dbx = dbxz[0] if abs(dbxz[0]) > 1e-8 else 0.0
dbz = dbxz[1] if abs(dbxz[1]) > 1e-8 else 0.0
# now put the value to dict
cfg['tomo']['initial_ksamX'] = x0
cfg['tomo']['initial_ksamZ'] = z0
cfg['tomo']['fronte_white_ksamX'] = x0 + dfx
cfg['tomo']['fronte_white_ksamZ'] = z0 + dfz
cfg['tomo']['back_white_ksamX'] = x0 + dbx
cfg['tomo']['back_white_ksamZ'] = z0 + dbz
@bpp.run_decorator()
@bpp.stage_decorator([det])
def scan_closure():
# open shutter for beam
yield from bps.mv(shutter, 'open')
yield from bps.install_suspender(shutter_suspender)
# config output
for me in [det.tiff1, det.hdf1]:
yield from bps.mv(me.file_path, fp)
yield from bps.mv(me.file_name, fn)
yield from bps.mv(me.file_write_mode, 2)
yield from bps.mv(me.num_capture, total_images)
yield from bps.mv(me.file_template, ".".join([r"%s%s_%06d",cfg['output']['type'].lower()]))
if cfg['output']['type'] in ['tif', 'tiff']:
yield from bps.mv(det.tiff1.enable, 1)
yield from bps.mv(det.tiff1.capture, 1)
yield from bps.mv(det.hdf1.enable, 0)
elif cfg['output']['type'] in ['hdf', 'hdf1', 'hdf5']:
yield from bps.mv(det.tiff1.enable, 0)
yield from bps.mv(det.hdf1.enable, 1)
yield from bps.mv(det.hdf1.capture, 1)
else:
raise ValueError(f"Unsupported output type {cfg['output']['type']}")
# collect front white field
yield from bps.mv(det.cam.frame_type, 0) # for HDF5 dxchange data structure
yield from collect_white_field(experiment, cfg['tomo'], atfront=True)
# collect projections
yield from bps.mv(det.cam.frame_type, 1) # for HDF5 dxchange data structure
if cfg['tomo']['type'].lower() == 'step':
yield from step_scan(experiment, cfg['tomo'])
elif cfg['tomo']['type'].lower() == 'fly':
yield from fly_scan(experiment, cfg['tomo'])
else:
raise ValueError(f"Unsupported scan type: {cfg['tomo']['type']}")
# collect back white field
yield from bps.mv(det.cam.frame_type, 2) # for HDF5 dxchange data structure
yield from collect_white_field(experiment, cfg['tomo'], atfront=False)
# collect back dark field
yield from bps.mv(det.cam.frame_type, 3) # for HDF5 dxchange data structure
yield from bps.remove_suspender(shutter_suspender)
yield from bps.mv(shutter, "close")
yield from collect_dark_field(experiment, cfg['tomo'])
return (yield from scan_closure())
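# Illustrative usage sketch (assumption, not part of the original module): the
# plan above is meant to be consumed by a bluesky RunEngine together with an
# `experiment` object exposing det, tomostage, shutter, suspend_shutter and
# (for fly scans) psofly, e.g.
#   from bluesky import RunEngine
#   RE = RunEngine({})
#   RE(tomo_scan(experiment, "tomo_config.yml"))  # hypothetical config file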
|
"""Provides the Objector class."""
from json import loads
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from .exceptions import ClientException, RedditAPIException
from .models.reddit.base import RedditBase
from .util import snake_case_keys
if TYPE_CHECKING: # pragma: no cover
from ... import praw
class Objector:
"""The objector builds :class:`.RedditBase` objects."""
@classmethod
def parse_error(
cls, data: Union[List[Any], Dict[str, Dict[str, str]]]
) -> Optional[RedditAPIException]:
"""Convert JSON response into an error object.
:param data: The dict to be converted.
:returns: An instance of :class:`~.RedditAPIException`, or ``None`` if ``data``
doesn't fit this model.
"""
if isinstance(data, list):
# Fetching a Submission returns a list (of two items). Although it's handled
# manually in `Submission._fetch()`, assume it's a possibility here.
return None
errors = data.get("json", {}).get("errors")
if errors is None:
return None
if len(errors) < 1:
# See `Collection._fetch()`.
raise ClientException("successful error response", data)
return RedditAPIException(errors)
@classmethod
def check_error(cls, data: Union[List[Any], Dict[str, Dict[str, str]]]):
"""Raise an error if the argument resolves to an error object."""
error = cls.parse_error(data)
if error:
raise error
def __init__(self, reddit: "praw.Reddit", parsers: Optional[Dict[str, Any]] = None):
"""Initialize an Objector instance.
:param reddit: An instance of :class:`~.Reddit`.
"""
self.parsers = {} if parsers is None else parsers
self._reddit = reddit
def _objectify_dict(self, data):
"""Create RedditBase objects from dicts.
:param data: The structured data, assumed to be a dict.
:returns: An instance of :class:`~.RedditBase`.
"""
if {"conversation", "messages", "modActions"}.issubset(data):
parser = self.parsers["ModmailConversation"]
elif {"actionTypeId", "author", "date"}.issubset(data):
# Modmail mod action
data = snake_case_keys(data)
parser = self.parsers["ModmailAction"]
elif {"bodyMarkdown", "isInternal"}.issubset(data):
# Modmail message
data = snake_case_keys(data)
parser = self.parsers["ModmailMessage"]
elif {"kind", "short_name", "violation_reason"}.issubset(data):
# This is a Rule
parser = self.parsers["rule"]
elif {"isAdmin", "isDeleted"}.issubset(data):
# Modmail author
data = snake_case_keys(data)
# Prevent clobbering base-36 id
del data["id"]
data["is_subreddit_mod"] = data.pop("is_mod")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"banStatus", "muteStatus", "recentComments"}.issubset(data):
# Modmail user
data = snake_case_keys(data)
data["created_string"] = data.pop("created")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"displayName", "id", "type"}.issubset(data):
# Modmail subreddit
data = snake_case_keys(data)
parser = self.parsers[self._reddit.config.kinds[data["type"]]]
elif {"date", "id", "name"}.issubset(data) or {
"id",
"name",
"permissions",
}.issubset(data):
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"text", "url"}.issubset(data):
if "color" in data or "linkUrl" in data:
parser = self.parsers["Button"]
else:
parser = self.parsers["MenuLink"]
elif {"children", "text"}.issubset(data):
parser = self.parsers["Submenu"]
elif {"height", "url", "width"}.issubset(data):
parser = self.parsers["Image"]
elif {"isSubscribed", "name", "subscribers"}.issubset(data):
# discards icon and subscribed information
return self._reddit.subreddit(data["name"])
elif {"authorFlairType", "name"}.issubset(data):
# discards flair information
return self._reddit.redditor(data["name"])
elif {"parent_id"}.issubset(data):
parser = self.parsers[self._reddit.config.kinds["comment"]]
elif "collection_id" in data.keys():
parser = self.parsers["Collection"]
elif {"moderators", "moderatorIds", "allUsersLoaded", "subredditId"}.issubset(
data
):
data = snake_case_keys(data)
moderators = []
for mod_id in data["moderator_ids"]:
mod = snake_case_keys(data["moderators"][mod_id])
mod["mod_permissions"] = list(mod["mod_permissions"].keys())
moderators.append(mod)
data["moderators"] = moderators
parser = self.parsers["moderator-list"]
elif "username" in data.keys():
data["name"] = data.pop("username")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
else:
if "user" in data:
parser = self.parsers[self._reddit.config.kinds["redditor"]]
data["user"] = parser.parse({"name": data["user"]}, self._reddit)
return data
return parser.parse(data, self._reddit)
def objectify(
self, data: Optional[Union[Dict[str, Any], List[Any]]]
) -> Optional[Union[RedditBase, Dict[str, Any], List[Any]]]:
"""Create RedditBase objects from data.
:param data: The structured data.
:returns: An instance of :class:`~.RedditBase`, or ``None`` if given ``data`` is
``None``.
"""
# pylint: disable=too-many-return-statements
if data is None: # 204 no content
return None
if isinstance(data, list):
return [self.objectify(item) for item in data]
if "json" in data and "errors" in data["json"]:
errors = data["json"]["errors"]
if len(errors) > 0:
raise RedditAPIException(errors)
if "kind" in data and (
"shortName" in data or data["kind"] in ("menu", "moderators")
):
# This is a widget
parser = self.parsers.get(data["kind"], self.parsers["widget"])
return parser.parse(data, self._reddit)
if {"kind", "data"}.issubset(data) and data["kind"] in self.parsers:
parser = self.parsers[data["kind"]]
return parser.parse(data["data"], self._reddit)
if "json" in data and "data" in data["json"]:
if "websocket_url" in data["json"]["data"]:
return data
if "things" in data["json"]["data"]: # Submission.reply
return self.objectify(data["json"]["data"]["things"])
if "rules" in data["json"]["data"]:
return self.objectify(loads(data["json"]["data"]["rules"]))
if "url" in data["json"]["data"]: # Subreddit.submit
# The URL is the URL to the submission, so it's removed.
del data["json"]["data"]["url"]
parser = self.parsers[self._reddit.config.kinds["submission"]]
if data["json"]["data"]["id"].startswith(
f"{self._reddit.config.kinds["submission"]}_"
):
# With polls, Reddit returns a fullname but calls it an "id". This
# fixes this by coercing the fullname into an id.
data["json"]["data"]["id"] = data["json"]["data"]["id"].split(
"_", 1
)[1]
else:
parser = self.parsers["LiveUpdateEvent"]
return parser.parse(data["json"]["data"], self._reddit)
if "rules" in data:
return self.objectify(data["rules"])
elif isinstance(data, dict):
return self._objectify_dict(data)
return data
| """Provides the Objector class."""
from json import loads
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from .exceptions import ClientException, RedditAPIException
from .models.reddit.base import RedditBase
from .util import snake_case_keys
if TYPE_CHECKING: # pragma: no cover
from ... import praw
class Objector:
"""The objector builds :class:`.RedditBase` objects."""
@classmethod
def parse_error(
cls, data: Union[List[Any], Dict[str, Dict[str, str]]]
) -> Optional[RedditAPIException]:
"""Convert JSON response into an error object.
:param data: The dict to be converted.
:returns: An instance of :class:`~.RedditAPIException`, or ``None`` if ``data``
doesn't fit this model.
"""
if isinstance(data, list):
# Fetching a Submission returns a list (of two items). Although it's handled
# manually in `Submission._fetch()`, assume it's a possibility here.
return None
errors = data.get("json", {}).get("errors")
if errors is None:
return None
if len(errors) < 1:
# See `Collection._fetch()`.
raise ClientException("successful error response", data)
return RedditAPIException(errors)
@classmethod
def check_error(cls, data: Union[List[Any], Dict[str, Dict[str, str]]]):
"""Raise an error if the argument resolves to an error object."""
error = cls.parse_error(data)
if error:
raise error
def __init__(self, reddit: "praw.Reddit", parsers: Optional[Dict[str, Any]] = None):
"""Initialize an Objector instance.
:param reddit: An instance of :class:`~.Reddit`.
"""
self.parsers = {} if parsers is None else parsers
self._reddit = reddit
def _objectify_dict(self, data):
"""Create RedditBase objects from dicts.
:param data: The structured data, assumed to be a dict.
:returns: An instance of :class:`~.RedditBase`.
"""
if {"conversation", "messages", "modActions"}.issubset(data):
parser = self.parsers["ModmailConversation"]
elif {"actionTypeId", "author", "date"}.issubset(data):
# Modmail mod action
data = snake_case_keys(data)
parser = self.parsers["ModmailAction"]
elif {"bodyMarkdown", "isInternal"}.issubset(data):
# Modmail message
data = snake_case_keys(data)
parser = self.parsers["ModmailMessage"]
elif {"kind", "short_name", "violation_reason"}.issubset(data):
# This is a Rule
parser = self.parsers["rule"]
elif {"isAdmin", "isDeleted"}.issubset(data):
# Modmail author
data = snake_case_keys(data)
# Prevent clobbering base-36 id
del data["id"]
data["is_subreddit_mod"] = data.pop("is_mod")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"banStatus", "muteStatus", "recentComments"}.issubset(data):
# Modmail user
data = snake_case_keys(data)
data["created_string"] = data.pop("created")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"displayName", "id", "type"}.issubset(data):
# Modmail subreddit
data = snake_case_keys(data)
parser = self.parsers[self._reddit.config.kinds[data["type"]]]
elif {"date", "id", "name"}.issubset(data) or {
"id",
"name",
"permissions",
}.issubset(data):
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"text", "url"}.issubset(data):
if "color" in data or "linkUrl" in data:
parser = self.parsers["Button"]
else:
parser = self.parsers["MenuLink"]
elif {"children", "text"}.issubset(data):
parser = self.parsers["Submenu"]
elif {"height", "url", "width"}.issubset(data):
parser = self.parsers["Image"]
elif {"isSubscribed", "name", "subscribers"}.issubset(data):
# discards icon and subscribed information
return self._reddit.subreddit(data["name"])
elif {"authorFlairType", "name"}.issubset(data):
# discards flair information
return self._reddit.redditor(data["name"])
elif {"parent_id"}.issubset(data):
parser = self.parsers[self._reddit.config.kinds["comment"]]
elif "collection_id" in data.keys():
parser = self.parsers["Collection"]
elif {"moderators", "moderatorIds", "allUsersLoaded", "subredditId"}.issubset(
data
):
data = snake_case_keys(data)
moderators = []
for mod_id in data["moderator_ids"]:
mod = snake_case_keys(data["moderators"][mod_id])
mod["mod_permissions"] = list(mod["mod_permissions"].keys())
moderators.append(mod)
data["moderators"] = moderators
parser = self.parsers["moderator-list"]
elif "username" in data.keys():
data["name"] = data.pop("username")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
else:
if "user" in data:
parser = self.parsers[self._reddit.config.kinds["redditor"]]
data["user"] = parser.parse({"name": data["user"]}, self._reddit)
return data
return parser.parse(data, self._reddit)
def objectify(
self, data: Optional[Union[Dict[str, Any], List[Any]]]
) -> Optional[Union[RedditBase, Dict[str, Any], List[Any]]]:
"""Create RedditBase objects from data.
:param data: The structured data.
:returns: An instance of :class:`~.RedditBase`, or ``None`` if given ``data`` is
``None``.
"""
# pylint: disable=too-many-return-statements
if data is None: # 204 no content
return None
if isinstance(data, list):
return [self.objectify(item) for item in data]
if "json" in data and "errors" in data["json"]:
errors = data["json"]["errors"]
if len(errors) > 0:
raise RedditAPIException(errors)
if "kind" in data and (
"shortName" in data or data["kind"] in ("menu", "moderators")
):
# This is a widget
parser = self.parsers.get(data["kind"], self.parsers["widget"])
return parser.parse(data, self._reddit)
if {"kind", "data"}.issubset(data) and data["kind"] in self.parsers:
parser = self.parsers[data["kind"]]
return parser.parse(data["data"], self._reddit)
if "json" in data and "data" in data["json"]:
if "websocket_url" in data["json"]["data"]:
return data
if "things" in data["json"]["data"]: # Submission.reply
return self.objectify(data["json"]["data"]["things"])
if "rules" in data["json"]["data"]:
return self.objectify(loads(data["json"]["data"]["rules"]))
if "url" in data["json"]["data"]: # Subreddit.submit
# The URL is the URL to the submission, so it's removed.
del data["json"]["data"]["url"]
parser = self.parsers[self._reddit.config.kinds["submission"]]
if data["json"]["data"]["id"].startswith(
f"{self._reddit.config.kinds['submission']}_"
):
# With polls, Reddit returns a fullname but calls it an "id". This is
# fixed here by coercing the fullname into an id.
data["json"]["data"]["id"] = data["json"]["data"]["id"].split(
"_", 1
)[1]
else:
parser = self.parsers["LiveUpdateEvent"]
return parser.parse(data["json"]["data"], self._reddit)
if "rules" in data:
return self.objectify(data["rules"])
elif isinstance(data, dict):
return self._objectify_dict(data)
return data
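# Hedged usage sketch (names here are assumptions, not taken from this file): a
# decoded API payload shaped like {"kind": <kind>, "data": {...}} is dispatched to
# the parser registered for that kind, a bare list is objectified element-wise,
# and None (a 204 response) is passed through unchanged.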
|
import numpy as np
def FNS(scores):
domination = np.all(scores[:, None, :] <= scores[None, :, :], axis=2) # domination[i, j] = "i dominates j"
domination &= np.any(scores[:, None, :] < scores[None, :, :], axis=2)
Nx = domination.sum(0)
Pf = []
ranks = np.zeros(scores.shape[0])
r = 0
Q = np.nonzero(Nx == 0)[0]
while Q.size > 0:
Nx[Q] = -1
Pf.append(Q)
ranks[Q] = r
r += 1
for i in Q:
Nx[domination[i, :]] -= 1
Q = np.nonzero(Nx == 0)[0]
return Pf, ranks
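# Minimal illustration (assumed input): for scores = np.array([[1., 2.], [2., 1.], [3., 3.]])
# the first two points do not dominate each other while both dominate the third,
# so FNS returns the fronts [array([0, 1]), array([2])] and ranks [0., 0., 1.].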
def crowding_distance(scores):
indices = np.argsort(scores, 0)
sorted_scores = np.take_along_axis(scores, indices, 0)
cd = np.zeros(scores.shape[0])
for k in range(scores.shape[1]):
if sorted_scores[-1, k] != sorted_scores[0, k]:
cd[indices[[0, -1], k]] = np.inf
cd[indices[1:-1, k]] += (sorted_scores[2:, k] - sorted_scores[:-2, k]) / (
sorted_scores[-1, k] - sorted_scores[0, k])
return cd
def random_population(d, n, x_min, x_max):
return np.hstack([np.random.uniform(x_min, x_max, (n, d))])
def tournament_selection(ranks, dists, n):
candidates = np.random.choice(n, (n, 2), replace=True)
mask = np.where(
ranks[candidates[:, 0]] == ranks[candidates[:, 1]],
dists[candidates[:, 0]] > dists[candidates[:, 1]],
ranks[candidates[:, 0]] < ranks[candidates[:, 1]]
)
result = candidates[:, 1]
result[mask] = candidates[mask, 0]
return result
def crossover(x, p, eta): # simulated binary crossover
n, d = x.shape
l = n // 2
mask = np.random.random((l, d)) <= p
m = np.sum(mask)
mi = np.random.random(m)
beta = np.where(
mi < 0.5,
np.power(2 * mi, 1. / (eta + 1.)),
np.power(1. / (2. * (1 - mi)), 1. / (eta + 1.))
)
c1 = x[:l, :].copy()
c2 = x[l:, :].copy()
c1[mask] = 0.5 * (1 + beta) * x[:l, :][mask] + 0.5 * (1 - beta) * x[l:, :][mask]
c2[mask] = 0.5 * (1 + beta) * x[:l, :][mask] + 0.5 * (1 - beta) * x[l:, :][mask]
return np.vstack([c1, c2])
def mutation(x, x_min, x_max, p, eta): # polynomial mutation
n, d = x.shape
mask = np.random.random((n, d)) <= p
if isinstance(x_min, np.ndarray):
x_min = np.repeat(x_min[None, :], n, axis=0)
x_min = x_min[mask]
if isinstance(x_max, np.ndarray):
x_max = np.repeat(x_max[None, :], n, axis=0)
x_max = x_max[mask]
m = np.sum(mask)
mi = np.random.random(m)
beta = np.where(
mi < 0.5,
np.power(2 * mi, 1. / (eta + 1.)) - 1.,
1. - np.power(2. * (1 - mi), 1. / (eta + 1.))
)
y = x.copy()
y[mask] = np.where(
mi < 0.5,
x[mask] + beta * (x[mask] - x_min),
x[mask] + beta * (x_max - x[mask])
)
return y
def elitist_selection(fronts, dists, to_take):
taken = []
for front in fronts:
if len(front) <= to_take:
taken += list(front)
if len(front) == to_take:
break
to_take -= len(front)
else:
indices = np.argsort(-dists[front])[:to_take]
taken += list(front[indices])
break
return taken
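# The helper below ranks individuals per constraint by their position among the
# sorted unique violation values (a value of 0 keeps rank 0) and sums these ranks
# over the constraints. Illustrative sketch: a single constraint column
# [0., 0.5, 0.2, 0.5] yields violations [0., 2., 1., 2.].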
def constraint_violation(constraints):
n, d = constraints.shape
sort_indices = np.argsort(constraints, 0)
violations = np.zeros(n)
for i in range(d):
values, counts = np.unique(constraints[:, i], return_counts=True) # unique values are returned in sorted order
counts = np.cumsum(counts)
counts = list(counts)
if values[0] != 0:
counts = [0] + counts
for rank, (j, k) in enumerate(zip([0] + counts, counts + [len(counts)])):
violations[sort_indices[j:k, i]] += rank
return violations
def evaluation(objective, n_constraints, population):
obj_results = objective(population)
constraint_values = obj_results[:, -n_constraints:]
violation_measure = constraint_violation(constraint_values)
scores = np.concatenate([obj_results[:, :-n_constraints], violation_measure[:, None]], 1)
return scores
def split_and_select(population, scores, n_f, n_inf):
dists = crowding_distance(scores)
mask_f = scores[:, -1] == 0
population_f = population[mask_f, :]
scores_f = scores[mask_f, :]
dists_f = dists[mask_f]
population_inf = population[~mask_f, :]
scores_inf = scores[~mask_f, :]
dists_inf = dists[~mask_f]
s_f = population_f.shape[0]
s_inf = population_inf.shape[0]
n = n_f + n_inf
if s_f < n_f:
to_take_f = s_f
to_take_inf = n - s_f
elif s_inf < n_inf:
to_take_inf = s_inf
to_take_f = n - s_inf
else:
to_take_f = n_f
to_take_inf = n_inf
fronts_f, ranks_f = FNS(scores_f)
taken_f = elitist_selection(fronts_f, dists_f, to_take_f)
fronts_inf, ranks_inf = FNS(scores_inf)
taken_inf = elitist_selection(fronts_inf, dists_inf, to_take_inf)
return population_f[taken_f, :], population_inf[taken_inf, :], scores_f[taken_f, :], scores_inf[taken_inf, :]
def IDEA(objective, n_constraints, x_min, x_max, d, n, *args, **kwargs):
population = random_population(d, n, x_min, x_max)
return sub_IDEA(population, objective, n_constraints, x_min, x_max, n, *args, **kwargs)
def dynamic_IDEA(objective, n_constraints, T, x_min, x_max, d, n, alpha_inf,
*args, num_iterations_init, num_iterations, n_immigrants=0, **kwargs):
population = random_population(d, n, x_min, x_max)
print("=" * 80)
print("t=0")
print("=" * 80)
t = 0
def round_objective(round_population):
return objective(t, round_population)
p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf, *args,
num_iterations=num_iterations_init, **kwargs)
population_history = [p]
score_history = [s]
n_to_keep = n - n_immigrants
n_inf = int(n_to_keep * alpha_inf)
n_f = n_to_keep - n_inf
for t in range(1, T):
print("=" * 80)
print(f"t={t}")
print("=" * 80)
population = p[-1, :, :]
scores = s[-1, :, :]
if n_immigrants > 0:
population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf)
immigrants = random_population(d, n_immigrants, x_min, x_max)
population = np.vstack([population_f, population_inf, immigrants])
assert population.shape[0] == n
p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf, *args,
num_iterations=num_iterations, **kwargs)
population_history.append(p)
score_history.append(s)
return population_history, score_history
def sub_IDEA(population, objective, n_constraints, x_min, x_max, n, alpha_inf,
eta_c, eta_m, p_c, p_m, num_iterations, log_interval=10):
n_inf = int(n * alpha_inf)
n_f = n - n_inf
populations = []
scores = evaluation(objective, n_constraints, population)
scores_hist = []
fronts, ranks = FNS(scores)
dists = crowding_distance(scores)
def log_message():
count_f = population_f.shape[0]
count_inf = population_inf.shape[0]
print(
f"Iteration {iter_}, " +
f"#feasible: {count_f}, best: {scores_f[:, :-1].min(0) if count_f > 0 else '-'}, " +
f"#infeasible: {count_inf}, best: {scores_inf.min(0) if count_inf > 0 else '-'}"
)
for iter_ in range(num_iterations):
parent_indices = tournament_selection(ranks, dists, n)
offspring = crossover(population[parent_indices, :], p_c, eta_c)
offspring = np.clip(offspring, x_min, x_max)
offspring = mutation(offspring, x_min, x_max, p_m, eta_m)
offspring_scores = evaluation(objective, n_constraints, offspring)
population = np.vstack([population, offspring])
scores = np.vstack([scores, offspring_scores])
population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf)
population = np.vstack([population_f, population_inf])
scores = np.vstack([scores_f, scores_inf])
fronts, ranks = FNS(scores)
dists = crowding_distance(scores)
populations.append(population.copy())
scores_hist.append(scores.copy())
if iter_ % log_interval == 0:
log_message()
log_message()
return np.stack(populations, 0), np.stack(scores_hist, 0)
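if __name__ == "__main__":
    # Illustrative, self-contained sketch (not part of the original module): a toy
    # bi-objective problem with one constraint. The objective must return columns
    # [f1, f2, g], where the last n_constraints columns hold violation amounts
    # (0 means feasible); every parameter value below is an assumption.
    def toy_objective(x):
        f1 = x[:, 0] ** 2
        f2 = (x[:, 0] - 2.0) ** 2
        g = np.maximum(0.0, 1.0 - x[:, 0])  # violated when x < 1
        return np.stack([f1, f2, g], axis=1)

    pops, score_hist = IDEA(toy_objective, n_constraints=1, x_min=0.0, x_max=5.0,
                            d=1, n=20, alpha_inf=0.2, eta_c=15.0, eta_m=20.0,
                            p_c=0.9, p_m=0.1, num_iterations=30)
    print("final population shape:", pops[-1].shape)  # (20, 1)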
| import numpy as np
def FNS(scores):
domination = np.all(scores[:, None, :] <= scores[None, :, :], axis=2) # domination[i, j] = "i dominates j"
domination &= np.any(scores[:, None, :] < scores[None, :, :], axis=2)
Nx = domination.sum(0)
Pf = []
ranks = np.zeros(scores.shape[0])
r = 0
Q = np.nonzero(Nx == 0)[0]
while Q.size > 0:
Nx[Q] = -1
Pf.append(Q)
ranks[Q] = r
r += 1
for i in Q:
Nx[domination[i, :]] -= 1
Q = np.nonzero(Nx == 0)[0]
return Pf, ranks
def crowding_distance(scores):
indices = np.argsort(scores, 0)
sorted_scores = np.take_along_axis(scores, indices, 0)
cd = np.zeros(scores.shape[0])
for k in range(scores.shape[1]):
if sorted_scores[-1, k] != sorted_scores[0, k]:
cd[indices[[0, -1], k]] = np.inf
cd[indices[1:-1, k]] += (sorted_scores[2:, k] - sorted_scores[:-2, k]) / (
sorted_scores[-1, k] - sorted_scores[0, k])
return cd
def random_population(d, n, x_min, x_max):
return np.hstack([np.random.uniform(x_min, x_max, (n, d))])
def tournament_selection(ranks, dists, n):
candidates = np.random.choice(n, (n, 2), replace=True)
mask = np.where(
ranks[candidates[:, 0]] == ranks[candidates[:, 1]],
dists[candidates[:, 0]] > dists[candidates[:, 1]],
ranks[candidates[:, 0]] < ranks[candidates[:, 1]]
)
result = candidates[:, 1]
result[mask] = candidates[mask, 0]
return result
def crossover(x, p, eta): # simulated binary crossover
n, d = x.shape
l = n // 2
mask = np.random.random((l, d)) <= p
m = np.sum(mask)
mi = np.random.random(m)
beta = np.where(
mi < 0.5,
np.power(2 * mi, 1. / (eta + 1.)),
np.power(1. / (2. * (1 - mi)), 1. / (eta + 1.))
)
c1 = x[:l, :].copy()
c2 = x[l:, :].copy()
c1[mask] = 0.5 * (1 + beta) * x[:l, :][mask] + 0.5 * (1 - beta) * x[l:, :][mask]
c2[mask] = 0.5 * (1 + beta) * x[:l, :][mask] + 0.5 * (1 - beta) * x[l:, :][mask]
return np.vstack([c1, c2])
def mutation(x, x_min, x_max, p, eta): # polynomial mutation
n, d = x.shape
mask = np.random.random((n, d)) <= p
if isinstance(x_min, np.ndarray):
x_min = np.repeat(x_min[None, :], n, axis=0)
x_min = x_min[mask]
if isinstance(x_max, np.ndarray):
x_max = np.repeat(x_max[None, :], n, axis=0)
x_max = x_max[mask]
m = np.sum(mask)
mi = np.random.random(m)
beta = np.where(
mi < 0.5,
np.power(2 * mi, 1. / (eta + 1.)) - 1.,
1. - np.power(2. * (1 - mi), 1. / (eta + 1.))
)
y = x.copy()
y[mask] = np.where(
mi < 0.5,
x[mask] + beta * (x[mask] - x_min),
x[mask] + beta * (x_max - x[mask])
)
return y
def elitist_selection(fronts, dists, to_take):
taken = []
for front in fronts:
if len(front) <= to_take:
taken += list(front)
if len(front) == to_take:
break
to_take -= len(front)
else:
indices = np.argsort(-dists[front])[:to_take]
taken += list(front[indices])
break
return taken
def constraint_violation(constraints):
n, d = constraints.shape
sort_indices = np.argsort(constraints, 0)
violations = np.zeros(n)
for i in range(d):
values, counts = np.unique(constraints[:, i], return_counts=True) # unique values are returned in sorted order
counts = np.cumsum(counts)
counts = list(counts)
if values[0] != 0:
counts = [0] + counts
for rank, (j, k) in enumerate(zip([0] + counts, counts + [len(counts)])):
violations[sort_indices[j:k, i]] += rank
return violations
def evaluation(objective, n_constraints, population):
obj_results = objective(population)
constraint_values = obj_results[:, -n_constraints:]
violation_measure = constraint_violation(constraint_values)
scores = np.concatenate([obj_results[:, :-n_constraints], violation_measure[:, None]], 1)
return scores
def split_and_select(population, scores, n_f, n_inf):
dists = crowding_distance(scores)
mask_f = scores[:, -1] == 0
population_f = population[mask_f, :]
scores_f = scores[mask_f, :]
dists_f = dists[mask_f]
population_inf = population[~mask_f, :]
scores_inf = scores[~mask_f, :]
dists_inf = dists[~mask_f]
s_f = population_f.shape[0]
s_inf = population_inf.shape[0]
n = n_f + n_inf
if s_f < n_f:
to_take_f = s_f
to_take_inf = n - s_f
elif s_inf < n_inf:
to_take_inf = s_inf
to_take_f = n - s_inf
else:
to_take_f = n_f
to_take_inf = n_inf
fronts_f, ranks_f = FNS(scores_f)
taken_f = elitist_selection(fronts_f, dists_f, to_take_f)
fronts_inf, ranks_inf = FNS(scores_inf)
taken_inf = elitist_selection(fronts_inf, dists_inf, to_take_inf)
return population_f[taken_f, :], population_inf[taken_inf, :], scores_f[taken_f, :], scores_inf[taken_inf, :]
def IDEA(objective, n_constraints, x_min, x_max, d, n, *args, **kwargs):
population = random_population(d, n, x_min, x_max)
return sub_IDEA(population, objective, n_constraints, x_min, x_max, n, *args, **kwargs)
def dynamic_IDEA(objective, n_constraints, T, x_min, x_max, d, n, alpha_inf,
*args, num_iterations_init, num_iterations, n_immigrants=0, **kwargs):
population = random_population(d, n, x_min, x_max)
print("=" * 80)
print("t=0")
print("=" * 80)
t = 0
def round_objective(round_population):
return objective(t, round_population)
p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf, *args,
num_iterations=num_iterations_init, **kwargs)
population_history = [p]
score_history = [s]
n_to_keep = n - n_immigrants
n_inf = int(n_to_keep * alpha_inf)
n_f = n_to_keep - n_inf
for t in range(1, T):
print("=" * 80)
print(f"t={t}")
print("=" * 80)
population = p[-1, :, :]
scores = s[-1, :, :]
if n_immigrants > 0:
population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf)
immigrants = random_population(d, n_immigrants, x_min, x_max)
population = np.vstack([population_f, population_inf, immigrants])
assert population.shape[0] == n
p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf, *args,
num_iterations=num_iterations, **kwargs)
population_history.append(p)
score_history.append(s)
return population_history, score_history
def sub_IDEA(population, objective, n_constraints, x_min, x_max, n, alpha_inf,
eta_c, eta_m, p_c, p_m, num_iterations, log_interval=10):
n_inf = int(n * alpha_inf)
n_f = n - n_inf
populations = []
scores = evaluation(objective, n_constraints, population)
scores_hist = []
fronts, ranks = FNS(scores)
dists = crowding_distance(scores)
def log_message():
count_f = population_f.shape[0]
count_inf = population_inf.shape[0]
print(
f"Iteration {iter_}, " +
f"#feasible: {count_f}, best: {scores_f[:, :-1].min(0) if count_f > 0 else '-'}, " +
f"#infeasible: {count_inf}, best: {scores_inf.min(0) if count_inf > 0 else '-'}"
)
for iter_ in range(num_iterations):
parent_indices = tournament_selection(ranks, dists, n)
offspring = crossover(population[parent_indices, :], p_c, eta_c)
offspring = np.clip(offspring, x_min, x_max)
offspring = mutation(offspring, x_min, x_max, p_m, eta_m)
offspring_scores = evaluation(objective, n_constraints, offspring)
population = np.vstack([population, offspring])
scores = np.vstack([scores, offspring_scores])
population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf)
population = np.vstack([population_f, population_inf])
scores = np.vstack([scores_f, scores_inf])
fronts, ranks = FNS(scores)
dists = crowding_distance(scores)
populations.append(population.copy())
scores_hist.append(scores.copy())
if iter_ % log_interval == 0:
log_message()
log_message()
return np.stack(populations, 0), np.stack(scores_hist, 0)
|
from loguru import logger
from flask import request
from flasgger import swag_from
from flask_restful import Resource
from jwt.exceptions import ExpiredSignatureError
from ada_friend_app.modulo.cripto import Sha256
from ada_friend_app.modulo.jwt_auth import Token
from ada_friend_app.api.resposta_api import Resposta
from ada_friend_app.servico.mod_database import Database
class Login(Resource):
@swag_from('../../docs/api/login_post.yml')
def post(self):
json = request.json
if json.get('email', False) and json.get('senha', False):
senha = Sha256(json['senha']).hash
usuario = Database().get_document('usuarios', {'_id': json['email'], 'senha': senha})
if usuario:
usuario = usuario[0]
logger.debug(f"{json['email']} - CONECTADO")
try:
token = Token.gerar(usuario['senha'], usuario['_id'])
return Resposta.token_validado(token)
except ExpiredSignatureError:
return Resposta.nao_aceito('Token expirado')
except Exception as e:
return Resposta.error(str(e))
else:
logger.debug(f"{json['email']} - ERRO DE ACESSO")
return Resposta.nao_aceito('Usuário ou senha inválido!')
else:
return Resposta.error('JSON Inválido!')
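# Hedged usage sketch (the api object and route are assumptions, not from this
# file): the resource expects a JSON body carrying "email" and "senha", e.g.
#   api.add_resource(Login, "/login")
#   # POST /login  with body {"email": "user@example.com", "senha": "secret"}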
| from loguru import logger
from flask import request
from flasgger import swag_from
from flask_restful import Resource
from jwt.exceptions import ExpiredSignatureError
from ada_friend_app.modulo.cripto import Sha256
from ada_friend_app.modulo.jwt_auth import Token
from ada_friend_app.api.resposta_api import Resposta
from ada_friend_app.servico.mod_database import Database
class Login(Resource):
@swag_from('../../docs/api/login_post.yml')
def post(self):
json = request.json
if json.get('email', False) and json.get('senha', False):
senha = Sha256(json['senha']).hash
usuario = Database().get_document('usuarios', {'_id': json['email'], 'senha': senha})
if usuario:
usuario = usuario[0]
logger.debug(f"{json['email']} - CONECTADO")
try:
token = Token.gerar(usuario['senha'], usuario['_id'])
return Resposta.token_validado(token)
except ExpiredSignatureError:
return Resposta.nao_aceito('Token expirado')
except Exception as e:
return Resposta.error(str(e))
else:
logger.debug(f"{json['email']} - ERRO DE ACESSO")
return Resposta.nao_aceito('Usuário ou senha inválido!')
else:
return Resposta.error('JSON Inválido!')
|
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
"""Queries the pytorch op registry and generates ODS and CC sources for the ops.
"""
from typing import List, Optional, TextIO
import argparse
import logging
import os
import sys
from .utils import TextEmitter
from .registry import Registry, JitOperator
# Mapping from torch types to their corresponding ODS type predicates.
# Use `get_ods_type` instead of using this directly.
TORCH_TYPE_TO_ODS_TYPE = {
"Tensor": "AnyTorchTensorType",
"Tensor?": "AnyTorchOptionalTensorType",
"Tensor?[]": "AnyTorchListOfOptionalTensorType",
"Tensor[]": "AnyTorchListOfTensorType",
"Scalar": "AnyTorchScalarType",
"Scalar?": "AnyTorchOptionalScalarType",
"int": "Torch_IntType",
"int[]": "AnyTorchListOfTorchIntType",
"int?": "AnyTorchOptionalIntType",
"int[]?": "AnyTorchOptionalListOfTorchIntType",
"bool": "Torch_BoolType",
"bool[]": "AnyTorchListOfTorchBoolType",
"bool?": "AnyTorchOptionalBoolType",
"float": "Torch_FloatType",
"float?": "AnyTorchOptionalFloatType",
"t[]": "AnyTorchListType",
"t": "AnyTorchType",
"t1": "AnyTorchType",
"t2": "AnyTorchType",
"Any": "AnyTorchType",
"Device": "Torch_DeviceType",
"Device?": "AnyTorchOptionalDeviceType",
"Generator": "Torch_GeneratorType",
"Generator?": "AnyTorchOptionalGeneratorType",
"str": "Torch_StringType",
"str?": "AnyTorchOptionalStringType",
"str[]": "AnyTorchListOfTorchStringType",
"Dict": "Torch_DictType",
"__torch__.torch.classes.quantized.LinearPackedParamsBase": "Torch_LinearParamsType",
}
def get_ods_type(type: str):
# TODO: Increase precision on dict type modeling.
if type.startswith("Dict("):
type = "Dict"
ods_type = TORCH_TYPE_TO_ODS_TYPE.get(type)
if ods_type is None:
raise Exception(
f"{type!r} not in TORCH_TYPE_TO_ODS_TYPE mapping. Please add it!")
return ods_type
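# For example, get_ods_type("Tensor?") returns "AnyTorchOptionalTensorType", and
# any type string starting with "Dict(" is collapsed to "Torch_DictType".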
def _get_main_module_name() -> str:
# pytype: disable=attribute-error
return sys.modules["__main__"].__loader__.name
# pytype: enable=attribute-error
ODS_BANNER = f"""//===-------------------------------------------------------*- tablegen -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
// Operation summaries and descriptions were systematically derived from public
// API docstrings and are licensed accordingly:
// https://github.com/pytorch/pytorch/blob/master/LICENSE
//===----------------------------------------------------------------------===//
//
// This file is automatically generated. Please do not edit.
// Generated via:
// ```
// python -m {_get_main_module_name()}
// ```
//
//===----------------------------------------------------------------------===//
"""
def raw_emit_op(operator: JitOperator,
emitter_td: TextEmitter,
*, traits: List[str],
has_folder: bool, has_canonicalizer: bool):
"""Emit the ODS for a JitOperator to a textual file.
This is the lowest level of emission and is responsible for low-level
textual emission details. This function should not have any "smarts"
for deducing traits/etc.
You probably don't want to call this directly.
"""
p_td = lambda *args: emitter_td.print(*args)
op_name, cpp_class_name = operator.get_mlir_names()
# Generate unique result names for ops with nameless results
multiple_results = len(operator.returns) > 1
def generic_result_name(i):
return "result" + (str(i) if multiple_results else "")
p_td(
f"def Torch_{cpp_class_name} : Torch_Op<{emitter_td.quote(op_name)}, [")
with emitter_td.indent():
with emitter_td.indent():
p_td(",\n".join(traits))
p_td("]> {")
with emitter_td.indent():
summary = f"Generated op for `{operator.unique_key}`"
p_td(f"let summary = {emitter_td.quote(summary)};")
p_td(f"let arguments = (ins")
with emitter_td.indent():
if operator.is_vararg:
p_td("Variadic<AnyTorchType>:$operands")
else:
p_td(",\n".join([
f"""{get_ods_type(arg["type"])}:${arg["name"]}"""
for arg in operator.arguments
]))
p_td(");")
p_td(f"let results = (outs")
with emitter_td.indent():
if operator.is_varret:
p_td("Variadic<AnyTorchType>:$results")
else:
p_td(",\n".join([
f"""{get_ods_type(ret["type"])}:${ret["name"] or generic_result_name(e)}"""
for e, ret in enumerate(operator.returns)
]))
p_td(");")
if operator.is_vararg or operator.is_varret:
if operator.is_vararg:
assembly_operands = "`(` $operands `)`"
assembly_operand_types = "qualified(type($operands))"
else:
assembly_operands = " `,` ".join("$" + arg["name"]
for arg in operator.arguments)
assembly_operand_types = " `,` ".join(
f"""qualified(type(${arg["name"]}))""" for arg in operator.arguments)
if operator.is_varret:
assembly_result_types = "qualified(type($results))"
else:
assembly_result_types = " `,` ".join(
f"""qualified(type(${ret["name"] or generic_result_name(e)}))"""
for e, ret in enumerate(operator.returns))
if assembly_operand_types and assembly_result_types:
maybe_arrow = " `->` "
else:
maybe_arrow = ""
assembly_format = f"{assembly_operands} attr-dict `:` {assembly_operand_types}{maybe_arrow}{assembly_result_types}"
p_td(f"let assemblyFormat = {emitter_td.quote(assembly_format)};")
else:
p_td(f"let hasCustomAssemblyFormat = 1;")
p_td(f"""let extraClassDefinition = [{{
ParseResult {cpp_class_name}::parse(OpAsmParser &parser, OperationState &result) {{
return parseDefaultTorchOp(parser, result, {len(operator.arguments)}, {len(operator.returns)});
}}
void {cpp_class_name}::print(OpAsmPrinter &printer) {{
printDefaultTorchOp(printer, *this, {len(operator.arguments)}, {len(operator.returns)});
}}
}}];
""")
if has_folder:
p_td("let hasFolder = 1;")
if has_canonicalizer:
p_td("let hasCanonicalizer = 1;")
p_td("}")
p_td("\n")
def emit_op(operator: JitOperator,
emitter_td: TextEmitter,
*,
traits: Optional[List[str]] = None,
has_folder: bool = False,
has_canonicalizer: bool = False):
"""Main entry point for op emission.
Besides emitting the op, it deduces / adds traits based on the operator
information.
"""
if traits is None:
traits = []
# All Torch operators allow type refinement.
traits += ["AllowsTypeRefinement"]
if operator.has_value_semantics():
traits += ["HasValueSemantics"]
if operator.is_readonly():
traits += ["ReadOnly"]
raw_emit_op(operator,
emitter_td,
traits=traits,
has_folder=has_folder,
has_canonicalizer=has_canonicalizer)
def emit_ops(emitter_td: TextEmitter, registry: Registry):
def emit(key, **kwargs):
emit_op(registry[key], emitter_td, **kwargs)
def emit_with_mutating_variants(key, **kwargs):
operator = registry[key]
emit_op(operator, emitter_td, **kwargs)
ns, unqual, overload = operator.triple
emit_op(registry.get_by_triple((ns, unqual + "_", overload)),
emitter_td,
traits=["IsTrailingUnderscoreInplaceVariant"])
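# Roughly: emit_with_mutating_variants("aten::tanh : (Tensor) -> (Tensor)") emits
# the functional op and additionally the "aten::tanh_" overload tagged with the
# IsTrailingUnderscoreInplaceVariant trait.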
# ==========================================================================
# `aten::` namespace.
# ==========================================================================
# Elementwise tensor compute ops
for key in [
"aten::tanh : (Tensor) -> (Tensor)",
"aten::hardtanh : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::relu : (Tensor) -> (Tensor)",
"aten::leaky_relu : (Tensor, Scalar) -> (Tensor)",
"aten::log : (Tensor) -> (Tensor)",
"aten::sigmoid : (Tensor) -> (Tensor)",
"aten::hardsigmoid : (Tensor) -> (Tensor)",
"aten::hardswish : (Tensor) -> (Tensor)",
"aten::erf : (Tensor) -> (Tensor)",
"aten::silu : (Tensor) -> (Tensor)",
"aten::sin : (Tensor) -> (Tensor)",
"aten::exp : (Tensor) -> (Tensor)",
"aten::cos : (Tensor) -> (Tensor)",
"aten::neg : (Tensor) -> (Tensor)",
"aten::floor : (Tensor) -> (Tensor)",
"aten::ceil : (Tensor) -> (Tensor)",
"aten::bitwise_not : (Tensor) -> (Tensor)",
"aten::add.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)",
"aten::sub.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)",
"aten::mul.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::div.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::lerp.Tensor : (Tensor, Tensor, Tensor) -> (Tensor)",
"aten::eq.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::gt.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::lt.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::ne.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::add.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::sub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::mul.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::div.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::ne.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::eq.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::gt.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::ge.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::lt.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::le.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::fmod.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::masked_fill.Scalar : (Tensor, Tensor, Scalar) -> (Tensor)",
"aten::clamp : (Tensor, Scalar?, Scalar?) -> (Tensor)",
"aten::log2 : (Tensor) -> (Tensor)",
"aten::rsqrt : (Tensor) -> (Tensor)",
"aten::abs : (Tensor) -> (Tensor)",
"aten::reciprocal : (Tensor) -> (Tensor)",
"aten::bitwise_and.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::threshold : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::square : (Tensor) -> (Tensor)",
]:
emit_with_mutating_variants(key)
# Elementwise tensor compute ops that don't have the standard mutating
# variants.
emit("aten::addcmul : (Tensor, Tensor, Tensor, Scalar) -> (Tensor)")
emit("aten::addcdiv : (Tensor, Tensor, Tensor, Scalar) -> (Tensor)")
emit("aten::maximum : (Tensor, Tensor) -> (Tensor)")
emit("aten::minimum : (Tensor, Tensor) -> (Tensor)")
emit("aten::rsub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)")
emit("aten::gelu : (Tensor, str) -> (Tensor)")
emit("aten::pow.Tensor_Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::threshold_backward : (Tensor, Tensor, Scalar) -> (Tensor)")
# Ops without value semantics for which the corresponding variant without the
# trailing underscore doesn't exist.
emit("aten::fill_.Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::uniform_ : (Tensor, float, float, Generator?) -> (Tensor)")
emit("aten::rand_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::bernoulli : (Tensor, Generator?) -> (Tensor)")
emit("aten::bernoulli_.float : (Tensor, float, Generator?) -> (Tensor)")
emit("aten::bernoulli_.Tensor : (Tensor, Tensor, Generator?) -> (Tensor)")
emit_with_mutating_variants("aten::triu : (Tensor, int) -> (Tensor)")
emit_with_mutating_variants(
"aten::index_put : (Tensor, Tensor?[], Tensor, bool) -> (Tensor)")
emit_with_mutating_variants(
"aten::index_put.hacked_twin : (Tensor, Tensor[], Tensor, bool) -> (Tensor)")
# Non-elementwise tensor compute ops
emit("aten::linear : (Tensor, Tensor, Tensor?) -> (Tensor)")
emit("aten::mm : (Tensor, Tensor) -> (Tensor)")
emit("aten::addmm : (Tensor, Tensor, Tensor, Scalar, Scalar) -> (Tensor)")
emit("aten::matmul : (Tensor, Tensor) -> (Tensor)")
emit(
"aten::conv2d : (Tensor, Tensor, Tensor?, int[], int[], int[], int) -> (Tensor)"
)
emit("aten::convolution : (Tensor, Tensor, Tensor?, int[], int[], int[], bool, int[], int) -> (Tensor)")
emit("aten::convolution_overrideable : (Tensor, Tensor, Tensor?, int[], int[], int[], bool, int[], int) -> (Tensor)")
emit("aten::flip : (Tensor, int[]) -> (Tensor)")
emit(
"aten::native_batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float) -> (Tensor, Tensor, Tensor)"
)
emit(
"aten::batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float, bool) -> (Tensor)"
)
emit(
"aten::layer_norm : (Tensor, int[], Tensor?, Tensor?, float, bool) -> (Tensor)"
)
emit(
"aten::native_layer_norm : (Tensor, int[], Tensor?, Tensor?, float) -> (Tensor, Tensor, Tensor)"
)
emit(
"aten::max_pool2d : (Tensor, int[], int[], int[], int[], bool) -> (Tensor)"
)
emit(
"aten::max_pool2d_with_indices : (Tensor, int[], int[], int[], int[], bool) -> (Tensor, Tensor)"
)
emit(
"aten::max_pool2d_with_indices_backward : (Tensor, Tensor, int[], int[], int[], int[], bool, Tensor) -> (Tensor)"
)
emit(
"aten::avg_pool2d : (Tensor, int[], int[], int[], bool, bool, int?) -> (Tensor)"
)
emit(
"aten::softmax.int : (Tensor, int, int?) -> (Tensor)"
)
emit(
"aten::log_softmax.int : (Tensor, int, int?) -> (Tensor)"
)
emit(
"aten::_log_softmax : (Tensor, int, bool) -> (Tensor)"
)
emit("aten::adaptive_avg_pool2d : (Tensor, int[]) -> (Tensor)")
emit("aten::topk : (Tensor, int, int, bool, bool) -> (Tensor, Tensor)")
emit("aten::transpose.int : (Tensor, int, int) -> (Tensor)")
emit("aten::permute : (Tensor, int[]) -> (Tensor)")
emit("aten::bmm : (Tensor, Tensor) -> (Tensor)")
emit("aten::cumsum : (Tensor, int, int?) -> (Tensor)")
emit("aten::floor_divide.Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::logsumexp : (Tensor, int[], bool) -> (Tensor)")
emit("aten::mean.dim : (Tensor, int[], bool, int?) -> (Tensor)")
emit("aten::__and__.Tensor : (Tensor, Tensor) -> (Tensor)")
emit("aten::sqrt : (Tensor) -> (Tensor)")
emit("aten::_softmax : (Tensor, int, bool) -> (Tensor)")
emit("aten::mean : (Tensor, int?) -> (Tensor)")
emit("aten::std : (Tensor, bool) -> (Tensor)")
emit("aten::var : (Tensor, bool) -> (Tensor)")
emit("aten::nll_loss_forward : (Tensor, Tensor, Tensor?, int, int) -> (Tensor, Tensor)")
emit("aten::nll_loss_backward : (Tensor, Tensor, Tensor, Tensor?, int, int, Tensor) -> (Tensor)")
emit("aten::bincount : (Tensor, Tensor?, int) -> (Tensor)")
# Misc tensor ops.
emit("aten::constant_pad_nd : (Tensor, int[], Scalar) -> (Tensor)")
emit("aten::pad : (Tensor, int[], str, float?) -> (Tensor)")
emit("aten::squeeze.dim : (Tensor, int) -> (Tensor)", has_folder=True)
emit("aten::unsqueeze : (Tensor, int) -> (Tensor)")
emit("aten::squeeze : (Tensor) -> (Tensor)", has_folder=True)
emit("aten::flatten.using_ints : (Tensor, int, int) -> (Tensor)")
emit("aten::dim : (Tensor) -> (int)", has_folder=True)
emit("aten::size : (Tensor) -> (int[])", has_canonicalizer=True)
emit("aten::Bool.Tensor : (Tensor) -> (bool)")
emit("aten::ones : (int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::new_ones : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::zeros : (int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::zero_ : (Tensor) -> (Tensor)")
emit("aten::new_zeros : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::tensor : (t[], int?, Device?, bool) -> (Tensor)")
emit("aten::tensor.bool : (bool, int?, Device?, bool) -> (Tensor)")
emit("aten::tensor.int : (int, int?, Device?, bool) -> (Tensor)")
emit("aten::_shape_as_tensor : (Tensor) -> (Tensor)")
emit("aten::all : (Tensor) -> (Tensor)")
emit("aten::any : (Tensor) -> (Tensor)")
emit("aten::any.dim : (Tensor, int, bool) -> (Tensor)")
emit("aten::arange : (Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::arange.start : (Scalar, Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::arange.start_step : (Scalar, Scalar, Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::argmax : (Tensor, int?, bool) -> (Tensor)")
emit("aten::bucketize.Tensor : (Tensor, Tensor, bool, bool) -> (Tensor)")
emit("aten::clone : (Tensor, int?) -> (Tensor)")
emit("aten::contiguous : (Tensor, int) -> (Tensor)")
emit("aten::copy_ : (Tensor, Tensor, bool) -> (Tensor)")
emit("aten::_to_copy : (Tensor, int?, int?, Device?, bool?, bool, int?) -> (Tensor)")
emit("aten::detach : (Tensor) -> (Tensor)")
emit("aten::embedding : (Tensor, Tensor, int, bool, bool) -> (Tensor)")
emit("aten::empty_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::new_empty : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::zeros_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::ones_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::empty.memory_format : (int[], int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::expand : (Tensor, int[], bool) -> (Tensor)")
emit("aten::expand_as : (Tensor, Tensor) -> (Tensor)")
emit("aten::broadcast_to : (Tensor, int[]) -> (Tensor)")
emit("aten::index.Tensor : (Tensor, Tensor?[]) -> (Tensor)")
emit("aten::index_select : (Tensor, int, Tensor) -> (Tensor)")
emit("aten::_index_put_impl_ : (Tensor, Tensor?[], Tensor, bool, bool) -> (Tensor)")
emit("aten::item : (Tensor) -> (Scalar)")
emit("aten::masked_select : (Tensor, Tensor) -> (Tensor)")
emit("aten::numel : (Tensor) -> (int)")
emit("aten::repeat : (Tensor, int[]) -> (Tensor)")
emit("aten::reshape : (Tensor, int[]) -> (Tensor)")
emit("aten::_reshape_alias : (Tensor, int[], int[]) -> (Tensor)")
emit("aten::resize_ : (Tensor, int[], int?) -> (Tensor)")
emit("aten::select.int : (Tensor, int, int) -> (Tensor)")
emit("aten::size.int : (Tensor, int) -> (int)", has_folder=True)
emit("aten::stack : (Tensor[], int) -> (Tensor)")
emit("aten::sum : (Tensor, int?) -> (Tensor)")
emit("aten::sum.dim_IntList : (Tensor, int[], bool, int?) -> (Tensor)")
emit("aten::max : (Tensor) -> (Tensor)")
emit("aten::max.dim : (Tensor, int, bool) -> (Tensor, Tensor)")
emit("aten::to.dtype : (Tensor, int, bool, bool, int?) -> (Tensor)", has_folder=True)
emit("aten::to.dtype_layout : (Tensor, int?, int?, Device?, bool?, bool, bool, int?) -> (Tensor)", has_folder=True)
emit("aten::to.other : (Tensor, Tensor, bool, bool, int?) -> (Tensor)")
emit("aten::to.prim_Device : (Tensor, Device?, int?, bool, bool) -> (Tensor)")
emit("aten::type_as : (Tensor, Tensor) -> (Tensor)")
emit("aten::view : (Tensor, int[]) -> (Tensor)", has_folder=True)
emit("aten::_unsafe_view : (Tensor, int[]) -> (Tensor)")
emit("aten::where.self : (Tensor, Tensor, Tensor) -> (Tensor)")
emit("aten::where.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)")
emit("aten::where.ScalarOther : (Tensor, Tensor, Scalar) -> (Tensor)")
emit("aten::where.ScalarSelf : (Tensor, Scalar, Tensor) -> (Tensor)")
emit("aten::slice.Tensor : (Tensor, int, int?, int?, int) -> (Tensor)")
emit("aten::len.Tensor : (Tensor) -> (int)")
emit("aten::cpu : (Tensor) -> (Tensor)")
emit("aten::gather : (Tensor, int, Tensor, bool) -> (Tensor)")
emit("aten::IntImplicit : (Tensor) -> (int)")
emit("aten::tensor.float : (float, int?, Device?, bool) -> (Tensor)")
emit("aten::Int.Tensor : (Tensor) -> (int)", has_folder=True)
emit("aten::Float.Tensor : (Tensor) -> (float)", has_folder=True)
emit_with_mutating_variants("aten::dropout : (Tensor, float, bool) -> (Tensor)")
emit("aten::t : (Tensor) -> (Tensor)")
emit("aten::full : (int[], Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::full_like : (Tensor, Scalar, int?, int?, Device?, bool?, int?) -> (Tensor)")
# Dict ops.
emit("aten::__contains__.str : (Dict(str, t), str) -> (bool)", has_folder=True)
emit("aten::__getitem__.Dict_str : (Dict(str, t), str) -> (t)", has_folder=True)
emit("aten::_set_item.str : (Dict(str, t), str, t) -> ()")
emit("aten::keys.str : (Dict(str, t)) -> (str[])")
emit("aten::get.default_str : (Dict(str, t), str, t) -> (t)")
emit("aten::Delete.Dict_str : (Dict(str, t), str) -> ()")
# List ops.
emit("aten::cat : (Tensor[], int) -> (Tensor)")
emit("aten::append.t : (t[], t) -> (t[])")
emit("aten::add.t : (t[], t[]) -> (t[])")
emit("aten::eq.int_list : (int[], int[]) -> (bool)", has_folder=True)
emit("aten::list.t : (t[]) -> (t[])")
emit("aten::slice.t : (t[], int?, int?, int) -> (t[])")
emit("aten::insert.t : (t[], int, t) -> ()")
emit("aten::ne.int_list : (int[], int[]) -> (bool)")
# Str ops.
emit("aten::add.str : (str, str) -> (str)")
emit("aten::eq.str : (str, str) -> (bool)", has_folder=True)
emit("aten::str : (t) -> (str)")
emit("aten::format : (...) -> (str)")
emit("aten::join : (str, str[]) -> (str)")
# Type conversion ops.
emit("aten::Float.Scalar : (Scalar) -> (float)", has_folder=True)
emit("aten::Float.str : (str) -> (float)")
emit("aten::Int.float : (float) -> (int)")
# Primitive ops
emit("aten::__range_length : (int, int, int) -> (int)", has_folder=True)
emit("aten::__derive_index : (int, int, int) -> (int)", has_folder=True)
emit("aten::gt.int : (int, int) -> (bool)", has_folder=True)
emit("aten::ge.int : (int, int) -> (bool)", has_folder=True)
emit("aten::lt.int : (int, int) -> (bool)", has_folder=True)
emit("aten::le.int : (int, int) -> (bool)", has_folder=True)
emit("aten::ne.int : (int, int) -> (bool)", has_folder=True)
emit("aten::eq.int : (int, int) -> (bool)", has_folder=True)
emit("aten::floordiv.int : (int, int) -> (int)", has_folder=True)
emit("aten::remainder.int : (int, int) -> (int)", has_folder=True)
emit("aten::add.int : (int, int) -> (int)", has_folder=True)
emit("aten::sub.int : (int, int) -> (int)", has_folder=True)
emit("aten::mul.int : (int, int) -> (int)", has_folder=True)
emit("aten::neg.int : (int) -> (int)", has_folder=True)
emit("aten::log.int : (int) -> (float)")
emit("aten::add.float_int : (float, int) -> (float)")
emit("aten::sub.float : (float, float) -> (float)")
emit("aten::mul.float : (float, float) -> (float)")
emit("aten::div.float : (float, float) -> (float)", has_folder=True)
emit("aten::neg.float : (float) -> (float)")
emit("aten::eq.float : (float, float) -> (bool)", has_folder=True)
emit("aten::gt.float : (float, float) -> (bool)", has_folder=True)
emit("aten::ge.float : (float, float) -> (bool)", has_folder=True)
emit("aten::lt.float : (float, float) -> (bool)", has_folder=True)
emit("aten::lt.float_int : (float, int) -> (bool)")
emit("aten::ge.float_int : (float, int) -> (bool)")
emit("aten::ne.float_int : (float, int) -> (bool)")
emit("aten::gt.float_int : (float, int) -> (bool)")
emit("aten::__and__.bool : (bool, bool) -> (bool)")
emit("aten::ne.bool : (bool, bool) -> (bool)", has_folder=True)
emit("aten::__is__ : (t1, t2) -> (bool)", has_folder=True)
emit("aten::__isnot__ : (t1, t2) -> (bool)", has_folder=True)
emit("aten::__not__ : (bool) -> (bool)", has_folder=True)
emit("aten::len.t : (t[]) -> (int)",
has_folder=True,
has_canonicalizer=True)
emit("aten::__getitem__.t : (t[], int) -> (t)", has_canonicalizer=True)
emit("aten::_set_item.t : (t[], int, t) -> (t[])")
emit("aten::div : (Scalar, Scalar) -> (float)")
emit("aten::add : (Scalar, Scalar) -> (Scalar)")
emit("aten::eq.device : (Device, Device) -> (bool)")
emit("aten::ceil.float : (float) -> (int)", has_folder=True)
# backprop ops
emit("aten::_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)")
emit("aten::tanh_backward : (Tensor, Tensor) -> (Tensor)")
emit("aten::gelu_backward : (Tensor, Tensor, str) -> (Tensor)")
emit("aten::_log_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)")
# ==========================================================================
# `prim::` namespace.
# ==========================================================================
emit("prim::layout : (Tensor) -> (int)")
emit("prim::TupleIndex : (Any, int) -> (Any)", has_canonicalizer=True)
emit("prim::device : (Tensor) -> (Device)")
emit("prim::dtype : (Tensor) -> (int)", has_folder=True)
emit("prim::TupleUnpack : (Any) -> (...)", has_canonicalizer=True)
emit("prim::NumToTensor.Scalar : (Scalar) -> (Tensor)")
emit("prim::min.self_int : (int[]) -> (int)", has_folder=True)
emit("prim::min.int : (int, int) -> (int)")
emit("prim::max.self_int : (int[]) -> (int)")
emit("prim::max.int : (int, int) -> (int)", has_folder=True)
emit("prim::RaiseException : (str, str?) -> ()")
emit("prim::Uninitialized : () -> (Any)",
has_canonicalizer=True, traits=["NoSideEffect"])
emit("prim::unchecked_cast : (t) -> (t)", has_folder=True,
traits=["DeclareOpInterfaceMethods<CastOpInterface>"])
emit("prim::Print : (...) -> ()")
emit("prim::tolist : (...) -> (...)")
emit("prim::abs.Scalar : (Scalar) -> (Scalar)")
# ==========================================================================
# `quantized::` namespace.
# ==========================================================================
emit(
"quantized::linear : (Tensor, __torch__.torch.classes.quantized.LinearPackedParamsBase, float, int) -> (Tensor)",
traits=["HasValueSemantics"])
def dump_registered_ops(outfile: TextIO, registry: Registry):
for _, v in sorted(registry.by_unique_key.items()):
outfile.write(repr(v))
def main(args: argparse.Namespace):
registry = Registry.load()
if args.debug_registry_dump:
with open(args.debug_registry_dump, "w") as debug_registry_dump:
dump_registered_ops(debug_registry_dump, registry)
td_path = os.path.join(args.torch_ir_include_dir, "GeneratedTorchOps.td")
with open(td_path, "w") as f_td:
emitter_td = TextEmitter(f_td)
emitter_td.print(ODS_BANNER)
emit_ops(emitter_td, registry)
def _create_argparse() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(prog="generate_ods")
parser.add_argument(
"--torch_ir_include_dir",
required=True,
help="Directory in include/ containing the Torch dialect")
parser.add_argument(
"--debug_registry_dump",
help="File to dump the PyTorch JIT operator registry into")
return parser
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
parser = _create_argparse()
args = parser.parse_args()
main(args)
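# Hedged invocation sketch (the module path depends on the package layout and is
# not specified here):
#   python -m <package.path.to.this_module> \
#       --torch_ir_include_dir <directory in include/ containing the Torch dialect> \
#       --debug_registry_dump registry_dump.txt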
| # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
"""Queries the pytorch op registry and generates ODS and CC sources for the ops.
"""
from typing import List, Optional, TextIO
import argparse
import logging
import os
import sys
from .utils import TextEmitter
from .registry import Registry, JitOperator
# Mapping from torch types to their corresponding ODS type predicates.
# Use `get_ods_type` instead of using this directly.
TORCH_TYPE_TO_ODS_TYPE = {
"Tensor": "AnyTorchTensorType",
"Tensor?": "AnyTorchOptionalTensorType",
"Tensor?[]": "AnyTorchListOfOptionalTensorType",
"Tensor[]": "AnyTorchListOfTensorType",
"Scalar": "AnyTorchScalarType",
"Scalar?": "AnyTorchOptionalScalarType",
"int": "Torch_IntType",
"int[]": "AnyTorchListOfTorchIntType",
"int?": "AnyTorchOptionalIntType",
"int[]?": "AnyTorchOptionalListOfTorchIntType",
"bool": "Torch_BoolType",
"bool[]": "AnyTorchListOfTorchBoolType",
"bool?": "AnyTorchOptionalBoolType",
"float": "Torch_FloatType",
"float?": "AnyTorchOptionalFloatType",
"t[]": "AnyTorchListType",
"t": "AnyTorchType",
"t1": "AnyTorchType",
"t2": "AnyTorchType",
"Any": "AnyTorchType",
"Device": "Torch_DeviceType",
"Device?": "AnyTorchOptionalDeviceType",
"Generator": "Torch_GeneratorType",
"Generator?": "AnyTorchOptionalGeneratorType",
"str": "Torch_StringType",
"str?": "AnyTorchOptionalStringType",
"str[]": "AnyTorchListOfTorchStringType",
"Dict": "Torch_DictType",
"__torch__.torch.classes.quantized.LinearPackedParamsBase": "Torch_LinearParamsType",
}
def get_ods_type(type: str):
# TODO: Increase precision on dict type modeling.
if type.startswith("Dict("):
type = "Dict"
ods_type = TORCH_TYPE_TO_ODS_TYPE.get(type)
if ods_type is None:
raise Exception(
f"{type!r} not in TORCH_TYPE_TO_ODS_TYPE mapping. Please add it!")
return ods_type
def _get_main_module_name() -> str:
# pytype: disable=attribute-error
return sys.modules["__main__"].__loader__.name
# pytype: enable=attribute-error
ODS_BANNER = f"""//===-------------------------------------------------------*- tablegen -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
// Operation summaries and descriptions were systematically derived from public
// API docstrings and are licensed accordingly:
// https://github.com/pytorch/pytorch/blob/master/LICENSE
//===----------------------------------------------------------------------===//
//
// This file is automatically generated. Please do not edit.
// Generated via:
// ```
// python -m {_get_main_module_name()}
// ```
//
//===----------------------------------------------------------------------===//
"""
def raw_emit_op(operator: JitOperator,
emitter_td: TextEmitter,
*, traits: List[str],
has_folder: bool, has_canonicalizer: bool):
"""Emit the ODS for a JitOperator to a textual file.
This is the lowest level of emission and is responsible for low-level
textual emission details. This function should not have any "smarts"
for deducing traits/etc.
You probably don't want to call this directly.
"""
p_td = lambda *args: emitter_td.print(*args)
op_name, cpp_class_name = operator.get_mlir_names()
# Generate unique result names for ops with nameless results
multiple_results = len(operator.returns) > 1
def generic_result_name(i):
return "result" + (str(i) if multiple_results else "")
p_td(
f"def Torch_{cpp_class_name} : Torch_Op<{emitter_td.quote(op_name)}, [")
with emitter_td.indent():
with emitter_td.indent():
p_td(",\n".join(traits))
p_td("]> {")
with emitter_td.indent():
summary = f"Generated op for `{operator.unique_key}`"
p_td(f"let summary = {emitter_td.quote(summary)};")
p_td(f"let arguments = (ins")
with emitter_td.indent():
if operator.is_vararg:
p_td("Variadic<AnyTorchType>:$operands")
else:
p_td(",\n".join([
f"""{get_ods_type(arg["type"])}:${arg["name"]}"""
for arg in operator.arguments
]))
p_td(");")
p_td(f"let results = (outs")
with emitter_td.indent():
if operator.is_varret:
p_td("Variadic<AnyTorchType>:$results")
else:
p_td(",\n".join([
f"""{get_ods_type(ret["type"])}:${ret["name"] or generic_result_name(e)}"""
for e, ret in enumerate(operator.returns)
]))
p_td(");")
if operator.is_vararg or operator.is_varret:
if operator.is_vararg:
assembly_operands = "`(` $operands `)`"
assembly_operand_types = "qualified(type($operands))"
else:
assembly_operands = " `,` ".join("$" + arg["name"]
for arg in operator.arguments)
assembly_operand_types = " `,` ".join(
f"""qualified(type(${arg["name"]}))""" for arg in operator.arguments)
if operator.is_varret:
assembly_result_types = "qualified(type($results))"
else:
assembly_result_types = " `,` ".join(
f"""qualified(type(${ret["name"] or generic_result_name(e)}))"""
for e, ret in enumerate(operator.returns))
if assembly_operand_types and assembly_result_types:
maybe_arrow = " `->` "
else:
maybe_arrow = ""
assembly_format = f"{assembly_operands} attr-dict `:` {assembly_operand_types}{maybe_arrow}{assembly_result_types}"
p_td(f"let assemblyFormat = {emitter_td.quote(assembly_format)};")
else:
p_td(f"let hasCustomAssemblyFormat = 1;")
p_td(f"""let extraClassDefinition = [{{
ParseResult {cpp_class_name}::parse(OpAsmParser &parser, OperationState &result) {{
return parseDefaultTorchOp(parser, result, {len(operator.arguments)}, {len(operator.returns)});
}}
void {cpp_class_name}::print(OpAsmPrinter &printer) {{
printDefaultTorchOp(printer, *this, {len(operator.arguments)}, {len(operator.returns)});
}}
}}];
""")
if has_folder:
p_td("let hasFolder = 1;")
if has_canonicalizer:
p_td("let hasCanonicalizer = 1;")
p_td("}")
p_td("\n")
def emit_op(operator: JitOperator,
emitter_td: TextEmitter,
*,
traits: Optional[List[str]] = None,
has_folder: bool = False,
has_canonicalizer: bool = False):
"""Main entry point for op emission.
Besides emitting the op, it deduces / adds traits based on the operator
information.
"""
if traits is None:
traits = []
# All Torch operators allow type refinement.
traits += ["AllowsTypeRefinement"]
if operator.has_value_semantics():
traits += ["HasValueSemantics"]
if operator.is_readonly():
traits += ["ReadOnly"]
raw_emit_op(operator,
emitter_td,
traits=traits,
has_folder=has_folder,
has_canonicalizer=has_canonicalizer)
def emit_ops(emitter_td: TextEmitter, registry: Registry):
def emit(key, **kwargs):
emit_op(registry[key], emitter_td, **kwargs)
def emit_with_mutating_variants(key, **kwargs):
operator = registry[key]
emit_op(operator, emitter_td, **kwargs)
ns, unqual, overload = operator.triple
emit_op(registry.get_by_triple((ns, unqual + "_", overload)),
emitter_td,
traits=["IsTrailingUnderscoreInplaceVariant"])
# ==========================================================================
# `aten::` namespace.
# ==========================================================================
# Elementwise tensor compute ops
for key in [
"aten::tanh : (Tensor) -> (Tensor)",
"aten::hardtanh : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::relu : (Tensor) -> (Tensor)",
"aten::leaky_relu : (Tensor, Scalar) -> (Tensor)",
"aten::log : (Tensor) -> (Tensor)",
"aten::sigmoid : (Tensor) -> (Tensor)",
"aten::hardsigmoid : (Tensor) -> (Tensor)",
"aten::hardswish : (Tensor) -> (Tensor)",
"aten::erf : (Tensor) -> (Tensor)",
"aten::silu : (Tensor) -> (Tensor)",
"aten::sin : (Tensor) -> (Tensor)",
"aten::exp : (Tensor) -> (Tensor)",
"aten::cos : (Tensor) -> (Tensor)",
"aten::neg : (Tensor) -> (Tensor)",
"aten::floor : (Tensor) -> (Tensor)",
"aten::ceil : (Tensor) -> (Tensor)",
"aten::bitwise_not : (Tensor) -> (Tensor)",
"aten::add.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)",
"aten::sub.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)",
"aten::mul.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::div.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::lerp.Tensor : (Tensor, Tensor, Tensor) -> (Tensor)",
"aten::eq.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::gt.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::lt.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::ne.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::add.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::sub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::mul.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::div.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::ne.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::eq.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::gt.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::ge.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::lt.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::le.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::fmod.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::masked_fill.Scalar : (Tensor, Tensor, Scalar) -> (Tensor)",
"aten::clamp : (Tensor, Scalar?, Scalar?) -> (Tensor)",
"aten::log2 : (Tensor) -> (Tensor)",
"aten::rsqrt : (Tensor) -> (Tensor)",
"aten::abs : (Tensor) -> (Tensor)",
"aten::reciprocal : (Tensor) -> (Tensor)",
"aten::bitwise_and.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::threshold : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::square : (Tensor) -> (Tensor)",
]:
emit_with_mutating_variants(key)
# Elementwise tensor compute ops that don't have the standard mutating
# variants.
emit("aten::addcmul : (Tensor, Tensor, Tensor, Scalar) -> (Tensor)")
emit("aten::addcdiv : (Tensor, Tensor, Tensor, Scalar) -> (Tensor)")
emit("aten::maximum : (Tensor, Tensor) -> (Tensor)")
emit("aten::minimum : (Tensor, Tensor) -> (Tensor)")
emit("aten::rsub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)")
emit("aten::gelu : (Tensor, str) -> (Tensor)")
emit("aten::pow.Tensor_Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::threshold_backward : (Tensor, Tensor, Scalar) -> (Tensor)")
# Ops without value semantics for which the corresponding variant without the
# trailing underscore doesn't exist.
emit("aten::fill_.Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::uniform_ : (Tensor, float, float, Generator?) -> (Tensor)")
emit("aten::rand_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::bernoulli : (Tensor, Generator?) -> (Tensor)")
emit("aten::bernoulli_.float : (Tensor, float, Generator?) -> (Tensor)")
emit("aten::bernoulli_.Tensor : (Tensor, Tensor, Generator?) -> (Tensor)")
emit_with_mutating_variants("aten::triu : (Tensor, int) -> (Tensor)")
emit_with_mutating_variants(
"aten::index_put : (Tensor, Tensor?[], Tensor, bool) -> (Tensor)")
emit_with_mutating_variants(
"aten::index_put.hacked_twin : (Tensor, Tensor[], Tensor, bool) -> (Tensor)")
# Non-elementwise tensor compute ops
emit("aten::linear : (Tensor, Tensor, Tensor?) -> (Tensor)")
emit("aten::mm : (Tensor, Tensor) -> (Tensor)")
emit("aten::addmm : (Tensor, Tensor, Tensor, Scalar, Scalar) -> (Tensor)")
emit("aten::matmul : (Tensor, Tensor) -> (Tensor)")
emit(
"aten::conv2d : (Tensor, Tensor, Tensor?, int[], int[], int[], int) -> (Tensor)"
)
emit("aten::convolution : (Tensor, Tensor, Tensor?, int[], int[], int[], bool, int[], int) -> (Tensor)")
emit("aten::convolution_overrideable : (Tensor, Tensor, Tensor?, int[], int[], int[], bool, int[], int) -> (Tensor)")
emit("aten::flip : (Tensor, int[]) -> (Tensor)")
emit(
"aten::native_batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float) -> (Tensor, Tensor, Tensor)"
)
emit(
"aten::batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float, bool) -> (Tensor)"
)
emit(
"aten::layer_norm : (Tensor, int[], Tensor?, Tensor?, float, bool) -> (Tensor)"
)
emit(
"aten::native_layer_norm : (Tensor, int[], Tensor?, Tensor?, float) -> (Tensor, Tensor, Tensor)"
)
emit(
"aten::max_pool2d : (Tensor, int[], int[], int[], int[], bool) -> (Tensor)"
)
emit(
"aten::max_pool2d_with_indices : (Tensor, int[], int[], int[], int[], bool) -> (Tensor, Tensor)"
)
emit(
"aten::max_pool2d_with_indices_backward : (Tensor, Tensor, int[], int[], int[], int[], bool, Tensor) -> (Tensor)"
)
emit(
"aten::avg_pool2d : (Tensor, int[], int[], int[], bool, bool, int?) -> (Tensor)"
)
emit(
"aten::softmax.int : (Tensor, int, int?) -> (Tensor)"
)
emit(
"aten::log_softmax.int : (Tensor, int, int?) -> (Tensor)"
)
emit(
"aten::_log_softmax : (Tensor, int, bool) -> (Tensor)"
)
emit("aten::adaptive_avg_pool2d : (Tensor, int[]) -> (Tensor)")
emit("aten::topk : (Tensor, int, int, bool, bool) -> (Tensor, Tensor)")
emit("aten::transpose.int : (Tensor, int, int) -> (Tensor)")
emit("aten::permute : (Tensor, int[]) -> (Tensor)")
emit("aten::bmm : (Tensor, Tensor) -> (Tensor)")
emit("aten::cumsum : (Tensor, int, int?) -> (Tensor)")
emit("aten::floor_divide.Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::logsumexp : (Tensor, int[], bool) -> (Tensor)")
emit("aten::mean.dim : (Tensor, int[], bool, int?) -> (Tensor)")
emit("aten::__and__.Tensor : (Tensor, Tensor) -> (Tensor)")
emit("aten::sqrt : (Tensor) -> (Tensor)")
emit("aten::_softmax : (Tensor, int, bool) -> (Tensor)")
emit("aten::mean : (Tensor, int?) -> (Tensor)")
emit("aten::std : (Tensor, bool) -> (Tensor)")
emit("aten::var : (Tensor, bool) -> (Tensor)")
emit("aten::nll_loss_forward : (Tensor, Tensor, Tensor?, int, int) -> (Tensor, Tensor)")
emit("aten::nll_loss_backward : (Tensor, Tensor, Tensor, Tensor?, int, int, Tensor) -> (Tensor)")
emit("aten::bincount : (Tensor, Tensor?, int) -> (Tensor)")
# Misc tensor ops.
emit("aten::constant_pad_nd : (Tensor, int[], Scalar) -> (Tensor)")
emit("aten::pad : (Tensor, int[], str, float?) -> (Tensor)")
emit("aten::squeeze.dim : (Tensor, int) -> (Tensor)", has_folder=True)
emit("aten::unsqueeze : (Tensor, int) -> (Tensor)")
emit("aten::squeeze : (Tensor) -> (Tensor)", has_folder=True)
emit("aten::flatten.using_ints : (Tensor, int, int) -> (Tensor)")
emit("aten::dim : (Tensor) -> (int)", has_folder=True)
emit("aten::size : (Tensor) -> (int[])", has_canonicalizer=True)
emit("aten::Bool.Tensor : (Tensor) -> (bool)")
emit("aten::ones : (int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::new_ones : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::zeros : (int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::zero_ : (Tensor) -> (Tensor)")
emit("aten::new_zeros : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::tensor : (t[], int?, Device?, bool) -> (Tensor)")
emit("aten::tensor.bool : (bool, int?, Device?, bool) -> (Tensor)")
emit("aten::tensor.int : (int, int?, Device?, bool) -> (Tensor)")
emit("aten::_shape_as_tensor : (Tensor) -> (Tensor)")
emit("aten::all : (Tensor) -> (Tensor)")
emit("aten::any : (Tensor) -> (Tensor)")
emit("aten::any.dim : (Tensor, int, bool) -> (Tensor)")
emit("aten::arange : (Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::arange.start : (Scalar, Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::arange.start_step : (Scalar, Scalar, Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::argmax : (Tensor, int?, bool) -> (Tensor)")
emit("aten::bucketize.Tensor : (Tensor, Tensor, bool, bool) -> (Tensor)")
emit("aten::clone : (Tensor, int?) -> (Tensor)")
emit("aten::contiguous : (Tensor, int) -> (Tensor)")
emit("aten::copy_ : (Tensor, Tensor, bool) -> (Tensor)")
emit("aten::_to_copy : (Tensor, int?, int?, Device?, bool?, bool, int?) -> (Tensor)")
emit("aten::detach : (Tensor) -> (Tensor)")
emit("aten::embedding : (Tensor, Tensor, int, bool, bool) -> (Tensor)")
emit("aten::empty_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::new_empty : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::zeros_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::ones_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::empty.memory_format : (int[], int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::expand : (Tensor, int[], bool) -> (Tensor)")
emit("aten::expand_as : (Tensor, Tensor) -> (Tensor)")
emit("aten::broadcast_to : (Tensor, int[]) -> (Tensor)")
emit("aten::index.Tensor : (Tensor, Tensor?[]) -> (Tensor)")
emit("aten::index_select : (Tensor, int, Tensor) -> (Tensor)")
emit("aten::_index_put_impl_ : (Tensor, Tensor?[], Tensor, bool, bool) -> (Tensor)")
emit("aten::item : (Tensor) -> (Scalar)")
emit("aten::masked_select : (Tensor, Tensor) -> (Tensor)")
emit("aten::numel : (Tensor) -> (int)")
emit("aten::repeat : (Tensor, int[]) -> (Tensor)")
emit("aten::reshape : (Tensor, int[]) -> (Tensor)")
emit("aten::_reshape_alias : (Tensor, int[], int[]) -> (Tensor)")
emit("aten::resize_ : (Tensor, int[], int?) -> (Tensor)")
emit("aten::select.int : (Tensor, int, int) -> (Tensor)")
emit("aten::size.int : (Tensor, int) -> (int)", has_folder=True)
emit("aten::stack : (Tensor[], int) -> (Tensor)")
emit("aten::sum : (Tensor, int?) -> (Tensor)")
emit("aten::sum.dim_IntList : (Tensor, int[], bool, int?) -> (Tensor)")
emit("aten::max : (Tensor) -> (Tensor)")
emit("aten::max.dim : (Tensor, int, bool) -> (Tensor, Tensor)")
emit("aten::to.dtype : (Tensor, int, bool, bool, int?) -> (Tensor)", has_folder=True)
emit("aten::to.dtype_layout : (Tensor, int?, int?, Device?, bool?, bool, bool, int?) -> (Tensor)", has_folder=True)
emit("aten::to.other : (Tensor, Tensor, bool, bool, int?) -> (Tensor)")
emit("aten::to.prim_Device : (Tensor, Device?, int?, bool, bool) -> (Tensor)")
emit("aten::type_as : (Tensor, Tensor) -> (Tensor)")
emit("aten::view : (Tensor, int[]) -> (Tensor)", has_folder=True)
emit("aten::_unsafe_view : (Tensor, int[]) -> (Tensor)")
emit("aten::where.self : (Tensor, Tensor, Tensor) -> (Tensor)")
emit("aten::where.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)")
emit("aten::where.ScalarOther : (Tensor, Tensor, Scalar) -> (Tensor)")
emit("aten::where.ScalarSelf : (Tensor, Scalar, Tensor) -> (Tensor)")
emit("aten::slice.Tensor : (Tensor, int, int?, int?, int) -> (Tensor)")
emit("aten::len.Tensor : (Tensor) -> (int)")
emit("aten::cpu : (Tensor) -> (Tensor)")
emit("aten::gather : (Tensor, int, Tensor, bool) -> (Tensor)")
emit("aten::IntImplicit : (Tensor) -> (int)")
emit("aten::tensor.float : (float, int?, Device?, bool) -> (Tensor)")
emit("aten::Int.Tensor : (Tensor) -> (int)", has_folder=True)
emit("aten::Float.Tensor : (Tensor) -> (float)", has_folder=True)
emit_with_mutating_variants("aten::dropout : (Tensor, float, bool) -> (Tensor)")
emit("aten::t : (Tensor) -> (Tensor)")
emit("aten::full : (int[], Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::full_like : (Tensor, Scalar, int?, int?, Device?, bool?, int?) -> (Tensor)")
# Dict ops.
emit("aten::__contains__.str : (Dict(str, t), str) -> (bool)", has_folder=True)
emit("aten::__getitem__.Dict_str : (Dict(str, t), str) -> (t)", has_folder=True)
emit("aten::_set_item.str : (Dict(str, t), str, t) -> ()")
emit("aten::keys.str : (Dict(str, t)) -> (str[])")
emit("aten::get.default_str : (Dict(str, t), str, t) -> (t)")
emit("aten::Delete.Dict_str : (Dict(str, t), str) -> ()")
# List ops.
emit("aten::cat : (Tensor[], int) -> (Tensor)")
emit("aten::append.t : (t[], t) -> (t[])")
emit("aten::add.t : (t[], t[]) -> (t[])")
emit("aten::eq.int_list : (int[], int[]) -> (bool)", has_folder=True)
emit("aten::list.t : (t[]) -> (t[])")
emit("aten::slice.t : (t[], int?, int?, int) -> (t[])")
emit("aten::insert.t : (t[], int, t) -> ()")
emit("aten::ne.int_list : (int[], int[]) -> (bool)")
# Str ops.
emit("aten::add.str : (str, str) -> (str)")
emit("aten::eq.str : (str, str) -> (bool)", has_folder=True)
emit("aten::str : (t) -> (str)")
emit("aten::format : (...) -> (str)")
emit("aten::join : (str, str[]) -> (str)")
# Type conversion ops.
emit("aten::Float.Scalar : (Scalar) -> (float)", has_folder=True)
emit("aten::Float.str : (str) -> (float)")
emit("aten::Int.float : (float) -> (int)")
# Primitive ops
emit("aten::__range_length : (int, int, int) -> (int)", has_folder=True)
emit("aten::__derive_index : (int, int, int) -> (int)", has_folder=True)
emit("aten::gt.int : (int, int) -> (bool)", has_folder=True)
emit("aten::ge.int : (int, int) -> (bool)", has_folder=True)
emit("aten::lt.int : (int, int) -> (bool)", has_folder=True)
emit("aten::le.int : (int, int) -> (bool)", has_folder=True)
emit("aten::ne.int : (int, int) -> (bool)", has_folder=True)
emit("aten::eq.int : (int, int) -> (bool)", has_folder=True)
emit("aten::floordiv.int : (int, int) -> (int)", has_folder=True)
emit("aten::remainder.int : (int, int) -> (int)", has_folder=True)
emit("aten::add.int : (int, int) -> (int)", has_folder=True)
emit("aten::sub.int : (int, int) -> (int)", has_folder=True)
emit("aten::mul.int : (int, int) -> (int)", has_folder=True)
emit("aten::neg.int : (int) -> (int)", has_folder=True)
emit("aten::log.int : (int) -> (float)")
emit("aten::add.float_int : (float, int) -> (float)")
emit("aten::sub.float : (float, float) -> (float)")
emit("aten::mul.float : (float, float) -> (float)")
emit("aten::div.float : (float, float) -> (float)", has_folder=True)
emit("aten::neg.float : (float) -> (float)")
emit("aten::eq.float : (float, float) -> (bool)", has_folder=True)
emit("aten::gt.float : (float, float) -> (bool)", has_folder=True)
emit("aten::ge.float : (float, float) -> (bool)", has_folder=True)
emit("aten::lt.float : (float, float) -> (bool)", has_folder=True)
emit("aten::lt.float_int : (float, int) -> (bool)")
emit("aten::ge.float_int : (float, int) -> (bool)")
emit("aten::ne.float_int : (float, int) -> (bool)")
emit("aten::gt.float_int : (float, int) -> (bool)")
emit("aten::__and__.bool : (bool, bool) -> (bool)")
emit("aten::ne.bool : (bool, bool) -> (bool)", has_folder=True)
emit("aten::__is__ : (t1, t2) -> (bool)", has_folder=True)
emit("aten::__isnot__ : (t1, t2) -> (bool)", has_folder=True)
emit("aten::__not__ : (bool) -> (bool)", has_folder=True)
emit("aten::len.t : (t[]) -> (int)",
has_folder=True,
has_canonicalizer=True)
emit("aten::__getitem__.t : (t[], int) -> (t)", has_canonicalizer=True)
emit("aten::_set_item.t : (t[], int, t) -> (t[])")
emit("aten::div : (Scalar, Scalar) -> (float)")
emit("aten::add : (Scalar, Scalar) -> (Scalar)")
emit("aten::eq.device : (Device, Device) -> (bool)")
emit("aten::ceil.float : (float) -> (int)", has_folder=True)
# backprop ops
emit("aten::_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)")
emit("aten::tanh_backward : (Tensor, Tensor) -> (Tensor)")
emit("aten::gelu_backward : (Tensor, Tensor, str) -> (Tensor)")
emit("aten::_log_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)")
# ==========================================================================
# `prim::` namespace.
# ==========================================================================
emit("prim::layout : (Tensor) -> (int)")
emit("prim::TupleIndex : (Any, int) -> (Any)", has_canonicalizer=True)
emit("prim::device : (Tensor) -> (Device)")
emit("prim::dtype : (Tensor) -> (int)", has_folder=True)
emit("prim::TupleUnpack : (Any) -> (...)", has_canonicalizer=True)
emit("prim::NumToTensor.Scalar : (Scalar) -> (Tensor)")
emit("prim::min.self_int : (int[]) -> (int)", has_folder=True)
emit("prim::min.int : (int, int) -> (int)")
emit("prim::max.self_int : (int[]) -> (int)")
emit("prim::max.int : (int, int) -> (int)", has_folder=True)
emit("prim::RaiseException : (str, str?) -> ()")
emit("prim::Uninitialized : () -> (Any)",
has_canonicalizer=True, traits=["NoSideEffect"])
emit("prim::unchecked_cast : (t) -> (t)", has_folder=True,
traits=["DeclareOpInterfaceMethods<CastOpInterface>"])
emit("prim::Print : (...) -> ()")
emit("prim::tolist : (...) -> (...)")
emit("prim::abs.Scalar : (Scalar) -> (Scalar)")
# ==========================================================================
# `quantized::` namespace.
# ==========================================================================
emit(
"quantized::linear : (Tensor, __torch__.torch.classes.quantized.LinearPackedParamsBase, float, int) -> (Tensor)",
traits=["HasValueSemantics"])
def dump_registered_ops(outfile: TextIO, registry: Registry):
for _, v in sorted(registry.by_unique_key.items()):
outfile.write(repr(v))
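# Entry point: load the operator registry from the PyTorch JIT, optionally dump it for debugging,
# and generate GeneratedTorchOps.td inside the Torch dialect include directory.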
def main(args: argparse.Namespace):
registry = Registry.load()
if args.debug_registry_dump:
with open(args.debug_registry_dump, "w") as debug_registry_dump:
dump_registered_ops(debug_registry_dump, registry)
td_path = os.path.join(args.torch_ir_include_dir, "GeneratedTorchOps.td")
with open(td_path, "w") as f_td:
emitter_td = TextEmitter(f_td)
emitter_td.print(ODS_BANNER)
emit_ops(emitter_td, registry)
def _create_argparse() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(prog="generate_ods")
parser.add_argument(
"--torch_ir_include_dir",
required=True,
help="Directory in include/ containing the Torch dialect")
parser.add_argument(
"--debug_registry_dump",
help="File to dump the the PyTorch JIT operator registry into")
return parser
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
parser = _create_argparse()
args = parser.parse_args()
main(args)
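# Example invocation (hedged sketch): the script name and include path are assumptions; only the
# two flags are actually defined by _create_argparse above.
#   python torch_ods_gen.py \
#       --torch_ir_include_dir include/torch-mlir/Dialect/Torch/IR \
#       --debug_registry_dump /tmp/registry_dump.txt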
|
import asyncio
import logging
import time
from datetime import datetime
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple
from blspy import PrivateKey, G1Element
from seno.cmds.init_funcs import check_keys
from seno.consensus.block_rewards import calculate_base_farmer_reward
from seno.protocols.protocol_message_types import ProtocolMessageTypes
from seno.server.outbound_message import NodeType, make_msg
from seno.simulator.simulator_protocol import FarmNewBlockProtocol
from seno.types.blockchain_format.coin import Coin
from seno.types.blockchain_format.sized_bytes import bytes32
from seno.util.bech32m import decode_puzzle_hash, encode_puzzle_hash
from seno.util.byte_types import hexstr_to_bytes
from seno.util.ints import uint32, uint64
from seno.util.keychain import bytes_to_mnemonic, generate_mnemonic
from seno.util.path import path_from_root
from seno.util.ws_message import WsRpcMessage, create_payload_dict
from seno.wallet.cc_wallet.cc_wallet import CCWallet
from seno.wallet.rl_wallet.rl_wallet import RLWallet
from seno.wallet.did_wallet.did_wallet import DIDWallet
from seno.wallet.trade_record import TradeRecord
from seno.wallet.transaction_record import TransactionRecord
from seno.wallet.util.backup_utils import download_backup, get_backup_info, upload_backup
from seno.wallet.util.trade_utils import trade_record_to_dict
from seno.wallet.util.transaction_type import TransactionType
from seno.wallet.util.wallet_types import WalletType
from seno.wallet.wallet_info import WalletInfo
from seno.wallet.wallet_node import WalletNode
# Timeout for response from wallet/full node for sending a transaction
TIMEOUT = 30
log = logging.getLogger(__name__)
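# RPC surface for the wallet service: each public async method below handles one route returned
# by get_routes() and receives the decoded JSON request as a dict.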
class WalletRpcApi:
def __init__(self, wallet_node: WalletNode):
assert wallet_node is not None
self.service = wallet_node
self.service_name = "seno_wallet"
def get_routes(self) -> Dict[str, Callable]:
return {
# Key management
"/log_in": self.log_in,
"/get_public_keys": self.get_public_keys,
"/get_private_key": self.get_private_key,
"/generate_mnemonic": self.generate_mnemonic,
"/add_key": self.add_key,
"/delete_key": self.delete_key,
"/delete_all_keys": self.delete_all_keys,
# Wallet node
"/get_sync_status": self.get_sync_status,
"/get_height_info": self.get_height_info,
"/farm_block": self.farm_block, # Only when node simulator is running
"/get_initial_freeze_period": self.get_initial_freeze_period,
"/get_network_info": self.get_network_info,
# Wallet management
"/get_wallets": self.get_wallets,
"/create_new_wallet": self.create_new_wallet,
# Wallet
"/get_wallet_balance": self.get_wallet_balance,
"/get_transaction": self.get_transaction,
"/get_transactions": self.get_transactions,
"/get_next_address": self.get_next_address,
"/send_transaction": self.send_transaction,
"/create_backup": self.create_backup,
"/get_transaction_count": self.get_transaction_count,
"/get_farmed_amount": self.get_farmed_amount,
"/create_signed_transaction": self.create_signed_transaction,
# Coloured coins and trading
"/cc_set_name": self.cc_set_name,
"/cc_get_name": self.cc_get_name,
"/cc_spend": self.cc_spend,
"/cc_get_colour": self.cc_get_colour,
"/create_offer_for_ids": self.create_offer_for_ids,
"/get_discrepancies_for_offer": self.get_discrepancies_for_offer,
"/respond_to_offer": self.respond_to_offer,
"/get_trade": self.get_trade,
"/get_all_trades": self.get_all_trades,
"/cancel_trade": self.cancel_trade,
# DID Wallet
"/did_update_recovery_ids": self.did_update_recovery_ids,
"/did_spend": self.did_spend,
"/did_get_pubkey": self.did_get_pubkey,
"/did_get_did": self.did_get_did,
"/did_recovery_spend": self.did_recovery_spend,
"/did_get_recovery_list": self.did_get_recovery_list,
"/did_create_attest": self.did_create_attest,
"/did_get_information_needed_for_recovery": self.did_get_information_needed_for_recovery,
"/did_create_backup_file": self.did_create_backup_file,
# RL wallet
"/rl_set_user_info": self.rl_set_user_info,
"/send_clawback_transaction:": self.send_clawback_transaction,
"/add_rate_limited_funds:": self.add_rate_limited_funds,
}
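# Example (hedged): the surrounding RPC server is expected to decode a JSON body and dispatch it
# to the matching handler above, e.g. a call to /send_transaction with
#   {"wallet_id": 1, "address": "<bech32m address>", "amount": 1000, "fee": 0}
# The field names come from the handlers below; the transport details are an assumption.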
async def _state_changed(self, *args) -> List[WsRpcMessage]:
"""
Called by the WalletNode or WalletStateManager when something has changed in the wallet. This
gives us an opportunity to send notifications to all connected clients via WebSocket.
"""
if len(args) < 2:
return []
data = {
"state": args[0],
}
if args[1] is not None:
data["wallet_id"] = args[1]
if args[2] is not None:
data["additional_data"] = args[2]
return [create_payload_dict("state_changed", data, "seno_wallet", "wallet_ui")]
async def _stop_wallet(self):
"""
Stops a currently running wallet/key, which allows starting the wallet with a new key.
Each key has its own wallet database.
"""
if self.service is not None:
self.service._close()
await self.service._await_closed()
##########################################################################################
# Key management
##########################################################################################
async def log_in(self, request):
"""
Logs in the wallet with a specific key.
"""
fingerprint = request["fingerprint"]
if self.service.logged_in_fingerprint == fingerprint:
return {"fingerprint": fingerprint}
await self._stop_wallet()
log_in_type = request["type"]
recovery_host = request["host"]
testing = False
if "testing" in self.service.config and self.service.config["testing"] is True:
testing = True
if log_in_type == "skip":
started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True)
elif log_in_type == "restore_backup":
file_path = Path(request["file_path"])
started = await self.service._start(fingerprint=fingerprint, backup_file=file_path)
else:
started = await self.service._start(fingerprint)
if started is True:
return {"fingerprint": fingerprint}
elif testing is True and self.service.backup_initialized is False:
response = {"success": False, "error": "not_initialized"}
return response
elif self.service.backup_initialized is False:
backup_info = None
backup_path = None
try:
private_key = self.service.get_key_for_fingerprint(fingerprint)
last_recovery = await download_backup(recovery_host, private_key)
backup_path = path_from_root(self.service.root_path, "last_recovery")
if backup_path.exists():
backup_path.unlink()
backup_path.write_text(last_recovery)
backup_info = get_backup_info(backup_path, private_key)
backup_info["backup_host"] = recovery_host
backup_info["downloaded"] = True
except Exception as e:
log.error(f"error {e}")
response = {"success": False, "error": "not_initialized"}
if backup_info is not None:
response["backup_info"] = backup_info
response["backup_path"] = f"{backup_path}"
return response
return {"success": False, "error": "Unknown Error"}
async def get_public_keys(self, request: Dict):
fingerprints = [sk.get_g1().get_fingerprint() for (sk, seed) in self.service.keychain.get_all_private_keys()]
return {"public_key_fingerprints": fingerprints}
async def _get_private_key(self, fingerprint) -> Tuple[Optional[PrivateKey], Optional[bytes]]:
for sk, seed in self.service.keychain.get_all_private_keys():
if sk.get_g1().get_fingerprint() == fingerprint:
return sk, seed
return None, None
async def get_private_key(self, request):
fingerprint = request["fingerprint"]
sk, seed = await self._get_private_key(fingerprint)
if sk is not None:
s = bytes_to_mnemonic(seed) if seed is not None else None
return {
"private_key": {
"fingerprint": fingerprint,
"sk": bytes(sk).hex(),
"pk": bytes(sk.get_g1()).hex(),
"seed": s,
},
}
return {"success": False, "private_key": {"fingerprint": fingerprint}}
async def generate_mnemonic(self, request: Dict):
return {"mnemonic": generate_mnemonic().split(" ")}
async def add_key(self, request):
if "mnemonic" not in request:
raise ValueError("Mnemonic not in request")
# Adding a key from a 24-word mnemonic
mnemonic = request["mnemonic"]
passphrase = ""
try:
sk = self.service.keychain.add_private_key(" ".join(mnemonic), passphrase)
except KeyError as e:
return {
"success": False,
"error": f"The word '{e.args[0]}' is incorrect.'",
"word": e.args[0],
}
fingerprint = sk.get_g1().get_fingerprint()
await self._stop_wallet()
# Makes sure the new key is added to config properly
started = False
check_keys(self.service.root_path)
request_type = request["type"]
if request_type == "new_wallet":
started = await self.service._start(fingerprint=fingerprint, new_wallet=True)
elif request_type == "skip":
started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True)
elif request_type == "restore_backup":
file_path = Path(request["file_path"])
started = await self.service._start(fingerprint=fingerprint, backup_file=file_path)
if started is True:
return {"fingerprint": fingerprint}
raise ValueError("Failed to start")
async def delete_key(self, request):
await self._stop_wallet()
fingerprint = request["fingerprint"]
self.service.keychain.delete_key_by_fingerprint(fingerprint)
path = path_from_root(
self.service.root_path,
f"{self.service.config["database_path"]}-{fingerprint}",
)
if path.exists():
path.unlink()
return {}
async def delete_all_keys(self, request: Dict):
await self._stop_wallet()
self.service.keychain.delete_all_keys()
path = path_from_root(self.service.root_path, self.service.config["database_path"])
if path.exists():
path.unlink()
return {}
##########################################################################################
# Wallet Node
##########################################################################################
async def get_sync_status(self, request: Dict):
assert self.service.wallet_state_manager is not None
syncing = self.service.wallet_state_manager.sync_mode
synced = await self.service.wallet_state_manager.synced()
return {"synced": synced, "syncing": syncing, "genesis_initialized": True}
async def get_height_info(self, request: Dict):
assert self.service.wallet_state_manager is not None
peak = self.service.wallet_state_manager.peak
if peak is None:
return {"height": 0}
else:
return {"height": peak.height}
async def get_network_info(self, request: Dict):
assert self.service.wallet_state_manager is not None
network_name = self.service.config["selected_network"]
address_prefix = self.service.config["network_overrides"]["config"][network_name]["address_prefix"]
return {"network_name": network_name, "network_prefix": address_prefix}
async def farm_block(self, request):
raw_puzzle_hash = decode_puzzle_hash(request["address"])
request = FarmNewBlockProtocol(raw_puzzle_hash)
msg = make_msg(ProtocolMessageTypes.farm_new_block, request)
await self.service.server.send_to_all([msg], NodeType.FULL_NODE)
return {}
##########################################################################################
# Wallet Management
##########################################################################################
async def get_wallets(self, request: Dict):
assert self.service.wallet_state_manager is not None
wallets: List[WalletInfo] = await self.service.wallet_state_manager.get_all_wallet_info_entries()
return {"wallets": wallets}
async def _create_backup_and_upload(self, host) -> None:
assert self.service.wallet_state_manager is not None
try:
if "testing" in self.service.config and self.service.config["testing"] is True:
return None
now = time.time()
file_name = f"backup_{now}"
path = path_from_root(self.service.root_path, file_name)
await self.service.wallet_state_manager.create_wallet_backup(path)
backup_text = path.read_text()
response = await upload_backup(host, backup_text)
success = response["success"]
if success is False:
log.error("Failed to upload backup to wallet backup service")
elif success is True:
log.info("Finished upload of the backup file")
except Exception as e:
log.error(f"Exception in upload backup. Error: {e}")
async def create_new_wallet(self, request: Dict):
assert self.service.wallet_state_manager is not None
wallet_state_manager = self.service.wallet_state_manager
main_wallet = wallet_state_manager.main_wallet
host = request["host"]
if request["wallet_type"] == "cc_wallet":
if request["mode"] == "new":
async with self.service.wallet_state_manager.lock:
cc_wallet: CCWallet = await CCWallet.create_new_cc(
wallet_state_manager, main_wallet, request["amount"]
)
colour = cc_wallet.get_colour()
asyncio.create_task(self._create_backup_and_upload(host))
return {
"type": cc_wallet.type(),
"colour": colour,
"wallet_id": cc_wallet.id(),
}
elif request["mode"] == "existing":
async with self.service.wallet_state_manager.lock:
cc_wallet = await CCWallet.create_wallet_for_cc(
wallet_state_manager, main_wallet, request["colour"]
)
asyncio.create_task(self._create_backup_and_upload(host))
return {"type": cc_wallet.type()}
elif request["wallet_type"] == "rl_wallet":
if request["rl_type"] == "admin":
log.info("Create rl admin wallet")
async with self.service.wallet_state_manager.lock:
rl_admin: RLWallet = await RLWallet.create_rl_admin(wallet_state_manager)
success = await rl_admin.admin_create_coin(
uint64(int(request["interval"])),
uint64(int(request["limit"])),
request["pubkey"],
uint64(int(request["amount"])),
uint64(int(request["fee"])) if "fee" in request else uint64(0),
)
asyncio.create_task(self._create_backup_and_upload(host))
assert rl_admin.rl_info.admin_pubkey is not None
return {
"success": success,
"id": rl_admin.id(),
"type": rl_admin.type(),
"origin": rl_admin.rl_info.rl_origin,
"pubkey": rl_admin.rl_info.admin_pubkey.hex(),
}
elif request["rl_type"] == "user":
log.info("Create rl user wallet")
async with self.service.wallet_state_manager.lock:
rl_user: RLWallet = await RLWallet.create_rl_user(wallet_state_manager)
asyncio.create_task(self._create_backup_and_upload(host))
assert rl_user.rl_info.user_pubkey is not None
return {
"id": rl_user.id(),
"type": rl_user.type(),
"pubkey": rl_user.rl_info.user_pubkey.hex(),
}
elif request["wallet_type"] == "did_wallet":
if request["did_type"] == "new":
backup_dids = []
num_needed = 0
for d in request["backup_dids"]:
backup_dids.append(hexstr_to_bytes(d))
if len(backup_dids) > 0:
num_needed = uint64(request["num_of_backup_ids_needed"])
async with self.service.wallet_state_manager.lock:
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_state_manager,
main_wallet,
int(request["amount"]),
backup_dids,
uint64(num_needed),
)
my_did = did_wallet.get_my_DID()
return {
"success": True,
"type": did_wallet.type(),
"my_did": my_did,
"wallet_id": did_wallet.id(),
}
elif request["did_type"] == "recovery":
async with self.service.wallet_state_manager.lock:
did_wallet = await DIDWallet.create_new_did_wallet_from_recovery(
wallet_state_manager, main_wallet, request["filename"]
)
assert did_wallet.did_info.temp_coin is not None
assert did_wallet.did_info.temp_puzhash is not None
assert did_wallet.did_info.temp_pubkey is not None
my_did = did_wallet.get_my_DID()
coin_name = did_wallet.did_info.temp_coin.name().hex()
coin_list = did_wallet.did_info.temp_coin.as_list()
newpuzhash = did_wallet.did_info.temp_puzhash
pubkey = did_wallet.did_info.temp_pubkey
return {
"success": True,
"type": did_wallet.type(),
"my_did": my_did,
"wallet_id": did_wallet.id(),
"coin_name": coin_name,
"coin_list": coin_list,
"newpuzhash": newpuzhash.hex(),
"pubkey": pubkey.hex(),
"backup_dids": did_wallet.did_info.backup_ids,
"num_verifications_required": did_wallet.did_info.num_of_backup_ids_needed,
}
##########################################################################################
# Wallet
##########################################################################################
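# Balance summary for one wallet: confirmed, unconfirmed and spendable balances, pending change,
# max send amount, and unspent / pending-removal coin counts.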
async def get_wallet_balance(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
wallet_id = uint32(int(request["wallet_id"]))
wallet = self.service.wallet_state_manager.wallets[wallet_id]
async with self.service.wallet_state_manager.lock:
unspent_records = await self.service.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(wallet_id)
balance = await wallet.get_confirmed_balance(unspent_records)
pending_balance = await wallet.get_unconfirmed_balance(unspent_records)
spendable_balance = await wallet.get_spendable_balance(unspent_records)
pending_change = await wallet.get_pending_change_balance()
max_send_amount = await wallet.get_max_send_amount(unspent_records)
unconfirmed_removals: Dict[
bytes32, Coin
] = await wallet.wallet_state_manager.unconfirmed_removals_for_wallet(wallet_id)
wallet_balance = {
"wallet_id": wallet_id,
"confirmed_wallet_balance": balance,
"unconfirmed_wallet_balance": pending_balance,
"spendable_balance": spendable_balance,
"pending_change": pending_change,
"max_send_amount": max_send_amount,
"unspent_coin_count": len(unspent_records),
"pending_coin_removal_count": len(unconfirmed_removals),
}
return {"wallet_balance": wallet_balance}
async def get_transaction(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
transaction_id: bytes32 = bytes32(hexstr_to_bytes(request["transaction_id"]))
tr: Optional[TransactionRecord] = await self.service.wallet_state_manager.get_transaction(transaction_id)
if tr is None:
raise ValueError(f"Transaction 0x{transaction_id.hex()} not found")
return {
"transaction": tr,
"transaction_id": tr.name,
}
async def get_transactions(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
if "start" in request:
start = request["start"]
else:
start = 0
if "end" in request:
end = request["end"]
else:
end = 50
transactions = await self.service.wallet_state_manager.tx_store.get_transactions_between(wallet_id, start, end)
formatted_transactions = []
selected = self.service.config["selected_network"]
prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"]
for tx in transactions:
formatted = tx.to_json_dict()
formatted["to_address"] = encode_puzzle_hash(tx.to_puzzle_hash, prefix)
formatted_transactions.append(formatted)
return {
"transactions": formatted_transactions,
"wallet_id": wallet_id,
}
async def get_initial_freeze_period(self, _: Dict):
freeze_period = self.service.constants.INITIAL_FREEZE_END_TIMESTAMP
return {"INITIAL_FREEZE_END_TIMESTAMP": freeze_period}
async def get_next_address(self, request: Dict) -> Dict:
"""
Returns an address for the given wallet; a fresh address is derived when request["new_address"] is true.
"""
assert self.service.wallet_state_manager is not None
if request["new_address"] is True:
create_new = True
else:
create_new = False
wallet_id = uint32(int(request["wallet_id"]))
wallet = self.service.wallet_state_manager.wallets[wallet_id]
selected = self.service.config["selected_network"]
prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"]
if wallet.type() == WalletType.STANDARD_WALLET:
raw_puzzle_hash = await wallet.get_puzzle_hash(create_new)
address = encode_puzzle_hash(raw_puzzle_hash, prefix)
elif wallet.type() == WalletType.COLOURED_COIN:
raw_puzzle_hash = await wallet.get_puzzle_hash(create_new)
address = encode_puzzle_hash(raw_puzzle_hash, prefix)
else:
raise ValueError(f"Wallet type {wallet.type()} cannot create puzzle hashes")
return {
"wallet_id": wallet_id,
"address": address,
}
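# Standard send: refuses to build a transaction until the wallet is fully synced and the initial
# freeze period has ended, then signs and pushes it under the state-manager lock.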
async def send_transaction(self, request):
assert self.service.wallet_state_manager is not None
if await self.service.wallet_state_manager.synced() is False:
raise ValueError("Wallet needs to be fully synced before sending transactions")
if int(time.time()) < self.service.constants.INITIAL_FREEZE_END_TIMESTAMP:
end_date = datetime.fromtimestamp(float(self.service.constants.INITIAL_FREEZE_END_TIMESTAMP))
raise ValueError(f"No transactions before: {end_date}")
wallet_id = int(request["wallet_id"])
wallet = self.service.wallet_state_manager.wallets[wallet_id]
if not isinstance(request["amount"], int) or not isinstance(request["fee"], int):
raise ValueError("An integer amount or fee is required (too many decimals)")
amount: uint64 = uint64(request["amount"])
puzzle_hash: bytes32 = decode_puzzle_hash(request["address"])
if "fee" in request:
fee = uint64(request["fee"])
else:
fee = uint64(0)
async with self.service.wallet_state_manager.lock:
tx: TransactionRecord = await wallet.generate_signed_transaction(amount, puzzle_hash, fee)
await wallet.push_transaction(tx)
# Transaction may not have been included in the mempool yet. Use get_transaction to check.
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def get_transaction_count(self, request):
wallet_id = int(request["wallet_id"])
count = await self.service.wallet_state_manager.tx_store.get_transaction_count_for_wallet(wallet_id)
return {"wallet_id": wallet_id, "count": count}
async def create_backup(self, request):
assert self.service.wallet_state_manager is not None
file_path = Path(request["file_path"])
await self.service.wallet_state_manager.create_wallet_backup(file_path)
return {}
##########################################################################################
# Coloured Coins and Trading
##########################################################################################
async def cc_set_name(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
await wallet.set_name(str(request["name"]))
return {"wallet_id": wallet_id}
async def cc_get_name(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
name: str = await wallet.get_name()
return {"wallet_id": wallet_id, "name": name}
async def cc_spend(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
puzzle_hash: bytes32 = decode_puzzle_hash(request["inner_address"])
if not isinstance(request["amount"], int) or not isinstance(request["amount"], int):
raise ValueError("An integer amount or fee is required (too many decimals)")
amount: uint64 = uint64(request["amount"])
if "fee" in request:
fee = uint64(request["fee"])
else:
fee = uint64(0)
async with self.service.wallet_state_manager.lock:
tx: TransactionRecord = await wallet.generate_signed_transaction([amount], [puzzle_hash], fee)
await wallet.push_transaction(tx)
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def cc_get_colour(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
colour: str = wallet.get_colour()
return {"colour": colour, "wallet_id": wallet_id}
async def create_offer_for_ids(self, request):
assert self.service.wallet_state_manager is not None
offer = request["ids"]
file_name = request["filename"]
async with self.service.wallet_state_manager.lock:
(
success,
spend_bundle,
error,
) = await self.service.wallet_state_manager.trade_manager.create_offer_for_ids(offer, file_name)
if success:
self.service.wallet_state_manager.trade_manager.write_offer_to_disk(Path(file_name), spend_bundle)
return {}
raise ValueError(error)
async def get_discrepancies_for_offer(self, request):
assert self.service.wallet_state_manager is not None
file_name = request["filename"]
file_path = Path(file_name)
async with self.service.wallet_state_manager.lock:
(
success,
discrepancies,
error,
) = await self.service.wallet_state_manager.trade_manager.get_discrepancies_for_offer(file_path)
if success:
return {"discrepancies": discrepancies}
raise ValueError(error)
async def respond_to_offer(self, request):
assert self.service.wallet_state_manager is not None
file_path = Path(request["filename"])
async with self.service.wallet_state_manager.lock:
(
success,
trade_record,
error,
) = await self.service.wallet_state_manager.trade_manager.respond_to_offer(file_path)
if not success:
raise ValueError(error)
return {}
async def get_trade(self, request: Dict):
assert self.service.wallet_state_manager is not None
trade_mgr = self.service.wallet_state_manager.trade_manager
trade_id = request["trade_id"]
trade: Optional[TradeRecord] = await trade_mgr.get_trade_by_id(trade_id)
if trade is None:
raise ValueError(f"No trade with trade id: {trade_id}")
result = trade_record_to_dict(trade)
return {"trade": result}
async def get_all_trades(self, request: Dict):
assert self.service.wallet_state_manager is not None
trade_mgr = self.service.wallet_state_manager.trade_manager
all_trades = await trade_mgr.get_all_trades()
result = []
for trade in all_trades:
result.append(trade_record_to_dict(trade))
return {"trades": result}
async def cancel_trade(self, request: Dict):
assert self.service.wallet_state_manager is not None
wsm = self.service.wallet_state_manager
secure = request["secure"]
trade_id = hexstr_to_bytes(request["trade_id"])
async with self.service.wallet_state_manager.lock:
if secure:
await wsm.trade_manager.cancel_pending_offer_safely(trade_id)
else:
await wsm.trade_manager.cancel_pending_offer(trade_id)
return {}
async def get_backup_info(self, request: Dict):
file_path = Path(request["file_path"])
sk = None
if "words" in request:
mnemonic = request["words"]
passphrase = ""
try:
sk = self.service.keychain.add_private_key(" ".join(mnemonic), passphrase)
except KeyError as e:
return {
"success": False,
"error": f"The word '{e.args[0]}' is incorrect.'",
"word": e.args[0],
}
elif "fingerprint" in request:
sk, seed = await self._get_private_key(request["fingerprint"])
if sk is None:
raise ValueError("Unable to decrypt the backup file.")
backup_info = get_backup_info(file_path, sk)
return {"backup_info": backup_info}
##########################################################################################
# Distributed Identities
##########################################################################################
async def did_update_recovery_ids(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
recovery_list = []
for _ in request["new_list"]:
recovery_list.append(hexstr_to_bytes(_))
if "num_verifications_required" in request:
new_amount_verifications_required = uint64(request["num_verifications_required"])
else:
new_amount_verifications_required = len(recovery_list)
async with self.service.wallet_state_manager.lock:
success = await wallet.update_recovery_list(recovery_list, new_amount_verifications_required)
# Update coin with new ID info
updated_puz = await wallet.get_new_puzzle()
spend_bundle = await wallet.create_spend(updated_puz.get_tree_hash())
if spend_bundle is not None and success:
return {"success": True}
return {"success": False}
async def did_spend(self, request):
wallet_id = int(request["wallet_id"])
async with self.service.wallet_state_manager.lock:
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
spend_bundle = await wallet.create_spend(request["puzzlehash"])
if spend_bundle is not None:
return {"success": True}
return {"success": False}
async def did_get_did(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
my_did: str = wallet.get_my_DID()
async with self.service.wallet_state_manager.lock:
coins = await wallet.select_coins(1)
if coins is None or coins == set():
return {"success": True, "wallet_id": wallet_id, "my_did": my_did}
else:
coin = coins.pop()
return {"success": True, "wallet_id": wallet_id, "my_did": my_did, "coin_id": coin.name()}
async def did_get_recovery_list(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
recovery_list = wallet.did_info.backup_ids
recover_hex_list = []
for _ in recovery_list:
recover_hex_list.append(_.hex())
return {
"success": True,
"wallet_id": wallet_id,
"recover_list": recover_hex_list,
"num_required": wallet.did_info.num_of_backup_ids_needed,
}
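# DID recovery: loads the attestation files created by backup DIDs and spends the temporary
# recovery coin; fails early if fewer attestations than num_of_backup_ids_needed are supplied.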
async def did_recovery_spend(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
if len(request["attest_filenames"]) < wallet.did_info.num_of_backup_ids_needed:
return {"success": False, "reason": "insufficient messages"}
async with self.service.wallet_state_manager.lock:
(
info_list,
message_spend_bundle,
) = await wallet.load_attest_files_for_recovery_spend(request["attest_filenames"])
if "pubkey" in request:
pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
else:
assert wallet.did_info.temp_pubkey is not None
pubkey = wallet.did_info.temp_pubkey
if "puzhash" in request:
puzhash = hexstr_to_bytes(request["puzhash"])
else:
assert wallet.did_info.temp_puzhash is not None
puzhash = wallet.did_info.temp_puzhash
success = await wallet.recovery_spend(
wallet.did_info.temp_coin,
puzhash,
info_list,
pubkey,
message_spend_bundle,
)
return {"success": success}
async def did_get_pubkey(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
pubkey = bytes((await wallet.wallet_state_manager.get_unused_derivation_record(wallet_id)).pubkey).hex()
return {"success": True, "pubkey": pubkey}
async def did_create_attest(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
async with self.service.wallet_state_manager.lock:
info = await wallet.get_info_for_recovery()
coin = hexstr_to_bytes(request["coin_name"])
pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
spend_bundle = await wallet.create_attestment(
coin, hexstr_to_bytes(request["puzhash"]), pubkey, request["filename"]
)
if spend_bundle is not None:
return {
"success": True,
"message_spend_bundle": bytes(spend_bundle).hex(),
"info": [info[0].hex(), info[1].hex(), info[2]],
}
else:
return {"success": False}
async def did_get_information_needed_for_recovery(self, request):
wallet_id = int(request["wallet_id"])
did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
my_did = did_wallet.get_my_DID()
coin_name = did_wallet.did_info.temp_coin.name().hex()
return {
"success": True,
"wallet_id": wallet_id,
"my_did": my_did,
"coin_name": coin_name,
"newpuzhash": did_wallet.did_info.temp_puzhash,
"pubkey": did_wallet.did_info.temp_pubkey,
"backup_dids": did_wallet.did_info.backup_ids,
}
async def did_create_backup_file(self, request):
try:
wallet_id = int(request["wallet_id"])
did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
did_wallet.create_backup(request["filename"])
return {"wallet_id": wallet_id, "success": True}
except Exception:
return {"wallet_id": wallet_id, "success": False}
##########################################################################################
# Rate Limited Wallet
##########################################################################################
async def rl_set_user_info(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = uint32(int(request["wallet_id"]))
rl_user = self.service.wallet_state_manager.wallets[wallet_id]
origin = request["origin"]
async with self.service.wallet_state_manager.lock:
await rl_user.set_user_info(
uint64(request["interval"]),
uint64(request["limit"]),
origin["parent_coin_info"],
origin["puzzle_hash"],
origin["amount"],
request["admin_pubkey"],
)
return {}
async def send_clawback_transaction(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
fee = int(request["fee"])
async with self.service.wallet_state_manager.lock:
tx = await wallet.clawback_rl_coin_transaction(fee)
await wallet.push_transaction(tx)
# Transaction may not have been included in the mempool yet. Use get_transaction to check.
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def add_rate_limited_funds(self, request):
wallet_id = uint32(request["wallet_id"])
wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
puzzle_hash = wallet.rl_get_aggregation_puzzlehash(wallet.rl_info.rl_puzzle_hash)
request["wallet_id"] = 1
request["puzzle_hash"] = puzzle_hash
async with self.service.wallet_state_manager.lock:
await wallet.rl_add_funds(request["amount"], puzzle_hash, request["fee"])
return {"status": "SUCCESS"}
async def get_farmed_amount(self, request):
tx_records: List[TransactionRecord] = await self.service.wallet_state_manager.tx_store.get_farming_rewards()
amount = 0
pool_reward_amount = 0
farmer_reward_amount = 0
fee_amount = 0
last_height_farmed = 0
for record in tx_records:
height = record.height_farmed(self.service.constants.GENESIS_CHALLENGE)
if height > last_height_farmed:
last_height_farmed = height
if record.type == TransactionType.COINBASE_REWARD:
pool_reward_amount += record.amount
if record.type == TransactionType.FEE_REWARD:
fee_amount += record.amount - calculate_base_farmer_reward(height)
farmer_reward_amount += calculate_base_farmer_reward(height)
amount += record.amount
assert amount == pool_reward_amount + farmer_reward_amount + fee_amount
return {
"farmed_amount": amount,
"pool_reward_amount": pool_reward_amount,
"farmer_reward_amount": farmer_reward_amount,
"fee_amount": fee_amount,
"last_height_farmed": last_height_farmed,
}
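# Builds and signs (but does not broadcast) a transaction from an explicit additions list and
# optional input coins; the first addition is the primary output and the rest are passed as
# extra primaries.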
async def create_signed_transaction(self, request):
if "additions" not in request or len(request["additions"]) < 1:
raise ValueError("Specify additions list")
additions: List[Dict] = request["additions"]
amount_0: uint64 = uint64(additions[0]["amount"])
assert amount_0 <= self.service.constants.MAX_COIN_AMOUNT
puzzle_hash_0 = hexstr_to_bytes(additions[0]["puzzle_hash"])
if len(puzzle_hash_0) != 32:
raise ValueError(f"Address must be 32 bytes. {puzzle_hash_0}")
additional_outputs = []
for addition in additions[1:]:
receiver_ph = hexstr_to_bytes(addition["puzzle_hash"])
if len(receiver_ph) != 32:
raise ValueError(f"Address must be 32 bytes. {receiver_ph}")
amount = uint64(addition["amount"])
if amount > self.service.constants.MAX_COIN_AMOUNT:
raise ValueError(f"Coin amount cannot exceed {self.service.constants.MAX_COIN_AMOUNT}")
additional_outputs.append({"puzzlehash": receiver_ph, "amount": amount})
fee = uint64(0)
if "fee" in request:
fee = uint64(request["fee"])
coins = None
if "coins" in request and len(request["coins"]) > 0:
coins = set([Coin.from_json_dict(coin_json) for coin_json in request["coins"]])
async with self.service.wallet_state_manager.lock:
signed_tx = await self.service.wallet_state_manager.main_wallet.generate_signed_transaction(
amount_0, puzzle_hash_0, fee, coins=coins, ignore_max_send_amount=True, primaries=additional_outputs
)
return {"signed_tx": signed_tx}
| import asyncio
import logging
import time
from datetime import datetime
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple
from blspy import PrivateKey, G1Element
from seno.cmds.init_funcs import check_keys
from seno.consensus.block_rewards import calculate_base_farmer_reward
from seno.protocols.protocol_message_types import ProtocolMessageTypes
from seno.server.outbound_message import NodeType, make_msg
from seno.simulator.simulator_protocol import FarmNewBlockProtocol
from seno.types.blockchain_format.coin import Coin
from seno.types.blockchain_format.sized_bytes import bytes32
from seno.util.bech32m import decode_puzzle_hash, encode_puzzle_hash
from seno.util.byte_types import hexstr_to_bytes
from seno.util.ints import uint32, uint64
from seno.util.keychain import bytes_to_mnemonic, generate_mnemonic
from seno.util.path import path_from_root
from seno.util.ws_message import WsRpcMessage, create_payload_dict
from seno.wallet.cc_wallet.cc_wallet import CCWallet
from seno.wallet.rl_wallet.rl_wallet import RLWallet
from seno.wallet.did_wallet.did_wallet import DIDWallet
from seno.wallet.trade_record import TradeRecord
from seno.wallet.transaction_record import TransactionRecord
from seno.wallet.util.backup_utils import download_backup, get_backup_info, upload_backup
from seno.wallet.util.trade_utils import trade_record_to_dict
from seno.wallet.util.transaction_type import TransactionType
from seno.wallet.util.wallet_types import WalletType
from seno.wallet.wallet_info import WalletInfo
from seno.wallet.wallet_node import WalletNode
# Timeout for response from wallet/full node for sending a transaction
TIMEOUT = 30
log = logging.getLogger(__name__)
class WalletRpcApi:
def __init__(self, wallet_node: WalletNode):
assert wallet_node is not None
self.service = wallet_node
self.service_name = "seno_wallet"
def get_routes(self) -> Dict[str, Callable]:
return {
# Key management
"/log_in": self.log_in,
"/get_public_keys": self.get_public_keys,
"/get_private_key": self.get_private_key,
"/generate_mnemonic": self.generate_mnemonic,
"/add_key": self.add_key,
"/delete_key": self.delete_key,
"/delete_all_keys": self.delete_all_keys,
# Wallet node
"/get_sync_status": self.get_sync_status,
"/get_height_info": self.get_height_info,
"/farm_block": self.farm_block, # Only when node simulator is running
"/get_initial_freeze_period": self.get_initial_freeze_period,
"/get_network_info": self.get_network_info,
# Wallet management
"/get_wallets": self.get_wallets,
"/create_new_wallet": self.create_new_wallet,
# Wallet
"/get_wallet_balance": self.get_wallet_balance,
"/get_transaction": self.get_transaction,
"/get_transactions": self.get_transactions,
"/get_next_address": self.get_next_address,
"/send_transaction": self.send_transaction,
"/create_backup": self.create_backup,
"/get_transaction_count": self.get_transaction_count,
"/get_farmed_amount": self.get_farmed_amount,
"/create_signed_transaction": self.create_signed_transaction,
# Coloured coins and trading
"/cc_set_name": self.cc_set_name,
"/cc_get_name": self.cc_get_name,
"/cc_spend": self.cc_spend,
"/cc_get_colour": self.cc_get_colour,
"/create_offer_for_ids": self.create_offer_for_ids,
"/get_discrepancies_for_offer": self.get_discrepancies_for_offer,
"/respond_to_offer": self.respond_to_offer,
"/get_trade": self.get_trade,
"/get_all_trades": self.get_all_trades,
"/cancel_trade": self.cancel_trade,
# DID Wallet
"/did_update_recovery_ids": self.did_update_recovery_ids,
"/did_spend": self.did_spend,
"/did_get_pubkey": self.did_get_pubkey,
"/did_get_did": self.did_get_did,
"/did_recovery_spend": self.did_recovery_spend,
"/did_get_recovery_list": self.did_get_recovery_list,
"/did_create_attest": self.did_create_attest,
"/did_get_information_needed_for_recovery": self.did_get_information_needed_for_recovery,
"/did_create_backup_file": self.did_create_backup_file,
# RL wallet
"/rl_set_user_info": self.rl_set_user_info,
"/send_clawback_transaction:": self.send_clawback_transaction,
"/add_rate_limited_funds:": self.add_rate_limited_funds,
}
async def _state_changed(self, *args) -> List[WsRpcMessage]:
"""
Called by the WalletNode or WalletStateManager when something has changed in the wallet. This
gives us an opportunity to send notifications to all connected clients via WebSocket.
"""
if len(args) < 2:
return []
data = {
"state": args[0],
}
if args[1] is not None:
data["wallet_id"] = args[1]
if args[2] is not None:
data["additional_data"] = args[2]
return [create_payload_dict("state_changed", data, "seno_wallet", "wallet_ui")]
async def _stop_wallet(self):
"""
Stops a currently running wallet/key, which allows starting the wallet with a new key.
Each key has its own wallet database.
"""
if self.service is not None:
self.service._close()
await self.service._await_closed()
##########################################################################################
# Key management
##########################################################################################
async def log_in(self, request):
"""
Logs in the wallet with a specific key.
"""
fingerprint = request["fingerprint"]
if self.service.logged_in_fingerprint == fingerprint:
return {"fingerprint": fingerprint}
await self._stop_wallet()
log_in_type = request["type"]
recovery_host = request["host"]
testing = False
if "testing" in self.service.config and self.service.config["testing"] is True:
testing = True
if log_in_type == "skip":
started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True)
elif log_in_type == "restore_backup":
file_path = Path(request["file_path"])
started = await self.service._start(fingerprint=fingerprint, backup_file=file_path)
else:
started = await self.service._start(fingerprint)
if started is True:
return {"fingerprint": fingerprint}
elif testing is True and self.service.backup_initialized is False:
response = {"success": False, "error": "not_initialized"}
return response
elif self.service.backup_initialized is False:
backup_info = None
backup_path = None
try:
private_key = self.service.get_key_for_fingerprint(fingerprint)
last_recovery = await download_backup(recovery_host, private_key)
backup_path = path_from_root(self.service.root_path, "last_recovery")
if backup_path.exists():
backup_path.unlink()
backup_path.write_text(last_recovery)
backup_info = get_backup_info(backup_path, private_key)
backup_info["backup_host"] = recovery_host
backup_info["downloaded"] = True
except Exception as e:
log.error(f"error {e}")
response = {"success": False, "error": "not_initialized"}
if backup_info is not None:
response["backup_info"] = backup_info
response["backup_path"] = f"{backup_path}"
return response
return {"success": False, "error": "Unknown Error"}
async def get_public_keys(self, request: Dict):
fingerprints = [sk.get_g1().get_fingerprint() for (sk, seed) in self.service.keychain.get_all_private_keys()]
return {"public_key_fingerprints": fingerprints}
async def _get_private_key(self, fingerprint) -> Tuple[Optional[PrivateKey], Optional[bytes]]:
for sk, seed in self.service.keychain.get_all_private_keys():
if sk.get_g1().get_fingerprint() == fingerprint:
return sk, seed
return None, None
async def get_private_key(self, request):
fingerprint = request["fingerprint"]
sk, seed = await self._get_private_key(fingerprint)
if sk is not None:
s = bytes_to_mnemonic(seed) if seed is not None else None
return {
"private_key": {
"fingerprint": fingerprint,
"sk": bytes(sk).hex(),
"pk": bytes(sk.get_g1()).hex(),
"seed": s,
},
}
return {"success": False, "private_key": {"fingerprint": fingerprint}}
async def generate_mnemonic(self, request: Dict):
return {"mnemonic": generate_mnemonic().split(" ")}
async def add_key(self, request):
if "mnemonic" not in request:
raise ValueError("Mnemonic not in request")
# Adding a key from a 24-word mnemonic
mnemonic = request["mnemonic"]
passphrase = ""
try:
sk = self.service.keychain.add_private_key(" ".join(mnemonic), passphrase)
except KeyError as e:
return {
"success": False,
"error": f"The word '{e.args[0]}' is incorrect.'",
"word": e.args[0],
}
fingerprint = sk.get_g1().get_fingerprint()
await self._stop_wallet()
# Makes sure the new key is added to config properly
started = False
check_keys(self.service.root_path)
request_type = request["type"]
if request_type == "new_wallet":
started = await self.service._start(fingerprint=fingerprint, new_wallet=True)
elif request_type == "skip":
started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True)
elif request_type == "restore_backup":
file_path = Path(request["file_path"])
started = await self.service._start(fingerprint=fingerprint, backup_file=file_path)
if started is True:
return {"fingerprint": fingerprint}
raise ValueError("Failed to start")
async def delete_key(self, request):
await self._stop_wallet()
fingerprint = request["fingerprint"]
self.service.keychain.delete_key_by_fingerprint(fingerprint)
path = path_from_root(
self.service.root_path,
f"{self.service.config['database_path']}-{fingerprint}",
)
if path.exists():
path.unlink()
return {}
async def delete_all_keys(self, request: Dict):
await self._stop_wallet()
self.service.keychain.delete_all_keys()
path = path_from_root(self.service.root_path, self.service.config["database_path"])
if path.exists():
path.unlink()
return {}
##########################################################################################
# Wallet Node
##########################################################################################
async def get_sync_status(self, request: Dict):
assert self.service.wallet_state_manager is not None
syncing = self.service.wallet_state_manager.sync_mode
synced = await self.service.wallet_state_manager.synced()
return {"synced": synced, "syncing": syncing, "genesis_initialized": True}
async def get_height_info(self, request: Dict):
assert self.service.wallet_state_manager is not None
peak = self.service.wallet_state_manager.peak
if peak is None:
return {"height": 0}
else:
return {"height": peak.height}
async def get_network_info(self, request: Dict):
assert self.service.wallet_state_manager is not None
network_name = self.service.config["selected_network"]
address_prefix = self.service.config["network_overrides"]["config"][network_name]["address_prefix"]
return {"network_name": network_name, "network_prefix": address_prefix}
async def farm_block(self, request):
raw_puzzle_hash = decode_puzzle_hash(request["address"])
request = FarmNewBlockProtocol(raw_puzzle_hash)
msg = make_msg(ProtocolMessageTypes.farm_new_block, request)
await self.service.server.send_to_all([msg], NodeType.FULL_NODE)
return {}
##########################################################################################
# Wallet Management
##########################################################################################
async def get_wallets(self, request: Dict):
assert self.service.wallet_state_manager is not None
wallets: List[WalletInfo] = await self.service.wallet_state_manager.get_all_wallet_info_entries()
return {"wallets": wallets}
async def _create_backup_and_upload(self, host) -> None:
assert self.service.wallet_state_manager is not None
try:
if "testing" in self.service.config and self.service.config["testing"] is True:
return None
now = time.time()
file_name = f"backup_{now}"
path = path_from_root(self.service.root_path, file_name)
await self.service.wallet_state_manager.create_wallet_backup(path)
backup_text = path.read_text()
response = await upload_backup(host, backup_text)
success = response["success"]
if success is False:
log.error("Failed to upload backup to wallet backup service")
elif success is True:
log.info("Finished upload of the backup file")
except Exception as e:
log.error(f"Exception in upload backup. Error: {e}")
async def create_new_wallet(self, request: Dict):
assert self.service.wallet_state_manager is not None
wallet_state_manager = self.service.wallet_state_manager
main_wallet = wallet_state_manager.main_wallet
host = request["host"]
if request["wallet_type"] == "cc_wallet":
if request["mode"] == "new":
async with self.service.wallet_state_manager.lock:
cc_wallet: CCWallet = await CCWallet.create_new_cc(
wallet_state_manager, main_wallet, request["amount"]
)
colour = cc_wallet.get_colour()
asyncio.create_task(self._create_backup_and_upload(host))
return {
"type": cc_wallet.type(),
"colour": colour,
"wallet_id": cc_wallet.id(),
}
elif request["mode"] == "existing":
async with self.service.wallet_state_manager.lock:
cc_wallet = await CCWallet.create_wallet_for_cc(
wallet_state_manager, main_wallet, request["colour"]
)
asyncio.create_task(self._create_backup_and_upload(host))
return {"type": cc_wallet.type()}
elif request["wallet_type"] == "rl_wallet":
if request["rl_type"] == "admin":
log.info("Create rl admin wallet")
async with self.service.wallet_state_manager.lock:
rl_admin: RLWallet = await RLWallet.create_rl_admin(wallet_state_manager)
success = await rl_admin.admin_create_coin(
uint64(int(request["interval"])),
uint64(int(request["limit"])),
request["pubkey"],
uint64(int(request["amount"])),
uint64(int(request["fee"])) if "fee" in request else uint64(0),
)
asyncio.create_task(self._create_backup_and_upload(host))
assert rl_admin.rl_info.admin_pubkey is not None
return {
"success": success,
"id": rl_admin.id(),
"type": rl_admin.type(),
"origin": rl_admin.rl_info.rl_origin,
"pubkey": rl_admin.rl_info.admin_pubkey.hex(),
}
elif request["rl_type"] == "user":
log.info("Create rl user wallet")
async with self.service.wallet_state_manager.lock:
rl_user: RLWallet = await RLWallet.create_rl_user(wallet_state_manager)
asyncio.create_task(self._create_backup_and_upload(host))
assert rl_user.rl_info.user_pubkey is not None
return {
"id": rl_user.id(),
"type": rl_user.type(),
"pubkey": rl_user.rl_info.user_pubkey.hex(),
}
elif request["wallet_type"] == "did_wallet":
if request["did_type"] == "new":
backup_dids = []
num_needed = 0
for d in request["backup_dids"]:
backup_dids.append(hexstr_to_bytes(d))
if len(backup_dids) > 0:
num_needed = uint64(request["num_of_backup_ids_needed"])
async with self.service.wallet_state_manager.lock:
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_state_manager,
main_wallet,
int(request["amount"]),
backup_dids,
uint64(num_needed),
)
my_did = did_wallet.get_my_DID()
return {
"success": True,
"type": did_wallet.type(),
"my_did": my_did,
"wallet_id": did_wallet.id(),
}
elif request["did_type"] == "recovery":
async with self.service.wallet_state_manager.lock:
did_wallet = await DIDWallet.create_new_did_wallet_from_recovery(
wallet_state_manager, main_wallet, request["filename"]
)
assert did_wallet.did_info.temp_coin is not None
assert did_wallet.did_info.temp_puzhash is not None
assert did_wallet.did_info.temp_pubkey is not None
my_did = did_wallet.get_my_DID()
coin_name = did_wallet.did_info.temp_coin.name().hex()
coin_list = did_wallet.did_info.temp_coin.as_list()
newpuzhash = did_wallet.did_info.temp_puzhash
pubkey = did_wallet.did_info.temp_pubkey
return {
"success": True,
"type": did_wallet.type(),
"my_did": my_did,
"wallet_id": did_wallet.id(),
"coin_name": coin_name,
"coin_list": coin_list,
"newpuzhash": newpuzhash.hex(),
"pubkey": pubkey.hex(),
"backup_dids": did_wallet.did_info.backup_ids,
"num_verifications_required": did_wallet.did_info.num_of_backup_ids_needed,
}
##########################################################################################
# Wallet
##########################################################################################
async def get_wallet_balance(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
wallet_id = uint32(int(request["wallet_id"]))
wallet = self.service.wallet_state_manager.wallets[wallet_id]
async with self.service.wallet_state_manager.lock:
unspent_records = await self.service.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(wallet_id)
balance = await wallet.get_confirmed_balance(unspent_records)
pending_balance = await wallet.get_unconfirmed_balance(unspent_records)
spendable_balance = await wallet.get_spendable_balance(unspent_records)
pending_change = await wallet.get_pending_change_balance()
max_send_amount = await wallet.get_max_send_amount(unspent_records)
unconfirmed_removals: Dict[
bytes32, Coin
] = await wallet.wallet_state_manager.unconfirmed_removals_for_wallet(wallet_id)
wallet_balance = {
"wallet_id": wallet_id,
"confirmed_wallet_balance": balance,
"unconfirmed_wallet_balance": pending_balance,
"spendable_balance": spendable_balance,
"pending_change": pending_change,
"max_send_amount": max_send_amount,
"unspent_coin_count": len(unspent_records),
"pending_coin_removal_count": len(unconfirmed_removals),
}
return {"wallet_balance": wallet_balance}
async def get_transaction(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
transaction_id: bytes32 = bytes32(hexstr_to_bytes(request["transaction_id"]))
tr: Optional[TransactionRecord] = await self.service.wallet_state_manager.get_transaction(transaction_id)
if tr is None:
raise ValueError(f"Transaction 0x{transaction_id.hex()} not found")
return {
"transaction": tr,
"transaction_id": tr.name,
}
async def get_transactions(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
start = request.get("start", 0)
end = request.get("end", 50)
transactions = await self.service.wallet_state_manager.tx_store.get_transactions_between(wallet_id, start, end)
formatted_transactions = []
selected = self.service.config["selected_network"]
prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"]
for tx in transactions:
formatted = tx.to_json_dict()
formatted["to_address"] = encode_puzzle_hash(tx.to_puzzle_hash, prefix)
formatted_transactions.append(formatted)
return {
"transactions": formatted_transactions,
"wallet_id": wallet_id,
}
async def get_initial_freeze_period(self, _: Dict):
freeze_period = self.service.constants.INITIAL_FREEZE_END_TIMESTAMP
return {"INITIAL_FREEZE_END_TIMESTAMP": freeze_period}
async def get_next_address(self, request: Dict) -> Dict:
"""
Returns a new address
"""
assert self.service.wallet_state_manager is not None
create_new = request["new_address"] is True
wallet_id = uint32(int(request["wallet_id"]))
wallet = self.service.wallet_state_manager.wallets[wallet_id]
selected = self.service.config["selected_network"]
prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"]
if wallet.type() == WalletType.STANDARD_WALLET:
raw_puzzle_hash = await wallet.get_puzzle_hash(create_new)
address = encode_puzzle_hash(raw_puzzle_hash, prefix)
elif wallet.type() == WalletType.COLOURED_COIN:
raw_puzzle_hash = await wallet.get_puzzle_hash(create_new)
address = encode_puzzle_hash(raw_puzzle_hash, prefix)
else:
raise ValueError(f"Wallet type {wallet.type()} cannot create puzzle hashes")
return {
"wallet_id": wallet_id,
"address": address,
}
async def send_transaction(self, request):
assert self.service.wallet_state_manager is not None
if await self.service.wallet_state_manager.synced() is False:
raise ValueError("Wallet needs to be fully synced before sending transactions")
if int(time.time()) < self.service.constants.INITIAL_FREEZE_END_TIMESTAMP:
end_date = datetime.fromtimestamp(float(self.service.constants.INITIAL_FREEZE_END_TIMESTAMP))
raise ValueError(f"No transactions before: {end_date}")
wallet_id = int(request["wallet_id"])
wallet = self.service.wallet_state_manager.wallets[wallet_id]
if not isinstance(request["amount"], int) or not isinstance(request.get("fee", 0), int):
raise ValueError("An integer amount or fee is required (too many decimals)")
amount: uint64 = uint64(request["amount"])
puzzle_hash: bytes32 = decode_puzzle_hash(request["address"])
if "fee" in request:
fee = uint64(request["fee"])
else:
fee = uint64(0)
async with self.service.wallet_state_manager.lock:
tx: TransactionRecord = await wallet.generate_signed_transaction(amount, puzzle_hash, fee)
await wallet.push_transaction(tx)
# Transaction may not have been included in the mempool yet. Use get_transaction to check.
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def get_transaction_count(self, request):
wallet_id = int(request["wallet_id"])
count = await self.service.wallet_state_manager.tx_store.get_transaction_count_for_wallet(wallet_id)
return {"wallet_id": wallet_id, "count": count}
async def create_backup(self, request):
assert self.service.wallet_state_manager is not None
file_path = Path(request["file_path"])
await self.service.wallet_state_manager.create_wallet_backup(file_path)
return {}
##########################################################################################
# Coloured Coins and Trading
##########################################################################################
async def cc_set_name(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
await wallet.set_name(str(request["name"]))
return {"wallet_id": wallet_id}
async def cc_get_name(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
name: str = await wallet.get_name()
return {"wallet_id": wallet_id, "name": name}
async def cc_spend(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
puzzle_hash: bytes32 = decode_puzzle_hash(request["inner_address"])
if not isinstance(request["amount"], int) or not isinstance(request.get("fee", 0), int):
raise ValueError("An integer amount or fee is required (too many decimals)")
amount: uint64 = uint64(request["amount"])
if "fee" in request:
fee = uint64(request["fee"])
else:
fee = uint64(0)
async with self.service.wallet_state_manager.lock:
tx: TransactionRecord = await wallet.generate_signed_transaction([amount], [puzzle_hash], fee)
await wallet.push_transaction(tx)
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def cc_get_colour(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
colour: str = wallet.get_colour()
return {"colour": colour, "wallet_id": wallet_id}
async def create_offer_for_ids(self, request):
assert self.service.wallet_state_manager is not None
offer = request["ids"]
file_name = request["filename"]
async with self.service.wallet_state_manager.lock:
(
success,
spend_bundle,
error,
) = await self.service.wallet_state_manager.trade_manager.create_offer_for_ids(offer, file_name)
if success:
self.service.wallet_state_manager.trade_manager.write_offer_to_disk(Path(file_name), spend_bundle)
return {}
raise ValueError(error)
async def get_discrepancies_for_offer(self, request):
assert self.service.wallet_state_manager is not None
file_name = request["filename"]
file_path = Path(file_name)
async with self.service.wallet_state_manager.lock:
(
success,
discrepancies,
error,
) = await self.service.wallet_state_manager.trade_manager.get_discrepancies_for_offer(file_path)
if success:
return {"discrepancies": discrepancies}
raise ValueError(error)
async def respond_to_offer(self, request):
assert self.service.wallet_state_manager is not None
file_path = Path(request["filename"])
async with self.service.wallet_state_manager.lock:
(
success,
trade_record,
error,
) = await self.service.wallet_state_manager.trade_manager.respond_to_offer(file_path)
if not success:
raise ValueError(error)
return {}
async def get_trade(self, request: Dict):
assert self.service.wallet_state_manager is not None
trade_mgr = self.service.wallet_state_manager.trade_manager
trade_id = request["trade_id"]
trade: Optional[TradeRecord] = await trade_mgr.get_trade_by_id(trade_id)
if trade is None:
raise ValueError(f"No trade with trade id: {trade_id}")
result = trade_record_to_dict(trade)
return {"trade": result}
async def get_all_trades(self, request: Dict):
assert self.service.wallet_state_manager is not None
trade_mgr = self.service.wallet_state_manager.trade_manager
all_trades = await trade_mgr.get_all_trades()
result = []
for trade in all_trades:
result.append(trade_record_to_dict(trade))
return {"trades": result}
async def cancel_trade(self, request: Dict):
assert self.service.wallet_state_manager is not None
wsm = self.service.wallet_state_manager
secure = request["secure"]
trade_id = hexstr_to_bytes(request["trade_id"])
async with self.service.wallet_state_manager.lock:
if secure:
await wsm.trade_manager.cancel_pending_offer_safely(trade_id)
else:
await wsm.trade_manager.cancel_pending_offer(trade_id)
return {}
async def get_backup_info(self, request: Dict):
file_path = Path(request["file_path"])
sk = None
if "words" in request:
mnemonic = request["words"]
passphrase = ""
try:
sk = self.service.keychain.add_private_key(" ".join(mnemonic), passphrase)
except KeyError as e:
return {
"success": False,
"error": f"The word '{e.args[0]}' is incorrect.",
"word": e.args[0],
}
elif "fingerprint" in request:
sk, seed = await self._get_private_key(request["fingerprint"])
if sk is None:
raise ValueError("Unable to decrypt the backup file.")
backup_info = get_backup_info(file_path, sk)
return {"backup_info": backup_info}
##########################################################################################
# Distributed Identities
##########################################################################################
async def did_update_recovery_ids(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
recovery_list = [hexstr_to_bytes(backup_id) for backup_id in request["new_list"]]
if "num_verifications_required" in request:
new_amount_verifications_required = uint64(request["num_verifications_required"])
else:
new_amount_verifications_required = len(recovery_list)
async with self.service.wallet_state_manager.lock:
success = await wallet.update_recovery_list(recovery_list, new_amount_verifications_required)
# Update coin with new ID info
updated_puz = await wallet.get_new_puzzle()
spend_bundle = await wallet.create_spend(updated_puz.get_tree_hash())
if spend_bundle is not None and success:
return {"success": True}
return {"success": False}
async def did_spend(self, request):
wallet_id = int(request["wallet_id"])
async with self.service.wallet_state_manager.lock:
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
spend_bundle = await wallet.create_spend(request["puzzlehash"])
if spend_bundle is not None:
return {"success": True}
return {"success": False}
async def did_get_did(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
my_did: str = wallet.get_my_DID()
async with self.service.wallet_state_manager.lock:
coins = await wallet.select_coins(1)
if coins is None or coins == set():
return {"success": True, "wallet_id": wallet_id, "my_did": my_did}
else:
coin = coins.pop()
return {"success": True, "wallet_id": wallet_id, "my_did": my_did, "coin_id": coin.name()}
async def did_get_recovery_list(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
recovery_list = wallet.did_info.backup_ids
recover_hex_list = [backup_id.hex() for backup_id in recovery_list]
return {
"success": True,
"wallet_id": wallet_id,
"recover_list": recover_hex_list,
"num_required": wallet.did_info.num_of_backup_ids_needed,
}
async def did_recovery_spend(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
if len(request["attest_filenames"]) < wallet.did_info.num_of_backup_ids_needed:
return {"success": False, "reason": "insufficient messages"}
async with self.service.wallet_state_manager.lock:
(
info_list,
message_spend_bundle,
) = await wallet.load_attest_files_for_recovery_spend(request["attest_filenames"])
if "pubkey" in request:
pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
else:
assert wallet.did_info.temp_pubkey is not None
pubkey = wallet.did_info.temp_pubkey
if "puzhash" in request:
puzhash = hexstr_to_bytes(request["puzhash"])
else:
assert wallet.did_info.temp_puzhash is not None
puzhash = wallet.did_info.temp_puzhash
success = await wallet.recovery_spend(
wallet.did_info.temp_coin,
puzhash,
info_list,
pubkey,
message_spend_bundle,
)
return {"success": success}
async def did_get_pubkey(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
pubkey = bytes((await wallet.wallet_state_manager.get_unused_derivation_record(wallet_id)).pubkey).hex()
return {"success": True, "pubkey": pubkey}
async def did_create_attest(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
async with self.service.wallet_state_manager.lock:
info = await wallet.get_info_for_recovery()
coin = hexstr_to_bytes(request["coin_name"])
pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
spend_bundle = await wallet.create_attestment(
coin, hexstr_to_bytes(request["puzhash"]), pubkey, request["filename"]
)
if spend_bundle is not None:
return {
"success": True,
"message_spend_bundle": bytes(spend_bundle).hex(),
"info": [info[0].hex(), info[1].hex(), info[2]],
}
else:
return {"success": False}
async def did_get_information_needed_for_recovery(self, request):
wallet_id = int(request["wallet_id"])
did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
my_did = did_wallet.get_my_DID()
coin_name = did_wallet.did_info.temp_coin.name().hex()
return {
"success": True,
"wallet_id": wallet_id,
"my_did": my_did,
"coin_name": coin_name,
"newpuzhash": did_wallet.did_info.temp_puzhash,
"pubkey": did_wallet.did_info.temp_pubkey,
"backup_dids": did_wallet.did_info.backup_ids,
}
async def did_create_backup_file(self, request):
try:
wallet_id = int(request["wallet_id"])
did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
did_wallet.create_backup(request["filename"])
return {"wallet_id": wallet_id, "success": True}
except Exception:
return {"wallet_id": wallet_id, "success": False}
##########################################################################################
# Rate Limited Wallet
##########################################################################################
async def rl_set_user_info(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = uint32(int(request["wallet_id"]))
rl_user = self.service.wallet_state_manager.wallets[wallet_id]
origin = request["origin"]
async with self.service.wallet_state_manager.lock:
await rl_user.set_user_info(
uint64(request["interval"]),
uint64(request["limit"]),
origin["parent_coin_info"],
origin["puzzle_hash"],
origin["amount"],
request["admin_pubkey"],
)
return {}
async def send_clawback_transaction(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
fee = int(request["fee"])
async with self.service.wallet_state_manager.lock:
tx = await wallet.clawback_rl_coin_transaction(fee)
await wallet.push_transaction(tx)
# Transaction may not have been included in the mempool yet. Use get_transaction to check.
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def add_rate_limited_funds(self, request):
wallet_id = uint32(request["wallet_id"])
wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
puzzle_hash = wallet.rl_get_aggregation_puzzlehash(wallet.rl_info.rl_puzzle_hash)
request["wallet_id"] = 1
request["puzzle_hash"] = puzzle_hash
async with self.service.wallet_state_manager.lock:
await wallet.rl_add_funds(request["amount"], puzzle_hash, request["fee"])
return {"status": "SUCCESS"}
async def get_farmed_amount(self, request):
tx_records: List[TransactionRecord] = await self.service.wallet_state_manager.tx_store.get_farming_rewards()
amount = 0
pool_reward_amount = 0
farmer_reward_amount = 0
fee_amount = 0
last_height_farmed = 0
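# The loop below splits farming rewards into pool rewards (COINBASE_REWARD transactions)
# and farmer rewards (FEE_REWARD transactions); for the latter, the base farmer reward for
# that height is separated from the collected fees, so that
# amount == pool_reward_amount + farmer_reward_amount + fee_amount, as asserted below.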
for record in tx_records:
height = record.height_farmed(self.service.constants.GENESIS_CHALLENGE)
if height > last_height_farmed:
last_height_farmed = height
if record.type == TransactionType.COINBASE_REWARD:
pool_reward_amount += record.amount
if record.type == TransactionType.FEE_REWARD:
fee_amount += record.amount - calculate_base_farmer_reward(height)
farmer_reward_amount += calculate_base_farmer_reward(height)
amount += record.amount
assert amount == pool_reward_amount + farmer_reward_amount + fee_amount
return {
"farmed_amount": amount,
"pool_reward_amount": pool_reward_amount,
"farmer_reward_amount": farmer_reward_amount,
"fee_amount": fee_amount,
"last_height_farmed": last_height_farmed,
}
async def create_signed_transaction(self, request):
if "additions" not in request or len(request["additions"]) < 1:
raise ValueError("Specify additions list")
additions: List[Dict] = request["additions"]
amount_0: uint64 = uint64(additions[0]["amount"])
assert amount_0 <= self.service.constants.MAX_COIN_AMOUNT
puzzle_hash_0 = hexstr_to_bytes(additions[0]["puzzle_hash"])
if len(puzzle_hash_0) != 32:
raise ValueError(f"Address must be 32 bytes. {puzzle_hash_0}")
additional_outputs = []
for addition in additions[1:]:
receiver_ph = hexstr_to_bytes(addition["puzzle_hash"])
if len(receiver_ph) != 32:
raise ValueError(f"Address must be 32 bytes. {receiver_ph}")
amount = uint64(addition["amount"])
if amount > self.service.constants.MAX_COIN_AMOUNT:
raise ValueError(f"Coin amount cannot exceed {self.service.constants.MAX_COIN_AMOUNT}")
additional_outputs.append({"puzzlehash": receiver_ph, "amount": amount})
fee = uint64(0)
if "fee" in request:
fee = uint64(request["fee"])
coins = None
if "coins" in request and len(request["coins"]) > 0:
coins = {Coin.from_json_dict(coin_json) for coin_json in request["coins"]}
async with self.service.wallet_state_manager.lock:
signed_tx = await self.service.wallet_state_manager.main_wallet.generate_signed_transaction(
amount_0, puzzle_hash_0, fee, coins=coins, ignore_max_send_amount=True, primaries=additional_outputs
)
return {"signed_tx": signed_tx}
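# Illustrative sketch (not part of the original RPC API): the comments on send_transaction
# and send_clawback_transaction note that a pushed transaction may not be in the mempool yet
# and that get_transaction should be used to check. A caller-side polling helper could look
# roughly like this; the `rpc_api` argument (an instance of this RPC class), the `confirmed`
# attribute on the returned TransactionRecord, and the one-second poll interval are
# assumptions made for the example only.
async def _example_wait_for_transaction(rpc_api, transaction_id_hex: str, attempts: int = 10):
    import asyncio
    for _ in range(attempts):
        response = await rpc_api.get_transaction({"transaction_id": transaction_id_hex})
        transaction = response["transaction"]
        if getattr(transaction, "confirmed", False):  # assumed TransactionRecord field
            return transaction
        await asyncio.sleep(1)
    return None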
|
from os import system
def comprar(comida, juguetes):
comprado = ""
while not comprado:
system("cls")
comprar = (input("Que quiere comprar? Alimentos | Juguetes : ")).lower()
if comprar == "alimento":
print(f"Carne: {comida['carne']['cantidad']}|Agua: {comida['agua']['cantidad']}|Huesos: {comida['hueso']['cantidad']}")
producto = (input("Que queres comprar?: ")).lower()
if producto in comida.keys():
cantidad = input("Cuánto quieres comprar?: ")
if cantidad.isdecimal():
comida[producto]['cantidad'] += int(cantidad)
comprado = producto
if comprar == "juguete":
print("Pelota | Soga | Muñeco")
producto = (input("Que quieres comprar?: ")).lower()
if producto in juguetes.keys():
juguetes[producto] = "si"
comprado = producto | from os import system
def comprar(comida, juguetes):
comprado = ""
while not comprado:
system("cls")
comprar = (input("Que quiere comprar? Alimentos | Juguetes : ")).lower()
if comprar == "alimento":
print(f"Carne: {comida['carne']['cantidad']}|Agua: {comida['agua']['cantidad']}|Huesos: {comida['hueso']['cantidad']}")
producto = (input("Que queres comprar?: ")).lower()
if producto in comida.keys():
cantidad = input("Cuánto quieres comprar?: ")
if cantidad.isdecimal():
comida[producto]['cantidad'] += int(cantidad)
comprado = producto
if comprar == "juguete":
print("Pelota | Soga | Muñeco")
producto = (input("Que quieres comprar?: ")).lower()
if producto in juguetes.keys():
juguetes[producto] = "si"
comprado = producto |
import operator
from functools import reduce
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.db.models import Q, Sum
from django.shortcuts import HttpResponse, get_object_or_404, redirect, render
from django.views.generic import View
from django.views.generic.base import TemplateView
from .forms import RecipeForm
from .models import (Purchase, Recipe, Subscription)
from .utils import paginator_data
User = get_user_model()
def index(request):
'''View that renders the main page'''
# get the list of tags from the GET request
tags = request.GET.getlist('tag')
if tags:
# filter by the combination of selected tags
query = reduce(operator.or_, (Q(tags__contains=tag) for tag in tags))
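# e.g. tags = ['breakfast', 'dinner'] builds Q(tags__contains='breakfast') | Q(tags__contains='dinner'),
# so a recipe is shown if it carries at least one of the selected tags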
recipies = Recipe.objects.filter(query).order_by('-date_pub')
else:
recipies = Recipe.objects.all().order_by('-date_pub')
# Since the paginator is used on almost every page, part of this logic
# has been moved out to a separate function in utils.py
page, paginator = paginator_data(request, recipies)
return render(request, 'index.html', context={'page': page,
'paginator': paginator,
'tags': tags})
def recipe_detail(request, slug):
'''View that renders the recipe page'''
recipe = get_object_or_404(Recipe, slug__iexact=slug)
return render(request, 'recipe_detail.html', context={'recipe': recipe})
def profile_index(request, username):
'''A user's personal page'''
author = get_object_or_404(User, username=username)
user = request.user
tags = request.GET.getlist('tag')
if tags:
# filter by the combination of selected tags
query = reduce(operator.or_, (Q(tags__contains=tag) for tag in tags))
recipies = author.recipes.filter(query).order_by('-date_pub')
else:
recipies = author.recipes.all().order_by('-date_pub')
following = Subscription.objects.filter(user__username=user,
author=author).count()
return render(request, 'profile.html', context={'recipies': recipies,
'author': author,
'user': user,
'following': following,
'tags': tags})
@login_required
def subscription_index(request):
'''Page with the user's subscriptions'''
follow_authors = User.objects.filter(
following__user=request.user).prefetch_related('recipes')
page, paginator = paginator_data(request, follow_authors)
return render(request, 'subscription_index.html',
context={'page': page, 'paginator': paginator, })
@login_required
def favorite_index(request):
'''Page with the user's favorite recipes'''
tags = request.GET.getlist('tag')
if tags:
# filter by the combination of selected tags
query = reduce(operator.or_, (Q(tags__contains=tag) for tag in tags))
recipies = Recipe.objects.filter(query).order_by('-date_pub').filter(
favorites__user=request.user).select_related('author')
else:
recipies = Recipe.objects.all().order_by('-date_pub').filter(
favorites__user=request.user).select_related('author')
page, paginator = paginator_data(request, recipies)
return render(request, 'favorite_index.html',
context={'page': page,
'paginator': paginator,
'tags': tags})
@login_required
def purchase_index(request):
'''Shopping list'''
recipies = Recipe.objects.filter(
purchases__user=request.user)
return render(request, 'purchase_index.html', context={
'recipies': recipies})
@login_required
def get_purchase_list(request):
'''Download a txt file with the list of ingredients for the selected recipes'''
file_name = 'Purchase_list.txt'
txt = ''
purchase = Purchase.objects.filter(user=request.user)
ingredients = purchase.values('recipe__ingredients__title',
'recipe__ingredients__dimension').annotate(
total_amount=Sum('recipe__ingredients__ingredient_recipe__amount'
''))
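# The values(...).annotate(...) queryset above groups the ingredients of the purchased
# recipes by title and unit (dimension) and sums their amounts, so the loop below can
# write one line per distinct ingredient with its total quantity.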
result = set()
for ingredient in ingredients:
if ingredient['recipe__ingredients__title'] not in result:
item = (f'{ingredient["recipe__ingredients__title"]} '
f'{ingredient["total_amount"]} '
f'{ingredient["recipe__ingredients__dimension"]}'
)
result.add(ingredient['recipe__ingredients__title'])
txt += item + '\n'
response = HttpResponse(txt, content_type='text/plain; charset=utf-8')
response['Content-Disposition'] = f'attachment; filename={file_name}'
return response
class RecipeCreateUpdate(View):
'''Create or edit a recipe'''
def get(self, request, slug=None):
if slug:
recipe = get_object_or_404(Recipe,
author__username=(self.request.
user.username),
slug__iexact=slug)
form = RecipeForm(instance=recipe)
title = 'Редактирование рецепта'
botton_name = 'Изменить рецепт'
context = {
'form': form,
'botton_name': botton_name,
'title': title,
'recipe': recipe,
}
else:
form = RecipeForm()
title = 'Создание рецепта'
botton_name = 'Создать рецепт'
context = {
'form': form,
'botton_name': botton_name,
'title': title
}
template = 'recipe_create_or_update.html'
return render(request, template, context)
def post(self, request, slug=None):
if slug:
recipe = get_object_or_404(Recipe,
author__username=(self.request.
user.username),
slug__iexact=slug)
if request.user != recipe.author:
return redirect('index')
bound_form = RecipeForm(request.POST or None,
files=request.FILES or None,
instance=recipe,
initial={"request": request})
context = {
'form': bound_form,
'title': 'Редактирование рецепта',
'botton_name': 'Редактирование рецепта',
'recipe': recipe
}
else:
bound_form = RecipeForm(request.POST or None,
files=request.FILES or None,
initial={"request": request})
context = {
'form': bound_form,
'title': 'Создание рецепта',
'botton_name': 'Создать рецепт'
}
if bound_form.is_valid():
new_recipe = bound_form.save(commit=False)
new_recipe.tags = request.POST.getlist('tags')
return redirect(new_recipe)
return render(request, 'recipe_create_or_update.html',
context=context)
class RecipeDelete(View):
'''Delete a recipe'''
def get(self, request, pk):
recipe = get_object_or_404(Recipe, author=request.user, id=pk)
recipe.delete()
return redirect('index')
class About(TemplateView):
'''About the author'''
template_name = 'about.html'
class Technologies(TemplateView):
'''Technologies'''
template_name = 'technologies.html'
| import operator
from functools import reduce
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.db.models import Q, Sum
from django.shortcuts import HttpResponse, get_object_or_404, redirect, render
from django.views.generic import View
from django.views.generic.base import TemplateView
from .forms import RecipeForm
from .models import (Purchase, Recipe, Subscription)
from .utils import paginator_data
User = get_user_model()
def index(request):
'''View that renders the main page'''
# get the list of tags from the GET request
tags = request.GET.getlist('tag')
if tags:
# filter by the combination of selected tags
query = reduce(operator.or_, (Q(tags__contains=tag) for tag in tags))
recipies = Recipe.objects.filter(query).order_by('-date_pub')
else:
recipies = Recipe.objects.all().order_by('-date_pub')
# Since the paginator is used on almost every page, part of this logic
# has been moved out to a separate function in utils.py
page, paginator = paginator_data(request, recipies)
return render(request, 'index.html', context={'page': page,
'paginator': paginator,
'tags': tags})
def recipe_detail(request, slug):
'''View that renders the recipe page'''
recipe = get_object_or_404(Recipe, slug__iexact=slug)
return render(request, 'recipe_detail.html', context={'recipe': recipe})
def profile_index(request, username):
'''A user's personal page'''
author = get_object_or_404(User, username=username)
user = request.user
tags = request.GET.getlist('tag')
if tags:
# filter by the combination of selected tags
query = reduce(operator.or_, (Q(tags__contains=tag) for tag in tags))
recipies = author.recipes.filter(query).order_by('-date_pub')
else:
recipies = author.recipes.all().order_by('-date_pub')
following = Subscription.objects.filter(user__username=user,
author=author).count()
return render(request, 'profile.html', context={'recipies': recipies,
'author': author,
'user': user,
'following': following,
'tags': tags})
@login_required
def subscription_index(request):
'''Page with the user's subscriptions'''
follow_authors = User.objects.filter(
following__user=request.user).prefetch_related('recipes')
page, paginator = paginator_data(request, follow_authors)
return render(request, 'subscription_index.html',
context={'page': page, 'paginator': paginator, })
@login_required
def favorite_index(request):
'''Page with the user's favorite recipes'''
tags = request.GET.getlist('tag')
if tags:
# filter by the combination of selected tags
query = reduce(operator.or_, (Q(tags__contains=tag) for tag in tags))
recipies = Recipe.objects.filter(query).order_by('-date_pub').filter(
favorites__user=request.user).select_related('author')
else:
recipies = Recipe.objects.all().order_by('-date_pub').filter(
favorites__user=request.user).select_related('author')
page, paginator = paginator_data(request, recipies)
return render(request, 'favorite_index.html',
context={'page': page,
'paginator': paginator,
'tags': tags})
@login_required
def purchase_index(request):
'''Shopping list'''
recipies = Recipe.objects.filter(
purchases__user=request.user)
return render(request, 'purchase_index.html', context={
'recipies': recipies})
@login_required
def get_purchase_list(request):
'''Download a txt file with the list of ingredients for the selected recipes'''
file_name = 'Purchase_list.txt'
txt = ''
purchase = Purchase.objects.filter(user=request.user)
ingredients = purchase.values('recipe__ingredients__title',
'recipe__ingredients__dimension').annotate(
total_amount=Sum('recipe__ingredients__ingredient_recipe__amount'
''))
result = set()
for ingredient in ingredients:
if ingredient['recipe__ingredients__title'] not in result:
item = (f'{ingredient["recipe__ingredients__title"]} '
f'{ingredient["total_amount"]} '
f'{ingredient["recipe__ingredients__dimension"]}'
)
result.add(ingredient['recipe__ingredients__title'])
txt += item + '\n'
response = HttpResponse(txt, content_type='text/plain; charset=utf-8')
response['Content-Disposition'] = f'attachment; filename={file_name}'
return response
class RecipeCreateUpdate(View):
'''Create or edit a recipe'''
def get(self, request, slug=None):
if slug:
recipe = get_object_or_404(Recipe,
author__username=(self.request.
user.username),
slug__iexact=slug)
form = RecipeForm(instance=recipe)
title = 'Редактирование рецепта'
botton_name = 'Изменить рецепт'
context = {
'form': form,
'botton_name': botton_name,
'title': title,
'recipe': recipe,
}
else:
form = RecipeForm()
title = 'Создание рецепта'
botton_name = 'Создать рецепт'
context = {
'form': form,
'botton_name': botton_name,
'title': title
}
template = 'recipe_create_or_update.html'
return render(request, template, context)
def post(self, request, slug=None):
if slug:
recipe = get_object_or_404(Recipe,
author__username=(self.request.
user.username),
slug__iexact=slug)
if request.user != recipe.author:
return redirect('index')
bound_form = RecipeForm(request.POST or None,
files=request.FILES or None,
instance=recipe,
initial={"request": request})
context = {
'form': bound_form,
'title': 'Редактирование рецепта',
'botton_name': 'Редактирование рецепта',
'recipe': recipe
}
else:
bound_form = RecipeForm(request.POST or None,
files=request.FILES or None,
initial={"request": request})
context = {
'form': bound_form,
'title': 'Создание рецепта',
'botton_name': 'Создать рецепт'
}
if bound_form.is_valid():
new_recipe = bound_form.save(commit=False)
new_recipe.tags = request.POST.getlist('tags')
return redirect(new_recipe)
return render(request, 'recipe_create_or_update.html',
context=context)
class RecipeDelete(View):
'''Delete a recipe'''
def get(self, request, pk):
recipe = get_object_or_404(Recipe, author=request.user, id=pk)
recipe.delete()
return redirect('index')
class About(TemplateView):
'''About the author'''
template_name = 'about.html'
class Technologies(TemplateView):
'''Technologies'''
template_name = 'technologies.html'
|
import json
import os
import sys
import disnake
from disnake.ext import commands
from disnake.ext.commands import Context
from helpers import json_manager, checks
import logging
if not os.path.isfile("../config.json"):
sys.exit("'config.json' not found by general-normal! Please add it and try again.")
else:
with open("../config.json") as file:
config = json.load(file)
''' Logging '''
logger = logging.getLogger('discord')
logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename='../logs/discord.log', encoding='utf-8',mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
class Owner(commands.Cog, name="owner-normal"):
def __init__(self, bot):
self.bot = bot
@commands.command(
name="shutdown",
description="Make the bot shutdown.",
)
@checks.is_owner()
async def shutdown(self, context: Context):
"""
Makes the bot shutdown.
"""
embed = disnake.Embed(
description="Shutting down. Bye! :wave:",
color=0x9C84EF
)
logger.info(f"Shutting down. Bye! :wave:")
await context.send(embed=embed)
await self.bot.close()
@commands.command(
name="say",
description="The bot will say anything you want.",
)
@checks.is_owner()
async def say(self, context: Context, *, message: str):
"""
The bot will say anything you want.
"""
logger.info(f"Saying '{message}'")
await context.send(message)
@commands.command(
name="embed",
description="The bot will say anything you want, but within embeds.",
)
@checks.is_owner()
async def embed(self, context: Context, *, message: str):
"""
The bot will say anything you want, but within embeds.
"""
embed = disnake.Embed(
description=message,
color=0x9C84EF
)
logger.info(f"Saying '{message}'")
await context.send(embed=embed)
@commands.group(
name="blacklist"
)
async def blacklist(self, context: Context):
"""
Lets you add a user to, or remove a user from, the bot's blacklist.
"""
if context.invoked_subcommand is None:
with open("../data/blacklist.json") as file:
blacklist = json.load(file)
embed = disnake.Embed(
title=f"There are currently {len(blacklist['ids'])} blacklisted IDs",
description=f"{', '.join(str(id) for id in blacklist['ids'])}",
color=0x9C84EF
)
await context.send(embed=embed)
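# Note: blacklist.json is assumed to be a JSON object of the form {"ids": [<user_id>, ...]};
# that is the shape the json.load calls in this cog rely on, and json_manager's
# add_user_to_blacklist / remove_user_from_blacklist helpers presumably keep it in sync.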
@blacklist.command(
name="add"
)
async def blacklist_add(self, context: Context, member: disnake.Member = None):
"""
Lets you add a user to the blacklist so they can no longer use the bot.
"""
try:
user_id = member.id
with open("../data/blacklist.json") as file:
blacklist = json.load(file)
if user_id in blacklist['ids']:
embed = disnake.Embed(
title="Error!",
description=f"**{member.name}** is already in the blacklist.",
color=0xE02B2B
)
return await context.send(embed=embed)
json_manager.add_user_to_blacklist(user_id)
embed = disnake.Embed(
title="User Blacklisted",
description=f"**{member.name}** has been successfully added to the blacklist",
color=0x9C84EF
)
with open("../data/blacklist.json") as file:
blacklist = json.load(file)
embed.set_footer(
text=f"There are now {len(blacklist['ids'])} users in the blacklist"
)
logger.info(f"{member.name} has been added to the blacklist.")
await context.send(embed=embed)
except:
embed = disnake.Embed(
title="Error!",
description=f"An unknown error occurred when trying to add **{member.name}** to the blacklist.",
color=0xE02B2B
)
await context.send(embed=embed)
@blacklist.command(
name="remove"
)
async def blacklist_remove(self, context, member: disnake.Member = None):
"""
Lets you remove a user from the blacklist so they can use the bot again.
"""
try:
user_id = member.id
json_manager.remove_user_from_blacklist(user_id)
embed = disnake.Embed(
title="User removed from blacklist",
description=f"**{member.name}** has been successfully removed from the blacklist",
color=0x9C84EF
)
with open("../data/blacklist.json") as file:
blacklist = json.load(file)
embed.set_footer(
text=f"There are now {len(blacklist['ids'])} users in the blacklist"
)
logger.info(f"{member.name} has been removed from the blacklist.")
await context.send(embed=embed)
except:
embed = disnake.Embed(
title="Error!",
description=f"**{member.name}** is not in the blacklist.",
color=0xE02B2B
)
await context.send(embed=embed)
def setup(bot):
bot.add_cog(Owner(bot)) | import json
import os
import sys
import disnake
from disnake.ext import commands
from disnake.ext.commands import Context
from helpers import json_manager, checks
import logging
if not os.path.isfile("../config.json"):
sys.exit("'config.json' not found by general-normal! Please add it and try again.")
else:
with open("../config.json") as file:
config = json.load(file)
''' Logging '''
logger = logging.getLogger('discord')
logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename='../logs/discord.log', encoding='utf-8',mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
class Owner(commands.Cog, name="owner-normal"):
def __init__(self, bot):
self.bot = bot
@commands.command(
name="shutdown",
description="Make the bot shutdown.",
)
@checks.is_owner()
async def shutdown(self, context: Context):
"""
Makes the bot shutdown.
"""
embed = disnake.Embed(
description="Shutting down. Bye! :wave:",
color=0x9C84EF
)
logger.info(f"Shutting down. Bye! :wave:")
await context.send(embed=embed)
await self.bot.close()
@commands.command(
name="say",
description="The bot will say anything you want.",
)
@checks.is_owner()
async def say(self, context: Context, *, message: str):
"""
The bot will say anything you want.
"""
logger.info(f"Saying '{message}'")
await context.send(message)
@commands.command(
name="embed",
description="The bot will say anything you want, but within embeds.",
)
@checks.is_owner()
async def embed(self, context: Context, *, message: str):
"""
The bot will say anything you want, but within embeds.
"""
embed = disnake.Embed(
description=message,
color=0x9C84EF
)
logger.info(f"Saying '{message}'")
await context.send(embed=embed)
@commands.group(
name="blacklist"
)
async def blacklist(self, context: Context):
"""
Lets you add a user to, or remove a user from, the bot's blacklist.
"""
if context.invoked_subcommand is None:
with open("../data/blacklist.json") as file:
blacklist = json.load(file)
embed = disnake.Embed(
title=f"There are currently {len(blacklist['ids'])} blacklisted IDs",
description=f"{', '.join(str(id) for id in blacklist['ids'])}",
color=0x9C84EF
)
await context.send(embed=embed)
@blacklist.command(
name="add"
)
async def blacklist_add(self, context: Context, member: disnake.Member = None):
"""
Lets you add a user to the blacklist so they can no longer use the bot.
"""
try:
user_id = member.id
with open("../data/blacklist.json") as file:
blacklist = json.load(file)
if user_id in blacklist['ids']:
embed = disnake.Embed(
title="Error!",
description=f"**{member.name}** is already in the blacklist.",
color=0xE02B2B
)
return await context.send(embed=embed)
json_manager.add_user_to_blacklist(user_id)
embed = disnake.Embed(
title="User Blacklisted",
description=f"**{member.name}** has been successfully added to the blacklist",
color=0x9C84EF
)
with open("../data/blacklist.json") as file:
blacklist = json.load(file)
embed.set_footer(
text=f"There are now {len(blacklist['ids'])} users in the blacklist"
)
logger.info(f"{member.name} has been added to the blacklist.")
await context.send(embed=embed)
except:
embed = disnake.Embed(
title="Error!",
description=f"An unknown error occurred when trying to add **{member.name}** to the blacklist.",
color=0xE02B2B
)
await context.send(embed=embed)
@blacklist.command(
name="remove"
)
async def blacklist_remove(self, context, member: disnake.Member = None):
"""
Lets you remove a user from the blacklist so they can use the bot again.
"""
try:
user_id = member.id
json_manager.remove_user_from_blacklist(user_id)
embed = disnake.Embed(
title="User removed from blacklist",
description=f"**{member.name}** has been successfully removed from the blacklist",
color=0x9C84EF
)
with open("../data/blacklist.json") as file:
blacklist = json.load(file)
embed.set_footer(
text=f"There are now {len(blacklist['ids'])} users in the blacklist"
)
logger.info(f"{member.name} has been removed from the blacklist.")
await context.send(embed=embed)
except:
embed = disnake.Embed(
title="Error!",
description=f"**{member.name}** is not in the blacklist.",
color=0xE02B2B
)
await context.send(embed=embed)
def setup(bot):
bot.add_cog(Owner(bot)) |
from os import (
startfile,
getcwd
)
from os.path import join, isfile
from io import BytesIO
from csv import (
writer,
excel
)
from openpyxl import (
Workbook,
load_workbook
)
from statistics import (
mean,
variance,
stdev
)
from treetopper.plot import Plot
from treetopper.timber import (
TimberQuick,
TimberFull
)
from treetopper.log import Log
from treetopper.thin import (
ThinTPA,
ThinBA,
ThinRD
)
from treetopper._exceptions import TargetDensityError
from treetopper.fvs import FVS
from treetopper._constants import (
math,
ALL_SPECIES_NAMES,
GRADE_SORT,
LOG_LENGTHS,
SORTED_HEADS
)
from treetopper._utils import (
format_comma,
format_pct,
extension_check,
reorder_dict,
check_date,
add_logs_to_table_heads
)
from treetopper._import_from_sheets import import_from_sheet
from treetopper._print_console import (
print_stand_species,
print_stand_logs,
print_stand_stats
)
from treetopper._print_pdf import PDF
class Stand(object):
"""The Stand Class represents a stand of timber that has had an inventory conducted on it. It should be made up of plots (Plot Class)
which contain trees (Timber Classes).
The Stand class will run calculations and statistics of the current stand conditions and it will run calculations of the log
merchantability for three metrics: logs per acre, log board feet per acre, and log cubic feet per acre, based on log grades,
log length ranges and species.
"""
def __init__(self, name: str, plot_factor: float, acres: float = None, inventory_date: str = None):
self.name = name.upper()
self.plot_factor = plot_factor
self.plots = []
self.plot_count = 0
self.tpa = 0
self.ba_ac = 0
self.qmd = 0
self.rd_ac = 0
self.bf_ac = 0
self.cf_ac = 0
self.avg_hgt = 0
self.hdr = 0
self.vbar = 0
self.tpa_stats = {}
self.ba_ac_stats = {}
self.rd_ac_stats = {}
self.bf_ac_stats = {}
self.cf_ac_stats = {}
self.species = {}
self.species_gross = {}
self.species_stats = {}
self.logs = {}
self.table_data = []
self.summary_stand = []
self.summary_logs = {}
self.summary_stats = []
self.metrics = ['tpa', 'ba_ac', 'rd_ac', 'bf_ac', 'cf_ac']
self.attrs = ['_gross', '_stats', '']
self.acres = acres
if inventory_date:
self.inv_date = check_date(inventory_date)
else:
self.inv_date = inventory_date
def __getitem__(self, attribute: str):
return self.__dict__[attribute]
def get_stand_table_text(self):
"""Returns a console-formatted string of current stand conditions"""
return print_stand_species(self.summary_stand)
def get_logs_table_text(self):
"""Returns a console-formatted string of stand logs data"""
return print_stand_logs(self.summary_logs)
def get_stats_table_text(self):
"""Returns a console-formatted string of the stand statistics"""
return print_stand_stats(self.summary_stats)
def get_console_report_text(self):
"""Returns a console-formatted string of the complete stand report"""
return self._compile_report_text()
def console_report(self):
"""Prints a console-formatted string of the complete stand report"""
print(self._compile_report_text())
def get_pdf_report_bytes_io(self):
pdf = self._compile_pdf_report()
return BytesIO(pdf.output(dest='S').encode('latin-1'))
def pdf_report(self, filename: str, directory: str = None, start_file_upon_creation: bool = False):
"""Exports a pdf of the complete stand report to a user specified directory or if directory is None,
to the current working directory. Will open the created pdf report if start_file_upon_creation is True"""
check = extension_check(filename, '.pdf')
if directory:
file = join(directory, check)
else:
file = join(getcwd(), check)
pdf = self._compile_pdf_report()
pdf.output(file, 'F')
if start_file_upon_creation:
startfile(file)
def add_plot(self, plot: Plot):
"""Adds a plot to the stand's plots list and re-runs the calculations and statistics of the stand.
The plot argument needs to be a Plot Class."""
self.plots.append(plot)
self.plot_count += 1
for met in self.metrics:
self._update_metrics(met)
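# QMD (quadratic mean diameter) follows from basal area per acre and trees per acre:
# a tree's basal area in sq ft is 0.005454 * DBH^2 (DBH in inches), hence
# QMD = sqrt((BA per acre / TPA) / 0.005454). VBAR is the volume-to-basal-area ratio,
# here board feet per acre divided by basal area per acre.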
self.qmd = math.sqrt((self.ba_ac / self.tpa) / .005454)
self.vbar = self.bf_ac / self.ba_ac
self._update_species(plot)
self._update_logs(plot)
self.table_data = self._update_table_data()
self.summary_stand = self._update_summary_stand()
self.summary_logs = self._update_summary_logs()
self.summary_stats = self._update_summary_stats()
def import_sheet_quick(self, file_path: str):
"""Imports tree and plot data from a CSV or XLSX file for a quick cruise and adds that data to the stand"""
plots = import_from_sheet(file_path, self.name, 'q')
for plot_num in plots:
plot = Plot()
for tree in plots[plot_num]:
plot.add_tree(TimberQuick(self.plot_factor, *tree))
self.add_plot(plot)
def import_sheet_full(self, file_path: str):
"""Imports tree and plot data from a CSV or XLSX file for a full cruise and adds that data to the stand"""
plots = import_from_sheet(file_path, self.name, 'f')
for plot_num in plots:
plot = Plot()
for tree_data in plots[plot_num]:
args = tree_data[: -1]
logs = tree_data[-1]
tree = TimberFull(self.plot_factor, *args)
for log in logs:
tree.add_log(*log)
plot.add_tree(tree)
self.add_plot(plot)
def table_to_csv(self, filename: str, directory: str = None):
"""Creates or appends a CSV file with tree data from self.table_data"""
check = extension_check(filename, '.csv')
if directory:
file = join(directory, check)
else:
file = join(getcwd(), check)
if isfile(file):
allow = 'a'
start = 1
else:
allow = 'w'
start = 0
with open(file, allow, newline='') as csv_file:
csv_write = writer(csv_file, dialect=excel)
for i in self.table_data[start:]:
csv_write.writerow(i)
def table_to_excel(self, filename: str, directory: str = None):
"""Creates or appends an Excel file with tree data from self.table_data"""
check = extension_check(filename, '.xlsx')
if directory:
file = join(directory, check)
else:
file = join(getcwd(), check)
if isfile(file):
wb = load_workbook(file)
ws = wb.active
for i in self.table_data[1:]:
ws.append(i)
wb.save(file)
else:
wb = Workbook()
ws = wb.active
for i in self.table_data:
ws.append(i)
wb.save(file)
def _update_metrics(self, metric: str):
"""Updates stand metrics based on the metric entered in the argument, used internally"""
metric_list = [plot[metric] for plot in self.plots]
stats = self._get_stats(metric_list)
setattr(self, metric, stats['mean'])
setattr(self, f'{metric}_stats', stats)
def _update_species(self, plot):
"""Re-runs stand conditions calculations and statistics, used internally"""
update_after = ['qmd', 'vbar', 'avg_hgt', 'hdr']
if self.plot_count == 0:
return
else:
for species in plot.species:
if species not in self.species_gross:
for attr in self.attrs:
if attr == '_gross':
getattr(self, f'species{attr}')[species] = {met: [] for met in self.metrics}
else:
getattr(self, f'species{attr}')[species] = {met: 0 for met in self.metrics}
for key in plot.species[species]:
if key not in update_after:
self.species_gross[species][key].append(plot.species[species][key])
for species in self.species_gross:
for key in self.species_gross[species]:
if key not in update_after:
data = self.species_gross[species][key]
if len(data) < self.plot_count:
data += ([0] * (self.plot_count - len(data)))
stats = self._get_stats(data)
self.species[species][key] = stats['mean']
self.species_stats[species][key] = stats
self.species[species]['qmd'] = math.sqrt((self.species[species]['ba_ac'] / self.species[species]['tpa']) / 0.005454)
self.species[species]['vbar'] = self.species[species]['bf_ac'] / self.species[species]['ba_ac']
if species == 'totals_all':
self.species[species]['avg_hgt'] = mean([p.avg_hgt for p in self.plots])
self.species[species]['hdr'] = mean([p.hdr for p in self.plots])
else:
trees = []
for p in self.plots:
for t in p.trees:
trees.append(t)
self.species[species]['avg_hgt'] = mean([t.height for t in trees if t.species == species])
self.species[species]['hdr'] = mean([t.hdr for t in trees if t.species == species])
def _update_logs(self, plot):
"""Re-runs stand logs calculations, used internally"""
if self.plot_count == 0:
return
else:
subs = ['lpa', 'bf_ac', 'cf_ac']
for species in plot.logs:
if species not in self.logs:
self.logs[species] = {}
for grade in plot.logs[species]:
if grade not in self.logs[species]:
self.logs[species][grade] = {rng: {sub: {'gross': [], 'mean': 0} for sub in subs} for rng in LOG_LENGTHS}
self.logs[species][grade]['totals_by_grade'] = {sub: {'gross': [], 'mean': 0} for sub in subs}
for rng in plot.logs[species][grade]:
if rng != 'display':
for sub in subs:
self.logs[species][grade][rng][sub]['gross'].append(plot.logs[species][grade][rng][sub])
for species in self.logs:
for grade in self.logs[species]:
for rng in self.logs[species][grade]:
for sub in subs:
gross = self.logs[species][grade][rng][sub]['gross']
if len(gross) < self.plot_count:
gross += ([0] * (self.plot_count - len(gross)))
self.logs[species][grade][rng][sub]['mean'] = mean(gross)
def _update_table_data(self):
"""Converts stand data to plot/tree inventory data table layout, used internally"""
heads = ['Stand', 'Plot Number', 'Tree Number', 'Species', 'DBH', 'Height',
'Stump Height', 'Log 1 Length', 'Log 1 Grade', 'Log 1 Defect', 'Between Logs Feet']
master = []
max_logs = []
for i, plot in enumerate(self.plots):
for j, tree in enumerate(plot.trees):
temp = [self.name, i + 1, j + 1]
for key in ['species', 'dbh', 'height']:
temp.append(tree[key])
len_logs = len(tree.logs)
max_logs.append(len_logs)
for k, lnum in enumerate(tree.logs):
log = tree.logs[lnum]
if lnum == 1:
temp.append(log.stem_height - log.length - 1)
for lkey in ['length', 'grade', 'defect']:
temp.append(log[lkey])
if k < len(tree.logs) - 1:
between = tree.logs[lnum+1].stem_height - log.stem_height - tree.logs[lnum+1].length - 1
if between < 0:
temp.append(0)
else:
temp.append(between)
master.append(temp)
heads += add_logs_to_table_heads(max(max_logs))
len_heads = len(heads)
for i in master:
len_i = len(i)
if len_i < len_heads:
i += ['' for j in range(len_heads - len_i)]
master.insert(0, heads)
return master
def _update_summary_stand(self):
"""Updates the current stand conditions list of stand.summary_stand, used internally"""
heads = ['SPECIES'] + [head[1] for head in SORTED_HEADS]
body_data = []
for key in self.species:
if key == 'totals_all':
show = 'TOTALS'
else:
show = key
temp = [str(show)] + [format_comma(self.species[key][i[0]]) for i in SORTED_HEADS]
body_data.append(temp)
body_data.append(body_data.pop(0))
body_data.insert(0, heads)
return body_data
def _update_summary_logs(self):
"""Updates the stand logs summary dict, data-tables are broken down by metric type --> species, used internally.
Example: self.summary_logs['BOARD FEET PER ACRE']['DF'] --> data table"""
table_data = {}
tables = [['bf_ac', 'BOARD FEET PER ACRE'], ['cf_ac', 'CUBIC FEET PER ACRE'], ['lpa', 'LOGS PER ACRE']]
for table in tables:
metric_key = table[0]
key = table[1]
table_data[key] = {}
for species in self.logs:
if species == 'totals_all':
show = 'TOTALS'
else:
show = ALL_SPECIES_NAMES[species]
table_data[key][show] = [['LOG GRADES'] + [rng.upper() for rng in LOG_LENGTHS] + ['TOTALS']]
grade_sort = []
for grade in self.logs[species]:
values = [self.logs[species][grade][rng][metric_key]['mean'] for rng in self.logs[species][grade]]
if sum(values) > 0:
if grade == 'totals_by_length':
col_text = 'TOTALS'
else:
col_text = grade
grade_sort.append([col_text] + [format_comma(z) for z in values])
grade_sort = sorted(grade_sort, key=lambda x: GRADE_SORT[x[0]])
for g in grade_sort:
table_data[key][show].append(g)
table_data[key] = reorder_dict(table_data[key])
return table_data
def _update_summary_stats(self):
"""Updates the stand statistics dict, stats-tables are broken down by species, used internally.
Example: self.summary_stats['DF'] --> stats-table"""
tables = {}
for spp in self.species_stats:
if spp == 'totals_all':
show = 'TOTALS'
else:
show = ALL_SPECIES_NAMES[spp]
tables[show] = [['METRIC'] + [head.upper() for head in self.species_stats[spp]['tpa'] if head != 'low_avg_high'] + ['LOW',
'AVERAGE',
'HIGH']]
for key in self.species_stats[spp]:
temp = [key.upper()]
not_enough_data = False
for sub in self.species_stats[spp][key]:
x = self.species_stats[spp][key][sub]
if not_enough_data:
if x == 'Not enough data':
if sub == 'low_avg_high':
for i in range(3):
temp.append('-')
else:
temp.append('-')
else:
if x == 'Not enough data':
temp.append(x)
not_enough_data = True
else:
if sub == 'low_avg_high':
for i in x:
temp.append(format_comma(i))
elif sub == 'stderr_pct':
temp.append(format_pct(x))
else:
temp.append(format_comma(x))
tables[show].append(temp)
return reorder_dict(tables)
def _get_stats(self, data):
"""Runs the statistical calculations on a set of the stand conditions data, returns an updated sub dict, used internally"""
m = mean(data)
if len(data) >= 2:
std = stdev(data)
ste = std / math.sqrt(self.plot_count)
low_avg_high = [max(round(m - ste, 1), 0), m, m + ste]
d = {'mean': m,
'variance': variance(data),
'stdev': std,
'stderr': ste,
'stderr_pct': (ste / m) * 100,
'low_avg_high': low_avg_high}
else:
d = {'mean': m,
'variance': 'Not enough data',
'stdev': 'Not enough data',
'stderr': 'Not enough data',
'stderr_pct': 'Not enough data',
'low_avg_high': 'Not enough data'}
return d
def _compile_report_text(self):
"""Compiles the console-formatted report of all stand data and stats, used internally"""
n = '\n' * 4
console_text = f'{print_stand_species(self.summary_stand)}{n}'
console_text += f'{print_stand_logs(self.summary_logs)}{n}'
console_text += f'{print_stand_stats(self.summary_stats)}'
return console_text
def _compile_pdf_report(self):
pdf = PDF()
pdf.alias_nb_pages()
pdf.add_page()
pdf.compile_stand_report(self)
return pdf
if __name__ == '__main__':
import argparse
import traceback
import sys
from os import mkdir, getcwd
from os.path import join, isfile, isdir, expanduser
from treetopper._utils import get_desktop_path
def make_dir_and_subdir(workflow_num):
desktop = get_desktop_path()
tt_dir = join(desktop, 'treetopper_outputs')
if not isdir(tt_dir):
mkdir(tt_dir)
wf_dir = join(tt_dir, f'workflow_{workflow_num}')
if not isdir(wf_dir):
mkdir(wf_dir)
return wf_dir
def get_package_path(filename):
path = None
for i in sys.path:
if 'AppData' in i and i[-13:] == 'site-packages':
path = i
break
tt_path = join(path, 'treetopper')
sheet_path = join(tt_path, 'example_csv_and_xlsx')
final = join(sheet_path, filename)
return final
parser = argparse.ArgumentParser(description='treetopper Example Workflows')
parser.add_argument('workflow_number', help='Enter the number of the workflow to run.\n Valid workflow numbers: 1, 2, 3, 4, 5, 6')
args = parser.parse_args()
wf = args.workflow_number
while True:
if wf not in ['1', '2', '3', '4', '5', '6']:
print('Please enter a workflow number 1, 2, 3, 4, 5, or 6')
wf = input('Workflow #: ')
else:
break
wf = int(wf)
def workflow_1(workflow_number):
stand = Stand('WF1', -20)
plot_factor = stand.plot_factor
tree_data = [
# Plot 1
[TimberQuick(plot_factor, 'DF', 29.5, 119), TimberQuick(plot_factor, 'WH', 18.9, 102),
TimberQuick(plot_factor, 'WH', 20.2, 101), TimberQuick(plot_factor, 'WH', 19.9, 100),
TimberQuick(plot_factor, 'DF', 20.6, 112)],
# Plot 2
[TimberQuick(plot_factor, 'DF', 25.0, 117), TimberQuick(plot_factor, 'DF', 14.3, 105),
TimberQuick(plot_factor, 'DF', 20.4, 119), TimberQuick(plot_factor, 'DF', 16.0, 108),
TimberQuick(plot_factor, 'RC', 20.2, 124), TimberQuick(plot_factor, 'RC', 19.5, 116),
TimberQuick(plot_factor, 'RC', 23.4, 121), TimberQuick(plot_factor, 'DF', 17.8, 116),
TimberQuick(plot_factor, 'DF', 22.3, 125)]
]
for trees in tree_data:
plot = Plot()
for tree in trees:
plot.add_tree(tree)
stand.add_plot(plot)
path = make_dir_and_subdir(workflow_number)
stand.console_report()
stand.table_to_csv(join(path, 'example_csv_export.csv'))
thin80tpa = ThinTPA(stand, 80)
thin80tpa.console_report()
end_message = """**WORKFLOW 1 created a QUICK CRUISE stand from manually entered tree data.
It then ran a thinning scenario with a target density of 80 Trees per Acre considering all species and diameter ranges.
Outputs:
Stand console report in terminal [print(stand_class.console_report)] ^above^
Thinning console report in terminal [print(thin_class.console_report)] ^above^
Plot data .csv "example_csv_export.csv" in desktop/treetopper_outputs/workflow_1/
"""
print(f'\n\n{end_message}')
def workflow_2(workflow_number):
stand = Stand('WF2', 33.3)
plot_factor = stand.plot_factor
tree_data = [
# Plot 1
[[TimberFull(plot_factor, 'DF', 29.5, 119), [[42, 40, 'S2', 5], [83, 40, 'S3', 0], [102, 18, 'S4', 10]]],
[TimberFull(plot_factor, 'WH', 18.9, 102), [[42, 40, 'S2', 0], [79, 36, 'S4', 5]]],
[TimberFull(plot_factor, 'WH', 20.2, 101), [[42, 40, 'S2', 5], [83, 40, 'S4', 0]]],
[TimberFull(plot_factor, 'WH', 19.9, 100), [[42, 40, 'S2', 0], [83, 40, 'S4', 15]]],
[TimberFull(plot_factor, 'DF', 20.6, 112), [[42, 40, 'S2', 0], [83, 40, 'S3', 5], [100, 16, 'UT', 10]]]],
# Plot 2
[[TimberFull(plot_factor, 'DF', 25.0, 117), [[42, 40, 'SM', 0], [83, 40, 'S3', 5], [100, 16, 'S4', 0]]],
[TimberFull(plot_factor, 'DF', 14.3, 105), [[42, 40, 'S3', 0], [79, 36, 'S4', 0]]],
[TimberFull(plot_factor, 'DF', 20.4, 119), [[42, 40, 'S2', 5], [83, 40, 'S3', 5], [100, 16, 'S4', 5]]],
[TimberFull(plot_factor, 'DF', 16.0, 108), [[42, 40, 'S3', 5], [83, 40, 'S3', 10]]],
[TimberFull(plot_factor, 'RC', 20.2, 124), [[42, 40, 'CR', 5], [83, 40, 'CR', 5], [104, 20, 'CR', 5]]],
[TimberFull(plot_factor, 'RC', 19.5, 116), [[42, 40, 'CR', 10], [83, 40, 'CR', 5], [100, 16, 'CR', 0]]],
[TimberFull(plot_factor, 'RC', 23.4, 121), [[42, 40, 'CR', 0], [83, 40, 'CR', 0], [106, 22, 'CR', 5]]],
[TimberFull(plot_factor, 'DF', 17.8, 116), [[42, 40, 'S2', 0], [83, 40, 'S3', 0], [100, 16, 'S4', 10]]],
[TimberFull(plot_factor, 'DF', 22.3, 125), [[42, 40, 'SM', 0], [83, 40, 'S3', 5], [108, 24, 'S4', 0]]]]
]
for trees in tree_data:
plot = Plot()
for tree, logs in trees:
for log in logs:
tree.add_log(*log)
plot.add_tree(tree)
stand.add_plot(plot)
path = make_dir_and_subdir(workflow_number)
stand.console_report()
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
thin120ba = ThinBA(stand, 120, species_to_cut=['DF', 'WH'])
thin120ba.console_report()
end_message = """**WORKFLOW 2 created a FULL CRUISE stand from manually entered tree data.
It then ran a thinning scenario with a target density of 120 Basal Area per Acre harvesting only DF and WH considering all diameter ranges.
Outputs:
Stand console report in terminal [print(stand_class.console_report)] ^above^
Thinning console report in terminal [print(thin_class.console_report)] ^above^
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_2/
"""
print(f'\n\n{end_message}')
def workflow_3(workflow_number):
path = make_dir_and_subdir(workflow_number)
stand = Stand('EX4', -30)
stand.import_sheet_quick(get_package_path('Example_Excel_quick.xlsx'))
stand.console_report()
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
thin25rd = ThinRD(stand, 25, species_to_cut=['DF', 'WH'], min_dbh_to_cut=10, max_dbh_to_cut=18)
thin25rd.console_report()
end_message = """**WORKFLOW 3 created a QUICK CRUISE stand from importing plot data from an excel sheet.
It then ran a thinning scenario with a target density of 25 Relative Density per Acre harvesting only DF and WH, with a
minimum dbh of 10 inches and a maximum dbh of 18 inches. ** Note this thinning density cannot be fully achieved
because our parameters don't allow for the needed harvest density, but this is to illustrate that the thinning
will let the user know how much density was taken and how much more is needed to achieve the desired density target.
Outputs:
Stand console report in terminal [print(stand_class.console_report)] ^above^
Thinning console report in terminal [print(thin_class.console_report)] ^above^
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_3/
"""
print(f'\n\n{end_message}')
def workflow_4(workflow_number):
path = make_dir_and_subdir(workflow_number)
stand = Stand('OK2', 46.94)
stand.import_sheet_full(get_package_path('Example_CSV_full.csv'))
stand.console_report()
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
try:
thin100tpa = ThinTPA(stand, 100)
thin100tpa.console_report()
except TargetDensityError as e:
print(traceback.format_exc())
end_message = """**WORKFLOW 4 created a FULL CRUISE stand from importing plot data from an csv sheet.
It then ran a thinning scenario with a target density of 100 Trees per Acre considering all species and diameter ranges.
** Note this thinning density is greater than the current stand density and the Thin Class will throw a TargetDensityError exception
which will explain what went wrong.
Outputs:
Stand console report in terminal [print(stand_class.console_report)] ^above^
Thinning console report in terminal [print(thin_class.console_report)] ^above^
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_4/
"""
print(f'\n\n{end_message}')
def workflow_5(workflow_number):
path = make_dir_and_subdir(workflow_number)
stand = Stand('EX3', 33.3)
stand.import_sheet_quick(get_package_path('Example_CSV_quick.csv'))
stand.pdf_report(join(path, 'stand_report.pdf'))
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
thin140ba = ThinBA(stand, 140, species_to_cut=['DF', 'WH', 'RA'], max_dbh_to_cut=24)
thin140ba.pdf_report(join(path, 'thin_report.pdf'))
end_message = """**WORKFLOW 5 created a QUICK CRUISE stand from importing plot data from an csv sheet.
It then ran a thinning scenario with a target density of 140 Basal Area per Acre harvesting only DF, WH and RA with a maximum diameter of 24 inches.
Outputs:
Stand PDF report "stand_report.pdf" from [stand_class.pdf_report()] in desktop/treetopper_outputs/workflow_5/
Thinning PDF report "thin_report.pdf" from [thin_class.pdf_report()] in desktop/treetopper_outputs/workflow_5/
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_5/
"""
print(f'\n\n{end_message}')
def workflow_6(workflow_number):
path = make_dir_and_subdir(workflow_number)
stand = Stand('OK1', -30)
stand.import_sheet_full(get_package_path('Example_Excel_full.xlsx'))
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
fvs = FVS()
fvs.set_stand(stand, 'PN', 612, 6, 45, 'DF', 110)
fvs.access_db('access_db', directory=path)
fvs.sqlite_db('sqlite_db', directory=path)
fvs.excel_db('excel_db', directory=path)
end_message = """**WORKFLOW 6 created a FULL CRUISE stand from importing plot data from an excel sheet.
It then ran the FVS module to create FVS formatted databases from the stand data. FVS is the US Forest Service's Forest Vegetation Simulator.
Outputs:
FVS Access database "access_db.db" from [fvs_class.access_db()] in desktop/treetopper_outputs/workflow_6/
FVS Suppose file "Suppose.loc" in desktop/treetopper_outputs/workflow_6/. ** FVS Legacy needs a .loc file along with the database.
FVS SQLite database "sqlite_db.db" from [fvs_class.sqlite_db()] in desktop/treetopper_outputs/workflow_6/
FVS Excel database "excel_db.db" from [fvs_class.excel_db()] in desktop/treetopper_outputs/workflow_6/
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_6/
"""
print(f'\n\n{end_message}')
def main(workflow_number):
opts = {
1: workflow_1,
2: workflow_2,
3: workflow_3,
4: workflow_4,
5: workflow_5,
6: workflow_6
}
opts[workflow_number](workflow_number)
print(f"\n\n{"-" * 200}\n\n")
main(wf)
print(f"\n\n{"-" * 200}\n\n")
| from os import (
startfile,
getcwd
)
from os.path import join
from io import BytesIO
from csv import (
writer,
excel
)
from openpyxl import (
Workbook,
load_workbook
)
from statistics import (
mean,
variance,
stdev
)
from treetopper.plot import Plot
from treetopper.timber import (
TimberQuick,
TimberFull
)
from treetopper.log import Log
from treetopper.thin import (
ThinTPA,
ThinBA,
ThinRD
)
from treetopper._exceptions import TargetDensityError
from treetopper.fvs import FVS
from treetopper._constants import (
math,
ALL_SPECIES_NAMES,
GRADE_SORT,
LOG_LENGTHS,
SORTED_HEADS
)
from treetopper._utils import (
format_comma,
format_pct,
extension_check,
reorder_dict,
check_date,
add_logs_to_table_heads
)
from treetopper._import_from_sheets import import_from_sheet
from treetopper._print_console import (
print_stand_species,
print_stand_logs,
print_stand_stats
)
from treetopper._print_pdf import PDF
class Stand(object):
"""The Stand Class represents a stand of timber that has had an inventory conducted on it. It should made up of plots (Plot Class)
which contain trees (Timber Classes).
The Stand class will run calculations and statistics of the current stand conditions and it will run calculations of the log
merchantabilty for three metrics: logs per acre, log board feet per acre, and log cubic feet per acre, based on log grades,
log length ranges and species.
"""
def __init__(self, name: str, plot_factor: float, acres: float = None, inventory_date: str = None):
self.name = name.upper()
self.plot_factor = plot_factor
self.plots = []
self.plot_count = 0
self.tpa = 0
self.ba_ac = 0
self.qmd = 0
self.rd_ac = 0
self.bf_ac = 0
self.cf_ac = 0
self.avg_hgt = 0
self.hdr = 0
self.vbar = 0
self.tpa_stats = {}
self.ba_ac_stats = {}
self.rd_ac_stats = {}
self.bf_ac_stats = {}
self.cf_ac_stats = {}
self.species = {}
self.species_gross = {}
self.species_stats = {}
self.logs = {}
self.table_data = []
self.summary_stand = []
self.summary_logs = {}
self.summary_stats = []
self.metrics = ['tpa', 'ba_ac', 'rd_ac', 'bf_ac', 'cf_ac']
self.attrs = ['_gross', '_stats', '']
self.acres = acres
if inventory_date:
self.inv_date = check_date(inventory_date)
else:
self.inv_date = inventory_date
def __getitem__(self, attribute: str):
return self.__dict__[attribute]
def get_stand_table_text(self):
"""Returns a console-formatted string of current stand conditions"""
return print_stand_species(self.summary_stand)
def get_logs_table_text(self):
"""Returns a console-formatted string of stand logs data"""
return print_stand_logs(self.summary_logs)
def get_stats_table_text(self):
"""Returns and console-formatted string of stand stand statistics"""
return print_stand_stats(self.summary_stats)
def get_console_report_text(self):
"""Returns a console-formatted string of the complete stand report"""
return self._compile_report_text()
def console_report(self):
"""Prints a console-formatted string of the complete stand report"""
print(self._compile_report_text())
def get_pdf_report_bytes_io(self):
pdf = self._compile_pdf_report()
return BytesIO(pdf.output(dest='S').encode('latin-1'))
def pdf_report(self, filename: str, directory: str = None, start_file_upon_creation: bool = False):
"""Exports a pdf of the complete stand report to a user specified directory or if directory is None,
to the current working directory. Will open the created pdf report if start_file_upon_creation is True"""
check = extension_check(filename, '.pdf')
if directory:
file = join(directory, check)
else:
file = join(getcwd(), check)
pdf = self._compile_pdf_report()
pdf.output(file, 'F')
if start_file_upon_creation:
startfile(file)
def add_plot(self, plot: Plot):
"""Adds a plot to the stand's plots list and re-runs the calculations and statistics of the stand.
The plot argument needs to be a Plot Class"""
self.plots.append(plot)
self.plot_count += 1
for met in self.metrics:
self._update_metrics(met)
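# QMD uses the forester's constant (BA in sq ft = 0.005454 * DBH^2), so QMD = sqrt((BA/acre / TPA) / 0.005454);
# VBAR is board-foot volume per square foot of basal area (bf_ac / ba_ac)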
self.qmd = math.sqrt((self.ba_ac / self.tpa) / .005454)
self.vbar = self.bf_ac / self.ba_ac
self._update_species(plot)
self._update_logs(plot)
self.table_data = self._update_table_data()
self.summary_stand = self._update_summary_stand()
self.summary_logs = self._update_summary_logs()
self.summary_stats = self._update_summary_stats()
def import_sheet_quick(self, file_path: str):
"""Imports tree and plot data from a CSV or XLSX file for a quick cruise and adds that data to the stand"""
plots = import_from_sheet(file_path, self.name, 'q')
for plot_num in plots:
plot = Plot()
for tree in plots[plot_num]:
plot.add_tree(TimberQuick(self.plot_factor, *tree))
self.add_plot(plot)
def import_sheet_full(self, file_path: str):
"""Imports tree and plot data from a CSV or XLSX file for a full cruise and adds that data to the stand"""
plots = import_from_sheet(file_path, self.name, 'f')
for plot_num in plots:
plot = Plot()
for tree_data in plots[plot_num]:
args = tree_data[: -1]
logs = tree_data[-1]
tree = TimberFull(self.plot_factor, *args)
for log in logs:
tree.add_log(*log)
plot.add_tree(tree)
self.add_plot(plot)
def table_to_csv(self, filename: str, directory: str = None):
"""Creates or appends a CSV file with tree data from self.table_data"""
check = extension_check(filename, '.csv')
if directory:
file = join(directory, check)
else:
file = join(getcwd(), check)
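# If the file already exists, append rows and skip the header (start=1); otherwise write a new file with the header row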
if isfile(file):
allow = 'a'
start = 1
else:
allow = 'w'
start = 0
with open(file, allow, newline='') as csv_file:
csv_write = writer(csv_file, dialect=excel)
for i in self.table_data[start:]:
csv_write.writerow(i)
def table_to_excel(self, filename: str, directory: str = None):
"""Creates or appends an Excel file with tree data from self.table_data"""
check = extension_check(filename, '.xlsx')
if directory:
file = join(directory, check)
else:
file = join(getcwd(), check)
if isfile(file):
wb = load_workbook(file)
ws = wb.active
for i in self.table_data[1:]:
ws.append(i)
wb.save(file)
else:
wb = Workbook()
ws = wb.active
for i in self.table_data:
ws.append(i)
wb.save(file)
def _update_metrics(self, metric: str):
"""Updates stand metrics based on the metric entered in the argument, used internally"""
metric_list = [plot[metric] for plot in self.plots]
stats = self._get_stats(metric_list)
setattr(self, metric, stats['mean'])
setattr(self, f'{metric}_stats', stats)
def _update_species(self, plot):
"""Re-runs stand conditions calculations and statistics, used internally"""
update_after = ['qmd', 'vbar', 'avg_hgt', 'hdr']
if self.plot_count == 0:
return
else:
for species in plot.species:
if species not in self.species_gross:
for attr in self.attrs:
if attr == '_gross':
getattr(self, f'species{attr}')[species] = {met: [] for met in self.metrics}
else:
getattr(self, f'species{attr}')[species] = {met: 0 for met in self.metrics}
for key in plot.species[species]:
if key not in update_after:
self.species_gross[species][key].append(plot.species[species][key])
for species in self.species_gross:
for key in self.species_gross[species]:
if key not in update_after:
data = self.species_gross[species][key]
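# Zero-pad so species absent from some plots are still averaged over every plot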
if len(data) < self.plot_count:
data += ([0] * (self.plot_count - len(data)))
stats = self._get_stats(data)
self.species[species][key] = stats['mean']
self.species_stats[species][key] = stats
self.species[species]['qmd'] = math.sqrt((self.species[species]['ba_ac'] / self.species[species]['tpa']) / 0.005454)
self.species[species]['vbar'] = self.species[species]['bf_ac'] / self.species[species]['ba_ac']
if species == 'totals_all':
self.species[species]['avg_hgt'] = mean([p.avg_hgt for p in self.plots])
self.species[species]['hdr'] = mean([p.hdr for p in self.plots])
else:
trees = []
for p in self.plots:
for t in p.trees:
trees.append(t)
self.species[species]['avg_hgt'] = mean([t.height for t in trees if t.species == species])
self.species[species]['hdr'] = mean([t.hdr for t in trees if t.species == species])
def _update_logs(self, plot):
"""Re-runs stand logs calculations, used internally"""
if self.plot_count == 0:
return
else:
subs = ['lpa', 'bf_ac', 'cf_ac']
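# Gross per-plot values are accumulated per species/grade/length range, zero-padded for plots
# without that log class, then averaged below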
for species in plot.logs:
if species not in self.logs:
self.logs[species] = {}
for grade in plot.logs[species]:
if grade not in self.logs[species]:
self.logs[species][grade] = {rng: {sub: {'gross': [], 'mean': 0} for sub in subs} for rng in LOG_LENGTHS}
self.logs[species][grade]['totals_by_grade'] = {sub: {'gross': [], 'mean': 0} for sub in subs}
for rng in plot.logs[species][grade]:
if rng != 'display':
for sub in subs:
self.logs[species][grade][rng][sub]['gross'].append(plot.logs[species][grade][rng][sub])
for species in self.logs:
for grade in self.logs[species]:
for rng in self.logs[species][grade]:
for sub in subs:
gross = self.logs[species][grade][rng][sub]['gross']
if len(gross) < self.plot_count:
gross += ([0] * (self.plot_count - len(gross)))
self.logs[species][grade][rng][sub]['mean'] = mean(gross)
def _update_table_data(self):
"""Converts stand data to plot/tree inventory data table layout, used internally"""
heads = ['Stand', 'Plot Number', 'Tree Number', 'Species', 'DBH', 'Height',
'Stump Height', 'Log 1 Length', 'Log 1 Grade', 'Log 1 Defect', 'Between Logs Feet']
master = []
max_logs = []
for i, plot in enumerate(self.plots):
for j, tree in enumerate(plot.trees):
temp = [self.name, i + 1, j + 1]
for key in ['species', 'dbh', 'height']:
temp.append(tree[key])
len_logs = len(tree.logs)
max_logs.append(len_logs)
for k, lnum in enumerate(tree.logs):
log = tree.logs[lnum]
if lnum == 1:
temp.append(log.stem_height - log.length - 1)
for lkey in ['length', 'grade', 'defect']:
temp.append(log[lkey])
if k < len(tree.logs) - 1:
between = tree.logs[lnum+1].stem_height - log.stem_height - tree.logs[lnum+1].length - 1
if between < 0:
temp.append(0)
else:
temp.append(between)
master.append(temp)
heads += add_logs_to_table_heads(max(max_logs))
len_heads = len(heads)
for i in master:
len_i = len(i)
if len_i < len_heads:
i += ['' for j in range(len_heads - len_i)]
master.insert(0, heads)
return master
def _update_summary_stand(self):
"""Updates the current stand conditions list of stand.summary_stand, used internally"""
heads = ['SPECIES'] + [head[1] for head in SORTED_HEADS]
body_data = []
for key in self.species:
if key == 'totals_all':
show = 'TOTALS'
else:
show = key
temp = [str(show)] + [format_comma(self.species[key][i[0]]) for i in SORTED_HEADS]
body_data.append(temp)
body_data.append(body_data.pop(0))
body_data.insert(0, heads)
return body_data
def _update_summary_logs(self):
"""Updates the stand logs summary dict, data-tables are broken down by metric type --> species, used internally.
Example: self.summary_logs['BOARD FEET PER ACRE']['DF'] --> data table"""
table_data = {}
tables = [['bf_ac', 'BOARD FEET PER ACRE'], ['cf_ac', 'CUBIC FEET PER ACRE'], ['lpa', 'LOGS PER ACRE']]
for table in tables:
metric_key = table[0]
key = table[1]
table_data[key] = {}
for species in self.logs:
if species == 'totals_all':
show = 'TOTALS'
else:
show = ALL_SPECIES_NAMES[species]
table_data[key][show] = [['LOG GRADES'] + [rng.upper() for rng in LOG_LENGTHS] + ['TOTALS']]
grade_sort = []
for grade in self.logs[species]:
values = [self.logs[species][grade][rng][metric_key]['mean'] for rng in self.logs[species][grade]]
if sum(values) > 0:
if grade == 'totals_by_length':
col_text = 'TOTALS'
else:
col_text = grade
grade_sort.append([col_text] + [format_comma(z) for z in values])
grade_sort = sorted(grade_sort, key=lambda x: GRADE_SORT[x[0]])
for g in grade_sort:
table_data[key][show].append(g)
table_data[key] = reorder_dict(table_data[key])
return table_data
def _update_summary_stats(self):
"""Updates the stand statistics dict, stats-tables are broken down by species, used internally.
Example: self.summary_stats['DF'] --> stats-table"""
tables = {}
for spp in self.species_stats:
if spp == 'totals_all':
show = 'TOTALS'
else:
show = ALL_SPECIES_NAMES[spp]
tables[show] = [['METRIC'] + [head.upper() for head in self.species_stats[spp]['tpa'] if head != 'low_avg_high'] + ['LOW',
'AVERAGE',
'HIGH']]
for key in self.species_stats[spp]:
temp = [key.upper()]
not_enough_data = False
for sub in self.species_stats[spp][key]:
x = self.species_stats[spp][key][sub]
if not_enough_data:
if x == 'Not enough data':
if sub == 'low_avg_high':
for i in range(3):
temp.append('-')
else:
temp.append('-')
else:
if x == 'Not enough data':
temp.append(x)
not_enough_data = True
else:
if sub == 'low_avg_high':
for i in x:
temp.append(format_comma(i))
elif sub == 'stderr_pct':
temp.append(format_pct(x))
else:
temp.append(format_comma(x))
tables[show].append(temp)
return reorder_dict(tables)
def _get_stats(self, data):
"""Runs the statistical calculations on a set of the stand conditions data, returns an updated sub dict, used internally"""
m = mean(data)
if len(data) >= 2:
std = stdev(data)
ste = std / math.sqrt(self.plot_count)
low_avg_high = [max(round(m - ste, 1), 0), m, m + ste]
d = {'mean': m,
'variance': variance(data),
'stdev': std,
'stderr': ste,
'stderr_pct': (ste / m) * 100,
'low_avg_high': low_avg_high}
else:
d = {'mean': m,
'variance': 'Not enough data',
'stdev': 'Not enough data',
'stderr': 'Not enough data',
'stderr_pct': 'Not enough data',
'low_avg_high': 'Not enough data'}
return d
def _compile_report_text(self):
"""Compiles the console-formatted report of all stand data and stats, used internally"""
n = '\n' * 4
console_text = f'{print_stand_species(self.summary_stand)}{n}'
console_text += f'{print_stand_logs(self.summary_logs)}{n}'
console_text += f'{print_stand_stats(self.summary_stats)}'
return console_text
def _compile_pdf_report(self):
pdf = PDF()
pdf.alias_nb_pages()
pdf.add_page()
pdf.compile_stand_report(self)
return pdf
if __name__ == '__main__':
import argparse
import traceback
import sys
from os import mkdir, getcwd
from os.path import join, isfile, isdir, expanduser
from treetopper._utils import get_desktop_path
def make_dir_and_subdir(workflow_num):
desktop = get_desktop_path()
tt_dir = join(desktop, 'treetopper_outputs')
if not isdir(tt_dir):
mkdir(tt_dir)
wf_dir = join(tt_dir, f'workflow_{workflow_num}')
if not isdir(wf_dir):
mkdir(wf_dir)
return wf_dir
def get_package_path(filename):
path = None
for i in sys.path:
if 'AppData' in i and i[-13:] == 'site-packages':
path = i
break
tt_path = join(path, 'treetopper')
sheet_path = join(tt_path, 'example_csv_and_xlsx')
final = join(sheet_path, filename)
return final
parser = argparse.ArgumentParser(description='treetopper Example Workflows')
parser.add_argument('workflow_number', help='Enter the number of the workflow to run.\n Valid workflow numbers: 1, 2, 3, 4, 5, 6')
args = parser.parse_args()
wf = args.workflow_number
while True:
if wf not in ['1', '2', '3', '4', '5', '6']:
print('Please enter a workflow number 1, 2, 3, 4, 5, or 6')
wf = input('Workflow #: ')
else:
break
wf = int(wf)
def workflow_1(workflow_number):
stand = Stand('WF1', -20)
plot_factor = stand.plot_factor
tree_data = [
# Plot 1
[TimberQuick(plot_factor, 'DF', 29.5, 119), TimberQuick(plot_factor, 'WH', 18.9, 102),
TimberQuick(plot_factor, 'WH', 20.2, 101), TimberQuick(plot_factor, 'WH', 19.9, 100),
TimberQuick(plot_factor, 'DF', 20.6, 112)],
# Plot 2
[TimberQuick(plot_factor, 'DF', 25.0, 117), TimberQuick(plot_factor, 'DF', 14.3, 105),
TimberQuick(plot_factor, 'DF', 20.4, 119), TimberQuick(plot_factor, 'DF', 16.0, 108),
TimberQuick(plot_factor, 'RC', 20.2, 124), TimberQuick(plot_factor, 'RC', 19.5, 116),
TimberQuick(plot_factor, 'RC', 23.4, 121), TimberQuick(plot_factor, 'DF', 17.8, 116),
TimberQuick(plot_factor, 'DF', 22.3, 125)]
]
for trees in tree_data:
plot = Plot()
for tree in trees:
plot.add_tree(tree)
stand.add_plot(plot)
path = make_dir_and_subdir(workflow_number)
stand.console_report()
stand.table_to_csv(join(path, 'example_csv_export.csv'))
thin80tpa = ThinTPA(stand, 80)
thin80tpa.console_report()
end_message = """**WORKFLOW 1 created a QUICK CRUISE stand from manually entered tree data.
It then ran a thinning scenario with a target density of 80 Trees per Acre considering all species and diameter ranges.
Outputs:
Stand console report in terminal [print(stand_class.console_report)] ^above^
Thinning console report in terminal [print(thin_class.console_report)] ^above^
Plot data .csv "example_csv_export.csv" in desktop/treetopper_outputs/workflow_1/
"""
print(f'\n\n{end_message}')
def workflow_2(workflow_number):
stand = Stand('WF2', 33.3)
plot_factor = stand.plot_factor
tree_data = [
# Plot 1
[[TimberFull(plot_factor, 'DF', 29.5, 119), [[42, 40, 'S2', 5], [83, 40, 'S3', 0], [102, 18, 'S4', 10]]],
[TimberFull(plot_factor, 'WH', 18.9, 102), [[42, 40, 'S2', 0], [79, 36, 'S4', 5]]],
[TimberFull(plot_factor, 'WH', 20.2, 101), [[42, 40, 'S2', 5], [83, 40, 'S4', 0]]],
[TimberFull(plot_factor, 'WH', 19.9, 100), [[42, 40, 'S2', 0], [83, 40, 'S4', 15]]],
[TimberFull(plot_factor, 'DF', 20.6, 112), [[42, 40, 'S2', 0], [83, 40, 'S3', 5], [100, 16, 'UT', 10]]]],
# Plot 2
[[TimberFull(plot_factor, 'DF', 25.0, 117), [[42, 40, 'SM', 0], [83, 40, 'S3', 5], [100, 16, 'S4', 0]]],
[TimberFull(plot_factor, 'DF', 14.3, 105), [[42, 40, 'S3', 0], [79, 36, 'S4', 0]]],
[TimberFull(plot_factor, 'DF', 20.4, 119), [[42, 40, 'S2', 5], [83, 40, 'S3', 5], [100, 16, 'S4', 5]]],
[TimberFull(plot_factor, 'DF', 16.0, 108), [[42, 40, 'S3', 5], [83, 40, 'S3', 10]]],
[TimberFull(plot_factor, 'RC', 20.2, 124), [[42, 40, 'CR', 5], [83, 40, 'CR', 5], [104, 20, 'CR', 5]]],
[TimberFull(plot_factor, 'RC', 19.5, 116), [[42, 40, 'CR', 10], [83, 40, 'CR', 5], [100, 16, 'CR', 0]]],
[TimberFull(plot_factor, 'RC', 23.4, 121), [[42, 40, 'CR', 0], [83, 40, 'CR', 0], [106, 22, 'CR', 5]]],
[TimberFull(plot_factor, 'DF', 17.8, 116), [[42, 40, 'S2', 0], [83, 40, 'S3', 0], [100, 16, 'S4', 10]]],
[TimberFull(plot_factor, 'DF', 22.3, 125), [[42, 40, 'SM', 0], [83, 40, 'S3', 5], [108, 24, 'S4', 0]]]]
]
for trees in tree_data:
plot = Plot()
for tree, logs in trees:
for log in logs:
tree.add_log(*log)
plot.add_tree(tree)
stand.add_plot(plot)
path = make_dir_and_subdir(workflow_number)
stand.console_report()
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
thin120ba = ThinBA(stand, 120, species_to_cut=['DF', 'WH'])
thin120ba.console_report()
end_message = """**WORKFLOW 2 created a FULL CRUISE stand from manually entered tree data.
It then ran a thinning scenario with a target density of 120 Basal Area per Acre harvesting only DF and WH considering all diameter ranges.
Outputs:
Stand console report in terminal [print(stand_class.console_report)] ^above^
Thinning console report in terminal [print(thin_class.console_report)] ^above^
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_2/
"""
print(f'\n\n{end_message}')
def workflow_3(workflow_number):
path = make_dir_and_subdir(workflow_number)
stand = Stand('EX4', -30)
stand.import_sheet_quick(get_package_path('Example_Excel_quick.xlsx'))
stand.console_report()
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
thin25rd = ThinRD(stand, 25, species_to_cut=['DF', 'WH'], min_dbh_to_cut=10, max_dbh_to_cut=18)
thin25rd.console_report()
end_message = """**WORKFLOW 3 created a QUICK CRUISE stand from importing plot data from an excel sheet.
It then ran a thinning scenario with a target density of 25 Relative Density per Acre harvesting only DF and WH, with a
minimum dbh of 10 inches and a maximum dbh of 18 inches. ** Note this thinning density cannot be fully achieved
because our parameters don't allow for the needed harvest density, but this is to illustrate that the thinning
will let the user know how much density was taken and how much more is needed to achieve the desired density target.
Outputs:
Stand console report in terminal [print(stand_class.console_report)] ^above^
Thinning console report in terminal [print(thin_class.console_report)] ^above^
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_3/
"""
print(f'\n\n{end_message}')
def workflow_4(workflow_number):
path = make_dir_and_subdir(workflow_number)
stand = Stand('OK2', 46.94)
stand.import_sheet_full(get_package_path('Example_CSV_full.csv'))
stand.console_report()
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
try:
thin100tpa = ThinTPA(stand, 100)
thin100tpa.console_report()
except TargetDensityError as e:
print(traceback.format_exc())
end_message = """**WORKFLOW 4 created a FULL CRUISE stand from importing plot data from an csv sheet.
It then ran a thinning scenario with a target density of 100 Trees per Acre considering all species and diameter ranges.
** Note this thinning density is greater than the current stand density and the Thin Class will throw a TargetDensityError exception
which will explain what went wrong.
Outputs:
Stand console report in terminal [print(stand_class.console_report)] ^above^
Thinning console report in terminal [print(thin_class.console_report)] ^above^
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_4/
"""
print(f'\n\n{end_message}')
def workflow_5(workflow_number):
path = make_dir_and_subdir(workflow_number)
stand = Stand('EX3', 33.3)
stand.import_sheet_quick(get_package_path('Example_CSV_quick.csv'))
stand.pdf_report(join(path, 'stand_report.pdf'))
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
thin140ba = ThinBA(stand, 140, species_to_cut=['DF', 'WH', 'RA'], max_dbh_to_cut=24)
thin140ba.pdf_report(join(path, 'thin_report.pdf'))
end_message = """**WORKFLOW 5 created a QUICK CRUISE stand from importing plot data from an csv sheet.
It then ran a thinning scenario with a target density of 140 Basal Area per Acre harvesting only DF, WH and RA with a maximum diameter of 24 inches.
Outputs:
Stand PDF report "stand_report.pdf" from [stand_class.pdf_report()] in desktop/treetopper_outputs/workflow_5/
Thinning PDF report "thin_report.pdf" from [thin_class.pdf_report()] in desktop/treetopper_outputs/workflow_5/
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_5/
"""
print(f'\n\n{end_message}')
def workflow_6(workflow_number):
path = make_dir_and_subdir(workflow_number)
stand = Stand('OK1', -30)
stand.import_sheet_full(get_package_path('Example_Excel_full.xlsx'))
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
fvs = FVS()
fvs.set_stand(stand, 'PN', 612, 6, 45, 'DF', 110)
fvs.access_db('access_db', directory=path)
fvs.sqlite_db('sqlite_db', directory=path)
fvs.excel_db('excel_db', directory=path)
end_message = """**WORKFLOW 6 created a FULL CRUISE stand from importing plot data from an excel sheet.
It then ran the FVS module to create FVS formatted databases from the stand data. FVS is the US Forest Service's Forest Vegetation Simulator.
Outputs:
FVS Access database "access_db.db" from [fvs_class.access_db()] in desktop/treetopper_outputs/workflow_6/
FVS Suppose file "Suppose.loc" in desktop/treetopper_outputs/workflow_6/. ** FVS Legacy needs a .loc file along with the database.
FVS SQLite database "sqlite_db.db" from [fvs_class.sqlite_db()] in desktop/treetopper_outputs/workflow_6/
FVS Excel database "excel_db.db" from [fvs_class.excel_db()] in desktop/treetopper_outputs/workflow_6/
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_6/
"""
print(f'\n\n{end_message}')
def main(workflow_number):
opts = {
1: workflow_1,
2: workflow_2,
3: workflow_3,
4: workflow_4,
5: workflow_5,
6: workflow_6
}
opts[workflow_number](workflow_number)
print(f"\n\n{'-' * 200}\n\n")
main(wf)
print(f"\n\n{'-' * 200}\n\n")
|
from typing import Tuple, Union, Callable, Optional, Sequence
from pytest_mock import MockerFixture
import pytest
import numpy as np
import dask.array as da
from squidpy.im import (
segment,
ImageContainer,
SegmentationCustom,
SegmentationWatershed,
)
from squidpy.im._segment import _SEG_DTYPE
from squidpy._constants._constants import SegmentationBackend
from squidpy._constants._pkg_constants import Key
def dummy_segment(arr: np.ndarray) -> np.ndarray:
assert isinstance(arr, np.ndarray)
assert arr.ndim == 3
return arr[..., 0].astype(np.uint32)
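# dummy_segment stands in for a user-supplied segmentation callable: it drops the channel axis and
# returns an integer label image, which is what SegmentationCustom is expected to wrap in these tests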
class TestGeneral:
@pytest.mark.parametrize("ndim", [2, 3])
def test_input_ndim(self, ndim: int):
img = np.zeros(shape=(10, 10))
if ndim == 3:
img = img[..., np.newaxis]
sc = SegmentationCustom(dummy_segment)
res = sc.segment(img)
assert isinstance(res, np.ndarray)
assert res.ndim == 3
if ndim == 2:
assert res.shape == img.shape + (1,)
else:
assert res.shape == img.shape
def test_segment_invalid_shape(self):
img = np.zeros(shape=(1, 10, 10, 2))
sc = SegmentationCustom(dummy_segment)
with pytest.raises(ValueError, match=r"Expected `2` or `3` dimensions"):
sc.segment(img)
def test_segment_container(self):
img = ImageContainer(np.zeros(shape=(10, 10, 1)), layer="image")
sc = SegmentationCustom(dummy_segment)
res = sc.segment(img, layer="image", library_id=img["image"].z.values[0])
assert isinstance(res, ImageContainer)
assert res.shape == img.shape
assert "image" in res
assert res["image"].dims == img["image"].dims
class TestWatershed:
@pytest.mark.parametrize("thresh", [None, 0.1, 0.5, 1.0])
def test_threshold(self, thresh: Optional[float], mocker: MockerFixture):
img = np.zeros((100, 200), dtype=np.float64)
img[2:10, 2:10] = 1.0
img[30:34, 10:16] = 1.0
img = ImageContainer(img, layer="image")
sw = SegmentationWatershed()
spy = mocker.spy(sw, "_segment")
res = sw.segment(img, layer="image", library_id=img["image"].z.values[0], fn_kwargs={"thresh": thresh})
assert isinstance(res, ImageContainer)
spy.assert_called_once()
call = spy.call_args_list[0]
assert call[1]["thresh"] == thresh
class TestHighLevel:
def test_invalid_layer(self, small_cont: ImageContainer):
with pytest.raises(KeyError, match=r"Image layer `foobar` not found in"):
segment(small_cont, layer="foobar")
@pytest.mark.parametrize("method", ["watershed", dummy_segment])
def test_method(self, small_cont: ImageContainer, method: Union[str, Callable]):
res = segment(small_cont, method=method, copy=True)
assert isinstance(res, ImageContainer)
assert res.shape == small_cont.shape
if callable(method):
method = SegmentationBackend.CUSTOM.s
assert Key.img.segment(method) in res
if method in ("log", "dog", "dog"):
assert res[Key.img.segment(method)].values.max() <= 1
@pytest.mark.parametrize("dy", [11, 0.5, None])
@pytest.mark.parametrize("dx", [15, 0.1, None])
def test_size(self, small_cont: ImageContainer, dy: Optional[Union[int, float]], dx: Optional[Union[int, float]]):
res = segment(small_cont, size=(dy, dx), copy=True)
assert isinstance(res, ImageContainer)
assert res.shape == small_cont.shape
@pytest.mark.parametrize("channel", [0, 1, 2])
def test_channel(self, small_cont: ImageContainer, channel: int):
segment(small_cont, copy=False, layer="image", channel=channel)
assert Key.img.segment("watershed") in small_cont
np.testing.assert_array_equal(
list(small_cont[Key.img.segment("watershed")].dims),
["y", "x", "z", f"{small_cont["image"].dims[-1]}:{channel}"],
)
def test_all_channels(self, small_cont: ImageContainer):
def func(arr: np.ndarray):
assert arr.shape == (small_cont.shape + (n_channels,))
return np.zeros(arr.shape[:2], dtype=np.uint8)
n_channels = small_cont["image"].sizes["channels"]
segment(small_cont, copy=False, layer="image", channel=None, method=func, layer_added="seg")
np.testing.assert_array_equal(small_cont["seg"], np.zeros(small_cont.shape + (1, 1)))
assert small_cont["seg"].dtype == _SEG_DTYPE
@pytest.mark.parametrize("key_added", [None, "foo"])
def test_key_added(self, small_cont: ImageContainer, key_added: Optional[str]):
res = segment(small_cont, copy=False, layer="image", layer_added=key_added)
assert res is None
assert Key.img.segment("watershed", layer_added=key_added) in small_cont
def test_passing_kwargs(self, small_cont: ImageContainer):
def func(chunk: np.ndarray, sentinel: bool = False):
assert sentinel, "Sentinel not set."
return np.zeros(chunk[..., 0].shape, dtype=_SEG_DTYPE)
segment(
small_cont, method=func, layer="image", layer_added="bar", chunks=25, lazy=False, depth=None, sentinel=True
)
assert small_cont["bar"].values.dtype == _SEG_DTYPE
np.testing.assert_array_equal(small_cont["bar"].values, 0)
@pytest.mark.parametrize("dask_input", [False, True])
@pytest.mark.parametrize("chunks", [25, (50, 50, 1), "auto"])
@pytest.mark.parametrize("lazy", [False, True])
def test_dask_segment(
self, small_cont: ImageContainer, dask_input: bool, chunks: Union[int, Tuple[int, ...], str], lazy: bool
):
def func(chunk: np.ndarray):
if isinstance(chunks, tuple):
np.testing.assert_array_equal(chunk.shape, [chunks[0] + 2 * d, chunks[1] + 2 * d, 1])
elif isinstance(chunks, int):
np.testing.assert_array_equal(chunk.shape, [chunks + 2 * d, chunks + 2 * d, 1])
return np.zeros(chunk[..., 0].shape, dtype=_SEG_DTYPE)
small_cont["foo"] = da.asarray(small_cont["image"].data) if dask_input else small_cont["image"].values
d = 10 # overlap depth
assert isinstance(small_cont["foo"].data, da.Array if dask_input else np.ndarray)
segment(small_cont, method=func, layer="foo", layer_added="bar", chunks=chunks, lazy=lazy, depth={0: d, 1: d})
if lazy:
assert isinstance(small_cont["bar"].data, da.Array)
small_cont.compute()
assert isinstance(small_cont["foo"].data, np.ndarray)
else:
# make sure we didn't accidentally trigger foo's computation
assert isinstance(small_cont["foo"].data, da.Array if dask_input else np.ndarray)
assert isinstance(small_cont["bar"].data, np.ndarray)
assert small_cont["bar"].values.dtype == _SEG_DTYPE
np.testing.assert_array_equal(small_cont["bar"].values, 0)
def test_copy(self, small_cont: ImageContainer):
prev_keys = set(small_cont)
res = segment(small_cont, copy=True, layer="image")
assert isinstance(res, ImageContainer)
assert set(small_cont) == prev_keys
assert Key.img.segment("watershed") in res
def test_parallelize(self, small_cont: ImageContainer):
res1 = segment(small_cont, layer="image", n_jobs=1, copy=True)
res2 = segment(small_cont, layer="image", n_jobs=2, copy=True)
np.testing.assert_array_equal(
res1[Key.img.segment("watershed")].values, res2[Key.img.segment("watershed")].values
)
@pytest.mark.parametrize("chunks", [25, 50])
def test_blocking(self, small_cont: ImageContainer, chunks: int):
def func(chunk: np.ndarray):
labels = np.zeros(chunk[..., 0].shape, dtype=np.uint32)
labels[0, 0] = 1
return labels
segment(small_cont, method=func, layer="image", layer_added="bar", chunks=chunks, lazy=False, depth=None)
# blocks are labeled from top-left to bottom-right in ascending order [0, num_blocks - 1];
# the lowest n bits are allocated for the block id, the rest for the label (i.e. for blocksize=25, we need 16 block
# ids from [0, 15], which fit in 4 bits; `func` above then prepends a 1 bit, resulting
# in 16 unique labels [10000, 11111] in binary)
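# e.g. for this 100x100 test image: chunks=25 -> 16 blocks (4 block bits, first label 0b10000 = 16);
# chunks=50 -> 4 blocks (2 block bits, first label 0b100 = 4), which is what `start` below encodes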
expected = np.zeros_like(small_cont["bar"].values)
start = 16 if chunks == 25 else 4
for i in range(0, 100, chunks):
for j in range(0, 100, chunks):
expected[i, j] = start
start += 1
assert small_cont["bar"].values.dtype == _SEG_DTYPE
np.testing.assert_array_equal(small_cont["bar"].values, expected)
@pytest.mark.parametrize("size", [None, 11])
def test_watershed_works(self, size: Optional[int]):
img_orig = np.zeros((100, 200, 30), dtype=np.float64)
img_orig[2:10, 2:10] = 1.0
img_orig[30:34, 10:16] = 1.0
cont = ImageContainer(img_orig, layer="image_0")
segment(
img=cont,
method="watershed",
layer="image_0",
layer_added="segment",
size=size,
channel=0,
thresh=0.5,
)
# check that blobs are in segments
assert np.mean(cont.data["segment"].values[img_orig[:, :, 0] > 0] > 0) > 0.5
# for size=10, "fails with `size=10` due to border effects"
# the reason why there is no test for it that inside tox, it "works" (i.e. the assertion passes)
# but outside, the assertion fails, as it should
@pytest.mark.parametrize("library_id", [None, "3", ["1", "2"]])
def test_library_id(self, cont_4d: ImageContainer, library_id: Optional[Union[str, Sequence[str]]]):
def func(arr: np.ndarray):
assert arr.shape == cont_4d.shape + (1,)
return np.ones(arr[..., 0].shape, dtype=_SEG_DTYPE)
segment(cont_4d, method=func, layer="image", layer_added="image_seg", library_id=library_id, copy=False)
np.testing.assert_array_equal(cont_4d["image"].coords, cont_4d["image_seg"].coords)
if library_id is None:
np.testing.assert_array_equal(1, cont_4d["image_seg"])
else:
if isinstance(library_id, str):
library_id = [library_id]
for lid in library_id:
np.testing.assert_array_equal(1, cont_4d["image_seg"].sel(z=lid))
for lid in set(cont_4d.library_ids) - set(library_id):
# channels have been changed, apply sets to 0
np.testing.assert_array_equal(0, cont_4d["image_seg"].sel(z=lid))
| from typing import Tuple, Union, Callable, Optional, Sequence
from pytest_mock import MockerFixture
import pytest
import numpy as np
import dask.array as da
from squidpy.im import (
segment,
ImageContainer,
SegmentationCustom,
SegmentationWatershed,
)
from squidpy.im._segment import _SEG_DTYPE
from squidpy._constants._constants import SegmentationBackend
from squidpy._constants._pkg_constants import Key
def dummy_segment(arr: np.ndarray) -> np.ndarray:
assert isinstance(arr, np.ndarray)
assert arr.ndim == 3
return arr[..., 0].astype(np.uint32)
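# dummy_segment stands in for a user-supplied segmentation callable: it drops the channel axis and
# returns an integer label image, which is what SegmentationCustom is expected to wrap in these tests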
class TestGeneral:
@pytest.mark.parametrize("ndim", [2, 3])
def test_input_ndim(self, ndim: int):
img = np.zeros(shape=(10, 10))
if ndim == 3:
img = img[..., np.newaxis]
sc = SegmentationCustom(dummy_segment)
res = sc.segment(img)
assert isinstance(res, np.ndarray)
assert res.ndim == 3
if ndim == 2:
assert res.shape == img.shape + (1,)
else:
assert res.shape == img.shape
def test_segment_invalid_shape(self):
img = np.zeros(shape=(1, 10, 10, 2))
sc = SegmentationCustom(dummy_segment)
with pytest.raises(ValueError, match=r"Expected `2` or `3` dimensions"):
sc.segment(img)
def test_segment_container(self):
img = ImageContainer(np.zeros(shape=(10, 10, 1)), layer="image")
sc = SegmentationCustom(dummy_segment)
res = sc.segment(img, layer="image", library_id=img["image"].z.values[0])
assert isinstance(res, ImageContainer)
assert res.shape == img.shape
assert "image" in res
assert res["image"].dims == img["image"].dims
class TestWatershed:
@pytest.mark.parametrize("thresh", [None, 0.1, 0.5, 1.0])
def test_threshold(self, thresh: Optional[float], mocker: MockerFixture):
img = np.zeros((100, 200), dtype=np.float64)
img[2:10, 2:10] = 1.0
img[30:34, 10:16] = 1.0
img = ImageContainer(img, layer="image")
sw = SegmentationWatershed()
spy = mocker.spy(sw, "_segment")
res = sw.segment(img, layer="image", library_id=img["image"].z.values[0], fn_kwargs={"thresh": thresh})
assert isinstance(res, ImageContainer)
spy.assert_called_once()
call = spy.call_args_list[0]
assert call[1]["thresh"] == thresh
class TestHighLevel:
def test_invalid_layer(self, small_cont: ImageContainer):
with pytest.raises(KeyError, match=r"Image layer `foobar` not found in"):
segment(small_cont, layer="foobar")
@pytest.mark.parametrize("method", ["watershed", dummy_segment])
def test_method(self, small_cont: ImageContainer, method: Union[str, Callable]):
res = segment(small_cont, method=method, copy=True)
assert isinstance(res, ImageContainer)
assert res.shape == small_cont.shape
if callable(method):
method = SegmentationBackend.CUSTOM.s
assert Key.img.segment(method) in res
if method in ("log", "dog", "dog"):
assert res[Key.img.segment(method)].values.max() <= 1
@pytest.mark.parametrize("dy", [11, 0.5, None])
@pytest.mark.parametrize("dx", [15, 0.1, None])
def test_size(self, small_cont: ImageContainer, dy: Optional[Union[int, float]], dx: Optional[Union[int, float]]):
res = segment(small_cont, size=(dy, dx), copy=True)
assert isinstance(res, ImageContainer)
assert res.shape == small_cont.shape
@pytest.mark.parametrize("channel", [0, 1, 2])
def test_channel(self, small_cont: ImageContainer, channel: int):
segment(small_cont, copy=False, layer="image", channel=channel)
assert Key.img.segment("watershed") in small_cont
np.testing.assert_array_equal(
list(small_cont[Key.img.segment("watershed")].dims),
["y", "x", "z", f"{small_cont['image'].dims[-1]}:{channel}"],
)
def test_all_channels(self, small_cont: ImageContainer):
def func(arr: np.ndarray):
assert arr.shape == (small_cont.shape + (n_channels,))
return np.zeros(arr.shape[:2], dtype=np.uint8)
n_channels = small_cont["image"].sizes["channels"]
segment(small_cont, copy=False, layer="image", channel=None, method=func, layer_added="seg")
np.testing.assert_array_equal(small_cont["seg"], np.zeros(small_cont.shape + (1, 1)))
assert small_cont["seg"].dtype == _SEG_DTYPE
@pytest.mark.parametrize("key_added", [None, "foo"])
def test_key_added(self, small_cont: ImageContainer, key_added: Optional[str]):
res = segment(small_cont, copy=False, layer="image", layer_added=key_added)
assert res is None
assert Key.img.segment("watershed", layer_added=key_added) in small_cont
def test_passing_kwargs(self, small_cont: ImageContainer):
def func(chunk: np.ndarray, sentinel: bool = False):
assert sentinel, "Sentinel not set."
return np.zeros(chunk[..., 0].shape, dtype=_SEG_DTYPE)
segment(
small_cont, method=func, layer="image", layer_added="bar", chunks=25, lazy=False, depth=None, sentinel=True
)
assert small_cont["bar"].values.dtype == _SEG_DTYPE
np.testing.assert_array_equal(small_cont["bar"].values, 0)
@pytest.mark.parametrize("dask_input", [False, True])
@pytest.mark.parametrize("chunks", [25, (50, 50, 1), "auto"])
@pytest.mark.parametrize("lazy", [False, True])
def test_dask_segment(
self, small_cont: ImageContainer, dask_input: bool, chunks: Union[int, Tuple[int, ...], str], lazy: bool
):
def func(chunk: np.ndarray):
if isinstance(chunks, tuple):
np.testing.assert_array_equal(chunk.shape, [chunks[0] + 2 * d, chunks[1] + 2 * d, 1])
elif isinstance(chunks, int):
np.testing.assert_array_equal(chunk.shape, [chunks + 2 * d, chunks + 2 * d, 1])
return np.zeros(chunk[..., 0].shape, dtype=_SEG_DTYPE)
small_cont["foo"] = da.asarray(small_cont["image"].data) if dask_input else small_cont["image"].values
d = 10 # overlap depth
assert isinstance(small_cont["foo"].data, da.Array if dask_input else np.ndarray)
segment(small_cont, method=func, layer="foo", layer_added="bar", chunks=chunks, lazy=lazy, depth={0: d, 1: d})
if lazy:
assert isinstance(small_cont["bar"].data, da.Array)
small_cont.compute()
assert isinstance(small_cont["foo"].data, np.ndarray)
else:
# make sure we didn't accidentally trigger foo's computation
assert isinstance(small_cont["foo"].data, da.Array if dask_input else np.ndarray)
assert isinstance(small_cont["bar"].data, np.ndarray)
assert small_cont["bar"].values.dtype == _SEG_DTYPE
np.testing.assert_array_equal(small_cont["bar"].values, 0)
def test_copy(self, small_cont: ImageContainer):
prev_keys = set(small_cont)
res = segment(small_cont, copy=True, layer="image")
assert isinstance(res, ImageContainer)
assert set(small_cont) == prev_keys
assert Key.img.segment("watershed") in res
def test_parallelize(self, small_cont: ImageContainer):
res1 = segment(small_cont, layer="image", n_jobs=1, copy=True)
res2 = segment(small_cont, layer="image", n_jobs=2, copy=True)
np.testing.assert_array_equal(
res1[Key.img.segment("watershed")].values, res2[Key.img.segment("watershed")].values
)
@pytest.mark.parametrize("chunks", [25, 50])
def test_blocking(self, small_cont: ImageContainer, chunks: int):
def func(chunk: np.ndarray):
labels = np.zeros(chunk[..., 0].shape, dtype=np.uint32)
labels[0, 0] = 1
return labels
segment(small_cont, method=func, layer="image", layer_added="bar", chunks=chunks, lazy=False, depth=None)
# blocks are labeled from top-left to bottom-right in ascending order [0, num_blocks - 1];
# the lowest n bits are allocated for the block id, the rest for the label (i.e. for blocksize=25, we need 16 block
# ids from [0, 15], which fit in 4 bits; `func` above then prepends a 1 bit, resulting
# in 16 unique labels [10000, 11111] in binary)
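# e.g. for this 100x100 test image: chunks=25 -> 16 blocks (4 block bits, first label 0b10000 = 16);
# chunks=50 -> 4 blocks (2 block bits, first label 0b100 = 4), which is what `start` below encodes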
expected = np.zeros_like(small_cont["bar"].values)
start = 16 if chunks == 25 else 4
for i in range(0, 100, chunks):
for j in range(0, 100, chunks):
expected[i, j] = start
start += 1
assert small_cont["bar"].values.dtype == _SEG_DTYPE
np.testing.assert_array_equal(small_cont["bar"].values, expected)
@pytest.mark.parametrize("size", [None, 11])
def test_watershed_works(self, size: Optional[int]):
img_orig = np.zeros((100, 200, 30), dtype=np.float64)
img_orig[2:10, 2:10] = 1.0
img_orig[30:34, 10:16] = 1.0
cont = ImageContainer(img_orig, layer="image_0")
segment(
img=cont,
method="watershed",
layer="image_0",
layer_added="segment",
size=size,
channel=0,
thresh=0.5,
)
# check that blobs are in segments
assert np.mean(cont.data["segment"].values[img_orig[:, :, 0] > 0] > 0) > 0.5
# for size=10, "fails with `size=10` due to border effects"
# the reason why there is no test for it that inside tox, it "works" (i.e. the assertion passes)
# but outside, the assertion fails, as it should
@pytest.mark.parametrize("library_id", [None, "3", ["1", "2"]])
def test_library_id(self, cont_4d: ImageContainer, library_id: Optional[Union[str, Sequence[str]]]):
def func(arr: np.ndarray):
assert arr.shape == cont_4d.shape + (1,)
return np.ones(arr[..., 0].shape, dtype=_SEG_DTYPE)
segment(cont_4d, method=func, layer="image", layer_added="image_seg", library_id=library_id, copy=False)
np.testing.assert_array_equal(cont_4d["image"].coords, cont_4d["image_seg"].coords)
if library_id is None:
np.testing.assert_array_equal(1, cont_4d["image_seg"])
else:
if isinstance(library_id, str):
library_id = [library_id]
for lid in library_id:
np.testing.assert_array_equal(1, cont_4d["image_seg"].sel(z=lid))
for lid in set(cont_4d.library_ids) - set(library_id):
# channels have been changed, apply sets to 0
np.testing.assert_array_equal(0, cont_4d["image_seg"].sel(z=lid))
|
def destructure(obj, *params):
import operator
return operator.itemgetter(*params)(obj)
def greet(**kwargs):
year, day, puzzle = destructure(kwargs, 'year', 'day', 'puzzle')
print('Advent of Code')
print(f'-> {year}-{day}-{puzzle}')
print('--------------')
def load_data(filename):
with filename.open('r') as handle:
return handle.read()
def start(fn):
import pathlib
base_path = pathlib.Path(__file__).parent.parent / 'data'
def wrapped(*args, **kwargs):
greet(**kwargs)
data = load_data(base_path / f'{kwargs['year']}.{kwargs['day']}.txt')
return fn(data, *args, **kwargs)
return wrapped
def flatten_json(nested_json):
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
for i, a in enumerate(x):
flatten(a, name + str(i) + '_')
else:
out[name[:-1]] = x
flatten(nested_json)
return out
def sparse_matrix():
from collections import defaultdict
return defaultdict(lambda: 0)
| def destructure(obj, *params):
import operator
return operator.itemgetter(*params)(obj)
def greet(**kwargs):
year, day, puzzle = destructure(kwargs, 'year', 'day', 'puzzle')
print('Advent of Code')
print(f'-> {year}-{day}-{puzzle}')
print('--------------')
def load_data(filename):
with filename.open('r') as handle:
return handle.read()
def start(fn):
import pathlib
base_path = pathlib.Path(__file__).parent.parent / 'data'
def wrapped(*args, **kwargs):
greet(**kwargs)
data = load_data(base_path / f'{kwargs["year"]}.{kwargs["day"]}.txt')
return fn(data, *args, **kwargs)
return wrapped
def flatten_json(nested_json):
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
for i, a in enumerate(x):
flatten(a, name + str(i) + '_')
else:
out[name[:-1]] = x
flatten(nested_json)
return out
def sparse_matrix():
from collections import defaultdict
return defaultdict(lambda: 0)
|
import os
import pytest
import sys
import random
import tempfile
import time
import requests
from pathlib import Path
import ray
from ray.exceptions import RuntimeEnvSetupError
from ray._private.test_utils import (
run_string_as_driver, run_string_as_driver_nonblocking, wait_for_condition)
from ray._private.utils import (get_wheel_filename, get_master_wheel_url,
get_release_wheel_url)
import ray.experimental.internal_kv as kv
from time import sleep
driver_script = """
from time import sleep
import sys
import logging
sys.path.insert(0, "{working_dir}")
import ray
import ray.util
import os
try:
import test_module
except:
pass
try:
job_config = ray.job_config.JobConfig(
runtime_env={runtime_env}
)
if not job_config.runtime_env:
job_config=None
if os.environ.get("USE_RAY_CLIENT"):
ray.client("{address}").env({runtime_env}).namespace("").connect()
else:
ray.init(address="{address}",
job_config=job_config,
logging_level=logging.DEBUG,
namespace=""
)
except ValueError:
print("ValueError")
sys.exit(0)
except TypeError:
print("TypeError")
sys.exit(0)
except:
print("ERROR")
sys.exit(0)
if os.environ.get("EXIT_AFTER_INIT"):
sys.exit(0)
@ray.remote
def run_test():
return test_module.one()
@ray.remote
def check_file(name):
try:
with open(name) as f:
return f.read()
except:
return "FAILED"
@ray.remote
class TestActor(object):
@ray.method(num_returns=1)
def one(self):
return test_module.one()
{execute_statement}
if os.environ.get("USE_RAY_CLIENT"):
ray.util.disconnect()
else:
ray.shutdown()
sleep(10)
"""
def create_file(p):
if not p.parent.exists():
p.parent.mkdir()
with p.open("w") as f:
f.write("Test")
@pytest.fixture(scope="function")
def working_dir():
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
module_path = path / "test_module"
module_path.mkdir(parents=True)
init_file = module_path / "__init__.py"
test_file = module_path / "test.py"
with test_file.open(mode="w") as f:
f.write("""
def one():
return 1
""")
with init_file.open(mode="w") as f:
f.write("""
from test_module.test import one
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
yield tmp_dir
os.chdir(old_dir)
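# Helper: in client mode, start a Ray Client server on port 10003 and return its address plus the
# env flag the driver template checks; otherwise return the plain cluster address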
def start_client_server(cluster, client_mode):
from ray._private.runtime_env import PKG_DIR
if not client_mode:
return (cluster.address, {}, PKG_DIR)
ray.worker._global_node._ray_params.ray_client_server_port = "10003"
ray.worker._global_node.start_ray_client_server()
return ("localhost:10003", {"USE_RAY_CLIENT": "1"}, PKG_DIR)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_travel():
import uuid
with tempfile.TemporaryDirectory() as tmp_dir:
dir_paths = set()
file_paths = set()
item_num = 0
excludes = []
root = Path(tmp_dir) / "test"
def construct(path, excluded=False, depth=0):
nonlocal item_num
path.mkdir(parents=True)
if not excluded:
dir_paths.add(str(path))
if depth > 8:
return
if item_num > 500:
return
dir_num = random.randint(0, 10)
file_num = random.randint(0, 10)
for _ in range(dir_num):
uid = str(uuid.uuid4()).split("-")[0]
dir_path = path / uid
exclud_sub = random.randint(0, 5) == 0
if not excluded and exclud_sub:
excludes.append(str(dir_path.relative_to(root)))
if not excluded:
construct(dir_path, exclud_sub or excluded, depth + 1)
item_num += 1
if item_num > 1000:
return
for _ in range(file_num):
uid = str(uuid.uuid4()).split("-")[0]
with (path / uid).open("w") as f:
v = random.randint(0, 1000)
f.write(str(v))
if not excluded:
if random.randint(0, 5) == 0:
excludes.append(
str((path / uid).relative_to(root)))
else:
file_paths.add((str(path / uid), str(v)))
item_num += 1
construct(root)
exclude_spec = ray._private.runtime_env._get_excludes(root, excludes)
visited_dir_paths = set()
visited_file_paths = set()
def handler(path):
if path.is_dir():
visited_dir_paths.add(str(path))
else:
with open(path) as f:
visited_file_paths.add((str(path), f.read()))
ray._private.runtime_env._dir_travel(root, [exclude_spec], handler)
assert file_paths == visited_file_paths
assert dir_paths == visited_dir_paths
"""
The following test cases are related to runtime env. They follow these steps:
1) Create a temporary dir with the working_dir fixture
2) Use the globally defined driver_script template
3) Override runtime_env and execute_statement in the template
4) Run the result as a separate driver and check its output
"""
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_empty_working_dir(ray_start_cluster_head, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
with tempfile.TemporaryDirectory() as working_dir:
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"py_modules": [r"{working_dir}"]
}}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "sys.exit(0)"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out != "ERROR"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_invalid_working_dir(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
runtime_env = "{ 'working_dir': 10 }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = "{ 'py_modules': [10] }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = f"{{ "working_dir": os.path.join(r"{working_dir}', 'na') }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
runtime_env = f"{{ "py_modules": [os.path.join(r"{working_dir}', 'na')] }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_single_node(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Setup runtime env here
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Testing runtime env with working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_module(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
    # Test runtime_env with py_modules
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_local_file(two_node_cluster, working_dir, client_mode):
with open(os.path.join(working_dir, "test_file"), "w") as f:
f.write("1")
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
    # Test runtime_env with working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
vals = ray.get([check_file.remote('test_file')] * 1000)
print(sum([int(v) for v in vals]))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
    # Check that all files are readable before any excludes are applied
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test"
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"excludes": [
# exclude by relative path
r"test2",
# exclude by dir
r"{str(Path("tmp_dir") / "sub_dir")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_1")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_2")}",
]
}}"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split("\n")[-1] == \
"Test,FAILED,Test,FAILED,FAILED,Test,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion_2(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
def create_file(p):
if not p.parent.exists():
p.parent.mkdir(parents=True)
with p.open("w") as f:
f.write("Test")
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
create_file(working_path / "cache" / "test_1")
create_file(working_path / "tmp_dir" / "cache" / "test_1")
create_file(working_path / "another_dir" / "cache" / "test_1")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
check_file.remote(os.path.join("cache", "test_1")),
check_file.remote(os.path.join("tmp_dir", "cache", "test_1")),
check_file.remote(os.path.join("another_dir", "cache", "test_1")),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
    # Check that all files are readable before the .gitignore excludes are applied
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test,Test,Test,Test"
with open(f"{working_dir}/.gitignore", "w") as f:
f.write("""
# Comment
test_[12]
/test1
!/tmp_dir/sub_dir/test_1
cache/
""")
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
t = out.strip().split("\n")[-1]
assert out.strip().split("\n")[-1] == \
"FAILED,Test,Test,FAILED,FAILED,Test,Test,FAILED,FAILED,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_runtime_env_getter(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
print(ray.get_runtime_context().runtime_env["working_dir"])
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == working_dir
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_uri(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
import ray._private.runtime_env as runtime_env
import tempfile
with tempfile.NamedTemporaryFile(suffix="zip") as tmp_file:
pkg_name = runtime_env.get_project_package_name(working_dir, [], [])
pkg_uri = runtime_env.Protocol.PIN_GCS.value + "://" + pkg_name
runtime_env.create_project_package(working_dir, [], [], tmp_file.name)
runtime_env.push_package(pkg_uri, tmp_file.name)
runtime_env = f"""{{ "uris": ["{pkg_uri}"] }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
# pinned uri will not be deleted
print(list(kv._internal_kv_list("")))
assert len(kv._internal_kv_list("pingcs://")) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_regular_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_detached_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor", lifetime="detached").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
    # It's a detached actor, so it should still be there
assert len(kv._internal_kv_list("gcs://")) == 1
assert len(list(Path(PKG_DIR).iterdir())) == 2
pkg_dir = [f for f in Path(PKG_DIR).glob("*") if f.is_dir()][0]
import sys
sys.path.insert(0, str(pkg_dir))
test_actor = ray.get_actor("test_actor")
assert sum(ray.get([test_actor.one.remote()] * 1000)) == 1000
ray.kill(test_actor)
from time import sleep
sleep(5)
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_1(ray_start_cluster_head, working_dir):
# start job_config=None
# start job_config=something
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = None
    # Make the first driver hang so the second can run concurrently
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
# Have one running with job config = None
proc = run_string_as_driver_nonblocking(script, env)
    # Wait for the first driver to come up
sleep(5)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
    # Execute the second driver; it should work because it connects through the Ray Client server.
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_2(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=None
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
    # Make the first driver hang so the second can run concurrently
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = None
    # Execute the following in the second driver, which should succeed.
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "OK", out
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_3(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=something else
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
    # Make the first driver hang so the second can run concurrently
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = f"""
{{ "working_dir": test_module.__path__[0] }}""" # noqa: F541
# Execute the following cmd in the second one and ensure that
# it is able to run.
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
proc.kill()
proc.wait()
assert out.strip().split()[-1] == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_util_without_job_config(shutdown_only):
from ray.cluster_utils import Cluster
with tempfile.TemporaryDirectory() as tmp_dir:
with (Path(tmp_dir) / "lib.py").open("w") as f:
f.write("""
def one():
return 1
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
cluster = Cluster()
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
(address, env, PKG_DIR) = start_client_server(cluster, True)
script = f"""
import ray
import ray.util
import os
ray.util.connect("{address}", job_config=None)
@ray.remote
def run():
from lib import one
return one()
print(ray.get([run.remote()])[0])
"""
out = run_string_as_driver(script, env)
print(out)
os.chdir(old_dir)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_init(shutdown_only):
with tempfile.TemporaryDirectory() as tmp_dir:
old_dir = os.getcwd()
os.chdir(tmp_dir)
with open("hello", "w") as f:
f.write("world")
ray.init(runtime_env={"working_dir": "."})
@ray.remote
class Test:
def test(self):
with open("hello") as f:
return f.read()
t = Test.remote()
assert ray.get(t.test.remote()) == "world"
os.chdir(old_dir)
def test_get_wheel_filename():
ray_version = "2.0.0.dev0"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
filename = get_wheel_filename(sys_platform, ray_version,
py_version)
prefix = "https://s3-us-west-2.amazonaws.com/ray-wheels/latest/"
url = f"{prefix}{filename}"
assert requests.head(url).status_code == 200
def test_get_master_wheel_url():
ray_version = "2.0.0.dev0"
test_commit = "58a73821fbfefbf53a19b6c7ffd71e70ccf258c7"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
url = get_master_wheel_url(test_commit, sys_platform, ray_version,
py_version)
assert requests.head(url).status_code == 200, url
def test_get_release_wheel_url():
test_commits = {"1.6.0": "5052fe67d99f1d4bfc81b2a8694dbf2aa807bbdc"}
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
for version, commit in test_commits.items():
url = get_release_wheel_url(commit, sys_platform, version,
py_version)
assert requests.head(url).status_code == 200, url
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_task(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_actor(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_complex(shutdown_only):
ray.init(
job_config=ray.job_config.JobConfig(
runtime_env={"env_vars": {
"foo": "job"
}}))
@ray.remote
def env_from_job():
return os.environ.get("foo")
assert ray.get(env_from_job.remote()) == "job"
@ray.remote(runtime_env={"env_vars": {"foo": "task"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "task"
@ray.remote(runtime_env={"env_vars": {"foo": "actor"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "actor"
# Test that runtime_env can be overridden by specifying .options().
assert ray.get(
f.options(runtime_env={
"env_vars": {
"foo": "new"
}
}).remote()) == "new"
a = A.options(runtime_env={"env_vars": {"foo": "new2"}}).remote()
assert ray.get(a.g.remote()) == "new2"
def test_container_option_serialize():
runtime_env = {
"container": {
"image": "ray:latest",
"run_options": ["--name=test"]
}
}
job_config = ray.job_config.JobConfig(runtime_env=runtime_env)
job_config_serialized = job_config.serialize()
# job_config_serialized is JobConfig protobuf serialized string,
# job_config.runtime_env.raw_json has container_option info
# job_config.serialized_runtime_env also has container_option info
assert job_config_serialized.count(b"image") == 2
def test_working_dir_override_failure(shutdown_only):
ray.init()
@ray.remote(runtime_env={"working_dir": "."})
def f():
pass
with pytest.raises(NotImplementedError):
f.remote()
@ray.remote
def g():
pass
with pytest.raises(NotImplementedError):
g.options(runtime_env={"working_dir": "."}).remote()
@ray.remote(runtime_env={"working_dir": "."})
class A:
pass
with pytest.raises(NotImplementedError):
A.remote()
@ray.remote
class B:
pass
with pytest.raises(NotImplementedError):
B.options(runtime_env={"working_dir": "."}).remote()
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_invalid_conda_env(shutdown_only):
ray.init()
@ray.remote
def f():
pass
start = time.time()
bad_env = {"conda": {"dependencies": ["this_doesnt_exist"]}}
with pytest.raises(RuntimeEnvSetupError):
ray.get(f.options(runtime_env=bad_env).remote())
first_time = time.time() - start
# Check that another valid task can run.
ray.get(f.remote())
# The second time this runs it should be faster as the error is cached.
start = time.time()
with pytest.raises(RuntimeEnvSetupError):
ray.get(f.options(runtime_env=bad_env).remote())
assert (time.time() - start) < (first_time / 2.0)
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
@pytest.mark.parametrize(
"ray_start_cluster", [{
"_system_config": {
"event_stats_print_interval_ms": 100,
"debug_dump_period_milliseconds": 100,
"event_stats": True
}
}],
indirect=True)
def test_no_spurious_worker_startup(ray_start_cluster):
"""Test that no extra workers start up during a long env installation."""
cluster = ray_start_cluster
# This hook sleeps for 15 seconds to simulate creating a runtime env.
cluster.add_node(
num_cpus=1,
runtime_env_setup_hook=(
"ray._private.test_utils.sleep_setup_runtime_env"))
# Set a nonempty runtime env so that the runtime env setup hook is called.
runtime_env = {"env_vars": {"a": "b"}}
ray.init(address=cluster.address)
@ray.remote
class Counter(object):
def __init__(self):
self.value = 0
def get(self):
return self.value
# Instantiate an actor that requires the long runtime env installation.
a = Counter.options(runtime_env=runtime_env).remote()
assert ray.get(a.get.remote()) == 0
# Check "debug_state.txt" to ensure no extra workers were started.
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
debug_state_path = session_path / "debug_state.txt"
def get_num_workers():
with open(debug_state_path) as f:
for line in f.readlines():
num_workers_prefix = "- num PYTHON workers: "
if num_workers_prefix in line:
return int(line[len(num_workers_prefix):])
return None
# Wait for "debug_state.txt" to be updated to reflect the started worker.
start = time.time()
wait_for_condition(lambda: get_num_workers() > 0)
time_waited = time.time() - start
print(f"Waited {time_waited} for debug_state.txt to be updated")
# If any workers were unnecessarily started during the initial env
# installation, they will bypass the runtime env setup hook because the
# created env will have been cached and should be added to num_workers
# within a few seconds. Adjusting the default update period for
    # debug_state.txt via this cluster_utils pytest fixture seems to be broken,
# so just check it for the next 10 seconds (the default period).
for i in range(100):
# Check that no more workers were started.
assert get_num_workers() <= 1
time.sleep(0.1)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-sv", __file__]))
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import configparser
import getpass
import itertools
import os
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from functools import partial
from hashlib import sha1
from typing import Any, ClassVar, Dict, Iterable, List, Mapping, Sequence, Union, cast
import toml
from typing_extensions import Protocol
from pants.base.build_environment import get_buildroot
from pants.option.ranked_value import Value
from pants.util.eval import parse_expression
from pants.util.ordered_set import OrderedSet
# A dict with optional override seed values for buildroot, pants_workdir, and pants_distdir.
SeedValues = Dict[str, Value]
class ConfigSource(Protocol):
"""A protocol that matches pants.engine.fs.FileContent.
Also matches the ad-hoc FileContent-like class we use during options bootstrapping, where we
cannot use pants.engine.fs.FileContent itself due to circular imports.
"""
@property
def path(self) -> str:
raise NotImplementedError()
@property
def content(self) -> bytes:
raise NotImplementedError()
class Config(ABC):
"""Encapsulates config file loading and access, including encapsulation of support for multiple
config files.
Supports variable substitution using old-style Python format strings. E.g., %(var_name)s will be
replaced with the value of var_name.
"""
DEFAULT_SECTION: ClassVar[str] = configparser.DEFAULTSECT
class ConfigError(Exception):
pass
class ConfigValidationError(ConfigError):
pass
@classmethod
def load(
cls,
file_contents: Iterable[ConfigSource],
*,
seed_values: SeedValues | None = None,
) -> Config:
"""Loads config from the given string payloads, with later payloads overriding earlier ones.
A handful of seed values will be set to act as if specified in the loaded config file's
DEFAULT section, and be available for use in substitutions. The caller may override some of
these seed values.
"""
single_file_configs = []
for file_content in file_contents:
content_digest = sha1(file_content.content).hexdigest()
normalized_seed_values = cls._determine_seed_values(seed_values=seed_values)
try:
config_values = cls._parse_toml(
file_content.content.decode(), normalized_seed_values
)
except Exception as e:
raise cls.ConfigError(
f"Config file {file_content.path} could not be parsed as TOML:\n {e}"
)
single_file_configs.append(
_SingleFileConfig(
config_path=file_content.path,
content_digest=content_digest,
values=config_values,
),
)
return _ChainedConfig(tuple(reversed(single_file_configs)))
@classmethod
def _parse_toml(
cls, config_content: str, normalized_seed_values: dict[str, str]
) -> _ConfigValues:
"""Attempt to parse as TOML, raising an exception on failure."""
toml_values = cast(Dict[str, Any], toml.loads(config_content))
toml_values["DEFAULT"] = {
**normalized_seed_values,
**toml_values.get("DEFAULT", {}),
}
return _ConfigValues(toml_values)
@staticmethod
def _determine_seed_values(*, seed_values: SeedValues | None = None) -> dict[str, str]:
"""We pre-populate several default values to allow %([key-name])s interpolation.
This sets up those defaults and checks if the user overrode any of the values.
"""
safe_seed_values = seed_values or {}
buildroot = cast(str, safe_seed_values.get("buildroot", get_buildroot()))
all_seed_values: dict[str, str] = {
"buildroot": buildroot,
"homedir": os.path.expanduser("~"),
"user": getpass.getuser(),
}
def update_seed_values(key: str, *, default_dir: str) -> None:
all_seed_values[key] = cast(
str, safe_seed_values.get(key, os.path.join(buildroot, default_dir))
)
update_seed_values("pants_workdir", default_dir=".pants.d")
update_seed_values("pants_distdir", default_dir="dist")
return all_seed_values
def get(self, section, option, type_=str, default=None):
"""Retrieves option from the specified section (or 'DEFAULT') and attempts to parse it as
type.
If the specified section does not exist or is missing a definition for the option, the value
is looked up in the DEFAULT section. If there is still no definition found, the default
value supplied is returned.
"""
if not self.has_option(section, option):
return default
raw_value = self.get_value(section, option)
if issubclass(type_, str):
return raw_value
key = f"{section}.{option}"
return parse_expression(
name=key, val=raw_value, acceptable_types=type_, raise_type=self.ConfigError
)
@abstractmethod
def configs(self) -> Sequence[_SingleFileConfig]:
"""Returns the underlying single-file configs represented by this object."""
@abstractmethod
def sources(self) -> list[str]:
"""Returns the sources of this config as a list of filenames."""
@abstractmethod
def sections(self) -> list[str]:
"""Returns the sections in this config (not including DEFAULT)."""
@abstractmethod
def has_section(self, section: str) -> bool:
"""Returns whether this config has the section."""
@abstractmethod
def has_option(self, section: str, option: str) -> bool:
"""Returns whether this config specified a value for the option."""
@abstractmethod
def get_value(self, section: str, option: str) -> str | None:
"""Returns the value of the option in this config as a string, or None if no value
specified."""
@abstractmethod
def get_source_for_option(self, section: str, option: str) -> str | None:
"""Returns the path to the source file the given option was defined in.
:param section: the scope of the option.
:param option: the name of the option.
:returns: the path to the config file, or None if the option was not defined by a config file.
"""
_TomlPrimitive = Union[bool, int, float, str]
_TomlValue = Union[_TomlPrimitive, List[_TomlPrimitive]]
@dataclass(frozen=True)
class _ConfigValues:
"""The parsed contents of a TOML config file."""
values: dict[str, Any]
@staticmethod
def _is_an_option(option_value: _TomlValue | dict) -> bool:
"""Determine if the value is actually an option belonging to that section.
This handles the special syntax of `my_list_option.add` and `my_list_option.remove`.
"""
if isinstance(option_value, dict):
return "add" in option_value or "remove" in option_value
return True
def _possibly_interpolate_value(
self,
raw_value: str,
*,
option: str,
section: str,
section_values: dict,
) -> str:
"""For any values with %(foo)s, substitute it with the corresponding value from DEFAULT or
the same section."""
def format_str(value: str) -> str:
# Because dictionaries use the symbols `{}`, we must proactively escape the symbols so
# that .format() does not try to improperly interpolate.
escaped_str = value.replace("{", "{{").replace("}", "}}")
new_style_format_str = re.sub(
pattern=r"%\((?P<interpolated>[a-zA-Z_0-9]*)\)s",
repl=r"{\g<interpolated>}",
string=escaped_str,
)
try:
possible_interpolations = {**self.defaults, **section_values}
return new_style_format_str.format(**possible_interpolations)
except KeyError as e:
bad_reference = e.args[0]
raise configparser.InterpolationMissingOptionError(
option,
section,
raw_value,
bad_reference,
)
def recursively_format_str(value: str) -> str:
# It's possible to interpolate with a value that itself has an interpolation. We must
# fully evaluate all expressions for parity with configparser.
match = re.search(r"%\(([a-zA-Z_0-9]*)\)s", value)
if not match:
return value
return recursively_format_str(value=format_str(value))
return recursively_format_str(raw_value)
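# Illustrative sketch of the interpolation above (paths and option names are
# hypothetical): with DEFAULT providing buildroot = "/repo", a section value such as
#   plugins_dir = "%(buildroot)s/plugins"
# is returned as "/repo/plugins". Interpolations may themselves interpolate, e.g.
#   cache_dir = "%(plugins_dir)s/cache"
# resolves recursively to "/repo/plugins/cache".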
def _stringify_val(
self,
raw_value: _TomlValue,
*,
option: str,
section: str,
section_values: dict,
interpolate: bool = True,
list_prefix: str | None = None,
) -> str:
"""For parity with configparser, we convert all values back to strings, which allows us to
avoid upstream changes to files like parser.py.
This is clunky. If we drop INI support, we should remove this and use native values
(although we must still support interpolation).
"""
possibly_interpolate = partial(
self._possibly_interpolate_value,
option=option,
section=section,
section_values=section_values,
)
if isinstance(raw_value, str):
return possibly_interpolate(raw_value) if interpolate else raw_value
if isinstance(raw_value, list):
def stringify_list_member(member: _TomlPrimitive) -> str:
if not isinstance(member, str):
return str(member)
interpolated_member = possibly_interpolate(member) if interpolate else member
return f'"{interpolated_member}"'
list_members = ", ".join(stringify_list_member(member) for member in raw_value)
return f"{list_prefix or ""}[{list_members}]"
return str(raw_value)
def _stringify_val_without_interpolation(self, raw_value: _TomlValue) -> str:
return self._stringify_val(
raw_value,
option="",
section="",
section_values={},
interpolate=False,
)
@property
def sections(self) -> list[str]:
return [scope for scope in self.values if scope != "DEFAULT"]
def has_section(self, section: str) -> bool:
return section in self.values
def has_option(self, section: str, option: str) -> bool:
if not self.has_section(section):
return False
return option in self.values[section] or option in self.defaults
def get_value(self, section: str, option: str) -> str | None:
section_values = self.values.get(section)
if section_values is None:
raise configparser.NoSectionError(section)
stringify = partial(
self._stringify_val,
option=option,
section=section,
section_values=section_values,
)
if option not in section_values:
if option in self.defaults:
return stringify(raw_value=self.defaults[option])
raise configparser.NoOptionError(option, section)
option_value = section_values[option]
if not isinstance(option_value, dict):
return stringify(option_value)
# Handle dict options, along with the special `my_list_option.add` and
# `my_list_option.remove` syntax. We only treat `add` and `remove` as the special list
# syntax if the values are lists to reduce the risk of incorrectly special casing.
has_add = isinstance(option_value.get("add"), list)
has_remove = isinstance(option_value.get("remove"), list)
if not has_add and not has_remove:
return stringify(option_value)
add_val = stringify(option_value["add"], list_prefix="+") if has_add else None
remove_val = stringify(option_value["remove"], list_prefix="-") if has_remove else None
if has_add and has_remove:
return f"{add_val},{remove_val}"
if has_add:
return add_val
return remove_val
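# Illustrative sketch of the special list syntax handled above (the option name is
# hypothetical): a TOML table like
#
#   [GLOBAL]
#   backend_packages.add = ["pants.backend.python"]
#   backend_packages.remove = ["pants.backend.go"]
#
# makes get_value("GLOBAL", "backend_packages") return
# '+["pants.backend.python"],-["pants.backend.go"]', i.e. the add list prefixed with
# "+" and the remove list prefixed with "-", joined by a comma.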
def options(self, section: str) -> list[str]:
section_values = self.values.get(section)
if section_values is None:
raise configparser.NoSectionError(section)
return [
*section_values.keys(),
*(
default_option
for default_option in self.defaults
if default_option not in section_values
),
]
@property
def defaults(self) -> dict[str, str]:
return {
option: self._stringify_val_without_interpolation(option_val)
for option, option_val in self.values["DEFAULT"].items()
}
@dataclass(frozen=True, eq=False)
class _SingleFileConfig(Config):
"""Config read from a single file."""
config_path: str
content_digest: str
values: _ConfigValues
def configs(self) -> list[_SingleFileConfig]:
return [self]
def sources(self) -> list[str]:
return [self.config_path]
def sections(self) -> list[str]:
return self.values.sections
def has_section(self, section: str) -> bool:
return self.values.has_section(section)
def has_option(self, section: str, option: str) -> bool:
return self.values.has_option(section, option)
def get_value(self, section: str, option: str) -> str | None:
return self.values.get_value(section, option)
def get_source_for_option(self, section: str, option: str) -> str | None:
if self.has_option(section, option):
return self.sources()[0]
return None
def __repr__(self) -> str:
return f"SingleFileConfig({self.config_path})"
def __eq__(self, other: Any) -> bool:
if not isinstance(other, _SingleFileConfig):
return NotImplemented
return self.config_path == other.config_path and self.content_digest == other.content_digest
def __hash__(self) -> int:
return hash(self.content_digest)
@dataclass(frozen=True)
class _ChainedConfig(Config):
"""Config read from multiple sources."""
# Config instances to chain. Later instances take precedence over earlier ones.
chained_configs: tuple[_SingleFileConfig, ...]
@property
def _configs(self) -> tuple[_SingleFileConfig, ...]:
return self.chained_configs
def configs(self) -> tuple[_SingleFileConfig, ...]:
return self.chained_configs
def sources(self) -> list[str]:
# NB: Present the sources in the order we were given them.
return list(itertools.chain.from_iterable(cfg.sources() for cfg in reversed(self._configs)))
def sections(self) -> list[str]:
ret: OrderedSet[str] = OrderedSet()
for cfg in self._configs:
ret.update(cfg.sections())
return list(ret)
def has_section(self, section: str) -> bool:
for cfg in self._configs:
if cfg.has_section(section):
return True
return False
def has_option(self, section: str, option: str) -> bool:
for cfg in self._configs:
if cfg.has_option(section, option):
return True
return False
def get_value(self, section: str, option: str) -> str | None:
for cfg in self._configs:
try:
return cfg.get_value(section, option)
except (configparser.NoSectionError, configparser.NoOptionError):
pass
if not self.has_section(section):
raise configparser.NoSectionError(section)
raise configparser.NoOptionError(option, section)
def get_source_for_option(self, section: str, option: str) -> str | None:
for cfg in self._configs:
if cfg.has_option(section, option):
return cfg.get_source_for_option(section, option)
return None
def __repr__(self) -> str:
return f"ChainedConfig({self.sources()})"
@dataclass(frozen=True)
class TomlSerializer:
"""Convert a dictionary of option scopes -> Python values into TOML understood by Pants.
The constructor expects a dictionary of option scopes to their corresponding values as
represented in Python. For example:
{
"GLOBAL": {
"o1": True,
"o2": "hello",
"o3": [0, 1, 2],
},
"some-subsystem": {
"dict_option": {
"a": 0,
"b": 0,
},
},
}
"""
parsed: Mapping[str, dict[str, int | float | str | bool | list | dict]]
def normalize(self) -> dict:
def normalize_section_value(option, option_value) -> tuple[str, Any]:
# With TOML, we store dict values as strings (for now).
if isinstance(option_value, dict):
option_value = str(option_value)
if option.endswith(".add"):
option = option.rsplit(".", 1)[0]
option_value = f"+{option_value!r}"
elif option.endswith(".remove"):
option = option.rsplit(".", 1)[0]
option_value = f"-{option_value!r}"
return option, option_value
return {
section: dict(
normalize_section_value(option, option_value)
for option, option_value in section_values.items()
)
for section, section_values in self.parsed.items()
}
def serialize(self) -> str:
toml_values = self.normalize()
return toml.dumps(toml_values)
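# Illustrative sketch of TomlSerializer (the option name is hypothetical): normalize()
# folds the ".add"/".remove" suffixes back into the option name, so
#
#   TomlSerializer({"GLOBAL": {"backend_packages.add": ["pants.backend.python"]}}).normalize()
#
# yields {"GLOBAL": {"backend_packages": "+['pants.backend.python']"}}, which
# serialize() then renders with toml.dumps().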
| # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import configparser
import getpass
import itertools
import os
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from functools import partial
from hashlib import sha1
from typing import Any, ClassVar, Dict, Iterable, List, Mapping, Sequence, Union, cast
import toml
from typing_extensions import Protocol
from pants.base.build_environment import get_buildroot
from pants.option.ranked_value import Value
from pants.util.eval import parse_expression
from pants.util.ordered_set import OrderedSet
# A dict with optional override seed values for buildroot, pants_workdir, and pants_distdir.
SeedValues = Dict[str, Value]
class ConfigSource(Protocol):
"""A protocol that matches pants.engine.fs.FileContent.
Also matches the ad-hoc FileContent-like class we use during options bootstrapping, where we
cannot use pants.engine.fs.FileContent itself due to circular imports.
"""
@property
def path(self) -> str:
raise NotImplementedError()
@property
def content(self) -> bytes:
raise NotImplementedError()
class Config(ABC):
"""Encapsulates config file loading and access, including encapsulation of support for multiple
config files.
Supports variable substitution using old-style Python format strings. E.g., %(var_name)s will be
replaced with the value of var_name.
"""
DEFAULT_SECTION: ClassVar[str] = configparser.DEFAULTSECT
class ConfigError(Exception):
pass
class ConfigValidationError(ConfigError):
pass
@classmethod
def load(
cls,
file_contents: Iterable[ConfigSource],
*,
seed_values: SeedValues | None = None,
) -> Config:
"""Loads config from the given string payloads, with later payloads overriding earlier ones.
A handful of seed values will be set to act as if specified in the loaded config file's
DEFAULT section, and be available for use in substitutions. The caller may override some of
these seed values.
"""
single_file_configs = []
for file_content in file_contents:
content_digest = sha1(file_content.content).hexdigest()
normalized_seed_values = cls._determine_seed_values(seed_values=seed_values)
try:
config_values = cls._parse_toml(
file_content.content.decode(), normalized_seed_values
)
except Exception as e:
raise cls.ConfigError(
f"Config file {file_content.path} could not be parsed as TOML:\n {e}"
)
single_file_configs.append(
_SingleFileConfig(
config_path=file_content.path,
content_digest=content_digest,
values=config_values,
),
)
return _ChainedConfig(tuple(reversed(single_file_configs)))
@classmethod
def _parse_toml(
cls, config_content: str, normalized_seed_values: dict[str, str]
) -> _ConfigValues:
"""Attempt to parse as TOML, raising an exception on failure."""
toml_values = cast(Dict[str, Any], toml.loads(config_content))
toml_values["DEFAULT"] = {
**normalized_seed_values,
**toml_values.get("DEFAULT", {}),
}
return _ConfigValues(toml_values)
@staticmethod
def _determine_seed_values(*, seed_values: SeedValues | None = None) -> dict[str, str]:
"""We pre-populate several default values to allow %([key-name])s interpolation.
This sets up those defaults and checks if the user overrode any of the values.
"""
safe_seed_values = seed_values or {}
buildroot = cast(str, safe_seed_values.get("buildroot", get_buildroot()))
all_seed_values: dict[str, str] = {
"buildroot": buildroot,
"homedir": os.path.expanduser("~"),
"user": getpass.getuser(),
}
def update_seed_values(key: str, *, default_dir: str) -> None:
all_seed_values[key] = cast(
str, safe_seed_values.get(key, os.path.join(buildroot, default_dir))
)
update_seed_values("pants_workdir", default_dir=".pants.d")
update_seed_values("pants_distdir", default_dir="dist")
return all_seed_values
def get(self, section, option, type_=str, default=None):
"""Retrieves option from the specified section (or 'DEFAULT') and attempts to parse it as
type.
If the specified section does not exist or is missing a definition for the option, the value
is looked up in the DEFAULT section. If there is still no definition found, the default
value supplied is returned.
"""
if not self.has_option(section, option):
return default
raw_value = self.get_value(section, option)
if issubclass(type_, str):
return raw_value
key = f"{section}.{option}"
return parse_expression(
name=key, val=raw_value, acceptable_types=type_, raise_type=self.ConfigError
)
@abstractmethod
def configs(self) -> Sequence[_SingleFileConfig]:
"""Returns the underlying single-file configs represented by this object."""
@abstractmethod
def sources(self) -> list[str]:
"""Returns the sources of this config as a list of filenames."""
@abstractmethod
def sections(self) -> list[str]:
"""Returns the sections in this config (not including DEFAULT)."""
@abstractmethod
def has_section(self, section: str) -> bool:
"""Returns whether this config has the section."""
@abstractmethod
def has_option(self, section: str, option: str) -> bool:
"""Returns whether this config specified a value for the option."""
@abstractmethod
def get_value(self, section: str, option: str) -> str | None:
"""Returns the value of the option in this config as a string, or None if no value
specified."""
@abstractmethod
def get_source_for_option(self, section: str, option: str) -> str | None:
"""Returns the path to the source file the given option was defined in.
:param section: the scope of the option.
:param option: the name of the option.
:returns: the path to the config file, or None if the option was not defined by a config file.
"""
_TomlPrimitive = Union[bool, int, float, str]
_TomlValue = Union[_TomlPrimitive, List[_TomlPrimitive]]
@dataclass(frozen=True)
class _ConfigValues:
"""The parsed contents of a TOML config file."""
values: dict[str, Any]
@staticmethod
def _is_an_option(option_value: _TomlValue | dict) -> bool:
"""Determine if the value is actually an option belonging to that section.
This handles the special syntax of `my_list_option.add` and `my_list_option.remove`.
"""
if isinstance(option_value, dict):
return "add" in option_value or "remove" in option_value
return True
def _possibly_interpolate_value(
self,
raw_value: str,
*,
option: str,
section: str,
section_values: dict,
) -> str:
"""For any values with %(foo)s, substitute it with the corresponding value from DEFAULT or
the same section."""
def format_str(value: str) -> str:
# Because dictionaries use the symbols `{}`, we must proactively escape the symbols so
# that .format() does not try to improperly interpolate.
escaped_str = value.replace("{", "{{").replace("}", "}}")
new_style_format_str = re.sub(
pattern=r"%\((?P<interpolated>[a-zA-Z_0-9]*)\)s",
repl=r"{\g<interpolated>}",
string=escaped_str,
)
try:
possible_interpolations = {**self.defaults, **section_values}
return new_style_format_str.format(**possible_interpolations)
except KeyError as e:
bad_reference = e.args[0]
raise configparser.InterpolationMissingOptionError(
option,
section,
raw_value,
bad_reference,
)
def recursively_format_str(value: str) -> str:
# It's possible to interpolate with a value that itself has an interpolation. We must
# fully evaluate all expressions for parity with configparser.
match = re.search(r"%\(([a-zA-Z_0-9]*)\)s", value)
if not match:
return value
return recursively_format_str(value=format_str(value))
return recursively_format_str(raw_value)
def _stringify_val(
self,
raw_value: _TomlValue,
*,
option: str,
section: str,
section_values: dict,
interpolate: bool = True,
list_prefix: str | None = None,
) -> str:
"""For parity with configparser, we convert all values back to strings, which allows us to
avoid upstream changes to files like parser.py.
This is clunky. If we drop INI support, we should remove this and use native values
(although we must still support interpolation).
"""
possibly_interpolate = partial(
self._possibly_interpolate_value,
option=option,
section=section,
section_values=section_values,
)
if isinstance(raw_value, str):
return possibly_interpolate(raw_value) if interpolate else raw_value
if isinstance(raw_value, list):
def stringify_list_member(member: _TomlPrimitive) -> str:
if not isinstance(member, str):
return str(member)
interpolated_member = possibly_interpolate(member) if interpolate else member
return f'"{interpolated_member}"'
list_members = ", ".join(stringify_list_member(member) for member in raw_value)
return f"{list_prefix or ''}[{list_members}]"
return str(raw_value)
def _stringify_val_without_interpolation(self, raw_value: _TomlValue) -> str:
return self._stringify_val(
raw_value,
option="",
section="",
section_values={},
interpolate=False,
)
@property
def sections(self) -> list[str]:
return [scope for scope in self.values if scope != "DEFAULT"]
def has_section(self, section: str) -> bool:
return section in self.values
def has_option(self, section: str, option: str) -> bool:
if not self.has_section(section):
return False
return option in self.values[section] or option in self.defaults
def get_value(self, section: str, option: str) -> str | None:
section_values = self.values.get(section)
if section_values is None:
raise configparser.NoSectionError(section)
stringify = partial(
self._stringify_val,
option=option,
section=section,
section_values=section_values,
)
if option not in section_values:
if option in self.defaults:
return stringify(raw_value=self.defaults[option])
raise configparser.NoOptionError(option, section)
option_value = section_values[option]
if not isinstance(option_value, dict):
return stringify(option_value)
# Handle dict options, along with the special `my_list_option.add` and
# `my_list_option.remove` syntax. We only treat `add` and `remove` as the special list
# syntax if the values are lists to reduce the risk of incorrectly special casing.
has_add = isinstance(option_value.get("add"), list)
has_remove = isinstance(option_value.get("remove"), list)
if not has_add and not has_remove:
return stringify(option_value)
add_val = stringify(option_value["add"], list_prefix="+") if has_add else None
remove_val = stringify(option_value["remove"], list_prefix="-") if has_remove else None
if has_add and has_remove:
return f"{add_val},{remove_val}"
if has_add:
return add_val
return remove_val
def options(self, section: str) -> list[str]:
section_values = self.values.get(section)
if section_values is None:
raise configparser.NoSectionError(section)
return [
*section_values.keys(),
*(
default_option
for default_option in self.defaults
if default_option not in section_values
),
]
@property
def defaults(self) -> dict[str, str]:
return {
option: self._stringify_val_without_interpolation(option_val)
for option, option_val in self.values["DEFAULT"].items()
}
@dataclass(frozen=True, eq=False)
class _SingleFileConfig(Config):
"""Config read from a single file."""
config_path: str
content_digest: str
values: _ConfigValues
def configs(self) -> list[_SingleFileConfig]:
return [self]
def sources(self) -> list[str]:
return [self.config_path]
def sections(self) -> list[str]:
return self.values.sections
def has_section(self, section: str) -> bool:
return self.values.has_section(section)
def has_option(self, section: str, option: str) -> bool:
return self.values.has_option(section, option)
def get_value(self, section: str, option: str) -> str | None:
return self.values.get_value(section, option)
def get_source_for_option(self, section: str, option: str) -> str | None:
if self.has_option(section, option):
return self.sources()[0]
return None
def __repr__(self) -> str:
return f"SingleFileConfig({self.config_path})"
def __eq__(self, other: Any) -> bool:
if not isinstance(other, _SingleFileConfig):
return NotImplemented
return self.config_path == other.config_path and self.content_digest == other.content_digest
def __hash__(self) -> int:
return hash(self.content_digest)
@dataclass(frozen=True)
class _ChainedConfig(Config):
"""Config read from multiple sources."""
# Config instances to chain. Later instances take precedence over earlier ones.
chained_configs: tuple[_SingleFileConfig, ...]
@property
def _configs(self) -> tuple[_SingleFileConfig, ...]:
return self.chained_configs
def configs(self) -> tuple[_SingleFileConfig, ...]:
return self.chained_configs
def sources(self) -> list[str]:
# NB: Present the sources in the order we were given them.
return list(itertools.chain.from_iterable(cfg.sources() for cfg in reversed(self._configs)))
def sections(self) -> list[str]:
ret: OrderedSet[str] = OrderedSet()
for cfg in self._configs:
ret.update(cfg.sections())
return list(ret)
def has_section(self, section: str) -> bool:
for cfg in self._configs:
if cfg.has_section(section):
return True
return False
def has_option(self, section: str, option: str) -> bool:
for cfg in self._configs:
if cfg.has_option(section, option):
return True
return False
def get_value(self, section: str, option: str) -> str | None:
for cfg in self._configs:
try:
return cfg.get_value(section, option)
except (configparser.NoSectionError, configparser.NoOptionError):
pass
if not self.has_section(section):
raise configparser.NoSectionError(section)
raise configparser.NoOptionError(option, section)
def get_source_for_option(self, section: str, option: str) -> str | None:
for cfg in self._configs:
if cfg.has_option(section, option):
return cfg.get_source_for_option(section, option)
return None
def __repr__(self) -> str:
return f"ChainedConfig({self.sources()})"
@dataclass(frozen=True)
class TomlSerializer:
"""Convert a dictionary of option scopes -> Python values into TOML understood by Pants.
The constructor expects a dictionary of option scopes to their corresponding values as
represented in Python. For example:
{
"GLOBAL": {
"o1": True,
"o2": "hello",
"o3": [0, 1, 2],
},
"some-subsystem": {
"dict_option": {
"a": 0,
"b": 0,
},
},
}
"""
parsed: Mapping[str, dict[str, int | float | str | bool | list | dict]]
def normalize(self) -> dict:
def normalize_section_value(option, option_value) -> tuple[str, Any]:
# With TOML, we store dict values as strings (for now).
if isinstance(option_value, dict):
option_value = str(option_value)
if option.endswith(".add"):
option = option.rsplit(".", 1)[0]
option_value = f"+{option_value!r}"
elif option.endswith(".remove"):
option = option.rsplit(".", 1)[0]
option_value = f"-{option_value!r}"
return option, option_value
return {
section: dict(
normalize_section_value(option, option_value)
for option, option_value in section_values.items()
)
for section, section_values in self.parsed.items()
}
def serialize(self) -> str:
toml_values = self.normalize()
return toml.dumps(toml_values)
|
import urllib
import jose.jwt
import time
import random
import sys
import requests
from flask import Flask, request, redirect, make_response, jsonify
import subprocess
# seconds until the token expires
TOKEN_EXPIRES = 2
# A mocked out oauth server, which serves all the endpoints needed by the oauth type.
class MockOauthApp:
def __init__(self, port):
self.port = port
# mock flask app
self.app = Flask("mock_oauth_app")
self.app.add_url_rule("/authorize", view_func=self.api_authorize)
self.app.add_url_rule("/oauth/token", view_func=self.api_oauth_token, methods=["POST"])
self.app.add_url_rule("/v2/logout", view_func=self.api_logout)
self.app.add_url_rule("/.well-known/openid-configuration", view_func=self.api_openid_configuration)
self.app.add_url_rule("/.well-known/jwks.json", view_func=self.api_jwks)
def api_authorize(self):
callback = request.args.get("redirect_uri")
state = request.args.get("state")
return redirect(callback + f"?code=fakecode&state={state}")
def api_oauth_token(self):
expires_at = time.time()
headers = dict(alg="RS256", kid="fake_kid")
payload = dict(
name="Fake User", sub="test_user_id", email="fake_user@email.com", email_verified=True, exp=expires_at
)
jwt = jose.jwt.encode(claims=payload, key="mysecret", algorithm="HS256", headers=headers)
r = {
"access_token": f"access-{time.time()}",
"id_token": jwt,
"refresh_token": f"random-{time.time()}",
"scope": "openid profile email offline",
"expires_in": TOKEN_EXPIRES,
"token_type": "Bearer",
"expires_at": expires_at,
}
return make_response(jsonify(r))
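# Note for test authors (illustrative, not used by the endpoint above): the id_token
# is signed with HS256 and the literal key "mysecret", while its header carries the
# fake RS256/"fake_kid" values matching /.well-known/jwks.json. A test that only needs
# the claims can skip verification, e.g.:
#
#   claims = jose.jwt.get_unverified_claims(token_response["id_token"])  # hypothetical response dict
#   assert claims["sub"] == "test_user_id"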
def api_logout(self):
return_to = request.args.get("returnTo")
return redirect(return_to)
def api_openid_configuration(self):
data = dict(jwks_uri=f"http://localhost:{self.port}/.well-known/jwks.json")
return make_response(jsonify(data))
def api_jwks(self):
data = dict(
alg="RS256",
kty="RSA",
use="sig",
kid="fake_kid",
)
return make_response(jsonify(dict(keys=[data])))
class MockOauthServer:
def __init__(self):
self.process = None
self.port = None
self.server_okay = False
def start(self):
self.port = random.randint(10000, 20000)
self.process = subprocess.Popen([sys.executable, __file__, str(self.port)])
# Verify that the mock oauth server is ready (accepting requests) before starting the tests.
self.server_okay = False
for _ in range(5):
try:
response = requests.get(f"http://localhost:{self.port}/.well-known/jwks.json")
if response.status_code == 200:
self.server_okay = True
break
except Exception:
pass
# wait one second and try again
time.sleep(1)
def terminate(self):
self.process.terminate()
def get_auth_token(app):
"""
    Generates an auth token for testing.
    :param app: a chalice app.
    :return: the Set-Cookie header value issued by the OAuth callback.
"""
headers = dict(host="localhost")
response = app.get("/dp/v1/login", headers=headers)
location = response.headers["Location"]
split = urllib.parse.urlsplit(location)
args = dict(urllib.parse.parse_qsl(split.query))
# follow redirect
url = f"/dp/v1/oauth2/callback?code=fakecode&state={args["state"]}"
response = app.get(url, headers=dict(host="localhost", Cookie=response.headers["Set-Cookie"]))
return response.headers["Set-Cookie"]
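# Illustrative usage sketch (`app` is a hypothetical test client exposing .get(), with
# the application under test configured to use the mock server's localhost port as its
# OAuth server):
#
#   mock_server = MockOauthServer()
#   mock_server.start()
#   assert mock_server.server_okay
#   cookie = get_auth_token(app)
#   # send `cookie` as the Cookie header on subsequent authenticated requests
#   mock_server.terminate()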
if __name__ == "__main__":
port = int(sys.argv[1])
mock_app = MockOauthApp(port)
mock_app.app.run(port=port, debug=True)
| import urllib
import jose.jwt
import time
import random
import sys
import requests
from flask import Flask, request, redirect, make_response, jsonify
import subprocess
# seconds until the token expires
TOKEN_EXPIRES = 2
# A mocked out oauth server, which serves all the endpoints needed by the oauth type.
class MockOauthApp:
def __init__(self, port):
self.port = port
# mock flask app
self.app = Flask("mock_oauth_app")
self.app.add_url_rule("/authorize", view_func=self.api_authorize)
self.app.add_url_rule("/oauth/token", view_func=self.api_oauth_token, methods=["POST"])
self.app.add_url_rule("/v2/logout", view_func=self.api_logout)
self.app.add_url_rule("/.well-known/openid-configuration", view_func=self.api_openid_configuration)
self.app.add_url_rule("/.well-known/jwks.json", view_func=self.api_jwks)
def api_authorize(self):
callback = request.args.get("redirect_uri")
state = request.args.get("state")
return redirect(callback + f"?code=fakecode&state={state}")
def api_oauth_token(self):
expires_at = time.time()
headers = dict(alg="RS256", kid="fake_kid")
payload = dict(
name="Fake User", sub="test_user_id", email="fake_user@email.com", email_verified=True, exp=expires_at
)
jwt = jose.jwt.encode(claims=payload, key="mysecret", algorithm="HS256", headers=headers)
r = {
"access_token": f"access-{time.time()}",
"id_token": jwt,
"refresh_token": f"random-{time.time()}",
"scope": "openid profile email offline",
"expires_in": TOKEN_EXPIRES,
"token_type": "Bearer",
"expires_at": expires_at,
}
return make_response(jsonify(r))
def api_logout(self):
return_to = request.args.get("returnTo")
return redirect(return_to)
def api_openid_configuration(self):
data = dict(jwks_uri=f"http://localhost:{self.port}/.well-known/jwks.json")
return make_response(jsonify(data))
def api_jwks(self):
data = dict(
alg="RS256",
kty="RSA",
use="sig",
kid="fake_kid",
)
return make_response(jsonify(dict(keys=[data])))
class MockOauthServer:
def __init__(self):
self.process = None
self.port = None
self.server_okay = False
def start(self):
self.port = random.randint(10000, 20000)
self.process = subprocess.Popen([sys.executable, __file__, str(self.port)])
# Verify that the mock oauth server is ready (accepting requests) before starting the tests.
self.server_okay = False
for _ in range(5):
try:
response = requests.get(f"http://localhost:{self.port}/.well-known/jwks.json")
if response.status_code == 200:
self.server_okay = True
break
except Exception:
pass
# wait one second and try again
time.sleep(1)
def terminate(self):
self.process.terminate()
def get_auth_token(app):
"""
    Generates an auth token for testing.
    :param app: a chalice app.
    :return: the Set-Cookie header value issued by the OAuth callback.
"""
headers = dict(host="localhost")
response = app.get("/dp/v1/login", headers=headers)
location = response.headers["Location"]
split = urllib.parse.urlsplit(location)
args = dict(urllib.parse.parse_qsl(split.query))
# follow redirect
url = f"/dp/v1/oauth2/callback?code=fakecode&state={args['state']}"
response = app.get(url, headers=dict(host="localhost", Cookie=response.headers["Set-Cookie"]))
return response.headers["Set-Cookie"]
if __name__ == "__main__":
port = int(sys.argv[1])
mock_app = MockOauthApp(port)
mock_app.app.run(port=port, debug=True)
|
__author__ = 'Alexandre Calil Martins Fonseca, github: xandao6'
# region TUTORIAL
'''
Go to region 'FOR SCRIPTING' and use the methods in your script!
EXAMPLE OF USAGE:
from wplay.pyppeteerUtils import pyppeteerConfig as pypConfig
from wplay.pyppeteerUtils import pyppeteerSearch as pypSearch
async def my_script(target):
pages, browser = await pypConfig.configure_browser_and_load_whatsapp(pypConfig.websites['whatsapp'])
await pypSearch.search_for_target_and_get_ready_for_conversation(pages[0], target)
message = pypSearch.ask_user_for_message_breakline_mode()
await pypSearch.send_message(pages[0], message)
message2 = pypSearch.ask_user_for_message()
await pypSearch.send_message(pages[0], message2)
'''
# endregion
# region IMPORTS
from wplay.utils.helpers import whatsapp_selectors_dict
from wplay.utils import Logger
from wplay.utils.helpers import logs_path
from pyppeteer.errors import ElementHandleError
# endregion
# region FOR SCRIPTING
async def search_and_select_target(page, target, hide_groups=False):
await __open_new_chat(page)
await __type_in_new_chat_search_bar(page, target)
contact_list_elements_unchecked = await __get_contacts_elements_filtered(page, target)
group_list_elements_unchecked = await __get_groups_elements_filtered(page, target, hide_groups)
contact_titles_unchecked = await __get_contacts_titles_from_elements_unchecked(page, contact_list_elements_unchecked)
group_titles_unchecked = await __get_groups_titles_from_elements_unchecked(page, group_list_elements_unchecked)
contact_list_unchecked = __zip_contact_titles_and_elements_unchecked(
contact_titles_unchecked, contact_list_elements_unchecked)
group_list_unchecked = __zip_group_titles_and_elements_unchecked(
group_titles_unchecked, group_list_elements_unchecked)
contact_tuple = __check_contact_list(target, contact_list_unchecked)
group_tuple = __check_group_list(target, group_list_unchecked)
target_tuple = __get_target_tuple(contact_tuple, group_tuple)
__print_target_tuple(target_tuple)
target_index_choosed = __ask_user_to_choose_the_filtered_target(target_tuple)
choosed_target = __get_choosed_target(target_tuple, target_index_choosed)
await __navigate_to_target(page, choosed_target)
target_focused_title = await __get_focused_target_title(page, target)
if any(choosed_target[0] in i for i in contact_tuple):
complete_target_info = await get_complete_info_on_target(page)
print_complete_target_info(complete_target_info)
await close_contact_info_page(page)
else:
__print_selected_target_title(target_focused_title)
__check_target_focused_title(page, target, target_focused_title)
await __wait_for_message_area(page)
return target_focused_title
async def search_and_select_target_without_new_chat_button(page,target, hide_groups=False):
await __type_in_chat_or_message_search(page,target)
chats_messages_groups_elements_list = await __get_chats_messages_groups_elements(page)
contact_name_index_tuple_list = await __get_contacts_matched_with_query(chats_messages_groups_elements_list)
group_name_index_tuple_list = await __get_groups_matched_with_query(chats_messages_groups_elements_list,hide_groups)
target_tuple = (contact_name_index_tuple_list,group_name_index_tuple_list)
__print_target_tuple(target_tuple)
target_index_chosen = __ask_user_to_choose_the_filtered_target(target_tuple)
#chosen_target will be a tuple (a,b) such that a is the name of the target and b is the
#index of that element in chats_messages_groups_elements_list
chosen_target = __get_choosed_target(target_tuple, target_index_chosen)
await __open_selected_chat(chosen_target[1],chats_messages_groups_elements_list)
target_name = chosen_target[0]
if any(chosen_target[0] in i for i in contact_name_index_tuple_list):
complete_target_info = await get_complete_info_on_target(page)
print_complete_target_info(complete_target_info)
await close_contact_info_page(page)
else:
__print_selected_target_title(target_name)
await __wait_for_message_area(page)
return target_name
# endregion
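# Illustrative usage sketch (the pyppeteer `page` is assumed to come from the caller's
# browser setup; names here are hypothetical):
#
#   async def choose_chat(page, target_name):
#       # selects the chat and waits until the message area is ready
#       focused_title = await search_and_select_target(page, target_name, hide_groups=True)
#       print(f'Now chatting with: {focused_title}')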
#region LOGGER create
logger : Logger = Logger.setup_logger('logs',logs_path/'logs.log')
#endregion
# region SEARCH AND SELECT TARGET
async def __type_in_chat_or_message_search(page,target):
try:
print(f'Looking for: {target}')
await page.waitForSelector(
whatsapp_selectors_dict['chat_or_message_search'],
visible=True,
timeout=0
)
await page.waitFor(500)
await page.type(whatsapp_selectors_dict['chat_or_message_search'], target)
await page.waitFor(3000)
except Exception as e:
print(e)
async def __get_chats_messages_groups_elements(page):
    chats_messages_groups_elements_list = []  # list of chat/group/message ElementHandle objects
try:
chats_messages_groups_elements_list = await page.querySelectorAll\
(whatsapp_selectors_dict['chats_groups_messages_elements'])
return chats_messages_groups_elements_list
except Exception as e:
print(e)
exit()
async def __get_contacts_matched_with_query(chats_groups_messages_elements_list):
    contacts_to_choose_from = []  # list of (contact_name, index) tuples
get_contact_node_title_function = 'node => node.parentNode.getAttribute("title")'
for idx, element in enumerate(chats_groups_messages_elements_list):
try:
contact_name = await element.querySelectorEval(whatsapp_selectors_dict['contact_element'],get_contact_node_title_function)
contacts_to_choose_from.append((contact_name,idx))
except ElementHandleError:
# if it is not a contact element, move to the next one
continue
except Exception as e:
print(e)
return contacts_to_choose_from
async def __get_groups_matched_with_query(chats_groups_messages_elements_list,hide_groups):
groups_to_choose_from = []
if hide_groups:
return groups_to_choose_from
get_group_node_title_function = 'node => node.parentNode.getAttribute("title")'
for idx, element in enumerate(chats_groups_messages_elements_list):
try:
group_name = await element.querySelectorEval(whatsapp_selectors_dict['group_element'],
get_group_node_title_function)
groups_to_choose_from.append((group_name,idx))
except ElementHandleError:
            # if it is not a group element, move to the next one
continue
except Exception as e:
print(e)
return groups_to_choose_from
async def __open_selected_chat(target_index,chats_messages_groups_elements_list):
try:
await chats_messages_groups_elements_list[target_index].click()
except Exception as e:
print(f"This target doesn't exist! Error: {str(e)}")
exit()
async def get_complete_info_on_target(page):
contact_page_elements = []
try:
await page.waitForSelector(
whatsapp_selectors_dict['target_chat_header'],
visible=True,
timeout=3000
)
await page.click(whatsapp_selectors_dict['target_chat_header'])
contact_page_elements = await get_contact_page_elements(page)
complete_target_info = {}
await get_contact_name_info(contact_page_elements[0], complete_target_info)
await get_contact_about_and_phone(contact_page_elements[3], complete_target_info)
await get_contact_groups_common_with_target(complete_target_info, page)
except Exception as e:
print(e)
return complete_target_info
async def get_contact_page_elements(page):
contact_page_elements = []
try:
await page.waitForSelector(
whatsapp_selectors_dict['contact_info_page_elements'],
visible=True,
timeout=8000
)
contact_page_elements = await page.querySelectorAll(whatsapp_selectors_dict['contact_info_page_elements'])
except Exception as e:
print(e)
return contact_page_elements
async def get_contact_name_info(contact_name_element,complete_target_info):
try:
complete_target_info['Name'] = await contact_name_element.querySelectorEval('span > span', 'element => element.innerText')
complete_target_info['Last_seen'] = await contact_name_element.querySelectorEval('div > span:last-of-type > div > span', 'element => element.getAttribute("title")')
except:
        print('last seen not available')
async def get_contact_about_and_phone(contact_name_element, complete_target_info):
try:
complete_target_info['About'] = await contact_name_element.querySelectorEval('div:nth-child(2) > div > div > span > span', 'element => element.getAttribute("title")')
complete_target_info['Mobile'] = await contact_name_element.querySelectorEval('div:last-of-type > div > div > span > span', 'element => element.innerText')
except Exception as e:
print(e)
async def get_contact_groups_common_with_target(complete_target_info,page):
try:
await page.waitForSelector(
whatsapp_selectors_dict['contact_info_page_group_element_heading'],
visible= True,
timeout=3000
)
if (await page.evaluate(f'document.querySelector("{whatsapp_selectors_dict['contact_info_page_group_element_heading']}").innerText'))\
== "Groups in common":
group_elements = await page.querySelectorAll(whatsapp_selectors_dict['contact_info_page_group_elements'])
complete_target_info['Groups'] = [await ele.querySelectorEval('div>div>div:nth-child(2)>div:first-child>div>div>span', 'e => e.getAttribute("title")') for ele in group_elements]
else:
complete_target_info['Groups'] = []
except:
complete_target_info['Groups'] = []
        print('No groups in common')
async def close_contact_info_page(page):
try:
await page.waitForSelector(
whatsapp_selectors_dict['contact_info_page_close_button'],
visible = True,
timeout = 5000
)
await page.click(whatsapp_selectors_dict['contact_info_page_close_button'])
except Exception as e:
print(e)
def print_complete_target_info(complete_target_info):
for key in complete_target_info.keys():
if key == "Groups":
print("Groups:")
print(*complete_target_info[key], sep=",")
else:
print(f'{key}: {complete_target_info[key]} ')
async def __open_new_chat(page):
await page.waitForSelector(
whatsapp_selectors_dict['new_chat_button'],
visible=True,
timeout=0
)
await page.waitFor(500)
await page.click(whatsapp_selectors_dict['new_chat_button'])
async def __type_in_new_chat_search_bar(page, target):
print(f'Looking for: {target}')
logger.info('Searching Target')
await page.waitForSelector(
whatsapp_selectors_dict['search_contact_input_new_chat'],
visible=True
)
await page.type(whatsapp_selectors_dict['search_contact_input_new_chat'], target)
await page.waitFor(3000)
async def __get_contacts_elements_filtered(page, target):
contact_list_elements_unchecked = list()
try:
await page.waitForSelector(
whatsapp_selectors_dict['contact_list_elements_filtered_new_chat'],
visible=True,
timeout=3000
)
contact_list_elements_unchecked = await page.querySelectorAll(
whatsapp_selectors_dict['contact_list_elements_filtered_new_chat']
)
except:
print(f'No contact named by "{target}"!')
logger.info('Target not found')
return contact_list_elements_unchecked
async def __get_groups_elements_filtered(page, target, hide_groups=False):
group_list_elements_unchecked = list()
if hide_groups:
return group_list_elements_unchecked
try:
await page.waitForSelector(
whatsapp_selectors_dict['group_list_elements_filtered_new_chat'],
visible=True,
timeout=3000
)
group_list_elements_unchecked = await page.querySelectorAll(
whatsapp_selectors_dict['group_list_elements_filtered_new_chat']
)
except:
print(f'No group named by "{target}"!')
logger.info('Target not found in groups')
return group_list_elements_unchecked
async def __get_contacts_titles_from_elements_unchecked(page, contact_list_elements_unchecked):
contact_titles_unchecked = []
for i in range(len(contact_list_elements_unchecked)):
contact_titles_unchecked\
.append(await page.evaluate(f'document.querySelectorAll("{whatsapp_selectors_dict['contact_list_elements_filtered_new_chat']}")[{i}].getAttribute("title")'))
return contact_titles_unchecked
async def __get_groups_titles_from_elements_unchecked(page, group_list_elements_unchecked):
group_titles_unchecked = []
for i in range(len(group_list_elements_unchecked)):
group_titles_unchecked.append(await page.evaluate(f'document.querySelectorAll("{whatsapp_selectors_dict['group_list_elements_filtered_new_chat']}")[{i}].getAttribute("title")'))
return group_titles_unchecked
# contact_list_unchecked is a zip (list of tuples) of contact_titles and
# contact elements, unchecked.
def __zip_contact_titles_and_elements_unchecked(contact_titles_unchecked, contact_list_elements_unchecked):
contact_list_unchecked = list(zip(contact_titles_unchecked, contact_list_elements_unchecked))
return contact_list_unchecked
def __zip_group_titles_and_elements_unchecked(group_titles_unchecked, group_list_elements_unchecked):
group_list_unchecked = list(zip(group_titles_unchecked, group_list_elements_unchecked))
return group_list_unchecked
# __check_contact_list verifies that the target is in the title; if not, we pop it from the list
def __check_contact_list(target, contact_list_unchecked):
i = 0
while i < len(contact_list_unchecked):
if len(contact_list_unchecked) <= 0:
break
# we can add more verifications if we are getting false-positive contacts
if contact_list_unchecked[i][0].lower().find(target.lower()) == -1:
try:
contact_list_unchecked.pop(i)
except Exception as e:
print(f'Error: {str(e)}')
i -= 1
i += 1
contact_tuple = tuple(contact_list_unchecked)
return contact_tuple
def __check_group_list(target, group_list_unchecked):
i = 0
while i < len(group_list_unchecked):
if len(group_list_unchecked) <= 0:
break
# we can add more verifications if we are getting false-positive groups
if group_list_unchecked[i][0].lower().find(target.lower()) == -1:
try:
group_list_unchecked.pop(i)
except Exception as e:
print(f'Error: {str(e)}')
i -= 1
i += 1
group_tuple = tuple(group_list_unchecked)
return group_tuple
# target_tuple looks like this: (((0, 'a'), (1, 'b')), ((3, 'c'), (4, 'd'))),
# but instead of numbers and letters we have titles and elements;
# the first index holds the contacts and the second the groups
def __get_target_tuple(contact_tuple, group_tuple):
target_tuple = (contact_tuple, group_tuple)
return target_tuple
def __print_target_tuple(target_tuple):
lenght_of_contacts_tuple = len(target_tuple[0])
lenght_of_groups_tuple = len(target_tuple[1])
for i in range(lenght_of_contacts_tuple):
if lenght_of_contacts_tuple <= 0:
break
if i == 0:
print("Contacts found:")
logger.info('List of Targets')
print(f'{i}: {target_tuple[0][i][0]}')
for i in range(lenght_of_contacts_tuple, lenght_of_groups_tuple + lenght_of_contacts_tuple):
if lenght_of_groups_tuple <= 0:
break
if i == lenght_of_contacts_tuple:
print("Groups found:")
logger.info('List of Target in groups')
print(f'{i}: {target_tuple[1][i-lenght_of_contacts_tuple][0]}')
def __ask_user_to_choose_the_filtered_target(target_tuple):
if len(target_tuple[0] + target_tuple[1]) > 0:
logger.info('Input Target Number')
target_index_choosed = int(
input('Enter the number of the target you wish to choose: '))
return target_index_choosed
def __get_choosed_target(target_tuple, target_index_choosed):
lenght_of_contacts_tuple = len(target_tuple[0])
if target_index_choosed is None:
exit()
try:
if target_index_choosed < lenght_of_contacts_tuple:
choosed_target = target_tuple[0][target_index_choosed]
elif target_index_choosed >= lenght_of_contacts_tuple:
choosed_target = target_tuple[1][target_index_choosed - lenght_of_contacts_tuple]
else:
print("This target doesn't exist!")
logger.error('Invalid Target')
exit()
except Exception as e:
print(f"This target doesn't exist! Error: {str(e)}")
logger.error('Invalid Target')
exit()
return choosed_target
async def __navigate_to_target(page, choosed_target):
try:
await choosed_target[1].click()
except Exception as e:
print(f"This target doesn't exist! Error: {str(e)}")
logger.error('Invalid Target')
exit()
async def __get_focused_target_title(page, target):
try:
await page.waitForSelector(whatsapp_selectors_dict['target_focused_title'])
target_focused_title = await page.evaluate(f'document.querySelector("{whatsapp_selectors_dict['target_focused_title']}").getAttribute("title")')
except Exception as e:
print(f'No target selected! Error: {str(e)}')
logger.error('Target not selected from list')
exit()
return target_focused_title
def __print_selected_target_title(target_focused_title):
print(f"You've selected the target named by: {target_focused_title}")
logger.info('Selected Target')
def __check_target_focused_title(page, target, target_focused_title):
if target_focused_title.lower().find(target.lower()) == -1:
print(f"You're focused in the wrong target, {target_focused_title}")
must_continue = str(input("Do you want to continue (yes/no)? "))
accepted_yes = {'yes', 'y'}
if must_continue.lower() in accepted_yes:
pass
else:
exit()
async def __wait_for_message_area(page):
try:
await page.waitForSelector(whatsapp_selectors_dict['message_area'])
except Exception as e:
print(f"You don't belong this group anymore! Error: {str(e)}")
# endregion
| __author__ = 'Alexandre Calil Martins Fonseca, github: xandao6'
# region TUTORIAL
'''
Go to region 'FOR SCRIPTING' and use the methods in your script!
EXAMPLE OF USAGE:
from wplay.pyppeteerUtils import pyppeteerConfig as pypConfig
from wplay.pyppeteerUtils import pyppeteerSearch as pypSearch
async def my_script(target):
pages, browser = await pypConfig.configure_browser_and_load_whatsapp(pypConfig.websites['whatsapp'])
await pypSearch.search_for_target_and_get_ready_for_conversation(pages[0], target)
message = pypSearch.ask_user_for_message_breakline_mode()
await pypSearch.send_message(pages[0], message)
message2 = pypSearch.ask_user_for_message()
await pypSearch.send_message(pages[0], message2)
'''
# endregion
# region IMPORTS
from wplay.utils.helpers import whatsapp_selectors_dict
from wplay.utils import Logger
from wplay.utils.helpers import logs_path
from pyppeteer.errors import ElementHandleError
# endregion
# region FOR SCRIPTING
async def search_and_select_target(page, target, hide_groups=False):
await __open_new_chat(page)
await __type_in_new_chat_search_bar(page, target)
contact_list_elements_unchecked = await __get_contacts_elements_filtered(page, target)
group_list_elements_unchecked = await __get_groups_elements_filtered(page, target, hide_groups)
contact_titles_unchecked = await __get_contacts_titles_from_elements_unchecked(page, contact_list_elements_unchecked)
group_titles_unchecked = await __get_groups_titles_from_elements_unchecked(page, group_list_elements_unchecked)
contact_list_unchecked = __zip_contact_titles_and_elements_unchecked(
contact_titles_unchecked, contact_list_elements_unchecked)
group_list_unchecked = __zip_group_titles_and_elements_unchecked(
group_titles_unchecked, group_list_elements_unchecked)
contact_tuple = __check_contact_list(target, contact_list_unchecked)
group_tuple = __check_group_list(target, group_list_unchecked)
target_tuple = __get_target_tuple(contact_tuple, group_tuple)
__print_target_tuple(target_tuple)
target_index_choosed = __ask_user_to_choose_the_filtered_target(target_tuple)
choosed_target = __get_choosed_target(target_tuple, target_index_choosed)
await __navigate_to_target(page, choosed_target)
target_focused_title = await __get_focused_target_title(page, target)
if any(choosed_target[0] in i for i in contact_tuple):
complete_target_info = await get_complete_info_on_target(page)
print_complete_target_info(complete_target_info)
await close_contact_info_page(page)
else:
__print_selected_target_title(target_focused_title)
__check_target_focused_title(page, target, target_focused_title)
await __wait_for_message_area(page)
return target_focused_title
async def search_and_select_target_without_new_chat_button(page,target, hide_groups=False):
await __type_in_chat_or_message_search(page,target)
chats_messages_groups_elements_list = await __get_chats_messages_groups_elements(page)
contact_name_index_tuple_list = await __get_contacts_matched_with_query(chats_messages_groups_elements_list)
group_name_index_tuple_list = await __get_groups_matched_with_query(chats_messages_groups_elements_list,hide_groups)
target_tuple = (contact_name_index_tuple_list,group_name_index_tuple_list)
__print_target_tuple(target_tuple)
target_index_chosen = __ask_user_to_choose_the_filtered_target(target_tuple)
#chosen_target will be a tuple (a,b) such that a is the name of the target and b is the
#index of that element in chats_messages_groups_elements_list
chosen_target = __get_choosed_target(target_tuple, target_index_chosen)
await __open_selected_chat(chosen_target[1],chats_messages_groups_elements_list)
target_name = chosen_target[0]
if any(chosen_target[0] in i for i in contact_name_index_tuple_list):
complete_target_info = await get_complete_info_on_target(page)
print_complete_target_info(complete_target_info)
await close_contact_info_page(page)
else:
__print_selected_target_title(target_name)
await __wait_for_message_area(page)
return target_name
# endregion
#region LOGGER create
logger : Logger = Logger.setup_logger('logs',logs_path/'logs.log')
#endregion
# region SEARCH AND SELECT TARGET
async def __type_in_chat_or_message_search(page,target):
try:
print(f'Looking for: {target}')
await page.waitForSelector(
whatsapp_selectors_dict['chat_or_message_search'],
visible=True,
timeout=0
)
await page.waitFor(500)
await page.type(whatsapp_selectors_dict['chat_or_message_search'], target)
await page.waitFor(3000)
except Exception as e:
print(e)
async def __get_chats_messages_groups_elements(page):
    chats_messages_groups_elements_list = []  # list of chat/group/message ElementHandle objects
try:
chats_messages_groups_elements_list = await page.querySelectorAll\
(whatsapp_selectors_dict['chats_groups_messages_elements'])
return chats_messages_groups_elements_list
except Exception as e:
print(e)
exit()
async def __get_contacts_matched_with_query(chats_groups_messages_elements_list):
    contacts_to_choose_from = []  # list of (contact_name, index) tuples
get_contact_node_title_function = 'node => node.parentNode.getAttribute("title")'
for idx, element in enumerate(chats_groups_messages_elements_list):
try:
contact_name = await element.querySelectorEval(whatsapp_selectors_dict['contact_element'],get_contact_node_title_function)
contacts_to_choose_from.append((contact_name,idx))
except ElementHandleError:
# if it is not a contact element, move to the next one
continue
except Exception as e:
print(e)
return contacts_to_choose_from
async def __get_groups_matched_with_query(chats_groups_messages_elements_list,hide_groups):
groups_to_choose_from = []
if hide_groups:
return groups_to_choose_from
get_group_node_title_function = 'node => node.parentNode.getAttribute("title")'
for idx, element in enumerate(chats_groups_messages_elements_list):
try:
group_name = await element.querySelectorEval(whatsapp_selectors_dict['group_element'],
get_group_node_title_function)
groups_to_choose_from.append((group_name,idx))
except ElementHandleError:
            # if it is not a group element, move to the next one
continue
except Exception as e:
print(e)
return groups_to_choose_from
async def __open_selected_chat(target_index,chats_messages_groups_elements_list):
try:
await chats_messages_groups_elements_list[target_index].click()
except Exception as e:
print(f"This target doesn't exist! Error: {str(e)}")
exit()
async def get_complete_info_on_target(page):
contact_page_elements = []
try:
await page.waitForSelector(
whatsapp_selectors_dict['target_chat_header'],
visible=True,
timeout=3000
)
await page.click(whatsapp_selectors_dict['target_chat_header'])
contact_page_elements = await get_contact_page_elements(page)
complete_target_info = {}
await get_contact_name_info(contact_page_elements[0], complete_target_info)
await get_contact_about_and_phone(contact_page_elements[3], complete_target_info)
await get_contact_groups_common_with_target(complete_target_info, page)
except Exception as e:
print(e)
return complete_target_info
async def get_contact_page_elements(page):
contact_page_elements = []
try:
await page.waitForSelector(
whatsapp_selectors_dict['contact_info_page_elements'],
visible=True,
timeout=8000
)
contact_page_elements = await page.querySelectorAll(whatsapp_selectors_dict['contact_info_page_elements'])
except Exception as e:
print(e)
return contact_page_elements
async def get_contact_name_info(contact_name_element,complete_target_info):
try:
complete_target_info['Name'] = await contact_name_element.querySelectorEval('span > span', 'element => element.innerText')
complete_target_info['Last_seen'] = await contact_name_element.querySelectorEval('div > span:last-of-type > div > span', 'element => element.getAttribute("title")')
except:
        print('last seen not available')
async def get_contact_about_and_phone(contact_name_element, complete_target_info):
try:
complete_target_info['About'] = await contact_name_element.querySelectorEval('div:nth-child(2) > div > div > span > span', 'element => element.getAttribute("title")')
complete_target_info['Mobile'] = await contact_name_element.querySelectorEval('div:last-of-type > div > div > span > span', 'element => element.innerText')
except Exception as e:
print(e)
async def get_contact_groups_common_with_target(complete_target_info,page):
try:
await page.waitForSelector(
whatsapp_selectors_dict['contact_info_page_group_element_heading'],
visible= True,
timeout=3000
)
if (await page.evaluate(f'document.querySelector("{whatsapp_selectors_dict["contact_info_page_group_element_heading"]}").innerText'))\
== "Groups in common":
group_elements = await page.querySelectorAll(whatsapp_selectors_dict['contact_info_page_group_elements'])
complete_target_info['Groups'] = [await ele.querySelectorEval('div>div>div:nth-child(2)>div:first-child>div>div>span', 'e => e.getAttribute("title")') for ele in group_elements]
else:
complete_target_info['Groups'] = []
except:
complete_target_info['Groups'] = []
        print('No groups in common')
async def close_contact_info_page(page):
try:
await page.waitForSelector(
whatsapp_selectors_dict['contact_info_page_close_button'],
visible = True,
timeout = 5000
)
await page.click(whatsapp_selectors_dict['contact_info_page_close_button'])
except Exception as e:
print(e)
def print_complete_target_info(complete_target_info):
for key in complete_target_info.keys():
if key == "Groups":
print("Groups:")
print(*complete_target_info[key], sep=",")
else:
print(f'{key}: {complete_target_info[key]} ')
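# Illustrative sketch only: the shape of the dict built by
# get_complete_info_on_target() and printed above. The keys follow the
# assignments made in the helpers; the values here are made-up placeholders.
#
#   {
#       'Name': 'Jane Example',
#       'Last_seen': 'last seen today at 10:23',
#       'About': 'Hey there! I am using WhatsApp.',
#       'Mobile': '+00 000 0000',
#       'Groups': ['Example group A', 'Example group B'],
#   }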
async def __open_new_chat(page):
await page.waitForSelector(
whatsapp_selectors_dict['new_chat_button'],
visible=True,
timeout=0
)
await page.waitFor(500)
await page.click(whatsapp_selectors_dict['new_chat_button'])
async def __type_in_new_chat_search_bar(page, target):
print(f'Looking for: {target}')
logger.info('Searching Target')
await page.waitForSelector(
whatsapp_selectors_dict['search_contact_input_new_chat'],
visible=True
)
await page.type(whatsapp_selectors_dict['search_contact_input_new_chat'], target)
await page.waitFor(3000)
async def __get_contacts_elements_filtered(page, target):
contact_list_elements_unchecked = list()
try:
await page.waitForSelector(
whatsapp_selectors_dict['contact_list_elements_filtered_new_chat'],
visible=True,
timeout=3000
)
contact_list_elements_unchecked = await page.querySelectorAll(
whatsapp_selectors_dict['contact_list_elements_filtered_new_chat']
)
    except Exception:
        print(f'No contact named "{target}"!')
        logger.info('Target not found')
return contact_list_elements_unchecked
async def __get_groups_elements_filtered(page, target, hide_groups=False):
group_list_elements_unchecked = list()
if hide_groups:
return group_list_elements_unchecked
try:
await page.waitForSelector(
whatsapp_selectors_dict['group_list_elements_filtered_new_chat'],
visible=True,
timeout=3000
)
group_list_elements_unchecked = await page.querySelectorAll(
whatsapp_selectors_dict['group_list_elements_filtered_new_chat']
)
    except Exception:
        print(f'No group named "{target}"!')
        logger.info('Target not found in groups')
return group_list_elements_unchecked
async def __get_contacts_titles_from_elements_unchecked(page, contact_list_elements_unchecked):
contact_titles_unchecked = []
for i in range(len(contact_list_elements_unchecked)):
contact_titles_unchecked\
.append(await page.evaluate(f'document.querySelectorAll("{whatsapp_selectors_dict["contact_list_elements_filtered_new_chat"]}")[{i}].getAttribute("title")'))
return contact_titles_unchecked
async def __get_groups_titles_from_elements_unchecked(page, group_list_elements_unchecked):
group_titles_unchecked = []
for i in range(len(group_list_elements_unchecked)):
group_titles_unchecked.append(await page.evaluate(f'document.querySelectorAll("{whatsapp_selectors_dict["group_list_elements_filtered_new_chat"]}")[{i}].getAttribute("title")'))
return group_titles_unchecked
# contact_list_unchecked is a zip (list of tuples) of contact_titles and
# contact elements, unchecked.
def __zip_contact_titles_and_elements_unchecked(contact_titles_unchecked, contact_list_elements_unchecked):
contact_list_unchecked = list(zip(contact_titles_unchecked, contact_list_elements_unchecked))
return contact_list_unchecked
def __zip_group_titles_and_elements_unchecked(group_titles_unchecked, group_list_elements_unchecked):
group_list_unchecked = list(zip(group_titles_unchecked, group_list_elements_unchecked))
return group_list_unchecked
# __check_contact_list verifies that the target appears in each title; if not, the entry is popped from the list
def __check_contact_list(target, contact_list_unchecked):
i = 0
while i < len(contact_list_unchecked):
if len(contact_list_unchecked) <= 0:
break
# we can add more verifications if we are getting false-positive contacts
if contact_list_unchecked[i][0].lower().find(target.lower()) == -1:
try:
contact_list_unchecked.pop(i)
except Exception as e:
print(f'Error: {str(e)}')
i -= 1
i += 1
contact_tuple = tuple(contact_list_unchecked)
return contact_tuple
def __check_group_list(target, group_list_unchecked):
i = 0
while i < len(group_list_unchecked):
if len(group_list_unchecked) <= 0:
break
# we can add more verifications if we are getting false-positive groups
if group_list_unchecked[i][0].lower().find(target.lower()) == -1:
try:
group_list_unchecked.pop(i)
except Exception as e:
print(f'Error: {str(e)}')
i -= 1
i += 1
group_tuple = tuple(group_list_unchecked)
return group_tuple
# target_tuple looks like this: (((0, 'a'), (1, 'b')), ((3, 'c'), (4, 'd'))),
# but instead of numbers and letters we have titles and elements;
# index 0 holds the contacts and index 1 the groups
def __get_target_tuple(contact_tuple, group_tuple):
target_tuple = (contact_tuple, group_tuple)
return target_tuple
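# Illustrative example of the index mapping used by the helpers below
# (titles and elements are made up): with
#   contact_tuple = (('Alice', elem0), ('Bob', elem1))
#   group_tuple = (('Book club', elem2),)
# the printed menu is 0: Alice, 1: Bob, 2: Book club, so a choice of 2 is
# resolved as target_tuple[1][2 - len(target_tuple[0])].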
def __print_target_tuple(target_tuple):
    length_of_contacts_tuple = len(target_tuple[0])
    length_of_groups_tuple = len(target_tuple[1])
    for i in range(length_of_contacts_tuple):
        if length_of_contacts_tuple <= 0:
            break
        if i == 0:
            print("Contacts found:")
            logger.info('List of Targets')
        print(f'{i}: {target_tuple[0][i][0]}')
    for i in range(length_of_contacts_tuple, length_of_groups_tuple + length_of_contacts_tuple):
        if length_of_groups_tuple <= 0:
            break
        if i == length_of_contacts_tuple:
            print("Groups found:")
            logger.info('List of Target in groups')
        print(f'{i}: {target_tuple[1][i-length_of_contacts_tuple][0]}')
def __ask_user_to_choose_the_filtered_target(target_tuple):
if len(target_tuple[0] + target_tuple[1]) > 0:
logger.info('Input Target Number')
target_index_choosed = int(
input('Enter the number of the target you wish to choose: '))
return target_index_choosed
def __get_choosed_target(target_tuple, target_index_choosed):
    length_of_contacts_tuple = len(target_tuple[0])
    if target_index_choosed is None:
        exit()
    try:
        if target_index_choosed < length_of_contacts_tuple:
            choosed_target = target_tuple[0][target_index_choosed]
        elif target_index_choosed >= length_of_contacts_tuple:
            choosed_target = target_tuple[1][target_index_choosed - length_of_contacts_tuple]
        else:
            print("This target doesn't exist!")
            logger.error('Invalid Target')
            exit()
    except Exception as e:
        print(f"This target doesn't exist! Error: {str(e)}")
        logger.error('Invalid Target')
        exit()
    return choosed_target
async def __navigate_to_target(page, choosed_target):
try:
await choosed_target[1].click()
except Exception as e:
print(f"This target doesn't exist! Error: {str(e)}")
logger.error('Invalid Target')
exit()
async def __get_focused_target_title(page, target):
try:
await page.waitForSelector(whatsapp_selectors_dict['target_focused_title'])
target_focused_title = await page.evaluate(f'document.querySelector("{whatsapp_selectors_dict["target_focused_title"]}").getAttribute("title")')
except Exception as e:
print(f'No target selected! Error: {str(e)}')
logger.error('Target not selected from list')
exit()
return target_focused_title
def __print_selected_target_title(target_focused_title):
print(f"You've selected the target named by: {target_focused_title}")
logger.info('Selected Target')
def __check_target_focused_title(page, target, target_focused_title):
if target_focused_title.lower().find(target.lower()) == -1:
print(f"You're focused in the wrong target, {target_focused_title}")
must_continue = str(input("Do you want to continue (yes/no)? "))
accepted_yes = {'yes', 'y'}
if must_continue.lower() in accepted_yes:
pass
else:
exit()
async def __wait_for_message_area(page):
try:
await page.waitForSelector(whatsapp_selectors_dict['message_area'])
except Exception as e:
print(f"You don't belong this group anymore! Error: {str(e)}")
# endregion
import importlib
import json
import os
import shutil
import subprocess
from pathlib import Path
from shutil import which
from typing import List, Optional, Tuple
from setuptools import find_packages
from typer import Argument, Option, Typer
from .paths import (
GLOBAL_APP_DIR,
GLOBAL_EXTENSIONS_DIR,
GLOBAL_FRONTEND_DIR,
GLOBAL_QUETZ_DIR,
LOCAL_APP_DIR,
)
from .utils import clean_dir, get_extensions_dir, get_federated_extensions
app = Typer()
@app.command()
def link_frontend(
dev_mode: bool = Option(
False, "--development", help="Whether to install it in dev mode or not"
)
) -> None:
"""Intall the Quetz-Frontend"""
assert LOCAL_APP_DIR.exists()
if not GLOBAL_FRONTEND_DIR.exists():
GLOBAL_FRONTEND_DIR.mkdir(parents=True, exist_ok=True)
if GLOBAL_APP_DIR.exists():
if GLOBAL_APP_DIR.is_symlink():
GLOBAL_APP_DIR.unlink()
else:
shutil.rmtree(GLOBAL_APP_DIR)
if dev_mode:
GLOBAL_APP_DIR.symlink_to(LOCAL_APP_DIR)
print(
f"""Symlink created:
Ori: {LOCAL_APP_DIR}
Dest: {GLOBAL_APP_DIR}
"""
)
else:
shutil.copytree(LOCAL_APP_DIR, GLOBAL_APP_DIR, symlinks=True)
print(
f"""App directory copied:
Ori: {LOCAL_APP_DIR}
Dest: {GLOBAL_APP_DIR}
"""
)
@app.command()
def clean_frontend() -> None:
"""Clean the Quetz-Frontend"""
if GLOBAL_APP_DIR.is_file() or GLOBAL_APP_DIR.is_symlink():
GLOBAL_APP_DIR.unlink()
elif GLOBAL_APP_DIR.is_dir():
shutil.rmtree(GLOBAL_APP_DIR)
@app.command()
def install(ext_path: str = Argument(Path(), help="The path of the extension")) -> None:
"""Build and install an extension"""
if not GLOBAL_EXTENSIONS_DIR.exists():
os.mkdir(GLOBAL_EXTENSIONS_DIR)
extension_path = Path(ext_path).resolve()
assert extension_path.joinpath("package.json").exists()
    _build_extension(extension_path, True, False)
module, metadata = _get_extensions_metadata(extension_path)
src = Path(extension_path).joinpath(module.__name__, metadata[0]["src"])
dest = GLOBAL_EXTENSIONS_DIR.joinpath(metadata[0]["dest"])
clean_dir(dest)
shutil.copytree(src, dest, symlinks=True)
print(
f"""
Extension installed:
Path: {dest}
"""
)
@app.command()
def develop(ext_path: str = Argument(Path(), help="The path of the extension")) -> None:
"""Build and install an extension in dev mode"""
if not GLOBAL_EXTENSIONS_DIR.exists():
os.mkdir(GLOBAL_EXTENSIONS_DIR)
extension_path = Path(ext_path).resolve()
assert extension_path.joinpath("package.json").exists()
_build_extension(extension_path, True, False)
_develop_extension(extension_path)
@app.command()
def build(
ext_path: str = Argument(Path(), help="The path of the extension"),
dev_mode: bool = Option(False, "--development", help="Build in development"),
) -> None:
"""Build an extension"""
if not GLOBAL_EXTENSIONS_DIR.exists():
os.mkdir(GLOBAL_EXTENSIONS_DIR)
extension_path = Path(ext_path).resolve()
assert extension_path.joinpath("package.json").exists()
_build_extension(extension_path, dev_mode, False)
@app.command()
def watch(ext_path: str = Argument(Path(), help="The path of the extension")) -> None:
"""Watch an extension"""
if not GLOBAL_EXTENSIONS_DIR.exists():
os.mkdir(GLOBAL_EXTENSIONS_DIR)
extension_path = Path(ext_path).resolve()
assert extension_path.joinpath("package.json").exists()
_develop_extension(extension_path)
_build_extension(extension_path, True, True)
@app.command()
def uninstall(ext_name: str = Argument("", help="The name of the extension")) -> None:
"""Uninstall an extension"""
if not GLOBAL_EXTENSIONS_DIR.exists():
os.mkdir(GLOBAL_EXTENSIONS_DIR)
extension_path = Path(GLOBAL_EXTENSIONS_DIR, ext_name)
clean_dir(extension_path)
@app.command()
def list() -> None:
"""List of extensions"""
print(f"Installed extensions:")
print(f"---------------------")
print(f" Installation path: '{GLOBAL_EXTENSIONS_DIR}'\n")
extensions = get_federated_extensions([get_extensions_dir()])
if not extensions:
print("No installed extensions yet")
for ext in extensions.values():
print(f'\t- {Path(ext['ext_path']).relative_to(GLOBAL_EXTENSIONS_DIR)}')
print()
@app.command()
def clean() -> None:
"""Clean the extensions directory"""
if GLOBAL_EXTENSIONS_DIR.exists():
shutil.rmtree(GLOBAL_EXTENSIONS_DIR)
@app.command()
def paths() -> None:
"""Quetz installation paths"""
print(
f"""
    System configured paths:
Quetz: {GLOBAL_QUETZ_DIR}
Frontend: {GLOBAL_FRONTEND_DIR}
App: {GLOBAL_APP_DIR}
Extensions: {GLOBAL_EXTENSIONS_DIR}
"""
)
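# Illustrative invocations of this Typer app. The console-script name
# "quetz-frontend" is an assumption; substitute whatever entry point this
# module is registered under in your installation.
#
#   quetz-frontend link-frontend --development
#   quetz-frontend build ./my-extension
#   quetz-frontend watch ./my-extension
#   quetz-frontend list
#   quetz-frontend paths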
def _develop_extension(ext_path: Path):
with (ext_path / "package.json").open(encoding="utf-8") as fid:
ext_data = json.load(fid)
_, metadata = _get_extensions_metadata(ext_path)
src = ext_path / ext_data["quetz"].get("outputDir", metadata[0]["src"])
dest = GLOBAL_EXTENSIONS_DIR.joinpath(ext_data["name"])
clean_dir(dest)
# Create parent directory if extension name is scoped
dest.parent.mkdir(parents=True, exist_ok=True)
dest.symlink_to(src)
print(
f"""
Symlink created:
Ori: {src!s}
Dest: {dest!s}
"""
)
def _build_extension(ext_path: Path, dev_mode: bool = False, watch: bool = False):
if not GLOBAL_APP_DIR.joinpath("package.json").exists():
print(f"Quetz frontend not fount at '{GLOBAL_APP_DIR!s}'")
builder_path = _find_builder(ext_path)
if builder_path is None:
print(f"Could not find @quetz-frontend/builder at {ext_path!s}")
print(f"Extensions require a devDependency '@quetz-frontend/builder'")
return
exe = "node"
exe_path = which(exe)
if not exe_path:
print(f"Could not find {exe}. Install NodeJS.")
exit(1)
command = [exe, str(builder_path), "--core-path", str(GLOBAL_APP_DIR.resolve())]
if dev_mode:
command.append("--development")
command.append("--source-map")
if watch:
command.append("--watch")
command.append(str(ext_path))
print("Building extension")
subprocess.check_call(command)
def _find_builder(ext_path: Path) -> Optional[Path]:
"""Find the package '@quetz-frontend/builder' in the extension dependencies"""
with (ext_path / "package.json").open(encoding="utf-8") as fid:
ext_data = json.load(fid)
depVersion2 = ext_data.get("devDependencies", dict()).get("@quetz-frontend/builder")
depVersion2 = depVersion2 or ext_data.get("dependencies", dict()).get(
"@quetz-frontend/builder"
)
if depVersion2 is None:
return None
# Find @quetz-frontend/builder in the node_modules directory
target = ext_path
while not (target / "node_modules" / "@quetz-frontend" / "builder").exists():
if target.parent == target:
return None
target = target.parent
return (
target
/ "node_modules"
/ "@quetz-frontend"
/ "builder"
/ "lib"
/ "build-quetzextension.js"
)
def _get_extensions_metadata(
module_path: Path,
) -> Tuple["importlib.ModuleType", List[str]]:
mod_path = module_path.resolve()
if not mod_path.exists():
raise FileNotFoundError(f"The path `{mod_path!s}` does not exist.")
# TODO: Change function name to match lab
try:
module = importlib.import_module(str(module_path))
if hasattr(module, "js_plugin_paths"):
return module, module.js_plugin_paths()
else:
module = None
except Exception:
module = None
# Looking for modules in the package
packages = find_packages(str(mod_path))
for package in packages:
try:
module = importlib.import_module(package)
if hasattr(module, "js_plugin_paths"):
return module, module.js_plugin_paths()
except Exception:
module = None
    raise ModuleNotFoundError(f"There is no extension at {module_path}")
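# Illustrative sketch of the metadata contract assumed above: an extension
# package exposes a js_plugin_paths() function returning a list of dicts whose
# "src" and "dest" entries are read by install() and _develop_extension().
# The module and paths below are made up.
#
#   # my_quetz_extension/__init__.py
#   def js_plugin_paths():
#       return [{"src": "static", "dest": "@example/my-quetz-extension"}]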
if __name__ == "__main__":
app()
"""Helper functions for the distribution."""
import importlib
import json
import pathlib
import subprocess
import sys
import types
import os
from typing import Optional, List
import requests
import repobee_plug as plug
import _repobee.ext
from _repobee import distinfo
from _repobee import plugin
class DependencyResolutionError(plug.PlugError):
"""Raise when dependency resolution fails during an install."""
def get_installed_plugins_path() -> pathlib.Path:
"""Return the path to the installed_plugins.json file."""
assert distinfo.INSTALL_DIR
return distinfo.INSTALL_DIR / "installed_plugins.json"
def get_installed_plugins(
installed_plugins_path: Optional[pathlib.Path] = None,
) -> dict:
"""Return the public content of the installed_plugins.json file."""
installed_plugins = _get_installed_plugins(installed_plugins_path)
if "_metainfo" in installed_plugins:
del installed_plugins["_metainfo"]
return installed_plugins
def _get_installed_plugins(
installed_plugins_path: Optional[pathlib.Path] = None,
):
"""Return the content of the installed_plugins.json file, with metainfo."""
return json.loads(
(installed_plugins_path or get_installed_plugins_path()).read_text(
"utf8"
)
)
def write_installed_plugins(
installed_plugins: dict,
installed_plugins_path: Optional[pathlib.Path] = None,
) -> None:
"""Write the installed_plugins.json file."""
path = installed_plugins_path or get_installed_plugins_path()
metainfo = _get_installed_plugins(path).get("_metainfo") or {}
metainfo.update(installed_plugins.get("_metainfo") or {})
installed_plugins_write = dict(installed_plugins)
installed_plugins_write["_metainfo"] = metainfo
path.write_text(
json.dumps(installed_plugins_write, indent=4), encoding="utf8"
)
def get_active_plugins(
installed_plugins_path: Optional[pathlib.Path] = None,
) -> List[str]:
"""Read active plugins from the installed_plugins.json file."""
installed_plugins = _get_installed_plugins(installed_plugins_path)
return (installed_plugins.get("_metainfo") or {}).get(
"active_plugins"
) or []
def write_active_plugins(
active_plugins: List[str],
installed_plugins_path: Optional[pathlib.Path] = None,
) -> None:
"""Write the active plugins."""
installed_plugins = _get_installed_plugins(installed_plugins_path)
installed_plugins.setdefault("_metainfo", {})[
"active_plugins"
] = active_plugins
write_installed_plugins(installed_plugins, installed_plugins_path)
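# Illustrative sketch of the installed_plugins.json layout implied by the
# helpers above: plugin entries at the top level plus a private "_metainfo"
# section carrying the active plugin list. The entry shape, names and versions
# shown here are made up.
#
#   {
#       "junit4": {"name": "repobee-junit4", "version": "1.0.0"},
#       "_metainfo": {"active_plugins": ["junit4"]}
#   }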
def get_pip_path() -> pathlib.Path:
"""Return the path to the installed pip binary."""
assert distinfo.INSTALL_DIR
return distinfo.INSTALL_DIR / "env" / "bin" / "pip"
def get_plugins_json(url: str = "https://repobee.org/plugins.json") -> dict:
"""Fetch and parse the plugins.json file.
Args:
url: URL to the plugins.json file.
Returns:
A dictionary with the contents of the plugins.json file.
"""
resp = requests.get(url)
if resp.status_code != 200:
plug.log.error(resp.content.decode("utf8"))
raise plug.PlugError(f"could not fetch plugins.json from '{url}'")
return resp.json()
def get_builtin_plugins(ext_pkg: types.ModuleType = _repobee.ext) -> dict:
"""Returns a dictionary of builting plugins on the same form as the
plugins.json dict.
"""
def _get_plugin_description(name):
return (
importlib.import_module(f"{ext_pkg.__name__}.{name}").__dict__.get(
"PLUGIN_DESCRIPTION"
)
or "-"
)
return {
name: dict(
description=_get_plugin_description(name),
url=f"https://repobee.readthedocs.io/"
f"en/stable/builtins.html#{name}",
versions={"N/A": {}},
builtin=True,
)
for name in plugin.get_module_names(ext_pkg)
}
def pip(command: str, *args, **kwargs) -> subprocess.CompletedProcess:
"""Thin wrapper around the ``pip`` executable in the distribution's virtual
environment.
Args:
command: The command to execute (e.g. "install" or "list").
args: Positional arguments to ``pip``, passed in order. Flags should
also be passed here (e.g. `--pre`)
kwargs: Keyword arguments to ``pip``, passed as ``--key value`` to the
CLI. If the value is ``True``, the argument is passed as a flag,
i.e. as ``--key``.
Returns:
        The completed process; ``returncode`` is zero iff the command succeeded.
Raises:
        DependencyResolutionError: If the 2020-resolver fails to
            resolve dependencies.
"""
cli_kwargs = [
f"--{key.replace("_", "-")}"
# True is interpreted as a flag
+ (f"={val}" if val is not True else "")
for key, val in kwargs.items()
]
env = dict(os.environ)
if command == "install":
# the resolver allows us to avoid installing plugins that are
# incompatible with the current version of RepoBee
cli_kwargs.append("--use-feature=2020-resolver")
# REPOBEE_INSTALL_DIR must be available when upgrading RepoBee,
# or the dist plugins aren't activated
env["REPOBEE_INSTALL_DIR"] = str(distinfo.INSTALL_DIR)
# due to the hack in setup.py to edit the distinfo, we must build
# RepoBee from source
cli_kwargs.append("--no-binary=repobee")
cmd = [str(get_pip_path()), command, *args, *cli_kwargs]
proc = subprocess.run(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
)
if proc.returncode != 0:
stderr = proc.stderr.decode(sys.getdefaultencoding())
plug.log.error(stderr)
if "ResolutionImpossible" in stderr:
raise DependencyResolutionError()
return proc
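# Illustrative example of how pip()'s **kwargs are turned into CLI arguments
# (the plugin name is only for illustration):
#
#   pip("install", "repobee-sanitizer", upgrade=True, no_cache_dir=True)
#   # -> [<pip path>, "install", "repobee-sanitizer",
#   #     "--upgrade", "--no-cache-dir",
#   #     "--use-feature=2020-resolver", "--no-binary=repobee"]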
| """Helper functions for the distribution."""
import importlib
import json
import pathlib
import subprocess
import sys
import types
import os
from typing import Optional, List
import requests
import repobee_plug as plug
import _repobee.ext
from _repobee import distinfo
from _repobee import plugin
class DependencyResolutionError(plug.PlugError):
"""Raise when dependency resolution fails during an install."""
def get_installed_plugins_path() -> pathlib.Path:
"""Return the path to the installed_plugins.json file."""
assert distinfo.INSTALL_DIR
return distinfo.INSTALL_DIR / "installed_plugins.json"
def get_installed_plugins(
installed_plugins_path: Optional[pathlib.Path] = None,
) -> dict:
"""Return the public content of the installed_plugins.json file."""
installed_plugins = _get_installed_plugins(installed_plugins_path)
if "_metainfo" in installed_plugins:
del installed_plugins["_metainfo"]
return installed_plugins
def _get_installed_plugins(
installed_plugins_path: Optional[pathlib.Path] = None,
):
"""Return the content of the installed_plugins.json file, with metainfo."""
return json.loads(
(installed_plugins_path or get_installed_plugins_path()).read_text(
"utf8"
)
)
def write_installed_plugins(
installed_plugins: dict,
installed_plugins_path: Optional[pathlib.Path] = None,
) -> None:
"""Write the installed_plugins.json file."""
path = installed_plugins_path or get_installed_plugins_path()
metainfo = _get_installed_plugins(path).get("_metainfo") or {}
metainfo.update(installed_plugins.get("_metainfo") or {})
installed_plugins_write = dict(installed_plugins)
installed_plugins_write["_metainfo"] = metainfo
path.write_text(
json.dumps(installed_plugins_write, indent=4), encoding="utf8"
)
def get_active_plugins(
installed_plugins_path: Optional[pathlib.Path] = None,
) -> List[str]:
"""Read active plugins from the installed_plugins.json file."""
installed_plugins = _get_installed_plugins(installed_plugins_path)
return (installed_plugins.get("_metainfo") or {}).get(
"active_plugins"
) or []
def write_active_plugins(
active_plugins: List[str],
installed_plugins_path: Optional[pathlib.Path] = None,
) -> None:
"""Write the active plugins."""
installed_plugins = _get_installed_plugins(installed_plugins_path)
installed_plugins.setdefault("_metainfo", {})[
"active_plugins"
] = active_plugins
write_installed_plugins(installed_plugins, installed_plugins_path)
def get_pip_path() -> pathlib.Path:
"""Return the path to the installed pip binary."""
assert distinfo.INSTALL_DIR
return distinfo.INSTALL_DIR / "env" / "bin" / "pip"
def get_plugins_json(url: str = "https://repobee.org/plugins.json") -> dict:
"""Fetch and parse the plugins.json file.
Args:
url: URL to the plugins.json file.
Returns:
A dictionary with the contents of the plugins.json file.
"""
resp = requests.get(url)
if resp.status_code != 200:
plug.log.error(resp.content.decode("utf8"))
raise plug.PlugError(f"could not fetch plugins.json from '{url}'")
return resp.json()
def get_builtin_plugins(ext_pkg: types.ModuleType = _repobee.ext) -> dict:
"""Returns a dictionary of builting plugins on the same form as the
plugins.json dict.
"""
def _get_plugin_description(name):
return (
importlib.import_module(f"{ext_pkg.__name__}.{name}").__dict__.get(
"PLUGIN_DESCRIPTION"
)
or "-"
)
return {
name: dict(
description=_get_plugin_description(name),
url=f"https://repobee.readthedocs.io/"
f"en/stable/builtins.html#{name}",
versions={"N/A": {}},
builtin=True,
)
for name in plugin.get_module_names(ext_pkg)
}
def pip(command: str, *args, **kwargs) -> subprocess.CompletedProcess:
"""Thin wrapper around the ``pip`` executable in the distribution's virtual
environment.
Args:
command: The command to execute (e.g. "install" or "list").
args: Positional arguments to ``pip``, passed in order. Flags should
also be passed here (e.g. `--pre`)
kwargs: Keyword arguments to ``pip``, passed as ``--key value`` to the
CLI. If the value is ``True``, the argument is passed as a flag,
i.e. as ``--key``.
Returns:
True iff the command exited with a zero exit status.
Raises:
DependencyResolutionError: If the 2020-resolver encounters fails to
resolve dependencies.
"""
cli_kwargs = [
f"--{key.replace('_', '-')}"
# True is interpreted as a flag
+ (f"={val}" if val is not True else "")
for key, val in kwargs.items()
]
env = dict(os.environ)
if command == "install":
# the resolver allows us to avoid installing plugins that are
# incompatible with the current version of RepoBee
cli_kwargs.append("--use-feature=2020-resolver")
# REPOBEE_INSTALL_DIR must be available when upgrading RepoBee,
# or the dist plugins aren't activated
env["REPOBEE_INSTALL_DIR"] = str(distinfo.INSTALL_DIR)
# due to the hack in setup.py to edit the distinfo, we must build
# RepoBee from source
cli_kwargs.append("--no-binary=repobee")
cmd = [str(get_pip_path()), command, *args, *cli_kwargs]
proc = subprocess.run(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
)
if proc.returncode != 0:
stderr = proc.stderr.decode(sys.getdefaultencoding())
plug.log.error(stderr)
if "ResolutionImpossible" in stderr:
raise DependencyResolutionError()
return proc
|
#
# Copyright (c) 2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
#
"""
operations related to airspaces and intersections.
"""
from psycopg2 import Error, InternalError
from psycopg2.extensions import AsIs
from psycopg2.extras import DictCursor
from itertools import filterfalse
from functools import reduce
from shapely.wkt import loads
import pru.db.context as ctx
from pru.logger import logger
log = logger(__name__)
def make_point(lon, lat, connection):
"""
Makes a geo point
"""
cursor = connection.cursor()
query = "SELECT ST_MakePoint(%s, %s)"
params = (float(lon), float(lat))
cursor.execute(query, params)
return cursor.fetchone()
def make_augmented_point_from_position(position, flight_id, connection):
"""
    Takes a position tuple and makes an augmented point.
"""
point = make_point(position[1], position[0], connection)
return {'flight_id': flight_id, 'lon': position[1], 'lat': position[0],
'geoPoint': point}
def make_augmented_points_from_positions(latitudes, longitudes, flight_id, connection):
"""
Takes a list of latitudes and a list of longitudes and a flight_id.
Makes a list of augmented points.
"""
return [make_augmented_point_from_position(position, flight_id, connection) for position in zip(latitudes, longitudes)]
def extract_point_list_from_augmented_points(augmented_points):
"""
Given a list or generator of augmented points extract the geo point
representation as a list.
"""
return list(map(lambda augmented_points: augmented_points['geoPoint'],
augmented_points))
def make_line_from_augmented_points(augmented_points, flight_id, connection):
"""
Given a list of augmented points create a geographic line.
"""
if (len(augmented_points) == 0):
log.warning(f"Creating a line from a list of points but the list "
"was empty for flight id {flight_id}.")
return [[]]
cursor = connection.cursor()
query = "SELECT ST_AsEWKT(ST_MakeLine(ARRAY[%s]));"
params = [augmented_points]
cursor.execute(query, params)
return cursor.fetchone()
def find_sectors_intersected_by(line_string, flight_id, min_altitude, max_altitude, context, connection):
"""
Lists the airspace ids and details of those airspaces where the
given line string intersects excluding those that are outside of the range of
altitudes of the trajectory.
"""
log.debug(f"Finding trajectory intersection with airspaces for flight id: {flight_id}")
schema_name = context[ctx.SCHEMA_NAME]
try:
with connection.cursor() as cursor:
query = "SELECT id, av_airspace_id, min_altitude, max_altitude " \
"from %s.sectors where " \
"NOT (max_altitude < %s OR min_altitude > %s) AND " \
"ST_Intersects(wkt, ST_GeographyFromText('SRID=4326;%s'));"
params = [AsIs(schema_name), min_altitude, max_altitude, AsIs(line_string)]
cursor.execute(query, params)
return cursor.fetchall()
except InternalError:
log.exception(f"Failed whist trying to find the intersection between "
"a route with flight id {flight_id} and the airspace model.")
return []
def find_user_sectors_intersected_by(line_string, flight_id, min_altitude, max_altitude, context, connection):
"""
Lists the user defined airspace uids and details of those airspaces where the
given line string intersects.
"""
log.debug(f"Finding trajectory intersection with user defined airspaces for flight id: {flight_id}")
schema_name = context[ctx.SCHEMA_NAME]
try:
with connection.cursor() as cursor:
query = "SELECT id, org_id, min_altitude, max_altitude, user_id, " \
"sector_name from %s.user_defined_sectors where " \
"NOT (max_altitude < %s OR min_altitude > %s) AND " \
"ST_Intersects(wkt, ST_GeographyFromText('SRID=4326;%s'));"
params = [AsIs(schema_name), min_altitude, max_altitude, AsIs(line_string)]
cursor.execute(query, params)
return cursor.fetchall()
except InternalError:
log.exception(f"Failed whist trying to find the intersection between "
"a route with flight id {flight_id} and the airspace model.")
return []
def make_geographic_trajectory(augmented_points, flight_id, connection):
"""
Given a list of augmented points create a geographic line segment.
"""
log.debug(f"Making geo trajectory for flight id: {flight_id}")
return make_line_from_augmented_points(
extract_point_list_from_augmented_points(augmented_points),
flight_id,
connection)[0]
def make_augmented_trajectory(augmented_points, geographic_trajectory, flight_id, min_altitude, max_altitude, connection, is_user_defined=False):
"""
Makes a trajectory augmented with geographic positions and a list of sectors
intersected by the trajectory excluding those that do not meet the altitude range
of the trajectory.
"""
log.debug(f"Creating an augmented trajectory for flight id: {flight_id}")
if not is_user_defined:
sectors = find_sectors_intersected_by(geographic_trajectory, flight_id, min_altitude, max_altitude, ctx.CONTEXT, connection)
else:
sectors = find_user_sectors_intersected_by(geographic_trajectory, flight_id, min_altitude, max_altitude, ctx.CONTEXT, connection)
return {'extendedPoints': augmented_points,
'line': geographic_trajectory,
'sectors': sectors,
'is_user_defined': is_user_defined}
def find_sector(db_ID, connection):
schemaName = ctx.CONTEXT[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute("SELECT id, av_airspace_id, av_icao_state_id, av_name, min_altitude, max_altitude FROM %s.sectors WHERE "
"id = %s",
[AsIs(schemaName), db_ID])
return cursor.fetchone()
def find_sector_identifiers(db_ID, context, connection):
"""
Finds the identifiers for a sector given the db id of the sector.
"""
schemaName = context[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute("SELECT av_airspace_id, av_icao_state_id, av_name FROM %s.sectors WHERE "
"id = %s",
[AsIs(schemaName), db_ID])
return cursor.fetchmany()
def find_airspace_by_database_ID(db_ID, context, connection, is_user_defined=False):
"""
    Finds an airspace with the given database id.
Returns a list, list may be empty.
"""
schemaName = context[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
if is_user_defined:
cursor.execute("SELECT * FROM %s.user_defined_sectors WHERE "
"id = %s", [AsIs(schemaName), db_ID])
return cursor.fetchmany()
else:
cursor.execute("SELECT * FROM %s.sectors WHERE "
"id = %s", [AsIs(schemaName), db_ID])
return cursor.fetchmany()
def originates(first_point, polygon_string, flight_id, sector_id, connection):
"""
If the first point is inside the given sector we determine that the
trajectory originates in the sector.
first_point wkb for the first point of the trajectory
returns True => originates in sectors
"""
cursor = connection.cursor()
query = "SELECT ST_Intersects(%s::geography, %s::geography);"
params = [first_point, polygon_string]
cursor.execute(query, params)
originates = cursor.fetchone()[0]
if originates:
log.debug(f"Flight with id {flight_id} originates in sector {sector_id}")
return originates
def find_line_poly_intersection_without_boundary(lineString, polygonString, connection):
"""
Use the geo db to find the intersections between the linestring and the unbounded polygon string.
The polygon is assumed to _NOT_ have a boundary around it.
"""
query = "SELECT ST_AsText(ST_Intersection(%s::geography, ST_Force2D(ST_Boundary(%s))::geography));"
params = [lineString, polygonString]
try:
with connection.cursor() as cursor:
cursor.execute(query, params)
res = cursor.fetchall()
return {'segmentStrings': res,
'ploygonString': polygonString}
except Error:
log.exception("Failed to find intersection : Error")
return []
def find_line_poly_intersection_with_boundary(lineString, polygonString, connection):
"""
Use the geo db to find the intersections between the linestring and the bounded polygon string.
The polygon is assumed to already have a boundary around it.
"""
query = "SELECT unit.find_intersections(%s, %s)"
params = [lineString, polygonString]
try:
with connection.cursor() as cursor:
cursor.execute(query, params)
res = cursor.fetchall()
return {'segmentStrings': res,
'ploygonString': polygonString}
except Error:
log.exception("Failed to find intersection : Error")
return []
def find_intersections(augmented_trajectory, min_altitude, max_altitude, flight_id, connection):
"""
Finds the points on the trajectory that intersect with the sectors of the
    augmented trajectory.
"""
log.debug(f"Finding intersection for flight id {flight_id}")
first_point = augmented_trajectory['extendedPoints'][0]['geoPoint']
first_point_lon = augmented_trajectory['extendedPoints'][0]['lon']
first_point_lat = augmented_trajectory['extendedPoints'][0]['lat']
is_user_defined = augmented_trajectory['is_user_defined']
# Find each sector
sector_IDs = [sector[0] for sector in augmented_trajectory['sectors']]
log.debug("Found sector ids %s", str(sector_IDs))
sectors = [find_airspace_by_database_ID(str(sector_id),
ctx.CONTEXT,
connection, is_user_defined)[0] for sector_id in sector_IDs]
# Find the points of the trajectory where the trajectory intersects
# with each sector
if is_user_defined:
segments = [{'flight_id': flight_id,
'intersections': find_line_poly_intersection_with_boundary(augmented_trajectory['line'],
sector['bounded_sector'],
connection),
'origin': {'is_origin': originates(first_point, sector['wkt'], flight_id, sector['id'], connection),
'origin_lat': first_point_lat,
'origin_lon': first_point_lon},
'id': sector['id'],
'org_id': sector['org_id'],
'user_id': sector['user_id'],
'sector_name': sector['sector_name'],
'min_altitude': sector['min_altitude'],
'max_altitude': sector['max_altitude'],
'is_cylinder': sector['is_cylinder'],
'is_user_defined': is_user_defined} for sector in sectors]
else:
segments = [{'flight_id': flight_id,
'intersections': find_line_poly_intersection_with_boundary(augmented_trajectory['line'],
sector['bounded_sector'],
connection),
'origin': {'is_origin': originates(first_point, sector['wkt'], flight_id, sector['id'], connection),
'origin_lat': first_point_lat,
'origin_lon': first_point_lon},
'id': sector['id'],
'av_icao_state_id': sector['av_icao_state_id'],
'av_name': sector['av_name'],
'av_airspace_id': sector['av_airspace_id'],
'min_altitude': sector['min_altitude'],
'max_altitude': sector['max_altitude'],
'is_user_defined': is_user_defined} for sector in sectors]
return segments
def extract(sector_id, shape, flight_id):
"""
    Given a shapely shape, determine whether we have a point or a multipoint.
For a point extract the y, x pair as a list of one tuple of sector_id,
latitude and longitude.
For a multipoint return a list of multiple tuples.
"""
if shape.geom_type == 'MultiPoint':
return [(sector_id, p.y, p.x) for p in shape]
elif shape.geom_type == 'Point':
return [(sector_id, shape.y, shape.x)]
else:
log.debug("Unknown geom type : %s in flight id %s and sector_id %s, was %s, skipping", shape.geom_type, flight_id, sector_id, str(shape))
return []
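# Illustrative sketch only (not called by this module): extract() on a Point
# versus a MultiPoint WKT, with made-up sector and flight ids. It assumes a
# shapely version whose MultiPoint supports direct iteration, as the loop in
# extract() does.
def _example_extract_usage():
    single = loads('POINT (10 50)')
    multi = loads('MULTIPOINT (10 50, 11 51)')
    point_rows = extract('sector-1', single, 'flight-1')
    # point_rows == [('sector-1', 50.0, 10.0)]
    multi_rows = extract('sector-1', multi, 'flight-1')
    # multi_rows == [('sector-1', 50.0, 10.0), ('sector-1', 51.0, 11.0)]
    return point_rows, multi_rows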
def extract_details_from_intersection(sector_id, wkt, origin, flight_id):
"""
    Given an intersection wkt use shapely to create the point or multipoint
    object. Then extract the latitudes and longitudes from the (multi)point.
    Returns a list of tuples of sector_id, latitude and longitude.
"""
intersection_tuples = extract(sector_id, loads(wkt), flight_id)
if origin['is_origin']:
# If this sector is an origin sector, add in the lat lons at the start.
intersection_tuples = [(sector_id, origin['origin_lat'], origin['origin_lon'])] + intersection_tuples
return intersection_tuples
def make_sector_description(intersection, is_user_defined=False):
"""
Makes a text description of the sector from the intersection description
"""
if is_user_defined:
return f'{intersection['org_id']}/{intersection['user_id']}/{intersection['sector_name']}'
else:
return f'{intersection['av_icao_state_id']}/{intersection['av_name']}/{intersection['id']}/{intersection['av_airspace_id']}'
def make_sector_identifier(intersection):
"""
Makes a text version of the database id in the given intersection
"""
return f'{intersection['id']}'
def extract_intersection_wkts(intersections):
"""
Given a list of intersection dicts return a list of wkts with sector
descriptive text and the origin details as a tuple.
ie ("some-text-made-from-sector-ids", wkt, {is_origin:False, origin_lat:lat, origin_lon: lon})
"""
return [(make_sector_identifier(intersection),
intersection['intersections']['segmentStrings'][0][0], intersection['origin'])
for intersection in intersections]
def merge_l_t(l, lt):
"""
Merge a list of tuples lt, each of three values into three lists l.
For example: [('a', 'b', 'c'), ('a', 'd', 'e')] ->
[['a', 'a'], ['b', 'd'], ['c', 'e']]
"""
for t in lt:
l[0].append(t[1])
l[1].append(t[2])
l[2].append(t[0])
return l
def create_intersection_data_structure(intersections, flight_id):
"""
Given the intersection data structures create a response tuple.
"""
# The intersection wkts are tuples of the sector_id, the wkt and the origin
# status for the intersection.
intersection_wkts = extract_intersection_wkts(intersections)
intersection_details = [extract_details_from_intersection(*intersection_wkt, flight_id) for intersection_wkt in intersection_wkts]
x_y_sector_ids = reduce(merge_l_t, intersection_details, [[], [], []])
return x_y_sector_ids[0], x_y_sector_ids[1], x_y_sector_ids[2]
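# Illustrative end-to-end sketch (not called by this module) showing how the
# helpers above chain together for a single flight. ``connection`` is assumed
# to be an open psycopg2 connection to the airspace database; the coordinates,
# altitudes and flight id are made-up values.
def _example_intersection_pipeline(connection):
    latitudes = [50.0, 50.5, 51.0]
    longitudes = [4.0, 4.5, 5.0]
    flight_id = 'example-flight'
    points = make_augmented_points_from_positions(latitudes, longitudes, flight_id, connection)
    line = make_geographic_trajectory(points, flight_id, connection)
    trajectory = make_augmented_trajectory(points, line, flight_id, 0, 40000, connection)
    intersections = find_intersections(trajectory, 0, 40000, flight_id, connection)
    return create_intersection_data_structure(intersections, flight_id)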
| #
# Copyright (c) 2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
#
"""
operations related to airspaces and intersections.
"""
from psycopg2 import Error, InternalError
from psycopg2.extensions import AsIs
from psycopg2.extras import DictCursor
from itertools import filterfalse
from functools import reduce
from shapely.wkt import loads
import pru.db.context as ctx
from pru.logger import logger
log = logger(__name__)
def make_point(lon, lat, connection):
"""
Makes a geo point
"""
cursor = connection.cursor()
query = "SELECT ST_MakePoint(%s, %s)"
params = (float(lon), float(lat))
cursor.execute(query, params)
return cursor.fetchone()
def make_augmented_point_from_position(position, flight_id, connection):
"""
Takes a position tuple and makes a augmented point.
"""
point = make_point(position[1], position[0], connection)
return {'flight_id': flight_id, 'lon': position[1], 'lat': position[0],
'geoPoint': point}
def make_augmented_points_from_positions(latitudes, longitudes, flight_id, connection):
"""
Takes a list of latitudes and a list of longitudes and a flight_id.
Makes a list of augmented points.
"""
return [make_augmented_point_from_position(position, flight_id, connection) for position in zip(latitudes, longitudes)]
def extract_point_list_from_augmented_points(augmented_points):
"""
Given a list or generator of augmented points extract the geo point
representation as a list.
"""
return list(map(lambda augmented_points: augmented_points['geoPoint'],
augmented_points))
def make_line_from_augmented_points(augmented_points, flight_id, connection):
"""
Given a list of augmented points create a geographic line.
"""
if (len(augmented_points) == 0):
log.warning(f"Creating a line from a list of points but the list "
"was empty for flight id {flight_id}.")
return [[]]
cursor = connection.cursor()
query = "SELECT ST_AsEWKT(ST_MakeLine(ARRAY[%s]));"
params = [augmented_points]
cursor.execute(query, params)
return cursor.fetchone()
def find_sectors_intersected_by(line_string, flight_id, min_altitude, max_altitude, context, connection):
"""
Lists the airspace ids and details of those airspaces where the
given line string intersects excluding those that are outside of the range of
altitudes of the trajectory.
"""
log.debug(f"Finding trajectory intersection with airspaces for flight id: {flight_id}")
schema_name = context[ctx.SCHEMA_NAME]
try:
with connection.cursor() as cursor:
query = "SELECT id, av_airspace_id, min_altitude, max_altitude " \
"from %s.sectors where " \
"NOT (max_altitude < %s OR min_altitude > %s) AND " \
"ST_Intersects(wkt, ST_GeographyFromText('SRID=4326;%s'));"
params = [AsIs(schema_name), min_altitude, max_altitude, AsIs(line_string)]
cursor.execute(query, params)
return cursor.fetchall()
except InternalError:
log.exception(f"Failed whist trying to find the intersection between "
"a route with flight id {flight_id} and the airspace model.")
return []
def find_user_sectors_intersected_by(line_string, flight_id, min_altitude, max_altitude, context, connection):
"""
Lists the user defined airspace uids and details of those airspaces where the
given line string intersects.
"""
log.debug(f"Finding trajectory intersection with user defined airspaces for flight id: {flight_id}")
schema_name = context[ctx.SCHEMA_NAME]
try:
with connection.cursor() as cursor:
query = "SELECT id, org_id, min_altitude, max_altitude, user_id, " \
"sector_name from %s.user_defined_sectors where " \
"NOT (max_altitude < %s OR min_altitude > %s) AND " \
"ST_Intersects(wkt, ST_GeographyFromText('SRID=4326;%s'));"
params = [AsIs(schema_name), min_altitude, max_altitude, AsIs(line_string)]
cursor.execute(query, params)
return cursor.fetchall()
except InternalError:
log.exception(f"Failed whist trying to find the intersection between "
"a route with flight id {flight_id} and the airspace model.")
return []
def make_geographic_trajectory(augmented_points, flight_id, connection):
"""
Given a list of augmented points create a geographic line segment.
"""
log.debug(f"Making geo trajectory for flight id: {flight_id}")
return make_line_from_augmented_points(
extract_point_list_from_augmented_points(augmented_points),
flight_id,
connection)[0]
def make_augmented_trajectory(augmented_points, geographic_trajectory, flight_id, min_altitude, max_altitude, connection, is_user_defined=False):
"""
Makes a trajectory augmented with geographic positions and a list of sectors
intersected by the trajectory excluding those that do not meet the altitude range
of the trajectory.
"""
log.debug(f"Creating an augmented trajectory for flight id: {flight_id}")
if not is_user_defined:
sectors = find_sectors_intersected_by(geographic_trajectory, flight_id, min_altitude, max_altitude, ctx.CONTEXT, connection)
else:
sectors = find_user_sectors_intersected_by(geographic_trajectory, flight_id, min_altitude, max_altitude, ctx.CONTEXT, connection)
return {'extendedPoints': augmented_points,
'line': geographic_trajectory,
'sectors': sectors,
'is_user_defined': is_user_defined}
def find_sector(db_ID, connection):
schemaName = ctx.CONTEXT[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute("SELECT id, av_airspace_id, av_icao_state_id, av_name, min_altitude, max_altitude FROM %s.sectors WHERE "
"id = %s",
[AsIs(schemaName), db_ID])
return cursor.fetchone()
def find_sector_identifiers(db_ID, context, connection):
"""
Finds the identifiers for a sector given the db id of the sector.
"""
schemaName = context[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute("SELECT av_airspace_id, av_icao_state_id, av_name FROM %s.sectors WHERE "
"id = %s",
[AsIs(schemaName), db_ID])
return cursor.fetchmany()
def find_airspace_by_database_ID(db_ID, context, connection, is_user_defined=False):
"""
Finds an aairspace with the given database id
Returns a list, list may be empty.
"""
schemaName = context[ctx.SCHEMA_NAME]
with connection.cursor(cursor_factory=DictCursor) as cursor:
if is_user_defined:
cursor.execute("SELECT * FROM %s.user_defined_sectors WHERE "
"id = %s", [AsIs(schemaName), db_ID])
return cursor.fetchmany()
else:
cursor.execute("SELECT * FROM %s.sectors WHERE "
"id = %s", [AsIs(schemaName), db_ID])
return cursor.fetchmany()
def originates(first_point, polygon_string, flight_id, sector_id, connection):
"""
If the first point is inside the given sector we determine that the
trajectory originates in the sector.
first_point wkb for the first point of the trajectory
returns True => originates in sectors
"""
cursor = connection.cursor()
query = "SELECT ST_Intersects(%s::geography, %s::geography);"
params = [first_point, polygon_string]
cursor.execute(query, params)
originates = cursor.fetchone()[0]
if originates:
log.debug(f"Flight with id {flight_id} originates in sector {sector_id}")
return originates
def find_line_poly_intersection_without_boundary(lineString, polygonString, connection):
"""
Use the geo db to find the intersections between the linestring and the unbounded polygon string.
The polygon is assumed to _NOT_ have a boundary around it.
"""
query = "SELECT ST_AsText(ST_Intersection(%s::geography, ST_Force2D(ST_Boundary(%s))::geography));"
params = [lineString, polygonString]
try:
with connection.cursor() as cursor:
cursor.execute(query, params)
res = cursor.fetchall()
return {'segmentStrings': res,
'ploygonString': polygonString}
except Error:
log.exception("Failed to find intersection : Error")
return []
def find_line_poly_intersection_with_boundary(lineString, polygonString, connection):
"""
Use the geo db to find the intersections between the linestring and the bounded polygon string.
The polygon is assumed to already have a boundary around it.
"""
query = "SELECT unit.find_intersections(%s, %s)"
params = [lineString, polygonString]
try:
with connection.cursor() as cursor:
cursor.execute(query, params)
res = cursor.fetchall()
return {'segmentStrings': res,
'ploygonString': polygonString}
except Error:
log.exception("Failed to find intersection : Error")
return []
def find_intersections(augmented_trajectory, min_altitude, max_altitude, flight_id, connection):
"""
Finds the points on the trajectory that intersect with the sectors of the
the augmented trajectory.
"""
log.debug(f"Finding intersection for flight id {flight_id}")
first_point = augmented_trajectory['extendedPoints'][0]['geoPoint']
first_point_lon = augmented_trajectory['extendedPoints'][0]['lon']
first_point_lat = augmented_trajectory['extendedPoints'][0]['lat']
is_user_defined = augmented_trajectory['is_user_defined']
# Find each sector
sector_IDs = [sector[0] for sector in augmented_trajectory['sectors']]
log.debug("Found sector ids %s", str(sector_IDs))
sectors = [find_airspace_by_database_ID(str(sector_id),
ctx.CONTEXT,
connection, is_user_defined)[0] for sector_id in sector_IDs]
# Find the points of the trajectory where the trajectory intersects
# with each sector
if is_user_defined:
segments = [{'flight_id': flight_id,
'intersections': find_line_poly_intersection_with_boundary(augmented_trajectory['line'],
sector['bounded_sector'],
connection),
'origin': {'is_origin': originates(first_point, sector['wkt'], flight_id, sector['id'], connection),
'origin_lat': first_point_lat,
'origin_lon': first_point_lon},
'id': sector['id'],
'org_id': sector['org_id'],
'user_id': sector['user_id'],
'sector_name': sector['sector_name'],
'min_altitude': sector['min_altitude'],
'max_altitude': sector['max_altitude'],
'is_cylinder': sector['is_cylinder'],
'is_user_defined': is_user_defined} for sector in sectors]
else:
segments = [{'flight_id': flight_id,
'intersections': find_line_poly_intersection_with_boundary(augmented_trajectory['line'],
sector['bounded_sector'],
connection),
'origin': {'is_origin': originates(first_point, sector['wkt'], flight_id, sector['id'], connection),
'origin_lat': first_point_lat,
'origin_lon': first_point_lon},
'id': sector['id'],
'av_icao_state_id': sector['av_icao_state_id'],
'av_name': sector['av_name'],
'av_airspace_id': sector['av_airspace_id'],
'min_altitude': sector['min_altitude'],
'max_altitude': sector['max_altitude'],
'is_user_defined': is_user_defined} for sector in sectors]
return segments
def extract(sector_id, shape, flight_id):
"""
Given a shapley shape find if we have a point or a multipoint.
For a point extract the y, x pair as a list of one tuple of sector_id,
latitude and longitude.
For a multipoint return a list of multiple tuples.
"""
if shape.geom_type == 'MultiPoint':
return [(sector_id, p.y, p.x) for p in shape]
elif shape.geom_type == 'Point':
return [(sector_id, shape.y, shape.x)]
else:
log.debug("Unknown geom type : %s in flight id %s and sector_id %s, was %s, skipping", shape.geom_type, flight_id, sector_id, str(shape))
return []
def extract_details_from_intersection(sector_id, wkt, origin, flight_id):
"""
Given an intersection wkt use shapley to create the point or multipoint
object. Then extract the latitude and longitudes from the (multi)point.
Returns a list of tuples of sector_id, latiitude and longitude
"""
intersection_tuples = extract(sector_id, loads(wkt), flight_id)
if origin['is_origin']:
# If this sector is an origin sector, add in the lat lons at the start.
intersection_tuples = [(sector_id, origin['origin_lat'], origin['origin_lon'])] + intersection_tuples
return intersection_tuples
def make_sector_description(intersection, is_user_defined=False):
"""
Makes a text description of the sector from the intersection description
"""
if is_user_defined:
return f'{intersection["org_id"]}/{intersection["user_id"]}/{intersection["sector_name"]}'
else:
return f'{intersection["av_icao_state_id"]}/{intersection["av_name"]}/{intersection["id"]}/{intersection["av_airspace_id"]}'
def make_sector_identifier(intersection):
"""
Makes a text version of the database id in the given intersection
"""
return f'{intersection["id"]}'
def extract_intersection_wkts(intersections):
"""
Given a list of intersection dicts return a list of wkts with sector
descriptive text and the origin details as a tuple.
ie ("some-text-made-from-sector-ids", wkt, {is_origin:False, origin_lat:lat, origin_lon: lon})
"""
return [(make_sector_identifier(intersection),
intersection['intersections']['segmentStrings'][0][0], intersection['origin'])
for intersection in intersections]
def merge_l_t(l, lt):
"""
    Merge a list of tuples lt, each of three values, into the three lists in l.
    For example: [('a', 'b', 'c'), ('a', 'd', 'e')] ->
    [['b', 'd'], ['c', 'e'], ['a', 'a']]
"""
for t in lt:
l[0].append(t[1])
l[1].append(t[2])
l[2].append(t[0])
return l
def create_intersection_data_structure(intersections, flight_id):
"""
Given the intersection data structures create a response tuple.
"""
# The intersection wkts are tuples of the sector_id, the wkt and the origin
# status for the intersection.
intersection_wkts = extract_intersection_wkts(intersections)
intersection_details = [extract_details_from_intersection(*intersection_wkt, flight_id) for intersection_wkt in intersection_wkts]
x_y_sector_ids = reduce(merge_l_t, intersection_details, [[], [], []])
return x_y_sector_ids[0], x_y_sector_ids[1], x_y_sector_ids[2]
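# A minimal, hypothetical sketch of how the helpers above compose, assuming the
# module's wkt loads() import is available. The intersection dict below is made
# up for illustration and only mimics the shape built by the intersection code above.
if __name__ == "__main__":
    _example_intersections = [
        {'id': 7,
         'intersections': {'segmentStrings': [['POINT(10.0 50.0)']]},
         'origin': {'is_origin': False, 'origin_lat': 49.5, 'origin_lon': 9.5}},
    ]
    _lats, _lons, _ids = create_intersection_data_structure(_example_intersections, 'FLIGHT-1')
    # Expected: _lats == [50.0], _lons == [10.0], _ids == ['7']
    print(_lats, _lons, _ids)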
|
import builtins
import os
from rich.repr import RichReprResult
import sys
from array import array
from collections import Counter, defaultdict, deque, UserDict, UserList
import dataclasses
from dataclasses import dataclass, fields, is_dataclass
from inspect import isclass
from itertools import islice
import re
from typing import (
DefaultDict,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from types import MappingProxyType
try:
import attr as _attr_module
except ImportError: # pragma: no cover
_attr_module = None # type: ignore
from .highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
# Matches Jupyter's special methods
_re_jupyter_repr = re.compile(f"^_repr_.+_$")
def _is_attr_object(obj: Any) -> bool:
"""Check if an object was created with attrs module."""
return _attr_module is not None and _attr_module.has(type(obj))
def _get_attr_fields(obj: Any) -> Iterable["_attr_module.Attribute[Any]"]:
"""Get fields for an attrs object."""
return _attr_module.fields(type(obj)) if _attr_module is not None else []
def _is_dataclass_repr(obj: object) -> bool:
"""Check if an instance of a dataclass contains the default repr.
Args:
obj (object): A dataclass instance.
Returns:
bool: True if the default repr is used, False if there is a custom repr.
"""
# Digging in to a lot of internals here
# Catching all exceptions in case something is missing on a non CPython implementation
try:
return obj.__repr__.__code__.co_filename == dataclasses.__file__
except Exception: # pragma: no coverage
return False
def install(
console: Optional["Console"] = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> None:
"""Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
"""
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
"""Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
# on jupyter rich display, if using one of the special representations don't use rich
if console.is_jupyter and any(
_re_jupyter_repr.match(attr) for attr in dir(value)
):
return
# certain renderables should start on a new line
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
new_line_start=True,
)
try: # pragma: no cover
ip = get_ipython() # type: ignore
from IPython.core.formatters import BaseFormatter
class RichFormatter(BaseFormatter): # type: ignore
pprint: bool = True
def __call__(self, value: Any) -> Any:
if self.pprint:
return ipy_display_hook(value)
else:
return repr(value)
# replace plain text formatter with rich formatter
rich_formatter = RichFormatter()
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
class Pretty(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
        margin (int, optional): Subtract a margin from width to force containers to expand earlier. Defaults to 0.
insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: Optional["HighlighterType"] = None,
*,
indent_size: int = 4,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify: Optional["JustifyMethod"] = justify
self.overflow: Optional["OverflowMethod"] = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
UserDict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
UserList: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)
def is_expandable(obj: Any) -> bool:
"""Check if an object may be expanded by pretty print."""
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj))
or (hasattr(obj, "__rich_repr__"))
or _is_attr_object(obj)
) and not isclass(obj)
@dataclass
class Node:
"""A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
separator: str = ", "
def iter_tokens(self) -> Iterable[str]:
"""Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield self.separator
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
"""Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object.
"""
lines = [_Line(node=self, is_root=True)]
line_no = 0
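        # Width-driven expansion: scan a growing list of lines, replacing any line
        # that is too wide (or every expandable line when expand_all is set) with
        # the lines produced by _Line.expand(); child lines inserted this way are
        # visited on later iterations, so nested containers expand as needed.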
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
@dataclass
class _Line:
"""A line in repr output."""
parent: Optional["_Line"] = None
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
last: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
new_line = yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
new_line = yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for last, child in loop_last(node.children):
separator = "," if tuple_of_one else node.separator
line = _Line(
parent=new_line,
node=child,
whitespace=child_whitespace,
suffix=separator,
last=last and not tuple_of_one,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix=self.suffix,
last=self.last,
)
def __str__(self) -> str:
if self.last:
return f"{self.whitespace}{self.text}{self.node or ""}"
else:
return (
f"{self.whitespace}{self.text}{self.node or ""}{self.suffix.rstrip()}"
)
def traverse(
_object: Any, max_length: Optional[int] = None, max_string: Optional[int] = None
) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error {str(error)!r}>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
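    # visited_ids tracks the ids of containers and dataclasses on the current
    # traversal path so that self-referencing structures render as "..." instead
    # of recursing forever (see the obj_id checks inside _traverse below).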
def _traverse(obj: Any, root: bool = False) -> Node:
"""Walk the object depth first."""
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
try:
fake_attributes = hasattr(
obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492"
)
except Exception:
fake_attributes = False
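        # An object that claims to have the nonsense attribute above (e.g. a mock
        # that fabricates every attribute) cannot be introspected reliably, so the
        # __rich_repr__ / attrs / dataclass branches below are skipped for it.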
rich_repr_result: Optional[RichReprResult] = None
if not fake_attributes:
try:
if hasattr(obj, "__rich_repr__") and not isclass(obj):
rich_repr_result = obj.__rich_repr__()
except Exception:
pass
if rich_repr_result is not None:
angular = getattr(obj.__rich_repr__, "angular", False)
args = list(iter_rich_args(rich_repr_result))
class_name = obj.__class__.__name__
if args:
children = []
append = children.append
if angular:
node = Node(
open_brace=f"<{class_name} ",
close_brace=">",
children=children,
last=root,
separator=" ",
)
else:
node = Node(
open_brace=f"{class_name}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"<{class_name}>" if angular else f"{class_name}()",
children=[],
last=root,
)
elif _is_attr_object(obj) and not fake_attributes:
children = []
append = children.append
attr_fields = _get_attr_fields(obj)
if attr_fields:
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
def iter_attrs() -> Iterable[
Tuple[str, Any, Optional[Callable[[Any], str]]]
]:
"""Iterate over attr fields and values."""
for attr in attr_fields:
if attr.repr:
try:
value = getattr(obj, attr.name)
except Exception as error:
# Can happen, albeit rarely
yield (attr.name, error, None)
else:
yield (
attr.name,
value,
attr.repr if callable(attr.repr) else None,
)
for last, (name, value, repr_callable) in loop_last(iter_attrs()):
if repr_callable:
child_node = Node(value_repr=str(repr_callable(value)))
else:
child_node = _traverse(value)
child_node.last = last
child_node.key_repr = name
child_node.key_separator = "="
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and not fake_attributes
and (_is_dataclass_repr(obj) or py_version == (3, 6))
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(field for field in fields(obj) if field.repr):
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif isinstance(obj, _CONTAINERS):
for container_type in _CONTAINERS:
if isinstance(obj, container_type):
obj_type = container_type
break
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj_type.__repr__ != type(obj).__repr__:
node = Node(value_repr=to_repr(obj), last=root)
elif obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
def pprint(
_object: Any,
*,
console: Optional["Console"] = None,
indent_guides: bool = True,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> None:
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
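# Usage note (illustrative): pretty_repr() returns the formatted string, while
# pprint() writes it straight to a Console. For example, pprint(list(range(100)), max_length=10)
# abbreviates the list after ten items, and pretty_repr("x" * 500, max_string=20)
# truncates the long string to twenty characters plus a "+480" marker.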
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self) -> str:
1 / 0
return "this will fail"
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
print(Pretty(data, indent_guides=True, max_string=20))
|
import logging
import os
import traceback
from datetime import datetime, time, timezone
from random import Random, choice
import disnake
from disnake.ext import tasks
from disnake.ext.commands import BucketType, cooldown, guild_only
from bot.bot import command, group, has_permissions
from bot.globals import PLAYLISTS
from cogs.cog import Cog
from utils.utilities import read_lines
logger = logging.getLogger('terminal')
class WrestlingGif:
def __init__(self, url, text):
self.url = url
self.text = text
def build_embed(self, author, recipient):
description = self.text.format(author=author, recipient=recipient)
embed = disnake.Embed(description=description)
embed.set_image(url=self.url)
return embed
wrestling_gifs = [
WrestlingGif('https://i.imgur.com/xUi2Vq1.gif', "**{recipient.name}** tries to grab but it fails. **{author.name}** grabs **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/osDWTHG.gif', "**{recipient.name}** tries to escape but **{author.name}** pins them down"),
WrestlingGif('https://i.imgur.com/HS6R463.gif', "**{author.name}** lifts **{recipient.name}** up. **{recipient.name}** is powerless to do anything"),
WrestlingGif('https://i.imgur.com/jbE2XVt.gif', "**{author.name}** challenges **{recipient.name}** to a friendly wrestling match"),
WrestlingGif('https://i.imgur.com/XVUjH9x.gif', "**{recipient.name}** tries to attack but **{author.name}** counters"),
WrestlingGif('https://i.imgur.com/vTeoYAE.gif', "**{author.name}** and **{recipient.name}** engage in a battle of strength"),
WrestlingGif('https://i.imgur.com/iu2kiVy.gif', "**{author.name}** gets a hold of **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/BulkVW1.gif', "**{author.name}** gets **{recipient.name}** with a knee strike"),
WrestlingGif('https://i.imgur.com/zXaIYLp.gif', "**{author.name}** beats **{recipient.name}** down"),
WrestlingGif('https://i.imgur.com/XNOMUcg.gif', "**{author.name}** delivers a low blow to **{recipient.name}**. Nasty strategy"),
WrestlingGif('https://i.imgur.com/oSG0V6a.gif', "**{recipient.name}** gets beaten by **{author.name}**"),
WrestlingGif('https://i.imgur.com/u0H0ZSA.gif', "**{author.name}** grabs **{recipient.name}**s fucking pants <:GWjojoGachiGASM:363025405562585088>"),
WrestlingGif('https://i.imgur.com/VFruiTR.gif', "**{author.name}** flexes on **{recipient.name}** after kicking their ass. WOO"),
WrestlingGif('https://i.imgur.com/YCd1aSo.gif', "**{author.name}** beats **{recipient.name}** up"),
WrestlingGif('https://i.imgur.com/M3sAu23.gif', "**{author.name}** chokes **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/inEROy3.gif', "**{author.name}** throws **{recipient.name}** on the ground"),
WrestlingGif('https://i.imgur.com/8qI8f1M.gif', "**{author.name}** battles **{recipient.name}** in a feat of pure strength"),
WrestlingGif('https://i.imgur.com/xhVIjIt.gif', "**{author.name}** lifts **{recipient.name}** up"),
WrestlingGif('https://i.imgur.com/RW07zr0.gif', "**{author.name}** escapes the choke of **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/g6wVGpG.gif', "**{author.name}** escapes **{recipient.name}**s grab and begins a counter-attack"),
WrestlingGif('https://i.imgur.com/LKHtUeo.gif', "**{author.name}** gets a hold of **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/eCCAKoA.gif', "It's time to wrestle"),
WrestlingGif('https://i.imgur.com/ZFiT5Ew.gif', "**{author.name}** lifts **{recipient.name}** up"),
WrestlingGif('https://i.imgur.com/A4Oo0Tp.gif', "**{author.name}** puts **{recipient.name}** down"),
WrestlingGif('https://i.imgur.com/COQlI5t.gif', "**{author.name}** swaps positions with **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/pIaErDy.gif', "**{author.name}** pulls **{recipient.name}**s arms"),
WrestlingGif('https://i.imgur.com/hThhSrl.gif', "**{author.name}** locks **{recipient.name}**s leg"),
WrestlingGif('https://i.imgur.com/goMZvRE.gif', "**{author.name}** turns the tables on **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/3A9eMu0.gif', "**{author.name}** slams **{recipient.name}** on the floor"),
WrestlingGif('https://i.imgur.com/G9Iklxu.gif', "**{author.name}** and **{recipient.name}** are in the middle of an intense battle"),
WrestlingGif('https://i.imgur.com/c1CQBnJ.gif', "**{recipient.name}** gets elbow struck by **{author.name}**"),
WrestlingGif('https://i.imgur.com/cKcOJo0.gif', "**{author.name}** pulls **{recipient.name}**s leg"),
WrestlingGif('https://i.imgur.com/Q41oEne.gif', "**{recipient.name}** gets elbow struck by **{author.name}**"),
WrestlingGif('https://i.imgur.com/AP7MRnF.gif', "**{author.name}** escapes the hold of **{recipient.name}** and is ready for more"),
WrestlingGif('https://i.imgur.com/6khggL1.gif', "**{author.name}** pulls the hair of **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/bq0Bjbl.gif', "**{author.name}** got the moves"),
WrestlingGif('https://i.imgur.com/aIVoytr.gif', "**{author.name}** throws **{recipient.name}** on the ground"),
WrestlingGif('https://i.imgur.com/l137Zzh.gif', "**{recipient.name}** gets elbow struck by **{author.name}**"),
WrestlingGif('https://i.imgur.com/tFZv2j9.gif', "**{recipient.name}** and **{author.name}** engage in a fight. **{author.name}** makes the first move"),
WrestlingGif('https://i.imgur.com/kVXjE3Q.gif', "**{author.name}** pulls **{recipient.name}**'s hands"),
WrestlingGif('https://i.imgur.com/4IsfXSD.gif', "**{author.name}** has **{recipient.name}** locked down"),
WrestlingGif('https://i.imgur.com/HnLRl26.gif', "**{author.name}** spins **{recipient.name}** right round baby right round"),
WrestlingGif('https://i.imgur.com/uJtuZ4V.gif', "**{author.name}** beats **{recipient.name}** up and locks him down"),
WrestlingGif('https://i.imgur.com/ZgXNVIb.gif', "**{recipient.name}** flails his arms around helplessly"),
WrestlingGif('https://i.imgur.com/Jcu4NyL.gif', "**{author.name}** manages to get a quick jab in at **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/XUpxidH.gif', "**{author.name}** pulls on **{recipient.name}**'s leg"),
WrestlingGif('https://i.imgur.com/pTBy6ap.gif', "**{recipient.name}** and **{author.name}** engage in a hugging competition"),
WrestlingGif('https://i.imgur.com/ggTj4xI.gif', "**{author.name}** escapes **{recipient.name}**'s hold and counters"),
WrestlingGif('https://i.imgur.com/lS2zZre.gif', "**{author.name}** locks **{recipient.name}**'s legs"),
WrestlingGif('https://i.imgur.com/fdgI1Br.gif', "**{recipient.name}** gets choked by **{author.name}** and tries to escape but fails"),
]
class gachiGASM(Cog):
def __init__(self, bot):
super().__init__(bot)
self.gachilist = self.bot.gachilist
if not self.gachilist:
self.reload_gachilist()
self._start_task = self._reload_and_post.start()
        logger.info(f"Starting gachi loop.\n{''.join(traceback.format_stack()[-8:])}")
def cog_unload(self):
self._reload_and_post.cancel()
@tasks.loop(time=time(tzinfo=timezone.utc), reconnect=False)
async def _reload_and_post(self):
logger.info(f'Start task is {self._start_task}, '
f'current task is {self._reload_and_post.get_task()}, '
f'fail status: {self._reload_and_post._last_iteration_failed}, '
                    f"next iter {self._reload_and_post.next_iteration}.\n{''.join(traceback.format_stack()[-8:])}")
self.reload_gachilist()
for guild in self.bot.guilds:
channel = self.bot.guild_cache.dailygachi(guild.id)
if not channel:
continue
channel = guild.get_channel(channel)
if not channel:
continue
vid = Random(self.get_day()+guild.id).choice(self.gachilist)
try:
await channel.send(f'Daily gachi {vid}')
except disnake.HTTPException:
pass
def reload_gachilist(self):
self.bot.gachilist = read_lines(os.path.join(PLAYLISTS, 'gachi.txt'))
self.gachilist = self.bot.gachilist
@staticmethod
def get_day():
return (datetime.utcnow() - datetime.min).days
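    # The dailygachi picks below seed Random with get_day() + the guild id, so
    # repeated calls on the same UTC day return the same video for a guild while
    # different guilds can still get different picks.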
@command()
@cooldown(1, 2, BucketType.channel)
async def gachify(self, ctx, *, words):
"""Gachify a string"""
if ' ' not in words:
# We need to undo the string view or it will skip the first word
ctx.view.undo()
await self.gachify2.invoke(ctx)
else:
return await ctx.send(words.replace(' ', r' \♂ ').upper()[:2000])
@command()
@cooldown(1, 2, BucketType.channel)
async def gachify2(self, ctx, *, words):
"""An alternative way of gachifying"""
s = r'\♂ ' + words.replace(' ', r' \♂ ').upper() + r' \♂'
return await ctx.send(s[:2000])
@command(aliases=['rg'])
@cooldown(1, 5, BucketType.channel)
async def randomgachi(self, ctx):
await ctx.send(choice(self.gachilist))
@group(invoke_without_command=True, aliases=['dg'])
@guild_only()
@cooldown(1, 5, BucketType.channel)
async def dailygachi(self, ctx):
await ctx.send(Random(self.get_day()+ctx.guild.id).choice(self.gachilist))
@dailygachi.command(np_pm=True)
@cooldown(1, 5)
@has_permissions(manage_guild=True)
async def subscribe(self, ctx, *, channel: disnake.TextChannel=None):
if channel:
await self.bot.guild_cache.set_dailygachi(ctx.guild.id, channel.id)
return await ctx.send(f'New dailygachi channel set to {channel}')
channel = self.bot.guild_cache.dailygachi(ctx.guild.id)
channel = ctx.guild.get_channel(channel)
if channel:
await ctx.send(f'Current dailygachi channel is {channel}')
else:
await ctx.send('No dailygachi channel set')
@dailygachi.command()
@cooldown(1, 5)
@has_permissions(manage_guild=True)
@guild_only()
async def unsubscribe(self, ctx):
await self.bot.guild_cache.set_dailygachi(ctx.guild.id, None)
await ctx.send('Dailygachi channel no longer set')
@command()
@cooldown(1, 5, BucketType.member)
@guild_only()
async def wrestle(self, ctx, *, user: disnake.User):
if user == ctx.author:
await ctx.send('Wrestling against yourself...')
return
wrestling_gif = choice(wrestling_gifs)
await ctx.send(embed=wrestling_gif.build_embed(ctx.author, user))
def setup(bot):
bot.add_cog(gachiGASM(bot))
WrestlingGif('https://i.imgur.com/Jcu4NyL.gif', "**{author.name}** manages to get a quick jab in at **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/XUpxidH.gif', "**{author.name}** pulls on **{recipient.name}**'s leg"),
WrestlingGif('https://i.imgur.com/pTBy6ap.gif', "**{recipient.name}** and **{author.name}** engage in a hugging competition"),
WrestlingGif('https://i.imgur.com/ggTj4xI.gif', "**{author.name}** escapes **{recipient.name}**'s hold and counters"),
WrestlingGif('https://i.imgur.com/lS2zZre.gif', "**{author.name}** locks **{recipient.name}**'s legs"),
WrestlingGif('https://i.imgur.com/fdgI1Br.gif', "**{recipient.name}** gets choked by **{author.name}** and tries to escape but fails"),
]
class gachiGASM(Cog):
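    """Gachi-themed commands: random and per-guild daily playlist picks, text 'gachification', and a wrestling GIF command."""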
def __init__(self, bot):
super().__init__(bot)
self.gachilist = self.bot.gachilist
if not self.gachilist:
self.reload_gachilist()
self._start_task = self._reload_and_post.start()
logger.info(f'Starting gachi loop.\n{"".join(traceback.format_stack()[-8:])}')
def cog_unload(self):
self._reload_and_post.cancel()
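    # Daily task: a `time=` argument makes the loop fire once per day at that clock time
    # (midnight UTC here), reloading the playlist and posting the pick to subscribed channels.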
@tasks.loop(time=time(tzinfo=timezone.utc), reconnect=False)
async def _reload_and_post(self):
logger.info(f'Start task is {self._start_task}, '
f'current task is {self._reload_and_post.get_task()}, '
f'fail status: {self._reload_and_post._last_iteration_failed}, '
f'next iter {self._reload_and_post.next_iteration}.\n{"".join(traceback.format_stack()[-8:])}')
self.reload_gachilist()
for guild in self.bot.guilds:
channel = self.bot.guild_cache.dailygachi(guild.id)
if not channel:
continue
channel = guild.get_channel(channel)
if not channel:
continue
vid = Random(self.get_day()+guild.id).choice(self.gachilist)
try:
await channel.send(f'Daily gachi {vid}')
except disnake.HTTPException:
pass
def reload_gachilist(self):
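        """Re-read the gachi playlist from PLAYLISTS/gachi.txt and refresh the shared bot.gachilist cache."""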
self.bot.gachilist = read_lines(os.path.join(PLAYLISTS, 'gachi.txt'))
self.gachilist = self.bot.gachilist
@staticmethod
def get_day():
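        """Number of whole days since datetime.min, used to seed the per-guild daily pick."""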
return (datetime.utcnow() - datetime.min).days
@command()
@cooldown(1, 2, BucketType.channel)
async def gachify(self, ctx, *, words):
"""Gachify a string"""
if ' ' not in words:
# We need to undo the string view or it will skip the first word
ctx.view.undo()
await self.gachify2.invoke(ctx)
else:
return await ctx.send(words.replace(' ', r' \♂ ').upper()[:2000])
@command()
@cooldown(1, 2, BucketType.channel)
async def gachify2(self, ctx, *, words):
"""An alternative way of gachifying"""
s = r'\♂ ' + words.replace(' ', r' \♂ ').upper() + r' \♂'
return await ctx.send(s[:2000])
@command(aliases=['rg'])
@cooldown(1, 5, BucketType.channel)
async def randomgachi(self, ctx):
await ctx.send(choice(self.gachilist))
@group(invoke_without_command=True, aliases=['dg'])
@guild_only()
@cooldown(1, 5, BucketType.channel)
async def dailygachi(self, ctx):
await ctx.send(Random(self.get_day()+ctx.guild.id).choice(self.gachilist))
@dailygachi.command(np_pm=True)
@cooldown(1, 5)
@has_permissions(manage_guild=True)
async def subscribe(self, ctx, *, channel: disnake.TextChannel=None):
if channel:
await self.bot.guild_cache.set_dailygachi(ctx.guild.id, channel.id)
return await ctx.send(f'New dailygachi channel set to {channel}')
channel = self.bot.guild_cache.dailygachi(ctx.guild.id)
channel = ctx.guild.get_channel(channel)
if channel:
await ctx.send(f'Current dailygachi channel is {channel}')
else:
await ctx.send('No dailygachi channel set')
@dailygachi.command()
@cooldown(1, 5)
@has_permissions(manage_guild=True)
@guild_only()
async def unsubscribe(self, ctx):
await self.bot.guild_cache.set_dailygachi(ctx.guild.id, None)
await ctx.send('Dailygachi channel no longer set')
@command()
@cooldown(1, 5, BucketType.member)
@guild_only()
async def wrestle(self, ctx, *, user: disnake.User):
if user == ctx.author:
await ctx.send('Wrestling against yourself...')
return
wrestling_gif = choice(wrestling_gifs)
await ctx.send(embed=wrestling_gif.build_embed(ctx.author, user))
def setup(bot):
bot.add_cog(gachiGASM(bot))
|
#!/usr/local/sal/Python.framework/Versions/Current/bin/python3
import datetime
import pathlib
import plistlib
import sys
import sal
sys.path.insert(0, "/usr/local/munki")
from munkilib import munkicommon
__version__ = "1.2.0"
def main():
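    """Build the Munki portion of the Sal checkin from the latest ManagedInstallReport and SelfServeManifest."""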
# If we haven't successfully submitted to Sal, pull the existing
# munki section rather than start from scratch, as we want to
# keep any install/removal history that may be there.
munki_submission = sal.get_checkin_results().get("munki", {})
munki_report = get_managed_install_report()
extras = {}
extras["munki_version"] = munki_report["MachineInfo"].get("munki_version")
extras["manifest"] = munki_report.get("ManifestName")
extras["runtype"] = munki_report.get("RunType", "custom")
munki_submission["extra_data"] = extras
munki_submission["facts"] = {
"checkin_module_version": __version__,
"RunType": munki_report["RunType"],
"StartTime": munki_report["StartTime"],
"EndTime": munki_report["EndTime"],
}
if munki_report.get("Conditions"):
for condition, value in munki_report["Conditions"].items():
# Join lists of strings into a comma-delimited string, as
# the server wants just text.
try:
if hasattr(value, "append"):
value = ", ".join(value)
            except Exception:
                # We got something weird from a condition that probably wouldn't work anyway
continue
munki_submission["facts"][condition] = value
munki_submission["messages"] = []
for key in ("Errors", "Warnings"):
for msg in munki_report[key]:
# We need to drop the final 'S' to match Sal's message types.
munki_submission["messages"].append(
{"message_type": key.upper()[:-1], "text": msg}
)
now = datetime.datetime.now().astimezone(datetime.timezone.utc).isoformat()
# Process managed items and update histories.
munki_submission["managed_items"] = {}
optional_manifest = get_optional_manifest()
for item in munki_report.get("ManagedInstalls", []):
submission_item = {"date_managed": now}
submission_item["status"] = "PRESENT" if item["installed"] else "PENDING"
version_key = (
"version_to_install" if not item["installed"] else "installed_version"
)
version = item[version_key]
name = f'{item['name']} {version}'
submission_item["name"] = name
# Pop off these two since we already used them.
item.pop("name")
item.pop("installed")
item["type"] = "ManagedInstalls"
self_serve = (
"True" if name in optional_manifest.get("managed_installs", []) else "False"
)
item["self_serve"] = self_serve
submission_item["data"] = item
munki_submission["managed_items"][name] = submission_item
for item in munki_report.get("managed_uninstalls_list", []):
submission_item = {"date_managed": now, "status": "ABSENT"}
self_serve = (
"True"
            if item in optional_manifest.get("managed_uninstalls", [])
else "False"
)
submission_item["data"] = {
"self_serve": self_serve,
"type": "ManagedUninstalls",
}
munki_submission["managed_items"][item] = submission_item
# Process InstallResults and RemovalResults into update history
for report_key in ("InstallResults", "RemovalResults"):
for item in munki_report.get(report_key, []):
# Skip Apple software update items.
if item.get("applesus"):
continue
# Construct key; we pop the name off because we don't need
# to submit it again when we stuff `item` into `data`.
name = f'{item.pop('name')} {item['version']}'
submission_item = munki_submission["managed_items"].get(
name, {"name": name}
)
if item.get("status") != 0:
# Something went wrong, so change the status.
submission_item["status"] = "ERROR"
if "data" in submission_item:
submission_item["data"].update(item)
else:
submission_item["data"] = item
if "type" not in submission_item["data"]:
submission_item["data"]["type"] = (
"ManagedInstalls"
if report_key == "InstallResults"
else "ManagedUninstalls"
)
# This UTC datetime gets converted to a naive datetime by
# plistlib. Fortunately, we can just tell it that it's UTC.
submission_item["date_managed"] = (
item["time"].replace(tzinfo=datetime.timezone.utc).isoformat()
)
munki_submission["managed_items"][name] = submission_item
sal.set_checkin_results("Munki", munki_submission)
def get_managed_install_report():
"""Return Munki ManagedInstallsReport.plist as a plist dict.
Returns:
ManagedInstalls report for last Munki run as a plist
dict, or an empty dict.
"""
# Checks munki preferences to see where the install directory is set to.
managed_install_dir = munkicommon.pref("ManagedInstallDir")
# set the paths based on munki's configuration.
managed_install_report = (
pathlib.Path(managed_install_dir) / "ManagedInstallReport.plist"
)
try:
munki_report = plistlib.loads(managed_install_report.read_bytes())
except (IOError, plistlib.InvalidFileException):
munki_report = {}
if "MachineInfo" not in munki_report:
munki_report["MachineInfo"] = {}
return sal.unobjctify(munki_report)
def get_optional_manifest():
"""Return Munki SelfServeManifest as a plist dict.
Returns:
SelfServeManifest for last Munki run as a plist
dict, or an empty dict.
"""
# Checks munki preferences to see where the install directory is set to.
managed_install_dir = munkicommon.pref("ManagedInstallDir")
# set the paths based on munki's configuration.
optional_manifest_path = (
pathlib.Path(managed_install_dir) / "manifests/SelfServeManifest"
)
try:
optional_manifest = plistlib.loads(optional_manifest_path.read_bytes())
except (IOError, plistlib.InvalidFileException):
optional_manifest = {}
return optional_manifest
if __name__ == "__main__":
main()
| #!/usr/local/sal/Python.framework/Versions/Current/bin/python3
import datetime
import pathlib
import plistlib
import sys
import sal
sys.path.insert(0, "/usr/local/munki")
from munkilib import munkicommon
__version__ = "1.2.0"
def main():
# If we haven't successfully submitted to Sal, pull the existing
# munki section rather than start from scratch, as we want to
# keep any install/removal history that may be there.
munki_submission = sal.get_checkin_results().get("munki", {})
munki_report = get_managed_install_report()
extras = {}
extras["munki_version"] = munki_report["MachineInfo"].get("munki_version")
extras["manifest"] = munki_report.get("ManifestName")
extras["runtype"] = munki_report.get("RunType", "custom")
munki_submission["extra_data"] = extras
munki_submission["facts"] = {
"checkin_module_version": __version__,
"RunType": munki_report["RunType"],
"StartTime": munki_report["StartTime"],
"EndTime": munki_report["EndTime"],
}
if munki_report.get("Conditions"):
for condition, value in munki_report["Conditions"].items():
# Join lists of strings into a comma-delimited string, as
# the server wants just text.
try:
if hasattr(value, "append"):
value = ", ".join(value)
            except Exception:
                # We got something weird from a condition that probably wouldn't work anyway
continue
munki_submission["facts"][condition] = value
munki_submission["messages"] = []
for key in ("Errors", "Warnings"):
for msg in munki_report[key]:
# We need to drop the final 'S' to match Sal's message types.
munki_submission["messages"].append(
{"message_type": key.upper()[:-1], "text": msg}
)
now = datetime.datetime.now().astimezone(datetime.timezone.utc).isoformat()
# Process managed items and update histories.
munki_submission["managed_items"] = {}
optional_manifest = get_optional_manifest()
for item in munki_report.get("ManagedInstalls", []):
submission_item = {"date_managed": now}
submission_item["status"] = "PRESENT" if item["installed"] else "PENDING"
version_key = (
"version_to_install" if not item["installed"] else "installed_version"
)
version = item[version_key]
name = f'{item["name"]} {version}'
submission_item["name"] = name
# Pop off these two since we already used them.
item.pop("name")
item.pop("installed")
item["type"] = "ManagedInstalls"
self_serve = (
"True" if name in optional_manifest.get("managed_installs", []) else "False"
)
item["self_serve"] = self_serve
submission_item["data"] = item
munki_submission["managed_items"][name] = submission_item
for item in munki_report.get("managed_uninstalls_list", []):
submission_item = {"date_managed": now, "status": "ABSENT"}
self_serve = (
"True"
            if item in optional_manifest.get("managed_uninstalls", [])
else "False"
)
submission_item["data"] = {
"self_serve": self_serve,
"type": "ManagedUninstalls",
}
munki_submission["managed_items"][item] = submission_item
# Process InstallResults and RemovalResults into update history
for report_key in ("InstallResults", "RemovalResults"):
for item in munki_report.get(report_key, []):
# Skip Apple software update items.
if item.get("applesus"):
continue
# Construct key; we pop the name off because we don't need
# to submit it again when we stuff `item` into `data`.
name = f'{item.pop("name")} {item["version"]}'
submission_item = munki_submission["managed_items"].get(
name, {"name": name}
)
if item.get("status") != 0:
# Something went wrong, so change the status.
submission_item["status"] = "ERROR"
if "data" in submission_item:
submission_item["data"].update(item)
else:
submission_item["data"] = item
if "type" not in submission_item["data"]:
submission_item["data"]["type"] = (
"ManagedInstalls"
if report_key == "InstallResults"
else "ManagedUninstalls"
)
# This UTC datetime gets converted to a naive datetime by
# plistlib. Fortunately, we can just tell it that it's UTC.
submission_item["date_managed"] = (
item["time"].replace(tzinfo=datetime.timezone.utc).isoformat()
)
munki_submission["managed_items"][name] = submission_item
sal.set_checkin_results("Munki", munki_submission)
def get_managed_install_report():
"""Return Munki ManagedInstallsReport.plist as a plist dict.
Returns:
ManagedInstalls report for last Munki run as a plist
dict, or an empty dict.
"""
# Checks munki preferences to see where the install directory is set to.
managed_install_dir = munkicommon.pref("ManagedInstallDir")
# set the paths based on munki's configuration.
managed_install_report = (
pathlib.Path(managed_install_dir) / "ManagedInstallReport.plist"
)
try:
munki_report = plistlib.loads(managed_install_report.read_bytes())
except (IOError, plistlib.InvalidFileException):
munki_report = {}
if "MachineInfo" not in munki_report:
munki_report["MachineInfo"] = {}
return sal.unobjctify(munki_report)
def get_optional_manifest():
"""Return Munki SelfServeManifest as a plist dict.
Returns:
SelfServeManifest for last Munki run as a plist
dict, or an empty dict.
"""
# Checks munki preferences to see where the install directory is set to.
managed_install_dir = munkicommon.pref("ManagedInstallDir")
# set the paths based on munki's configuration.
optional_manifest_path = (
pathlib.Path(managed_install_dir) / "manifests/SelfServeManifest"
)
try:
optional_manifest = plistlib.loads(optional_manifest_path.read_bytes())
except (IOError, plistlib.InvalidFileException):
optional_manifest = {}
return optional_manifest
if __name__ == "__main__":
main()
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse, StreamingHttpResponse, FileResponse
from django.template import loader
from django.shortcuts import get_object_or_404, render, redirect
from django.views import View
from django.views.generic import DetailView, ListView
from django.db.models import (
Count,
Max,
Min,
Q,
F,
Prefetch,
Subquery,
OuterRef,
ExpressionWrapper,
FloatField,
BooleanField,
)
from django.db.models.functions import Concat, FirstValue, Cast
from django.core import management
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db.models.functions import Coalesce
from django.contrib.postgres.search import SearchRank, SearchQuery
from django.contrib.postgres.aggregates import StringAgg
from django.urls import reverse, reverse_lazy
from django.contrib import messages
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required, user_passes_test
from dal.autocomplete import Select2QuerySetView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.db import transaction, IntegrityError
from django.forms.models import model_to_dict
from django.forms import formset_factory, inlineformset_factory, modelformset_factory
from django.conf import settings
from django.utils.html import format_html
from django.views.decorators.cache import cache_page
import glob
from os.path import basename, getmtime
from datetime import datetime
import csv
import sys
from operator import attrgetter
from tempfile import NamedTemporaryFile, TemporaryDirectory
import zipfile
from . import models
from .models import (
Work,
WorkType,
Author,
Conference,
Institution,
Appellation,
Affiliation,
ConferenceSeries,
SeriesMembership,
Organizer,
Country,
Keyword,
Topic,
Language,
CountryLabel,
Authorship,
License,
)
from .forms import (
WorkFilter,
AuthorFilter,
AuthorMergeForm,
WorkForm,
WorkAuthorshipForm,
FullInstitutionForm,
InstitutionMergeForm,
AffiliationEditForm,
AffiliationMergeForm,
KeywordMergeForm,
TagForm,
TopicMergeForm,
AffiliationMultiMergeForm,
KeywordMultiMergeForm,
ConferenceForm,
ConferenceCheckoutForm,
ConferenceSeriesInline,
LanguageMergeForm,
WorkTypeMergeForm,
InstitutionMultiMergeForm,
TopicMultiMergeForm,
ConferenceXMLUploadForm,
)
PERMISSIONS_ERROR_TEXT = (
"Please contact the lead project editors to edit this part of the database."
)
def cache_for_anon(func):
"""
On these views, call the cache if the user is not authenticated
"""
def wrap(request, *args, **kwargs):
if request.user.is_authenticated:
return func(request, *args, **kwargs)
else:
return cache_page(settings.CACHES["default"]["TIMEOUT"])(func)(
request, *args, **kwargs
)
return wrap
def user_is_staff(func):
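    """View decorator: redirect anonymous users to login, and warn-and-redirect non-staff users home."""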
def wrap(request, *args, **kwargs):
if not request.user.is_authenticated:
return redirect(f"{reverse("login")}?next={request.path}")
if request.user.is_staff:
return func(request, *args, **kwargs)
else:
messages.warning(request, PERMISSIONS_ERROR_TEXT)
return redirect("home_view")
return wrap
class StaffRequiredMixin:
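    """Class-based-view counterpart of user_is_staff: anonymous users are sent to login, non-staff users back home."""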
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated:
return redirect(f"{reverse("login")}?next={self.request.path}")
if self.request.user.is_staff:
return super().dispatch(*args, **kwargs)
else:
messages.warning(self.request, PERMISSIONS_ERROR_TEXT)
return redirect("home_view")
class ItemLabelAutocomplete(Select2QuerySetView):
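    """Base autocomplete that reuses the dropdown result label for the selected value (django-autocomplete-light's get_selected_result_label hook)."""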
def get_selected_result_label(self, item):
return self.get_result_label(item)
class WorkAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Work.objects.all()
parents_only = self.forwarded.get("parents_only", None)
if parents_only:
qs = qs.filter(work_type__is_parent=True)
conference = self.forwarded.get("conference", None)
if conference:
qs = qs.filter(conference=conference)
if self.q:
qs = qs.filter(title__icontains=self.q)
return qs.all()
class AppellationAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Appellation.objects.all()
if self.q:
qs = qs.filter(
Q(first_name__icontains=self.q) | Q(last_name__icontains=self.q)
).all()
return qs
class KeywordAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Keyword.objects.annotate(n_works=Count("works")).order_by("-n_works")
if self.q:
qs = qs.filter(title__icontains=self.q).all()
return qs
def get_result_label(self, item):
return f"{item} ({item.n_works} works)"
class LanguageAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Language.objects.annotate(n_works=Count("works")).order_by(
"-n_works", "title"
)
if self.q:
qs = qs.filter(title__icontains=self.q).all()
return qs
def get_result_label(self, item):
return f"{item} ({item.n_works} works)"
class TopicAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Topic.objects.annotate(n_works=Count("works")).order_by("-n_works")
if self.q:
qs = qs.filter(title__icontains=self.q).all()
return qs
def get_result_label(self, item):
return f"{item} ({item.n_works} works)"
class CountryAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Country.objects.annotate(
n_works=Count(
"institutions__affiliations__asserted_by__work", distinct=True
)
).order_by("-n_works")
if self.q:
qs = qs.filter(
Q(pref_name__icontains=self.q) | Q(names__name__icontains=self.q)
)
return qs.distinct()
def get_result_label(self, item):
return f"{item} ({item.n_works} works)"
class InstitutionAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = (
Institution.objects.annotate(
n_works=Count("affiliations__asserted_by__work", distinct=True)
)
.select_related("country")
.order_by("-n_works")
)
if self.q:
qs = qs.filter(name__icontains=self.q).all()
return qs
def get_result_label(self, item):
if item.country is not None:
c_label = item.country.pref_name
else:
c_label = ""
location_statement = ", ".join(
[l for l in [item.state_province_region, c_label] if l != ""]
)
return f"{item} ({item.n_works} works)<br><small text-class='muted'>{location_statement}</small>"
class AffiliationAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = (
Affiliation.objects.annotate(
n_works=Count("asserted_by__work", distinct=True)
)
.select_related("institution", "institution__country")
.order_by("-n_works")
)
inst_filter = self.forwarded.get("institution", None)
if inst_filter:
qs = qs.filter(institution=inst_filter)
if self.q:
qs = qs.filter(
Q(department__icontains=self.q) | Q(institution__name__icontains=self.q)
).distinct()
return qs
def get_result_label(self, item):
if item.institution.country is not None:
c_label = item.institution.country.pref_name
else:
c_label = ""
location_statement = ", ".join(
[l for l in [item.institution.state_province_region, c_label] if l != ""]
)
return f"{item} ({item.n_works} works)<br><small text-class='muted'>{location_statement}</small>"
class ConferenceAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Conference.objects.annotate(
main_series=StringAgg(
"series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
)
).order_by("year", "main_series", "short_title", "theme_title")
if self.q:
qs = qs.filter(search_text__icontains=self.q).distinct()
return qs
def get_result_label(self, item):
if item.main_series:
return f"{item.main_series} - {item.year} - {item.short_title}"
elif item.short_title:
return f"{item.year} - {item.short_title}"
else:
return f"{item.year} - {item.theme_title}"
class AuthorAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Author.objects.annotate(
n_works=Count("authorships", distinct=True),
main_last_name=Max("appellations__last_name"),
main_first_name=Max("appellations__first_name"),
).order_by("main_last_name", "main_first_name", "-n_works")
if self.q:
qs = qs.filter(appellations_index__icontains=self.q).distinct()
return qs
def get_result_label(self, item):
return format_html(
f"{item.most_recent_appellation} ({item.n_works} works)<br><small text-class='muted'>(All names: {item.appellations_index})</small>"
)
def work_view(request, work_id):
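    """Work detail page, with conference, keyword/topic/language, session, and authorship relations prefetched to keep query counts down."""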
related_conference = Conference.objects.annotate(
n_works=Count("works", distinct=True),
n_authors=Count("works__authors", distinct=True),
main_series=StringAgg(
"series_memberships__series__abbreviation", delimiter=" / ", distinct=True
),
).prefetch_related("series", "organizers")
work = get_object_or_404(
Work.objects.select_related("work_type", "full_text_license").prefetch_related(
Prefetch("conference", queryset=related_conference),
"keywords",
"topics",
"languages",
Prefetch(
"session_papers",
queryset=Work.objects.prefetch_related(
Prefetch(
"authorships",
queryset=Authorship.objects.select_related("appellation"),
),
),
),
Prefetch(
"parent_session",
queryset=Work.objects.prefetch_related(
Prefetch(
"authorships",
queryset=Authorship.objects.select_related(
"author", "appellation"
),
)
),
),
),
pk=work_id,
)
authorships = (
Authorship.objects.filter(work_id=work_id)
.order_by("authorship_order")
.distinct()
.select_related("work", "author", "appellation")
.prefetch_related(
Prefetch(
"affiliations",
queryset=Affiliation.objects.select_related(
"institution", "institution__country"
),
)
)
)
context = {"work": work, "authorships": authorships}
return render(request, "work_detail.html", context)
def author_view(request, author_id):
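    """Author detail page: appellations and affiliations ordered by most recent conference year, plus the author's works."""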
author = get_object_or_404(Author, pk=author_id)
sorted_authorships = (
Authorship.objects.filter(author=author)
.order_by("work__conference__year")
.prefetch_related(
Prefetch("work", queryset=Work.objects.select_related("conference"))
)
)
appellations = (
Appellation.objects.filter(asserted_by__author=author)
.distinct()
.annotate(latest_year=Max("asserted_by__work__conference__year"))
.order_by("-latest_year")
.prefetch_related(Prefetch("asserted_by", queryset=sorted_authorships))
)
affiliations = (
Affiliation.objects.filter(asserted_by__author=author)
.distinct()
.annotate(latest_year=Max("asserted_by__work__conference__year"))
.order_by("-latest_year")
.prefetch_related(
Prefetch("asserted_by", queryset=sorted_authorships),
Prefetch(
"institution", queryset=Institution.objects.select_related("country")
),
)
)
works = (
Work.objects.filter(authorships__author=author)
.order_by("conference__year")
.distinct()
.select_related("conference", "parent_session", "work_type")
.prefetch_related(
Prefetch(
"conference",
queryset=Conference.objects.prefetch_related("series", "organizers"),
),
"session_papers",
"keywords",
"topics",
"languages",
Prefetch(
"authorships",
queryset=Authorship.objects.select_related("appellation", "author"),
),
)
)
author_admin_page = reverse("admin:abstracts_author_change", args=(author.pk,))
context = {
"author": author,
"works": works,
"appellations": appellations,
"affiliations": affiliations,
"author_admin_page": author_admin_page,
}
return render(request, "author_detail.html", context)
class AuthorSplit(DetailView, StaffRequiredMixin):
model = Author
template_name = "author_split.html"
context_object_name = "original_author"
def get_context_data(self, **kwargs):
authorships = Authorship.objects.filter(author=self.get_object()).order_by(
"work__conference__year"
)
return {self.context_object_name: self.get_object(), "authorships": authorships}
@transaction.atomic
def post(self, request, *args, **kwargs):
"""
Create new author and transfer authorships
"""
authorships_to_move = request.POST.getlist("splitselect")
try:
new_author = Author.objects.create()
Authorship.objects.filter(id__in=authorships_to_move).update(
author=new_author
)
# Force-update appellations
self.get_object().save()
new_author.save()
messages.success(
request,
f"{len(authorships_to_move)} authorships moved to new author id {new_author.id}",
)
return redirect("author_detail", new_author.id)
        except Exception:
messages.error(request, str(authorships_to_move))
return redirect("author_split", self.get_object().id)
class XMLView(DetailView, LoginRequiredMixin):
model = Work
context_object_name = "work"
def get(self, request, *args, **kwargs):
response = HttpResponse(self.get_object().full_text, content_type="xhtml+xml")
response[
"Content-Disposition"
] = f"attachment; filename={self.get_object().id}.xml"
return response
class AuthorList(ListView):
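    """Filterable, paginated author list; subqueries against each author's newest authorship supply their most recent name and affiliation."""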
context_object_name = "author_list"
template_name = "author_list.html"
paginate_by = 50
def get_queryset(self):
base_result_set = Author.objects.exclude(appellations__isnull=True).annotate(
n_conferences=Count("works__conference", distinct=True)
)
raw_filter_form = AuthorFilter(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
order_res = filter_form["ordering"]
if order_res is None or order_res == "":
order_res = "last_name"
result_set = base_result_set.annotate(
last_name=Max("appellations__last_name"),
n_works=Count("authorships", distinct=True),
).order_by(order_res)
author_res = filter_form["author"]
if author_res is not None:
result_set = result_set.filter(id=author_res.id)
affiliation_res = filter_form["affiliation"]
if affiliation_res is not None:
result_set = result_set.filter(
authorships__affiliations=affiliation_res
)
institution_res = filter_form["institution"]
if institution_res is not None:
result_set = result_set.filter(
authorships__affiliations__institution=institution_res
)
country_res = filter_form["country"]
if country_res is not None:
result_set = result_set.filter(
authorships__affiliations__institution__country=country_res
)
conference_res = filter_form["conference"]
if conference_res is not None:
result_set = result_set.filter(works__conference=conference_res)
if filter_form["singleton"]:
result_set = result_set.filter(n_conferences=1)
name_res = filter_form["name"]
if name_res != "":
result_set = result_set.filter(appellations_index__icontains=name_res)
first_name_res = filter_form["first_name"]
if first_name_res != "":
result_set = result_set.filter(
authorships__appellation__first_name__icontains=first_name_res
)
last_name_res = filter_form["last_name"]
if last_name_res != "":
result_set = result_set.filter(
authorships__appellation__last_name__icontains=last_name_res
)
# Newest affiliations
newest_authorship = Authorship.objects.filter(
author=OuterRef("pk")
).order_by("-work__conference__year")
annotated_authors = result_set.annotate(
main_affiliation_department=Subquery(
newest_authorship.values("affiliations__department")[:1]
),
main_affiliation_institution=Subquery(
newest_authorship.values("affiliations__institution__name")[:1]
),
main_affiliation_institution_city=Subquery(
newest_authorship.values("affiliations__institution__city")[:1]
),
main_affiliation_institution_state=Subquery(
newest_authorship.values(
"affiliations__institution__state_province_region"
)[:1]
),
main_affiliation_institution_country=Subquery(
newest_authorship.values(
"affiliations__institution__country__pref_name"
)[:1]
),
most_recent_first_name=Subquery(
newest_authorship.values("appellation__first_name")[:1]
),
most_recent_last_name=Subquery(
newest_authorship.values("appellation__last_name")[:1]
),
n_works=Count("authorships", distinct=True),
)
return annotated_authors
else:
messages.warning(
self.request,
"Query parameters not recognized. Check your URL and try again.",
)
return base_result_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["author_filter_form"] = AuthorFilter(data=self.request.GET)
context["available_authors_count"] = Author.objects.count()
context["redirect_url"] = reverse("author_list")
return context
def annotate_multiple_series(qs):
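    """Annotate a ConferenceSeries queryset with conference counts, entry-status tallies, and completion percentages."""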
return qs.annotate(
n_conferences=Count("conferences", distinct=True),
earliest_year=Min("conferences__year"),
latest_year=Max("conferences__year"),
n_complete=Count(
"conferences", filter=Q(conferences__entry_status="c"), distinct=True
),
n_in_progress=Count(
"conferences", filter=Q(conferences__entry_status="i"), distinct=True
),
n_in_review=Count(
"conferences", filter=Q(conferences__entry_status="r"), distinct=True
),
n_remaining=F("n_conferences")
- F("n_complete")
- F("n_in_progress")
- F("n_in_review"),
pct_complete=(
Cast(F("n_complete"), FloatField()) / Cast(F("n_conferences"), FloatField())
)
* 100,
pct_in_progress=(
Cast(F("n_in_progress"), FloatField())
/ Cast(F("n_conferences"), FloatField())
)
* 100,
pct_in_review=(
Cast(F("n_in_review"), FloatField())
/ Cast(F("n_conferences"), FloatField())
)
* 100,
pct_remaining=(
Cast(F("n_remaining"), FloatField())
/ Cast(F("n_conferences"), FloatField())
)
* 100,
).order_by("title")
def annotate_single_series(qs):
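    """Aggregate the same completion statistics for a single conference queryset, returning a plain dict."""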
res = qs.aggregate(
earliest_year=Min("year"),
latest_year=Max("year"),
n_conferences=Count("id", distinct=True),
n_complete=Count("id", filter=Q(entry_status="c"), distinct=True),
n_in_progress=Count("id", filter=Q(entry_status="i"), distinct=True),
n_in_review=Count("id", filter=Q(entry_status="r"), distinct=True),
)
res["n_remaining"] = (
res["n_conferences"]
- res["n_complete"]
- res["n_in_progress"]
- res["n_in_review"]
)
if res["n_conferences"] > 0:
res["pct_complete"] = (res["n_complete"] / res["n_conferences"]) * 100
res["pct_in_progress"] = (res["n_in_progress"] / res["n_conferences"]) * 100
res["pct_in_review"] = (res["n_in_review"] / res["n_conferences"]) * 100
res["pct_remaining"] = (res["n_remaining"] / res["n_conferences"]) * 100
else:
res["pct_complete"] = 0
res["pct_in_progress"] = 0
res["pct_in_review"] = 0
res["pct_remaining"] = 0
return res
def conference_series_qs():
return annotate_multiple_series(
ConferenceSeries.objects.exclude(conferences__isnull=True)
)
class ConferenceSeriesList(ListView):
context_object_name = "series_list"
template_name = "conference_series_list.html"
def get_queryset(self):
base_result_set = conference_series_qs()
return base_result_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
sa_conf = Conference.objects.filter(series__isnull=True)
context["standalone_conferences"] = annotate_single_series(sa_conf)
context["standalone_conference_count"] = sa_conf.count()
return context
class ConferenceSeriesDetail(DetailView):
model = ConferenceSeries
template_name = "conference_series_detail.html"
context_object_name = "series"
def get_member_conferences(self):
return Conference.objects.filter(series_memberships__series=self.get_object())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["series_progress"] = annotate_single_series(
self.get_member_conferences()
)
series_order_subquery = SeriesMembership.objects.filter(
conference=OuterRef("pk"), series=self.get_object()
).order_by("number")
context["conference_list"] = (
self.get_member_conferences()
.annotate(
main_series=StringAgg(
"series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
),
n_works=Count("works", distinct=True),
n_authors=Count("works__authors", distinct=True),
series_order=Subquery(series_order_subquery.values("number")[:1]),
)
.order_by("series_order")
.prefetch_related(
"series_memberships",
"series_memberships__series",
"organizers",
"country",
"hosting_institutions",
"hosting_institutions__country",
"documents",
)
)
context["series_list"] = conference_series_qs()
return context
class StandaloneList(View):
template_name = "conference_series_detail.html"
def get_standalone_list(self):
qs = (
Conference.objects.filter(series__isnull=True)
.annotate(
main_series=StringAgg(
"series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
),
n_works=Count("works", distinct=True),
n_authors=Count("works__authors", distinct=True),
)
.order_by("year", "short_title", "theme_title")
.prefetch_related(
"series_memberships",
"series_memberships__series",
"organizers",
"country",
"hosting_institutions",
"hosting_institutions__country",
"documents",
)
)
return qs
def get(self, request):
faux_series = {
"title": "Standalone Events",
"notes": "Digital humanities events not belonging to a larger series, such symposia or workshops.",
"n_conferences": self.get_standalone_list().count(),
}
context = {
"conference_list": self.get_standalone_list(),
"series": faux_series,
"series_list": conference_series_qs(),
"series_progress": annotate_single_series(self.get_standalone_list()),
}
return render(request, self.template_name, context)
def home_view(request):
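    """Landing page with headline counts: conferences, year span, works, authors, institutions, and countries represented."""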
conference_count = Conference.objects.count()
years_count = Conference.objects.aggregate(year_range=Max("year") - Min("year"))[
"year_range"
]
work_count = Work.objects.count()
author_count = Author.objects.exclude(authorships__work__isnull=True).count()
institution_count = Institution.objects.count()
country_count = (
Country.objects.filter(
Q(institutions__affiliations__asserted_by__work__isnull=False)
| Q(institutions__conferences__isnull=False)
| Q(conferences__isnull=False)
)
.distinct()
.count()
)
context = {
"site": {
"conference_count": conference_count,
"years_count": years_count,
"work_count": work_count,
"author_count": author_count,
"institution_count": institution_count,
"country_count": country_count,
}
}
return render(request, "index.html", context)
@user_is_staff
@transaction.atomic
def author_merge_view(request, author_id):
author = get_object_or_404(Author, pk=author_id)
if request.method == "GET":
"""
Initial load of the merge form displays all the authorships of the current author that will be affected
"""
context = {"merging": author, "author_merge_form": AuthorMergeForm}
return render(request, "author_merge.html", context)
elif request.method == "POST":
"""
Posting the new author id causes all of the old author's authorships to be reassigned.
"""
raw_form = AuthorMergeForm(request.POST)
if raw_form.is_valid():
target_author = raw_form.cleaned_data["into"]
if author == target_author:
"""
If the user chooses the existing author, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge an author into themselves. Please select a different author.",
)
return redirect("author_merge", author_id=author_id)
else:
old_author_string = str(author)
merge_results = author.merge(target_author)
target_author.user_last_updated = request.user
target_author.save()
messages.success(
request,
f"Author {old_author_string} has been merged into {target_author}, and the old author entry has been deleted.",
)
messages.success(
request, f"{merge_results["update_results"]} authorships updated"
)
return redirect("author_detail", author_id=target_author.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "author_merge.html", context)
def field_required(field):
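    """Data-dictionary heuristic: a field is optional if it is nullable or a blankable char/text field, required otherwise."""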
if field.get_internal_type() in ("CharField", "TextField") and field.blank:
return False
if field.null:
return False
return True
def download_data(request):
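    """Downloads page: pick the public or private table configuration and build a data dictionary describing each exported CSV."""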
data_dictionary = []
if request.user.is_authenticated:
dt_config = settings.PRIVATE_DATA_TABLE_CONFIG
zip_url = reverse("private_all_tables_download")
else:
dt_config = settings.PUBLIC_DATA_TABLE_CONFIG
zip_url = reverse("public_all_tables_download")
denormalized_url = reverse("works_download")
denormalized_last_updated = datetime.fromtimestamp(
getmtime(f"{settings.DATA_OUTPUT_PATH}/{settings.DENORMALIZED_WORKS_NAME}.zip")
)
for m in dt_config["CONFIGURATION"]:
model = attrgetter(m["model"])(models)
if "manual_model_description" in m:
model_description = m["manual_model_description"]
else:
try:
model_description = model.model_description
            except AttributeError:
model_description = None
all_model_fields = [
{
"name": f.name,
"relation": f.is_relation,
"help_text": f.help_text,
"related_model": str(f.related_model)
.replace("<class 'abstracts.models.", "")
.replace("'>", ""),
"type": f.get_internal_type(),
"required": field_required(f),
}
for f in model._meta.fields
if not f.one_to_many and f.name not in m["exclude_fields"]
]
if m.get("include_string", False):
all_model_fields.append(
{
"name": "label",
"relation": None,
"help_text": "General label for this object",
"related_model": None,
"type": "CharField",
"required": True,
}
)
data_dictionary.append(
{
"model": m["model"],
"model_description": model_description,
"csv_name": m["csv_name"],
"fields": all_model_fields,
}
)
normalized_last_updated = datetime.fromtimestamp(
getmtime(f"{settings.DATA_OUTPUT_PATH}/{dt_config["DATA_ZIP_NAME"]}")
)
context = {
"zip_url": zip_url,
"denormalized_url": denormalized_url,
"denormalized_last_updated": denormalized_last_updated,
"normalized_last_updated": normalized_last_updated,
"data_dictionary": data_dictionary,
"denormalized_data_dictionary": settings.DENORMALIZED_HEADERS,
}
return render(request, "downloads.html", context)
def download_works_csv(request):
target_zip = f"{settings.DATA_OUTPUT_PATH}/{settings.DENORMALIZED_WORKS_NAME}.zip"
response = FileResponse(open(target_zip, "rb"))
return response
def public_download_all_tables(request):
target_zip = f"{settings.DATA_OUTPUT_PATH}/{settings.PUBLIC_DATA_TABLE_CONFIG["DATA_ZIP_NAME"]}"
response = FileResponse(open(target_zip, "rb"))
return response
@login_required
def private_download_all_tables(request):
target_zip = f"{settings.DATA_OUTPUT_PATH}/{settings.PRIVATE_DATA_TABLE_CONFIG["DATA_ZIP_NAME"]}"
response = FileResponse(open(target_zip, "rb"))
return response
@login_required
def WorkCreate(request):
if request.method == "GET":
if "conference" in request.GET:
conf = get_object_or_404(Conference, pk=int(request.GET["conference"]))
work_form = WorkForm(initial={"conference": conf.pk})
else:
work_form = WorkForm()
if request.method == "POST":
work_form = WorkForm(request.POST)
if work_form.is_valid():
new_work = work_form.save()
new_work.user_last_updated = request.user
new_work.save()
messages.success(request, f"{new_work} created.")
return redirect("work_edit_authorship", work_id=new_work.pk)
else:
for err in work_form.errors:
messages.error(request, err)
context = {"work_form": work_form}
return render(request, "work_create.html", context)
@login_required
def WorkEdit(request, work_id):
work = get_object_or_404(Work, pk=work_id)
if request.method == "POST":
work_form = WorkForm(request.POST, instance=work)
if work_form.is_valid():
work.user_last_updated = request.user
work_form.save()
            messages.success(request, f'"{work.title}" successfully updated.')
return redirect("work_detail", work_id=work.pk)
else:
for f, e in work_form.errors.items():
messages.error(request, f"{f}: {e}")
work_initial_data = model_to_dict(work)
context = {"work_form": WorkForm(initial=work_initial_data), "work": work}
return render(request, "work_edit.html", context)
@login_required
@transaction.atomic
def WorkEditAuthorship(request, work_id):
work = get_object_or_404(Work, pk=work_id)
authorships = work.authorships.all()
AuthorshipWorkFormset = formset_factory(
WorkAuthorshipForm, can_delete=True, extra=0
)
initial_data = []
for authorship in authorships:
base_data = {
"author": authorship.author,
"authorship_order": authorship.authorship_order,
"first_name": authorship.appellation.first_name,
"last_name": authorship.appellation.last_name,
"affiliations": [aff for aff in authorship.affiliations.all()],
}
initial_data.append(base_data)
if request.method == "GET":
authorships_forms = AuthorshipWorkFormset(initial=initial_data)
elif request.method == "POST":
authorships_forms = AuthorshipWorkFormset(request.POST)
if authorships_forms.is_valid():
for d_form in authorships_forms.deleted_forms:
d_form_data = d_form.cleaned_data
attached_author = d_form_data["author"]
Authorship.objects.filter(
work=work, author=d_form_data["author"]
).delete()
# Refresh the author in DB to update appellations index
attached_author.save()
for aform in authorships_forms:
if aform not in authorships_forms.deleted_forms:
aform_data = aform.cleaned_data
appellation = Appellation.objects.get_or_create(
first_name=aform_data["first_name"],
last_name=aform_data["last_name"],
)[0]
affiliations = aform_data["affiliations"]
authorship_order = aform_data["authorship_order"]
try:
if aform_data["author"] is None:
author_id = Author.objects.create()
else:
author_id = aform_data["author"]
auth = Authorship.objects.update_or_create(
work=work,
author=author_id,
defaults={
"authorship_order": authorship_order,
"appellation": appellation,
"user_last_updated": request.user,
},
)[0]
author_id.user_last_updated = request.user
author_id.save()
except IntegrityError as e:
messages.error(
request, f"{e}: Ensure authorship order numbers are unique"
)
return redirect("work_edit_authorship", work.pk)
auth.affiliations.clear()
if affiliations is not None:
auth.affiliations.set(affiliations)
messages.success(
request, f'"{work.title}" authorships successfully updated.'
)
if "start_new" in request.POST:
return redirect(
f"{reverse("work_create")}?conference={work.conference.pk}"
)
return redirect("work_detail", work_id=work.pk)
else:
for error in authorships_forms.errors:
messages.error(request, error)
context = {
"authorships_form": authorships_forms,
"work": work,
"affiliation_form": AffiliationEditForm,
}
return render(request, "work_edit_authorships.html", context)
@login_required
def AuthorInfoJSON(request, author_id):
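    """JSON summary of an author (current name, sample work titles, affiliations), presumably consumed by the authorship editing form."""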
if request.method == "GET":
author = get_object_or_404(Author, pk=author_id)
author_aff = Affiliation.objects.filter(asserted_by__author=author).distinct()
author_dict = {
"first_name": author.most_recent_appellation.first_name,
"last_name": author.most_recent_appellation.last_name,
"work_titles": [w.title for w in author.works.all()][:4],
"works_count": author.works.count(),
}
if author_aff is not None:
author_dict["affiliations"] = [
{"name": str(aff), "id": aff.pk} for aff in author_aff
]
return JsonResponse(author_dict)
@login_required
def AffiliationInfoJSON(request, affiliation_id):
if request.method == "GET":
affiliation = get_object_or_404(Affiliation, pk=affiliation_id)
affiliation_dict = {
"institution": {
"name": str(affiliation.institution),
"id": affiliation.institution.id,
}
}
if affiliation.department is not None:
affiliation_dict["department"] = affiliation.department
return JsonResponse(affiliation_dict)
class WorkDelete(LoginRequiredMixin, SuccessMessageMixin, DeleteView):
model = Work
template_name = "work_delete.html"
extra_context = {"cancel_view": "work_list"}
success_url = reverse_lazy("work_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, f"'{self.get_object().title}' deleted")
return super().delete(request, *args, **kwargs)
class FullWorkList(ListView):
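    """Filterable, paginated list of works with ranked full-text search and several ordering options."""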
context_object_name = "work_list"
template_name = "work_list.html"
paginate_by = 10
def get_queryset(self):
base_result_set = Work.objects.all()
raw_filter_form = WorkFilter(self.request.GET)
if raw_filter_form.is_valid():
result_set = base_result_set
filter_form = raw_filter_form.cleaned_data
work_type_res = filter_form["work_type"]
if work_type_res is not None:
result_set = result_set.filter(work_type=work_type_res)
conference_res = filter_form["conference"]
if conference_res is not None:
result_set = result_set.filter(conference=conference_res)
affiliation_res = filter_form["affiliation"]
if len(affiliation_res) > 0:
result_set = result_set.filter(
authorships__affiliations__in=affiliation_res
).distinct()
institution_res = filter_form["institution"]
if len(institution_res) > 0:
result_set = result_set.filter(
authorships__affiliations__institution__in=institution_res
).distinct()
author_res = filter_form["author"]
if len(author_res) > 0:
result_set = result_set.filter(authorships__author__in=author_res)
keyword_res = filter_form["keywords"]
if len(keyword_res) > 0:
result_set = result_set.filter(keywords__in=keyword_res)
topic_res = filter_form["topics"]
if len(topic_res) > 0:
result_set = result_set.filter(topics__in=topic_res)
language_res = filter_form["languages"]
if len(language_res) > 0:
result_set = result_set.filter(languages__in=language_res)
if filter_form["full_text_available"]:
result_set = result_set.exclude(full_text="")
if filter_form["full_text_viewable"]:
result_set = result_set.exclude(full_text="").filter(
full_text_license__isnull=False
)
text_res = filter_form["text"]
if text_res != "":
text_query = SearchQuery(text_res, search_type="websearch")
result_set = (
result_set.filter(search_text=text_query)
.annotate(
rank=SearchRank(
F("search_text"),
text_query,
),
# Does the search text show up only in the full text?
search_in_ft_only=ExpressionWrapper(
~Q(title__icontains=text_res), output_field=BooleanField()
),
)
.filter(rank__gt=0.1)
.order_by("-rank")
)
order_res = "rank"
            # To sort by the first author's last name, build a subquery that pulls the first authorship (lowest authorship_order) for each work; its appellation__last_name can then be referenced in order_by.
first_author_subquery = Authorship.objects.filter(
work=OuterRef("pk")
).order_by("authorship_order")
order_res = filter_form["ordering"]
if order_res is None or order_res == "":
order_res = "year"
if order_res == "year":
result_set = result_set.order_by("conference__year", "title")
elif order_res == "-year":
result_set = result_set.order_by("-conference__year", "title")
elif order_res == "title":
result_set = result_set.order_by("title")
elif order_res == "-title":
result_set = result_set.order_by("-title")
elif order_res == "last_name":
result_set = result_set.annotate(
first_author_last_name=Subquery(
first_author_subquery.values("appellation__last_name")[:1]
)
).order_by("first_author_last_name", "title")
elif order_res == "-last_name":
result_set = result_set.annotate(
first_author_last_name=Subquery(
first_author_subquery.values("appellation__last_name")[:1]
)
).order_by("-first_author_last_name", "title")
return (
result_set.select_related(
"conference", "work_type", "parent_session", "full_text_license"
)
.annotate(
main_series=StringAgg(
"conference__series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
),
main_institution=StringAgg(
"conference__hosting_institutions__name",
delimiter=" / ",
distinct=True,
),
)
.prefetch_related(
Prefetch(
"conference",
queryset=Conference.objects.prefetch_related(
Prefetch(
"series_memberships",
queryset=SeriesMembership.objects.select_related(
"series"
),
),
"organizers",
),
),
"session_papers",
Prefetch(
"authorships",
queryset=Authorship.objects.select_related(
"appellation", "author"
),
),
"keywords",
"topics",
"languages",
)
)
else:
for error in raw_filter_form.errors:
messages.warning(self.request, error)
return base_result_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
raw_filter_form = WorkFilter(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
conference_res = filter_form["conference"]
if conference_res is not None:
conferences_data = (
Conference.objects.filter(id=conference_res.id)
.annotate(
n_works=Count("works", distinct=True),
n_authors=Count("works__authors", distinct=True),
main_series=StringAgg(
"series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
),
)
.select_related("country")
.prefetch_related(
"organizers", "series_memberships", "series_memberships__series"
)
.all()
)
context["selected_conferences"] = conferences_data
context["work_filter_form"] = WorkFilter(data=self.request.GET)
context["available_works_count"] = Work.objects.count()
context["filtered_works_count"] = self.get_queryset().count()
context["redirect_url"] = reverse("work_list")
return context
class FullInstitutionList(LoginRequiredMixin, ListView):
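    """Login-only institution list annotated with work counts and filterable by department, affiliation, country, and conference."""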
context_object_name = "institution_list"
template_name = "full_institution_list.html"
paginate_by = 10
def get_queryset(self):
annotated_affiliations = Affiliation.objects.annotate(
n_works=Count("asserted_by__work", distinct=True)
)
result_set = (
Institution.objects.annotate(
n_works=Count("affiliations__asserted_by__work", distinct=True)
)
.prefetch_related(
Prefetch("affiliations", annotated_affiliations), "country"
)
.order_by("-n_works")
)
if self.request.GET:
raw_filter_form = FullInstitutionForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
result_set = result_set.annotate(
n_conferences=Count(
"affiliations__asserted_by__work__conference", distinct=True
)
)
department_res = filter_form["department"]
if department_res != "":
result_set = result_set.filter(
affiliations__department__icontains=department_res
)
affiliation_res = filter_form["affiliation"]
if affiliation_res is not None:
result_set = result_set.filter(affiliations=affiliation_res)
institution_res = filter_form["institution"]
if institution_res is not None:
result_set = result_set.filter(pk=institution_res.pk)
country_res = filter_form["country"]
if country_res is not None:
result_set = result_set.filter(country=country_res)
if filter_form["no_department"]:
result_set = result_set.filter(affiliations__department="")
conference_res = filter_form["conference"]
if conference_res is not None:
result_set = result_set.filter(
affiliations__asserted_by__work__conference=conference_res
).distinct()
if filter_form["singleton"]:
result_set = result_set.filter(n_conferences=1)
if filter_form["ordering"] == "n_dsc":
result_set = result_set.order_by(
"-n_works", "affiliations__institution__name"
)
elif filter_form["ordering"] == "n_asc":
result_set = result_set.order_by(
"n_works", "affiliations__institution__name"
)
elif filter_form["ordering"] == "a":
result_set = result_set.order_by("affiliations__institution__name")
else:
for f, e in raw_filter_form.errors.items():
messages.error(self.request, f"{f}: {e}")
return result_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["institution_filter_form"] = FullInstitutionForm(
initial=self.request.GET
)
context["available_institutions_count"] = Institution.objects.count()
context["filtered_institutions_count"] = self.get_queryset().count()
context["redirect_url"] = reverse("full_institution_list")
return context
class AuthorInstitutionList(FullInstitutionList):
template_name = "author_institution_list.html"
def get_queryset(self):
base_result_set = Institution.objects.annotate(
n_authors=Count("affiliations__asserted_by__author", distinct=True),
n_conferences=Count(
"affiliations__asserted_by__work__conference", distinct=True
),
).distinct()
result_set = base_result_set
if self.request.GET:
raw_filter_form = FullInstitutionForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
department_res = filter_form["department"]
if department_res != "":
result_set = result_set.filter(
affiliations__department__icontains=department_res
)
affiliation_res = filter_form["affiliation"]
if affiliation_res is not None:
result_set = result_set.filter(affiliations=affiliation_res)
institution_res = filter_form["institution"]
if institution_res is not None:
result_set = result_set.filter(pk=institution_res.pk)
conference_res = filter_form["conference"]
if conference_res is not None:
result_set = result_set.filter(works__conference=conference_res)
country_res = filter_form["country"]
if country_res is not None:
result_set = result_set.filter(country=country_res)
if filter_form["singleton"]:
result_set = result_set.filter(n_conferences=1)
if filter_form["no_department"]:
result_set = result_set.filter(affiliations__department="")
if filter_form["ordering"] == "n_dsc":
result_set = result_set.order_by("-n_authors")
elif filter_form["ordering"] == "n_asc":
result_set = result_set.order_by("n_authors")
elif filter_form["ordering"] == "a":
result_set = result_set.order_by("affiliations__institution__name")
else:
for f, e in raw_filter_form.errors.items():
messages.error(self.request, f"{f}: {e}")
result_set = base_result_set
else:
# Otherwise default to sorting by n_dsc
result_set = result_set.order_by("-n_authors")
return result_set.distinct()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["redirect_url"] = reverse("author_institution_list")
return context
class InstitutionEdit(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Institution
template_name = "generic_form.html"
fields = ["name", "city", "state_province_region", "country"]
extra_context = {
"form_title": "Edit institution",
"cancel_view": "full_institution_list",
"merge_view": "institution_merge",
}
success_message = "%(name)s updated"
success_url = reverse_lazy("full_institution_list")
def form_valid(self, form):
response = super(InstitutionEdit, self).form_valid(form)
self.object.user_last_updated = self.request.user
self.object.save()
return response
class InstitutionCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Institution
template_name = "generic_form.html"
fields = ["name", "city", "state_province_region", "country"]
extra_context = {
"form_title": "Create institution",
"cancel_view": "full_institution_list",
}
success_message = "%(name)s created"
success_url = reverse_lazy("full_institution_list")
def form_valid(self, form):
response = super(InstitutionCreate, self).form_valid(form)
self.object.user_last_updated = self.request.user
self.object.save()
return response
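# The merge views below all follow the same shape: GET renders a confirmation page,
# POST validates a merge form whose cleaned "into" field names the surviving record,
# calls .merge(target) on the old record, and reports how many related rows were
# updated. Each is wrapped in @transaction.atomic so a failed merge rolls back cleanly.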
@user_is_staff
@transaction.atomic
def institution_merge(request, institution_id):
institution = get_object_or_404(Institution, pk=institution_id)
context = {"merging": institution, "institution_merge_form": InstitutionMergeForm}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this institution.
"""
return render(request, "institution_merge.html", context)
elif request.method == "POST":
"""
        Posting the id of the target institution reassigns all of the old institution's affiliations to it before the old institution is deleted.
"""
raw_form = InstitutionMergeForm(request.POST)
if raw_form.is_valid():
target_institution = raw_form.cleaned_data["into"]
if institution == target_institution:
"""
If the user chooses the existing institution, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge an institution into itself. Please select a different institution.",
)
return redirect("institution_merge", institution_id=institution_id)
else:
old_institution_id = str(institution)
merge_results = institution.merge(target_institution)
target_institution.user_last_updated = request.user
target_institution.save()
messages.success(
request,
f"Author {old_institution_id} has been merged into {target_institution}, and the old institution entry has been deleted.",
)
messages.success(
request, f"{merge_results["update_results"]} affiliations updated"
)
return redirect("institution_edit", pk=target_institution.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "institution_merge.html", context)
@user_is_staff
@transaction.atomic
def institution_multi_merge(request):
context = {"form": InstitutionMultiMergeForm}
if request.method == "POST":
raw_form = InstitutionMultiMergeForm(request.POST)
if raw_form.is_valid():
target_institution = raw_form.cleaned_data["into"]
source_institutions = raw_form.cleaned_data["sources"].exclude(
pk=target_institution.pk
)
for institution in source_institutions:
old_institution_id = str(institution)
merge_results = institution.merge(target_institution)
target_institution.user_last_updated = request.user
target_institution.save()
messages.success(
request,
f"Institution {old_institution_id} has been merged into {target_institution}, and the old institution entry has been deleted.",
)
messages.success(
request, f"{merge_results["update_results"]} institutions updated"
)
return redirect("institution_edit", pk=target_institution.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "institution_multi_merge.html", context)
class AffiliationEdit(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Affiliation
template_name = "generic_form.html"
form_class = AffiliationEditForm
extra_context = {
"form_title": "Edit affiliation",
"cancel_view": "full_institution_list",
"merge_view": "affiliation_merge",
}
success_message = "%(department)s updated"
success_url = reverse_lazy("full_institution_list")
class AffiliationCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Affiliation
template_name = "generic_form.html"
form_class = AffiliationEditForm
extra_context = {
"form_title": "Create affiliation",
"cancel_view": "full_institution_list",
}
success_message = "%(department)s created"
success_url = reverse_lazy("full_institution_list")
def get_initial(self, **kwargs):
super().get_initial(**kwargs)
if "institution" in self.request.GET:
self.initial = {"institution": int(self.request.GET["institution"])}
return self.initial
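# ajax_affiliation_create backs the inline "add affiliation" widget: the client is
# assumed to POST "department" and "institution" (an Institution pk) and receives the
# get-or-created affiliation as JSON, e.g. {"name": "History (Example University)", "id": 42}
# (illustrative values only -- the actual label comes from Affiliation.__str__).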
@login_required
def ajax_affiliation_create(request):
newaff = Affiliation.objects.get_or_create(
department=request.POST["department"],
institution=Institution.objects.get(pk=int(request.POST["institution"])),
)[0]
return JsonResponse({"name": str(newaff), "id": newaff.pk})
@user_is_staff
@transaction.atomic
def affiliation_merge(request, affiliation_id):
affiliation = get_object_or_404(Affiliation, pk=affiliation_id)
context = {"merging": affiliation, "affiliation_merge_form": AffiliationMergeForm}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this affiliation.
"""
return render(request, "affiliation_merge.html", context)
elif request.method == "POST":
"""
        Posting the id of the target affiliation reassigns everything asserted under the old affiliation before the old affiliation is deleted.
"""
raw_form = AffiliationMergeForm(request.POST)
if raw_form.is_valid():
target_affiliation = raw_form.cleaned_data["into"]
if affiliation == target_affiliation:
"""
If the user chooses the existing affiliation, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge an affiliation into itself. Please select a different affiliation.",
)
return redirect("affiliation_merge", affiliation_id=affiliation_id)
else:
old_affiliation_id = str(affiliation)
merge_results = affiliation.merge(target_affiliation)
messages.success(
request,
f"Affiliation {old_affiliation_id} has been merged into {target_affiliation}, and the old affiliation entry has been deleted.",
)
messages.success(
request, f"{merge_results["update_results"]} affiliations updated"
)
return redirect("affiliation_edit", pk=target_affiliation.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "affiliation_merge.html", context)
@user_is_staff
@transaction.atomic
def affiliation_multi_merge(request):
context = {"form": AffiliationMultiMergeForm}
if request.method == "POST":
raw_form = AffiliationMultiMergeForm(request.POST)
if raw_form.is_valid():
target_affiliation = raw_form.cleaned_data["into"]
source_affiliations = raw_form.cleaned_data["sources"].exclude(
pk=target_affiliation.pk
)
for affiliation in source_affiliations:
old_affiliation_id = str(affiliation)
merge_results = affiliation.merge(target_affiliation)
messages.success(
request,
f"Affiliation {old_affiliation_id} has been merged into {target_affiliation}, and the old affiliation entry has been deleted.",
)
messages.success(
request, f"{merge_results["update_results"]} affiliations updated"
)
return redirect("affiliation_edit", pk=target_affiliation.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "affiliation_multi_merge.html", context)
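# wipe_unused collects authors, affiliations, institutions, keywords, and appellations
# that nothing references any more; GET only previews those querysets, while POST
# deletes them and reports a per-model count. The page is re-rendered from the same
# deletion_dict, so "hanging_items" reflects whether anything still remains afterwards.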
@user_is_staff
@transaction.atomic
def wipe_unused(request):
deletion_dict = {
"Author": Author.objects.exclude(authorships__isnull=False).distinct(),
"Affiliation": Affiliation.objects.exclude(
asserted_by__isnull=False
).distinct(),
"Institution": Institution.objects.exclude(
Q(affiliations__asserted_by__isnull=False) | Q(conferences__isnull=False)
).distinct(),
"Keyword": Keyword.objects.exclude(works__isnull=False).distinct(),
"Appellation": Appellation.objects.exclude(
asserted_by__isnull=False
).distinct(),
}
if request.method == "POST":
for k, v in deletion_dict.items():
res = v.delete()
if res[0] > 0:
messages.success(request, f"{k}: {res[0]} objects deleted")
any_hanging_items = any([v.exists() for k, v in deletion_dict.items()])
context = {"deletions": deletion_dict, "hanging_items": any_hanging_items}
return render(request, "wipe_unused.html", context)
class ConferenceCreate(StaffRequiredMixin, SuccessMessageMixin, CreateView):
model = Conference
template_name = "conference_create.html"
form_class = ConferenceForm
extra_context = {
"form_title": "Create conference",
"cancel_view": "conference_list",
}
success_message = "Conference '%(year)s - %(short_title)s' created"
@transaction.atomic
def post(self, request, *args, **kwargs):
response = super().post(request, *args, **kwargs)
form_instance = self.get_form()
if form_instance.is_valid():
for organizer in form_instance.cleaned_data["organizers"]:
self.object.organizers.add(organizer)
self.object.save()
            if "goto_abstracts" in request.POST:
                return redirect(reverse("work_list") + f"?conference={self.object.id}")
            return response
else:
for err in form_instance.errors:
messages.error(request, err)
return response
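# ConferenceEdit stays a function-based view because it juggles several things at once:
# the main ConferenceForm, a formset of series memberships (update, renumber, delete),
# a bulk license action applied to every work in the conference, and three post-save
# destinations ("goto_abstracts", "goto_series", or back to the edit form).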
@user_is_staff
@transaction.atomic
def ConferenceEdit(request, pk):
conference = get_object_or_404(Conference, pk=pk)
# populate the conference form, including pulling in the related organizers
conference_dict = model_to_dict(conference)
conference_dict["organizers"] = conference.organizers.all()
form = ConferenceForm(initial=conference_dict)
ConferenceSeriesFormSet = formset_factory(
ConferenceSeriesInline, can_delete=True, extra=0
)
initial_series = [
{"series": memb.series, "number": memb.number}
for memb in SeriesMembership.objects.filter(conference=conference).all()
]
context = {
"conference": conference,
"form": form,
# "licenses": License.objects.all(),
"series_membership_form": ConferenceSeriesFormSet(initial=initial_series),
"form_title": "Edit conference",
"cancel_view": "conference_list",
}
if request.method == "POST":
form = ConferenceForm(data=request.POST, instance=conference)
if form.is_valid():
clean_form = form.cleaned_data
conference.year = clean_form["year"]
conference.short_title = clean_form["short_title"]
conference.notes = clean_form["notes"]
conference.url = clean_form["url"]
# Clear existing relations and update according to the form
conference.organizers.clear()
for organizer in clean_form["organizers"]:
conference.organizers.add(organizer)
conference.hosting_institutions.clear()
for hosting_institution in clean_form["hosting_institutions"]:
conference.hosting_institutions.add(hosting_institution)
conference.save()
# License action
license_action = clean_form["license_action"]
if license_action == "":
pass
elif license_action == "clear":
conference.works.all().update(full_text_license=None)
else:
license_object = License.objects.get(id=int(license_action))
conference.works.all().update(full_text_license=license_object)
series_forms = ConferenceSeriesFormSet(data=request.POST)
if series_forms.is_valid():
# Delete memberships first
for d_form in series_forms.deleted_forms:
d_form_data = d_form.cleaned_data
SeriesMembership.objects.filter(
conference=conference,
series=d_form_data["series"],
number=d_form_data["number"],
).delete()
# Then update new ones
for s_form in series_forms.forms:
if s_form not in series_forms.deleted_forms:
s_form_data = s_form.cleaned_data
SeriesMembership.objects.update_or_create(
conference=conference,
series=s_form_data["series"],
defaults={"number": s_form_data["number"]},
)
messages.success(request, f"Conference {conference} updated.")
if "goto_abstracts" in request.POST:
return redirect(
reverse("work_list") + f"?conference={conference.id}"
)
if "goto_series" in request.POST:
first_series = conference.series.first()
if first_series is None:
return redirect("standalone_conferences")
else:
return redirect("conference_series_detail", pk=first_series.id)
return redirect("conference_edit", pk=conference.pk)
else:
for f, e in series_forms.errors.items():
messages.error(request, f"{f}: {e}")
else:
for f, e in form.errors.items():
messages.error(request, f"{f}: {e}")
return render(request, "conference_edit.html", context)
class ConferenceDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Conference
template_name = "conference_delete.html"
extra_context = {
"form_title": "Delete conference",
"cancel_view": "conference_list",
}
success_message = "Conference deleted"
success_url = reverse_lazy("conference_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(ConferenceDelete, self).delete(request, *args, **kwargs)
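# ConferenceXMLLoad accepts a zip of TEI XML files for one conference, unpacks it into a
# temporary directory, and hands that directory to Conference.import_xml_directory().
# Per-file failures are reported back as messages, and the user is told that every TEI
# document must be valid before the import is considered complete.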
class ConferenceXMLLoad(StaffRequiredMixin, DetailView):
model = Conference
template_name = "conference_xml_load.html"
extra_context = {"form": ConferenceXMLUploadForm()}
@transaction.atomic
def post(self, request, *args, **kwargs):
raw_form = ConferenceXMLUploadForm(request.POST, request.FILES)
conference = self.get_object()
if raw_form.is_valid():
with TemporaryDirectory() as upload_dir:
# Write uploaded zip to tempdir
with NamedTemporaryFile(dir=upload_dir, suffix=".zip") as tei_zip:
with open(tei_zip.name, "wb") as upload_zip:
for chunk in request.FILES["file"]:
upload_zip.write(chunk)
if not zipfile.is_zipfile(tei_zip.name):
messages.error(request, "That is not a valid zipfile.")
return render(
request,
"conference_xml_load.html",
{
"object": self.get_object(),
"form": ConferenceXMLUploadForm(),
},
)
# Extract all the files within
with zipfile.ZipFile(tei_zip.name) as zip_ref:
zip_ref.extractall(upload_dir)
# Import all XML
import_results = conference.import_xml_directory(upload_dir)
n_success = len(import_results["successful_files"])
n_failed = len(import_results["failed_files"])
messages.info(
request,
f"{n_success} of {n_success + n_failed} files valid.",
)
for err in import_results["failed_files"]:
messages.error(
request, f"{basename(err["filepath"])}: {err["error"]}"
)
if n_failed == 0:
messages.success(request, f"All files imported successfully.")
else:
messages.info(
request,
"Please fix errors or remove malformed files, and re-upload zip. All TEI documents must be valid in order to complete the import.",
)
return render(
request,
"conference_xml_load.html",
{"object": self.get_object(), "form": ConferenceXMLUploadForm()},
)
else:
for f, e in raw_form.errors.items():
messages.error(request, f"{f}: {e}")
return render(
request,
"conference_xml_load.html",
{"object": self.get_object(), "form": ConferenceXMLUploadForm()},
)
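# conference_checkout lets an editor claim or release a conference for data entry:
# assign_user == "self" attaches the current user, "clear" detaches any editor, and only
# staff may set the entry status to "c" (complete). Either way the editor is sent back
# to the work list filtered to that conference.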
@login_required
@transaction.atomic
def conference_checkout(request, conference_id):
conference = get_object_or_404(Conference, pk=conference_id)
if request.method == "GET":
"""
Load the current form and display current attached user
"""
context = {
"conference": conference,
"form": ConferenceCheckoutForm(
{"entry_status": conference.entry_status, "editing_user": "self"}
),
}
return render(request, "conference_checkout.html", context)
elif request.method == "POST":
"""
Get the form and update the status if the user has the authority to do so
"""
raw_form = ConferenceCheckoutForm(request.POST)
if raw_form.is_valid():
clean_form = raw_form.cleaned_data
if clean_form["entry_status"] == "c" and not request.user.is_staff:
messages.error(
request,
"Only an administrator can mark this conference as completed.",
)
return redirect("conference_checkout", conference_id=conference.id)
else:
if clean_form["assign_user"] == "self":
conference.entry_status = clean_form["entry_status"]
conference.editing_user = request.user
conference.save()
messages.success(request, "Conference checked out")
elif clean_form["assign_user"] == "clear":
conference.entry_status = clean_form["entry_status"]
conference.editing_user = None
conference.save()
messages.success(request, "Conference cleared")
return redirect(reverse("work_list") + f"?conference={conference.id}")
class SeriesCreate(StaffRequiredMixin, SuccessMessageMixin, CreateView):
model = ConferenceSeries
template_name = "generic_form.html"
extra_context = {
"form_title": "Create conference series",
"cancel_view": "conference_list",
}
fields = ["title", "abbreviation", "notes"]
success_message = "Series '%(title)s' created"
success_url = reverse_lazy("conference_list")
class SeriesEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = ConferenceSeries
template_name = "generic_form.html"
extra_context = {
"form_title": "Update conference series",
"cancel_view": "conference_list",
}
fields = ["title", "abbreviation", "notes"]
success_message = "Series '%(title)s' updated"
success_url = reverse_lazy("conference_list")
class SeriesDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = ConferenceSeries
template_name = "generic_form.html"
extra_context = {
"form_title": "Delete conference series",
"cancel_view": "conference_list",
}
success_message = "Series '%(title)s' deleted"
success_url = reverse_lazy("conference_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(SeriesDelete, self).delete(request, *args, **kwargs)
class OrganizerCreate(StaffRequiredMixin, SuccessMessageMixin, CreateView):
model = Organizer
template_name = "generic_form.html"
extra_context = {
"form_title": "Create conference organizer",
"cancel_view": "full_organizer_list",
}
fields = ["name", "abbreviation", "conferences_organized", "notes", "url"]
success_message = "Organizer '%(name)s' created"
success_url = reverse_lazy("full_organizer_list")
def form_valid(self, form):
response = super(OrganizerCreate, self).form_valid(form)
self.object.user_last_updated = self.request.user
self.object.save()
return response
class OrganizerEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = Organizer
template_name = "generic_form.html"
extra_context = {
"form_title": "Update conference organizer",
"cancel_view": "full_organizer_list",
}
fields = ["name", "abbreviation", "conferences_organized", "notes", "url"]
success_message = "Organizer '%(name)s' updated"
success_url = reverse_lazy("full_organizer_list")
def form_valid(self, form):
response = super(OrganizerEdit, self).form_valid(form)
self.object.user_last_updated = self.request.user
self.object.save()
return response
class OrganizerDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Organizer
template_name = "generic_form.html"
extra_context = {
"form_title": "Delete organizer",
"cancel_view": "full_organizer_list",
}
success_message = "Organizer %(name)s deleted."
success_url = reverse_lazy("full_organizer_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(OrganizerDelete, self).delete(request, *args, **kwargs)
class OrganizerList(LoginRequiredMixin, ListView):
model = Organizer
template_name = "full_organizer_list.html"
context_object_name = "organizer_list"
class KeywordCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Keyword
template_name = "generic_form.html"
extra_context = {"form_title": "Create keyword", "cancel_view": "full_keyword_list"}
fields = ["title"]
success_message = "Keyword '%(title)s' created"
success_url = reverse_lazy("full_keyword_list")
class KeywordDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Keyword
template_name = "generic_form.html"
extra_context = {"form_title": "Delete keyword", "cancel_view": "full_keyword_list"}
success_message = "Keyword '%(title)s' deleted"
success_url = reverse_lazy("full_keyword_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(KeywordDelete, self).delete(request, *args, **kwargs)
class KeywordEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = Keyword
template_name = "generic_form.html"
extra_context = {
"form_title": "Update keyword",
"cancel_view": "full_keyword_list",
"merge_view": "keyword_merge",
"delete_view": "keyword_delete",
}
fields = ["title"]
success_message = "Keyword '%(title)s' updated"
success_url = reverse_lazy("full_keyword_list")
class KeywordList(LoginRequiredMixin, ListView):
model = Keyword
template_name = "tag_list.html"
context_object_name = "tag_list"
extra_context = {
"tag_category": "Keywords",
"tag_edit_view": "keyword_edit",
"tag_create_view": "keyword_create",
"tag_list_view": "full_keyword_list",
"multi_merge": "keyword_multi_merge",
"filter_param_name": "keywords",
}
def get_queryset(self):
base_results_set = Keyword.objects.order_by("title")
results_set = base_results_set.annotate(n_works=Count("works"))
if self.request.GET:
raw_filter_form = TagForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
if filter_form["name"] != "":
results_set = results_set.filter(
title__icontains=filter_form["name"]
)
if filter_form["ordering"] == "a":
results_set = results_set.order_by("title")
elif filter_form["ordering"] == "n_asc":
results_set = results_set.order_by("n_works")
elif filter_form["ordering"] == "n_dsc":
results_set = results_set.order_by("-n_works")
else:
for f, e in raw_filter_form.errors.items():
messages.error(self.request, f"{f}: {e}")
else:
results_set = results_set.order_by("title")
return results_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["tag_filter_form"] = TagForm(initial=self.request.GET)
context["filtered_tags_count"] = self.get_queryset().count()
context["available_tags_count"] = Keyword.objects.count()
return context
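# keyword_merge previews up to 15 affected works (plus a count of the remainder) before
# letting staff fold one keyword into another; the same tag_merge.html template serves
# the topic, language, and work type merge views further down.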
@user_is_staff
@transaction.atomic
def keyword_merge(request, keyword_id):
keyword = get_object_or_404(Keyword, pk=keyword_id)
affected_works = Work.objects.filter(keywords=keyword).all()
sample_works = affected_works[:15]
count_elements = affected_works.count() - 15
context = {
"merging": keyword,
"tag_merge_form": KeywordMergeForm,
"sample_elements": sample_works,
"tag_category": "Keyword",
"merge_view": "keyword_merge",
}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this keyword.
"""
return render(request, "tag_merge.html", context)
elif request.method == "POST":
"""
        Posting the id of the target keyword retags all works carrying the old keyword before the old keyword is deleted.
"""
raw_form = KeywordMergeForm(request.POST)
if raw_form.is_valid():
target_keyword = raw_form.cleaned_data["into"]
if keyword == target_keyword:
"""
If the user chooses the existing keyword, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge a keyword into itself. Please select a different keyword.",
)
return redirect("keyword_merge", keyword_id=keyword_id)
else:
old_keyword_id = str(keyword)
merge_results = keyword.merge(target_keyword)
messages.success(
request,
f"Keyword {old_keyword_id} has been merged into {target_keyword}, and the old keyword entry has been deleted.",
)
messages.success(
request, f"{merge_results["update_results"]} keywords updated"
)
return redirect("keyword_edit", pk=target_keyword.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_merge.html", context)
@user_is_staff
@transaction.atomic
def keyword_multi_merge(request):
context = {
"tag_merge_form": KeywordMultiMergeForm,
"tag_category": "Keyword",
"multi_merge_view": "keyword_multi_merge",
}
if request.method == "POST":
"""
        Posting the id of the target keyword retags all works carrying any of the source keywords before those keywords are deleted.
"""
raw_form = KeywordMultiMergeForm(request.POST)
if raw_form.is_valid():
target_keyword = raw_form.cleaned_data["into"]
source_keywords = raw_form.cleaned_data["sources"].exclude(
pk=target_keyword.pk
)
for keyword in source_keywords:
old_keyword_id = keyword.title
merge_results = keyword.merge(target_keyword)
messages.success(
request,
f"Keyword {old_keyword_id} has been merged into {target_keyword}, and the old keyword entry has been deleted.",
)
messages.success(
request, f"{merge_results["update_results"]} keywords updated"
)
return redirect("keyword_edit", pk=target_keyword.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_multi_merge.html", context)
class TopicCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Topic
template_name = "generic_form.html"
extra_context = {"form_title": "Create topic", "cancel_view": "full_topic_list"}
fields = ["title"]
success_message = "Topic '%(title)s' created"
success_url = reverse_lazy("full_topic_list")
class TopicDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Topic
template_name = "generic_form.html"
extra_context = {"form_title": "Delete topic", "cancel_view": "full_topic_list"}
success_message = "Topic '%(title)s' deleted"
success_url = reverse_lazy("full_topic_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(TopicDelete, self).delete(request, *args, **kwargs)
class TopicEdit(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Topic
template_name = "generic_form.html"
extra_context = {
"form_title": "Update topic",
"cancel_view": "full_topic_list",
"merge_view": "topic_merge",
"delete_view": "topic_delete",
}
fields = ["title"]
success_message = "Topic '%(title)s' updated"
success_url = reverse_lazy("full_topic_list")
class TopicList(LoginRequiredMixin, ListView):
model = Topic
template_name = "tag_list.html"
context_object_name = "tag_list"
extra_context = {
"tag_category": "Topics",
"tag_edit_view": "topic_edit",
"tag_create_view": "topic_create",
"tag_filter_form": TagForm,
"tag_list_view": "full_topic_list",
"multi_merge": "topic_multi_merge",
"filter_param_name": "topics",
}
def get_queryset(self):
base_results_set = Topic.objects.order_by("title")
results_set = base_results_set.annotate(n_works=Count("works"))
raw_filter_form = TagForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
if filter_form["name"] != "":
results_set = results_set.filter(title__icontains=filter_form["name"])
if filter_form["ordering"] == "a":
results_set = results_set.order_by("title")
elif filter_form["ordering"] == "n_asc":
results_set = results_set.order_by("n_works")
elif filter_form["ordering"] == "n_dsc":
results_set = results_set.order_by("-n_works")
return results_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["filtered_tags_count"] = self.get_queryset().count()
context["available_tags_count"] = Topic.objects.count()
return context
@user_is_staff
@transaction.atomic
def topic_merge(request, topic_id):
topic = get_object_or_404(Topic, pk=topic_id)
affected_elements = topic.works.all()
count_elements = affected_elements.count() - 10
sample_elements = affected_elements[:10]
context = {
"merging": topic,
"tag_merge_form": TopicMergeForm,
"tag_category": "Topic",
"merge_view": "topic_merge",
"sample_elements": sample_elements,
"count_elements": count_elements,
}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this topic.
"""
return render(request, "tag_merge.html", context)
elif request.method == "POST":
"""
        Posting the id of the target topic retags all works carrying the old topic before the old topic is deleted.
"""
raw_form = TopicMergeForm(request.POST)
if raw_form.is_valid():
target_topic = raw_form.cleaned_data["into"]
if topic == target_topic:
"""
If the user chooses the existing topic, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge a topic into itself. Please select a different topic.",
)
return redirect("topic_merge", topic_id=topic_id)
else:
old_topic_id = str(topic)
merge_results = topic.merge(target_topic)
messages.success(
request,
f"Topic {old_topic_id} has been merged into {target_topic}, and the old topic entry has been deleted.",
)
messages.success(
request, f"{merge_results["update_results"]} topics updated"
)
return redirect("topic_edit", pk=target_topic.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_merge.html", context)
@user_is_staff
@transaction.atomic
def topic_multi_merge(request):
context = {
"tag_merge_form": TopicMultiMergeForm,
"tag_category": "Topic",
"multi_merge_view": "topic_multi_merge",
}
if request.method == "POST":
"""
        Posting the id of the target topic retags all works carrying any of the source topics before those topics are deleted.
"""
raw_form = TopicMultiMergeForm(request.POST)
if raw_form.is_valid():
target_topic = raw_form.cleaned_data["into"]
source_topics = raw_form.cleaned_data["sources"].exclude(pk=target_topic.pk)
for topic in source_topics:
old_topic_id = topic.title
merge_results = topic.merge(target_topic)
messages.success(
request,
f"Topic {old_topic_id} has been merged into {target_topic}, and the old topic entry has been deleted.",
)
messages.success(
request, f"{merge_results["update_results"]} topics updated"
)
return redirect("topic_edit", pk=target_topic.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_multi_merge.html", context)
class LanguageCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Language
template_name = "generic_form.html"
extra_context = {
"form_title": "Create language",
"cancel_view": "full_language_list",
}
fields = ["title", "code"]
success_message = "Language '%(title)s' created"
success_url = reverse_lazy("full_language_list")
class LanguageDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Language
template_name = "generic_form.html"
extra_context = {
"form_title": "Delete language",
"cancel_view": "full_language_list",
}
success_message = "Language '%(title)s' deleted"
success_url = reverse_lazy("full_language_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(LanguageDelete, self).delete(request, *args, **kwargs)
class LanguageEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = Language
template_name = "generic_form.html"
extra_context = {
"form_title": "Update language",
"cancel_view": "full_language_list",
"merge_view": "language_merge",
"delete_view": "language_delete",
}
fields = ["title", "code"]
success_message = "Language '%(title)s' updated"
success_url = reverse_lazy("full_language_list")
class LanguageList(LoginRequiredMixin, ListView):
model = Language
template_name = "tag_list.html"
context_object_name = "tag_list"
extra_context = {
"tag_category": "Languages",
"tag_edit_view": "language_edit",
"tag_create_view": "language_create",
"tag_filter_form": TagForm,
"tag_list_view": "full_language_list",
"filter_param_name": "languages",
}
def get_queryset(self):
base_results_set = Language.objects.order_by("title")
results_set = base_results_set.annotate(n_works=Count("works"))
raw_filter_form = TagForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
if filter_form["name"] != "":
results_set = results_set.filter(title__icontains=filter_form["name"])
if filter_form["ordering"] == "a":
results_set = results_set.order_by("title")
elif filter_form["ordering"] == "n_asc":
results_set = results_set.order_by("n_works")
elif filter_form["ordering"] == "n_dsc":
results_set = results_set.order_by("-n_works")
return results_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["filtered_tags_count"] = self.get_queryset().count()
context["available_tags_count"] = Language.objects.count()
return context
@user_is_staff
@transaction.atomic
def language_merge(request, language_id):
language = get_object_or_404(Language, pk=language_id)
affected_elements = language.works.all()
count_elements = affected_elements.count() - 10
sample_elements = affected_elements[:10]
context = {
"merging": language,
"tag_merge_form": LanguageMergeForm,
"tag_category": "Language",
"merge_view": "language_merge",
"sample_elements": sample_elements,
"count_elements": count_elements,
}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this language.
"""
return render(request, "tag_merge.html", context)
elif request.method == "POST":
"""
        Posting the id of the target language reassigns all works recorded in the old language before the old language is deleted.
"""
raw_form = LanguageMergeForm(request.POST)
if raw_form.is_valid():
target_language = raw_form.cleaned_data["into"]
if language == target_language:
"""
If the user chooses the existing language, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge a language into itself. Please select a different language.",
)
return redirect("language_merge", language_id=language_id)
else:
old_language_id = str(language)
merge_results = language.merge(target_language)
messages.success(
request,
f"Language {old_language_id} has been merged into {target_language}, and the old language entry has been deleted.",
)
messages.success(
request, f"{merge_results["update_results"]} languages updated"
)
return redirect("language_edit", pk=target_language.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_merge.html", context)
class WorkTypeCreate(StaffRequiredMixin, SuccessMessageMixin, CreateView):
model = WorkType
template_name = "generic_form.html"
extra_context = {
"form_title": "Create work_type",
"cancel_view": "full_work_type_list",
}
fields = ["title", "is_parent"]
success_message = "Abstract type '%(title)s' created"
success_url = reverse_lazy("full_work_type_list")
class WorkTypeDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = WorkType
template_name = "generic_form.html"
extra_context = {
"form_title": "Delete work_type",
"cancel_view": "full_work_type_list",
}
success_message = "Abstract type '%(title)s' deleted"
success_url = reverse_lazy("full_work_type_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(WorkTypeDelete, self).delete(request, *args, **kwargs)
class WorkTypeEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = WorkType
template_name = "generic_form.html"
extra_context = {
"form_title": "Update abstract type",
"cancel_view": "full_work_type_list",
"merge_view": "work_type_merge",
"delete_view": "work_type_delete",
}
fields = ["title", "is_parent"]
success_message = "Abstract '%(title)s' updated"
success_url = reverse_lazy("full_work_type_list")
class WorkTypeList(LoginRequiredMixin, ListView):
model = WorkType
template_name = "tag_list.html"
context_object_name = "tag_list"
extra_context = {
"tag_category": "Abstract Types",
"tag_edit_view": "work_type_edit",
"tag_create_view": "work_type_create",
"tag_filter_form": TagForm,
"tag_list_view": "full_work_type_list",
"filter_param_name": "work_type",
}
def get_queryset(self):
base_results_set = WorkType.objects.order_by("title")
results_set = base_results_set.annotate(n_works=Count("works"))
raw_filter_form = TagForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
if filter_form["name"] != "":
results_set = results_set.filter(title__icontains=filter_form["name"])
if filter_form["ordering"] == "a":
results_set = results_set.order_by("title")
elif filter_form["ordering"] == "n_asc":
results_set = results_set.order_by("n_works")
elif filter_form["ordering"] == "n_dsc":
results_set = results_set.order_by("-n_works")
return results_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["filtered_tags_count"] = self.get_queryset().count()
context["available_tags_count"] = WorkType.objects.count()
return context
@user_is_staff
@transaction.atomic
def work_type_merge(request, work_type_id):
work_type = get_object_or_404(WorkType, pk=work_type_id)
affected_elements = work_type.works.all()
count_elements = affected_elements.count() - 10
sample_elements = affected_elements[:10]
context = {
"merging": work_type,
"tag_merge_form": WorkTypeMergeForm,
"tag_category": "Abstract Type",
"merge_view": "work_type_merge",
"sample_elements": sample_elements,
"count_elements": count_elements,
}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this work_type.
"""
return render(request, "tag_merge.html", context)
elif request.method == "POST":
"""
        Posting the id of the target work_type reassigns all works of the old type before the old work_type is deleted.
"""
raw_form = WorkTypeMergeForm(request.POST)
if raw_form.is_valid():
target_work_type = raw_form.cleaned_data["into"]
if work_type == target_work_type:
"""
If the user chooses the existing work_type, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge a work_type into itself. Please select a different work_type.",
)
return redirect("work_type_merge", work_type_id=work_type_id)
else:
old_work_type_id = str(work_type)
merge_results = work_type.merge(target_work_type)
messages.success(
request,
f"WorkType {old_work_type_id} has been merged into {target_work_type}, and the old work_type entry has been deleted.",
)
messages.success(
request, f"{merge_results["update_results"]} work_types updated"
)
return redirect("work_type_edit", pk=target_work_type.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_merge.html", context)
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse, StreamingHttpResponse, FileResponse
from django.template import loader
from django.shortcuts import get_object_or_404, render, redirect
from django.views import View
from django.views.generic import DetailView, ListView
from django.db.models import (
Count,
Max,
Min,
Q,
F,
Prefetch,
Subquery,
OuterRef,
ExpressionWrapper,
FloatField,
BooleanField,
)
from django.db.models.functions import Concat, FirstValue, Cast
from django.core import management
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db.models.functions import Coalesce
from django.contrib.postgres.search import SearchRank, SearchQuery
from django.contrib.postgres.aggregates import StringAgg
from django.urls import reverse, reverse_lazy
from django.contrib import messages
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required, user_passes_test
from dal.autocomplete import Select2QuerySetView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.db import transaction, IntegrityError
from django.forms.models import model_to_dict
from django.forms import formset_factory, inlineformset_factory, modelformset_factory
from django.conf import settings
from django.utils.html import format_html
from django.views.decorators.cache import cache_page
import glob
from os.path import basename, getmtime
from datetime import datetime
import csv
import sys
from operator import attrgetter
from tempfile import NamedTemporaryFile, TemporaryDirectory
import zipfile
from . import models
from .models import (
Work,
WorkType,
Author,
Conference,
Institution,
Appellation,
Affiliation,
ConferenceSeries,
SeriesMembership,
Organizer,
Country,
Keyword,
Topic,
Language,
CountryLabel,
Authorship,
License,
)
from .forms import (
WorkFilter,
AuthorFilter,
AuthorMergeForm,
WorkForm,
WorkAuthorshipForm,
FullInstitutionForm,
InstitutionMergeForm,
AffiliationEditForm,
AffiliationMergeForm,
KeywordMergeForm,
TagForm,
TopicMergeForm,
AffiliationMultiMergeForm,
KeywordMultiMergeForm,
ConferenceForm,
ConferenceCheckoutForm,
ConferenceSeriesInline,
LanguageMergeForm,
WorkTypeMergeForm,
InstitutionMultiMergeForm,
TopicMultiMergeForm,
ConferenceXMLUploadForm,
)
PERMISSIONS_ERROR_TEXT = (
"Please contact the lead project editors to edit this part of the database."
)
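# Shared view plumbing: cache_for_anon serves cached responses to anonymous visitors but
# always runs the view fresh for authenticated users, while user_is_staff (for function
# views) and StaffRequiredMixin (for class-based views) redirect anonymous users to the
# login page and bounce authenticated non-staff users to home_view with a warning.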
def cache_for_anon(func):
"""
On these views, call the cache if the user is not authenticated
"""
def wrap(request, *args, **kwargs):
if request.user.is_authenticated:
return func(request, *args, **kwargs)
else:
return cache_page(settings.CACHES["default"]["TIMEOUT"])(func)(
request, *args, **kwargs
)
return wrap
def user_is_staff(func):
def wrap(request, *args, **kwargs):
if not request.user.is_authenticated:
return redirect(f"{reverse('login')}?next={request.path}")
if request.user.is_staff:
return func(request, *args, **kwargs)
else:
messages.warning(request, PERMISSIONS_ERROR_TEXT)
return redirect("home_view")
return wrap
class StaffRequiredMixin:
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated:
return redirect(f"{reverse('login')}?next={self.request.path}")
if self.request.user.is_staff:
return super().dispatch(*args, **kwargs)
else:
messages.warning(self.request, PERMISSIONS_ERROR_TEXT)
return redirect("home_view")
class ItemLabelAutocomplete(Select2QuerySetView):
def get_selected_result_label(self, item):
return self.get_result_label(item)
class WorkAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Work.objects.all()
parents_only = self.forwarded.get("parents_only", None)
if parents_only:
qs = qs.filter(work_type__is_parent=True)
conference = self.forwarded.get("conference", None)
if conference:
qs = qs.filter(conference=conference)
if self.q:
qs = qs.filter(title__icontains=self.q)
return qs.all()
class AppellationAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Appellation.objects.all()
if self.q:
qs = qs.filter(
Q(first_name__icontains=self.q) | Q(last_name__icontains=self.q)
).all()
return qs
class KeywordAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Keyword.objects.annotate(n_works=Count("works")).order_by("-n_works")
if self.q:
qs = qs.filter(title__icontains=self.q).all()
return qs
def get_result_label(self, item):
return f"{item} ({item.n_works} works)"
class LanguageAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Language.objects.annotate(n_works=Count("works")).order_by(
"-n_works", "title"
)
if self.q:
qs = qs.filter(title__icontains=self.q).all()
return qs
def get_result_label(self, item):
return f"{item} ({item.n_works} works)"
class TopicAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Topic.objects.annotate(n_works=Count("works")).order_by("-n_works")
if self.q:
qs = qs.filter(title__icontains=self.q).all()
return qs
def get_result_label(self, item):
return f"{item} ({item.n_works} works)"
class CountryAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Country.objects.annotate(
n_works=Count(
"institutions__affiliations__asserted_by__work", distinct=True
)
).order_by("-n_works")
if self.q:
qs = qs.filter(
Q(pref_name__icontains=self.q) | Q(names__name__icontains=self.q)
)
return qs.distinct()
def get_result_label(self, item):
return f"{item} ({item.n_works} works)"
class InstitutionAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = (
Institution.objects.annotate(
n_works=Count("affiliations__asserted_by__work", distinct=True)
)
.select_related("country")
.order_by("-n_works")
)
if self.q:
qs = qs.filter(name__icontains=self.q).all()
return qs
def get_result_label(self, item):
if item.country is not None:
c_label = item.country.pref_name
else:
c_label = ""
location_statement = ", ".join(
[l for l in [item.state_province_region, c_label] if l != ""]
)
return f"{item} ({item.n_works} works)<br><small text-class='muted'>{location_statement}</small>"
class AffiliationAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = (
Affiliation.objects.annotate(
n_works=Count("asserted_by__work", distinct=True)
)
.select_related("institution", "institution__country")
.order_by("-n_works")
)
inst_filter = self.forwarded.get("institution", None)
if inst_filter:
qs = qs.filter(institution=inst_filter)
if self.q:
qs = qs.filter(
Q(department__icontains=self.q) | Q(institution__name__icontains=self.q)
).distinct()
return qs
def get_result_label(self, item):
if item.institution.country is not None:
c_label = item.institution.country.pref_name
else:
c_label = ""
location_statement = ", ".join(
[l for l in [item.institution.state_province_region, c_label] if l != ""]
)
return f"{item} ({item.n_works} works)<br><small text-class='muted'>{location_statement}</small>"
class ConferenceAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Conference.objects.annotate(
main_series=StringAgg(
"series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
)
).order_by("year", "main_series", "short_title", "theme_title")
if self.q:
qs = qs.filter(search_text__icontains=self.q).distinct()
return qs
def get_result_label(self, item):
if item.main_series:
return f"{item.main_series} - {item.year} - {item.short_title}"
elif item.short_title:
return f"{item.year} - {item.short_title}"
else:
return f"{item.year} - {item.theme_title}"
class AuthorAutocomplete(ItemLabelAutocomplete):
raise_exception = True
def get_queryset(self):
qs = Author.objects.annotate(
n_works=Count("authorships", distinct=True),
main_last_name=Max("appellations__last_name"),
main_first_name=Max("appellations__first_name"),
).order_by("main_last_name", "main_first_name", "-n_works")
if self.q:
qs = qs.filter(appellations_index__icontains=self.q).distinct()
return qs
def get_result_label(self, item):
return format_html(
f"{item.most_recent_appellation} ({item.n_works} works)<br><small text-class='muted'>(All names: {item.appellations_index})</small>"
)
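# work_view builds the abstract detail page. The heavy select_related/prefetch_related
# chains below exist so the template can walk conference series, authorship
# appellations, affiliations, and session papers without issuing per-row queries.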
def work_view(request, work_id):
related_conference = Conference.objects.annotate(
n_works=Count("works", distinct=True),
n_authors=Count("works__authors", distinct=True),
main_series=StringAgg(
"series_memberships__series__abbreviation", delimiter=" / ", distinct=True
),
).prefetch_related("series", "organizers")
work = get_object_or_404(
Work.objects.select_related("work_type", "full_text_license").prefetch_related(
Prefetch("conference", queryset=related_conference),
"keywords",
"topics",
"languages",
Prefetch(
"session_papers",
queryset=Work.objects.prefetch_related(
Prefetch(
"authorships",
queryset=Authorship.objects.select_related("appellation"),
),
),
),
Prefetch(
"parent_session",
queryset=Work.objects.prefetch_related(
Prefetch(
"authorships",
queryset=Authorship.objects.select_related(
"author", "appellation"
),
)
),
),
),
pk=work_id,
)
authorships = (
Authorship.objects.filter(work_id=work_id)
.order_by("authorship_order")
.distinct()
.select_related("work", "author", "appellation")
.prefetch_related(
Prefetch(
"affiliations",
queryset=Affiliation.objects.select_related(
"institution", "institution__country"
),
)
)
)
context = {"work": work, "authorships": authorships}
return render(request, "work_detail.html", context)
def author_view(request, author_id):
author = get_object_or_404(Author, pk=author_id)
sorted_authorships = (
Authorship.objects.filter(author=author)
.order_by("work__conference__year")
.prefetch_related(
Prefetch("work", queryset=Work.objects.select_related("conference"))
)
)
appellations = (
Appellation.objects.filter(asserted_by__author=author)
.distinct()
.annotate(latest_year=Max("asserted_by__work__conference__year"))
.order_by("-latest_year")
.prefetch_related(Prefetch("asserted_by", queryset=sorted_authorships))
)
affiliations = (
Affiliation.objects.filter(asserted_by__author=author)
.distinct()
.annotate(latest_year=Max("asserted_by__work__conference__year"))
.order_by("-latest_year")
.prefetch_related(
Prefetch("asserted_by", queryset=sorted_authorships),
Prefetch(
"institution", queryset=Institution.objects.select_related("country")
),
)
)
works = (
Work.objects.filter(authorships__author=author)
.order_by("conference__year")
.distinct()
.select_related("conference", "parent_session", "work_type")
.prefetch_related(
Prefetch(
"conference",
queryset=Conference.objects.prefetch_related("series", "organizers"),
),
"session_papers",
"keywords",
"topics",
"languages",
Prefetch(
"authorships",
queryset=Authorship.objects.select_related("appellation", "author"),
),
)
)
author_admin_page = reverse("admin:abstracts_author_change", args=(author.pk,))
context = {
"author": author,
"works": works,
"appellations": appellations,
"affiliations": affiliations,
"author_admin_page": author_admin_page,
}
return render(request, "author_detail.html", context)
class AuthorSplit(StaffRequiredMixin, DetailView):
model = Author
template_name = "author_split.html"
context_object_name = "original_author"
def get_context_data(self, **kwargs):
authorships = Authorship.objects.filter(author=self.get_object()).order_by(
"work__conference__year"
)
return {self.context_object_name: self.get_object(), "authorships": authorships}
@transaction.atomic
def post(self, request, *args, **kwargs):
"""
Create new author and transfer authorships
"""
authorships_to_move = request.POST.getlist("splitselect")
try:
new_author = Author.objects.create()
Authorship.objects.filter(id__in=authorships_to_move).update(
author=new_author
)
# Force-update appellations
self.get_object().save()
new_author.save()
messages.success(
request,
f"{len(authorships_to_move)} authorships moved to new author id {new_author.id}",
)
return redirect("author_detail", new_author.id)
except:
messages.error(request, str(authorships_to_move))
return redirect("author_split", self.get_object().id)
class XMLView(LoginRequiredMixin, DetailView):
model = Work
context_object_name = "work"
def get(self, request, *args, **kwargs):
        response = HttpResponse(
            self.get_object().full_text, content_type="application/xhtml+xml"
        )
response[
"Content-Disposition"
] = f"attachment; filename={self.get_object().id}.xml"
return response
class AuthorList(ListView):
context_object_name = "author_list"
template_name = "author_list.html"
paginate_by = 50
def get_queryset(self):
base_result_set = Author.objects.exclude(appellations__isnull=True).annotate(
n_conferences=Count("works__conference", distinct=True)
)
raw_filter_form = AuthorFilter(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
order_res = filter_form["ordering"]
if order_res is None or order_res == "":
order_res = "last_name"
result_set = base_result_set.annotate(
last_name=Max("appellations__last_name"),
n_works=Count("authorships", distinct=True),
).order_by(order_res)
author_res = filter_form["author"]
if author_res is not None:
result_set = result_set.filter(id=author_res.id)
affiliation_res = filter_form["affiliation"]
if affiliation_res is not None:
result_set = result_set.filter(
authorships__affiliations=affiliation_res
)
institution_res = filter_form["institution"]
if institution_res is not None:
result_set = result_set.filter(
authorships__affiliations__institution=institution_res
)
country_res = filter_form["country"]
if country_res is not None:
result_set = result_set.filter(
authorships__affiliations__institution__country=country_res
)
conference_res = filter_form["conference"]
if conference_res is not None:
result_set = result_set.filter(works__conference=conference_res)
if filter_form["singleton"]:
result_set = result_set.filter(n_conferences=1)
name_res = filter_form["name"]
if name_res != "":
result_set = result_set.filter(appellations_index__icontains=name_res)
first_name_res = filter_form["first_name"]
if first_name_res != "":
result_set = result_set.filter(
authorships__appellation__first_name__icontains=first_name_res
)
last_name_res = filter_form["last_name"]
if last_name_res != "":
result_set = result_set.filter(
authorships__appellation__last_name__icontains=last_name_res
)
# Newest affiliations
newest_authorship = Authorship.objects.filter(
author=OuterRef("pk")
).order_by("-work__conference__year")
annotated_authors = result_set.annotate(
main_affiliation_department=Subquery(
newest_authorship.values("affiliations__department")[:1]
),
main_affiliation_institution=Subquery(
newest_authorship.values("affiliations__institution__name")[:1]
),
main_affiliation_institution_city=Subquery(
newest_authorship.values("affiliations__institution__city")[:1]
),
main_affiliation_institution_state=Subquery(
newest_authorship.values(
"affiliations__institution__state_province_region"
)[:1]
),
main_affiliation_institution_country=Subquery(
newest_authorship.values(
"affiliations__institution__country__pref_name"
)[:1]
),
most_recent_first_name=Subquery(
newest_authorship.values("appellation__first_name")[:1]
),
most_recent_last_name=Subquery(
newest_authorship.values("appellation__last_name")[:1]
),
n_works=Count("authorships", distinct=True),
)
return annotated_authors
else:
messages.warning(
self.request,
"Query parameters not recognized. Check your URL and try again.",
)
return base_result_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["author_filter_form"] = AuthorFilter(data=self.request.GET)
context["available_authors_count"] = Author.objects.count()
context["redirect_url"] = reverse("author_list")
return context
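# annotate_multiple_series and annotate_single_series compute the same entry-status
# breakdown at two levels: the former annotates each ConferenceSeries row with counts
# and percentages of complete ("c"), in-progress ("i"), in-review ("r"), and remaining
# conferences, while the latter aggregates a single conference queryset into a plain
# dict and guards against division by zero when the queryset is empty. For example
# (illustrative numbers only), a series with 10 conferences, 6 complete and 1 in
# progress, yields n_remaining == 3 and pct_complete == 60.0.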
def annotate_multiple_series(qs):
return qs.annotate(
n_conferences=Count("conferences", distinct=True),
earliest_year=Min("conferences__year"),
latest_year=Max("conferences__year"),
n_complete=Count(
"conferences", filter=Q(conferences__entry_status="c"), distinct=True
),
n_in_progress=Count(
"conferences", filter=Q(conferences__entry_status="i"), distinct=True
),
n_in_review=Count(
"conferences", filter=Q(conferences__entry_status="r"), distinct=True
),
n_remaining=F("n_conferences")
- F("n_complete")
- F("n_in_progress")
- F("n_in_review"),
pct_complete=(
Cast(F("n_complete"), FloatField()) / Cast(F("n_conferences"), FloatField())
)
* 100,
pct_in_progress=(
Cast(F("n_in_progress"), FloatField())
/ Cast(F("n_conferences"), FloatField())
)
* 100,
pct_in_review=(
Cast(F("n_in_review"), FloatField())
/ Cast(F("n_conferences"), FloatField())
)
* 100,
pct_remaining=(
Cast(F("n_remaining"), FloatField())
/ Cast(F("n_conferences"), FloatField())
)
* 100,
).order_by("title")
def annotate_single_series(qs):
res = qs.aggregate(
earliest_year=Min("year"),
latest_year=Max("year"),
n_conferences=Count("id", distinct=True),
n_complete=Count("id", filter=Q(entry_status="c"), distinct=True),
n_in_progress=Count("id", filter=Q(entry_status="i"), distinct=True),
n_in_review=Count("id", filter=Q(entry_status="r"), distinct=True),
)
res["n_remaining"] = (
res["n_conferences"]
- res["n_complete"]
- res["n_in_progress"]
- res["n_in_review"]
)
if res["n_conferences"] > 0:
res["pct_complete"] = (res["n_complete"] / res["n_conferences"]) * 100
res["pct_in_progress"] = (res["n_in_progress"] / res["n_conferences"]) * 100
res["pct_in_review"] = (res["n_in_review"] / res["n_conferences"]) * 100
res["pct_remaining"] = (res["n_remaining"] / res["n_conferences"]) * 100
else:
res["pct_complete"] = 0
res["pct_in_progress"] = 0
res["pct_in_review"] = 0
res["pct_remaining"] = 0
return res
def conference_series_qs():
return annotate_multiple_series(
ConferenceSeries.objects.exclude(conferences__isnull=True)
)
class ConferenceSeriesList(ListView):
context_object_name = "series_list"
template_name = "conference_series_list.html"
def get_queryset(self):
base_result_set = conference_series_qs()
return base_result_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
sa_conf = Conference.objects.filter(series__isnull=True)
context["standalone_conferences"] = annotate_single_series(sa_conf)
context["standalone_conference_count"] = sa_conf.count()
return context
class ConferenceSeriesDetail(DetailView):
model = ConferenceSeries
template_name = "conference_series_detail.html"
context_object_name = "series"
def get_member_conferences(self):
return Conference.objects.filter(series_memberships__series=self.get_object())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["series_progress"] = annotate_single_series(
self.get_member_conferences()
)
series_order_subquery = SeriesMembership.objects.filter(
conference=OuterRef("pk"), series=self.get_object()
).order_by("number")
context["conference_list"] = (
self.get_member_conferences()
.annotate(
main_series=StringAgg(
"series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
),
n_works=Count("works", distinct=True),
n_authors=Count("works__authors", distinct=True),
series_order=Subquery(series_order_subquery.values("number")[:1]),
)
.order_by("series_order")
.prefetch_related(
"series_memberships",
"series_memberships__series",
"organizers",
"country",
"hosting_institutions",
"hosting_institutions__country",
"documents",
)
)
context["series_list"] = conference_series_qs()
return context
class StandaloneList(View):
template_name = "conference_series_detail.html"
def get_standalone_list(self):
qs = (
Conference.objects.filter(series__isnull=True)
.annotate(
main_series=StringAgg(
"series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
),
n_works=Count("works", distinct=True),
n_authors=Count("works__authors", distinct=True),
)
.order_by("year", "short_title", "theme_title")
.prefetch_related(
"series_memberships",
"series_memberships__series",
"organizers",
"country",
"hosting_institutions",
"hosting_institutions__country",
"documents",
)
)
return qs
def get(self, request):
        faux_series = {
            "title": "Standalone Events",
            "notes": "Digital humanities events not belonging to a larger series, such as symposia or workshops.",
            "n_conferences": self.get_standalone_list().count(),
        }
context = {
"conference_list": self.get_standalone_list(),
"series": faux_series,
"series_list": conference_series_qs(),
"series_progress": annotate_single_series(self.get_standalone_list()),
}
return render(request, self.template_name, context)
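# Landing page with headline counts: conferences, span of years covered, works,
# authors with at least one authorship, institutions, and represented countries.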
def home_view(request):
conference_count = Conference.objects.count()
years_count = Conference.objects.aggregate(year_range=Max("year") - Min("year"))[
"year_range"
]
work_count = Work.objects.count()
author_count = Author.objects.exclude(authorships__work__isnull=True).count()
institution_count = Institution.objects.count()
country_count = (
Country.objects.filter(
Q(institutions__affiliations__asserted_by__work__isnull=False)
| Q(institutions__conferences__isnull=False)
| Q(conferences__isnull=False)
)
.distinct()
.count()
)
context = {
"site": {
"conference_count": conference_count,
"years_count": years_count,
"work_count": work_count,
"author_count": author_count,
"institution_count": institution_count,
"country_count": country_count,
}
}
return render(request, "index.html", context)
@user_is_staff
@transaction.atomic
def author_merge_view(request, author_id):
author = get_object_or_404(Author, pk=author_id)
if request.method == "GET":
"""
Initial load of the merge form displays all the authorships of the current author that will be affected
"""
context = {"merging": author, "author_merge_form": AuthorMergeForm}
return render(request, "author_merge.html", context)
elif request.method == "POST":
"""
Posting the new author id causes all of the old author's authorships to be reassigned.
"""
raw_form = AuthorMergeForm(request.POST)
if raw_form.is_valid():
target_author = raw_form.cleaned_data["into"]
if author == target_author:
"""
If the user chooses the existing author, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge an author into themselves. Please select a different author.",
)
return redirect("author_merge", author_id=author_id)
else:
old_author_string = str(author)
merge_results = author.merge(target_author)
target_author.user_last_updated = request.user
target_author.save()
messages.success(
request,
f"Author {old_author_string} has been merged into {target_author}, and the old author entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} authorships updated"
)
return redirect("author_detail", author_id=target_author.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
            # Rebuild the context so the form re-renders alongside the error messages.
            context = {"merging": author, "author_merge_form": AuthorMergeForm}
            return render(request, "author_merge.html", context)
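# Used by the data dictionary below: treat a field as "required" unless it is a
# blankable text field or is nullable.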
def field_required(field):
if field.get_internal_type() in ("CharField", "TextField") and field.blank:
return False
if field.null:
return False
return True
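# Build the downloads page: links to the zipped table exports plus a generated
# data dictionary describing each exported model and its fields.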
def download_data(request):
data_dictionary = []
if request.user.is_authenticated:
dt_config = settings.PRIVATE_DATA_TABLE_CONFIG
zip_url = reverse("private_all_tables_download")
else:
dt_config = settings.PUBLIC_DATA_TABLE_CONFIG
zip_url = reverse("public_all_tables_download")
denormalized_url = reverse("works_download")
denormalized_last_updated = datetime.fromtimestamp(
getmtime(f"{settings.DATA_OUTPUT_PATH}/{settings.DENORMALIZED_WORKS_NAME}.zip")
)
for m in dt_config["CONFIGURATION"]:
model = attrgetter(m["model"])(models)
if "manual_model_description" in m:
model_description = m["manual_model_description"]
else:
try:
model_description = model.model_description
            except AttributeError:
model_description = None
all_model_fields = [
{
"name": f.name,
"relation": f.is_relation,
"help_text": f.help_text,
"related_model": str(f.related_model)
.replace("<class 'abstracts.models.", "")
.replace("'>", ""),
"type": f.get_internal_type(),
"required": field_required(f),
}
for f in model._meta.fields
if not f.one_to_many and f.name not in m["exclude_fields"]
]
if m.get("include_string", False):
all_model_fields.append(
{
"name": "label",
"relation": None,
"help_text": "General label for this object",
"related_model": None,
"type": "CharField",
"required": True,
}
)
data_dictionary.append(
{
"model": m["model"],
"model_description": model_description,
"csv_name": m["csv_name"],
"fields": all_model_fields,
}
)
normalized_last_updated = datetime.fromtimestamp(
getmtime(f"{settings.DATA_OUTPUT_PATH}/{dt_config['DATA_ZIP_NAME']}")
)
context = {
"zip_url": zip_url,
"denormalized_url": denormalized_url,
"denormalized_last_updated": denormalized_last_updated,
"normalized_last_updated": normalized_last_updated,
"data_dictionary": data_dictionary,
"denormalized_data_dictionary": settings.DENORMALIZED_HEADERS,
}
return render(request, "downloads.html", context)
def download_works_csv(request):
target_zip = f"{settings.DATA_OUTPUT_PATH}/{settings.DENORMALIZED_WORKS_NAME}.zip"
response = FileResponse(open(target_zip, "rb"))
return response
def public_download_all_tables(request):
target_zip = f"{settings.DATA_OUTPUT_PATH}/{settings.PUBLIC_DATA_TABLE_CONFIG['DATA_ZIP_NAME']}"
response = FileResponse(open(target_zip, "rb"))
return response
@login_required
def private_download_all_tables(request):
target_zip = f"{settings.DATA_OUTPUT_PATH}/{settings.PRIVATE_DATA_TABLE_CONFIG['DATA_ZIP_NAME']}"
response = FileResponse(open(target_zip, "rb"))
return response
@login_required
def WorkCreate(request):
if request.method == "GET":
if "conference" in request.GET:
conf = get_object_or_404(Conference, pk=int(request.GET["conference"]))
work_form = WorkForm(initial={"conference": conf.pk})
else:
work_form = WorkForm()
if request.method == "POST":
work_form = WorkForm(request.POST)
if work_form.is_valid():
new_work = work_form.save()
new_work.user_last_updated = request.user
new_work.save()
messages.success(request, f"{new_work} created.")
return redirect("work_edit_authorship", work_id=new_work.pk)
else:
for err in work_form.errors:
messages.error(request, err)
context = {"work_form": work_form}
return render(request, "work_create.html", context)
@login_required
def WorkEdit(request, work_id):
work = get_object_or_404(Work, pk=work_id)
if request.method == "POST":
work_form = WorkForm(request.POST, instance=work)
if work_form.is_valid():
work.user_last_updated = request.user
work_form.save()
            messages.success(request, f'"{work.title}" successfully updated.')
return redirect("work_detail", work_id=work.pk)
else:
for f, e in work_form.errors.items():
messages.error(request, f"{f}: {e}")
work_initial_data = model_to_dict(work)
context = {"work_form": WorkForm(initial=work_initial_data), "work": work}
return render(request, "work_edit.html", context)
@login_required
@transaction.atomic
def WorkEditAuthorship(request, work_id):
work = get_object_or_404(Work, pk=work_id)
authorships = work.authorships.all()
AuthorshipWorkFormset = formset_factory(
WorkAuthorshipForm, can_delete=True, extra=0
)
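    # Pre-populate one form per existing authorship so the formset mirrors the
    # work's current author order, appellations, and affiliations.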
initial_data = []
for authorship in authorships:
base_data = {
"author": authorship.author,
"authorship_order": authorship.authorship_order,
"first_name": authorship.appellation.first_name,
"last_name": authorship.appellation.last_name,
"affiliations": [aff for aff in authorship.affiliations.all()],
}
initial_data.append(base_data)
if request.method == "GET":
authorships_forms = AuthorshipWorkFormset(initial=initial_data)
elif request.method == "POST":
authorships_forms = AuthorshipWorkFormset(request.POST)
if authorships_forms.is_valid():
for d_form in authorships_forms.deleted_forms:
d_form_data = d_form.cleaned_data
attached_author = d_form_data["author"]
Authorship.objects.filter(
work=work, author=d_form_data["author"]
).delete()
# Refresh the author in DB to update appellations index
attached_author.save()
for aform in authorships_forms:
if aform not in authorships_forms.deleted_forms:
aform_data = aform.cleaned_data
appellation = Appellation.objects.get_or_create(
first_name=aform_data["first_name"],
last_name=aform_data["last_name"],
)[0]
affiliations = aform_data["affiliations"]
authorship_order = aform_data["authorship_order"]
try:
if aform_data["author"] is None:
author_id = Author.objects.create()
else:
author_id = aform_data["author"]
auth = Authorship.objects.update_or_create(
work=work,
author=author_id,
defaults={
"authorship_order": authorship_order,
"appellation": appellation,
"user_last_updated": request.user,
},
)[0]
author_id.user_last_updated = request.user
author_id.save()
except IntegrityError as e:
messages.error(
request, f"{e}: Ensure authorship order numbers are unique"
)
return redirect("work_edit_authorship", work.pk)
auth.affiliations.clear()
if affiliations is not None:
auth.affiliations.set(affiliations)
messages.success(
request, f'"{work.title}" authorships successfully updated.'
)
if "start_new" in request.POST:
return redirect(
f"{reverse('work_create')}?conference={work.conference.pk}"
)
return redirect("work_detail", work_id=work.pk)
else:
for error in authorships_forms.errors:
messages.error(request, error)
context = {
"authorships_form": authorships_forms,
"work": work,
"affiliation_form": AffiliationEditForm,
}
return render(request, "work_edit_authorships.html", context)
@login_required
def AuthorInfoJSON(request, author_id):
if request.method == "GET":
author = get_object_or_404(Author, pk=author_id)
author_aff = Affiliation.objects.filter(asserted_by__author=author).distinct()
author_dict = {
"first_name": author.most_recent_appellation.first_name,
"last_name": author.most_recent_appellation.last_name,
"work_titles": [w.title for w in author.works.all()][:4],
"works_count": author.works.count(),
}
        # A queryset is never None, so check whether any affiliations actually exist.
        if author_aff.exists():
author_dict["affiliations"] = [
{"name": str(aff), "id": aff.pk} for aff in author_aff
]
return JsonResponse(author_dict)
@login_required
def AffiliationInfoJSON(request, affiliation_id):
if request.method == "GET":
affiliation = get_object_or_404(Affiliation, pk=affiliation_id)
affiliation_dict = {
"institution": {
"name": str(affiliation.institution),
"id": affiliation.institution.id,
}
}
        # department is a blankable CharField, so skip empty strings as well as None.
        if affiliation.department:
affiliation_dict["department"] = affiliation.department
return JsonResponse(affiliation_dict)
class WorkDelete(LoginRequiredMixin, SuccessMessageMixin, DeleteView):
model = Work
template_name = "work_delete.html"
extra_context = {"cancel_view": "work_list"}
success_url = reverse_lazy("work_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, f"'{self.get_object().title}' deleted")
return super().delete(request, *args, **kwargs)
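# Public list of works with faceted filtering (work type, conference, affiliation,
# institution, author, keyword, topic, language, full-text availability) and an
# optional ranked full-text search.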
class FullWorkList(ListView):
context_object_name = "work_list"
template_name = "work_list.html"
paginate_by = 10
def get_queryset(self):
base_result_set = Work.objects.all()
raw_filter_form = WorkFilter(self.request.GET)
if raw_filter_form.is_valid():
result_set = base_result_set
filter_form = raw_filter_form.cleaned_data
work_type_res = filter_form["work_type"]
if work_type_res is not None:
result_set = result_set.filter(work_type=work_type_res)
conference_res = filter_form["conference"]
if conference_res is not None:
result_set = result_set.filter(conference=conference_res)
affiliation_res = filter_form["affiliation"]
if len(affiliation_res) > 0:
result_set = result_set.filter(
authorships__affiliations__in=affiliation_res
).distinct()
institution_res = filter_form["institution"]
if len(institution_res) > 0:
result_set = result_set.filter(
authorships__affiliations__institution__in=institution_res
).distinct()
author_res = filter_form["author"]
if len(author_res) > 0:
result_set = result_set.filter(authorships__author__in=author_res)
keyword_res = filter_form["keywords"]
if len(keyword_res) > 0:
result_set = result_set.filter(keywords__in=keyword_res)
topic_res = filter_form["topics"]
if len(topic_res) > 0:
result_set = result_set.filter(topics__in=topic_res)
language_res = filter_form["languages"]
if len(language_res) > 0:
result_set = result_set.filter(languages__in=language_res)
if filter_form["full_text_available"]:
result_set = result_set.exclude(full_text="")
if filter_form["full_text_viewable"]:
result_set = result_set.exclude(full_text="").filter(
full_text_license__isnull=False
)
text_res = filter_form["text"]
if text_res != "":
text_query = SearchQuery(text_res, search_type="websearch")
result_set = (
result_set.filter(search_text=text_query)
.annotate(
rank=SearchRank(
F("search_text"),
text_query,
),
# Does the search text show up only in the full text?
search_in_ft_only=ExpressionWrapper(
~Q(title__icontains=text_res), output_field=BooleanField()
),
)
.filter(rank__gt=0.1)
.order_by("-rank")
)
                order_res = "rank"
            # To find the last name of the first author, build a subquery that pulls
            # the first authorship for a given work; we can then sort on its
            # appellation__last_name.
            first_author_subquery = Authorship.objects.filter(
                work=OuterRef("pk")
            ).order_by("authorship_order")
            # Respect an explicit ordering from the form; otherwise fall back to
            # relevance ("rank") when a text search was applied above, or to year.
            if filter_form["ordering"]:
                order_res = filter_form["ordering"]
            elif text_res == "":
                order_res = "year"
if order_res == "year":
result_set = result_set.order_by("conference__year", "title")
elif order_res == "-year":
result_set = result_set.order_by("-conference__year", "title")
elif order_res == "title":
result_set = result_set.order_by("title")
elif order_res == "-title":
result_set = result_set.order_by("-title")
elif order_res == "last_name":
result_set = result_set.annotate(
first_author_last_name=Subquery(
first_author_subquery.values("appellation__last_name")[:1]
)
).order_by("first_author_last_name", "title")
elif order_res == "-last_name":
result_set = result_set.annotate(
first_author_last_name=Subquery(
first_author_subquery.values("appellation__last_name")[:1]
)
).order_by("-first_author_last_name", "title")
return (
result_set.select_related(
"conference", "work_type", "parent_session", "full_text_license"
)
.annotate(
main_series=StringAgg(
"conference__series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
),
main_institution=StringAgg(
"conference__hosting_institutions__name",
delimiter=" / ",
distinct=True,
),
)
.prefetch_related(
Prefetch(
"conference",
queryset=Conference.objects.prefetch_related(
Prefetch(
"series_memberships",
queryset=SeriesMembership.objects.select_related(
"series"
),
),
"organizers",
),
),
"session_papers",
Prefetch(
"authorships",
queryset=Authorship.objects.select_related(
"appellation", "author"
),
),
"keywords",
"topics",
"languages",
)
)
else:
for error in raw_filter_form.errors:
messages.warning(self.request, error)
return base_result_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
raw_filter_form = WorkFilter(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
conference_res = filter_form["conference"]
if conference_res is not None:
conferences_data = (
Conference.objects.filter(id=conference_res.id)
.annotate(
n_works=Count("works", distinct=True),
n_authors=Count("works__authors", distinct=True),
main_series=StringAgg(
"series_memberships__series__abbreviation",
delimiter=" / ",
distinct=True,
),
)
.select_related("country")
.prefetch_related(
"organizers", "series_memberships", "series_memberships__series"
)
.all()
)
context["selected_conferences"] = conferences_data
context["work_filter_form"] = WorkFilter(data=self.request.GET)
context["available_works_count"] = Work.objects.count()
context["filtered_works_count"] = self.get_queryset().count()
context["redirect_url"] = reverse("work_list")
return context
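# Login-only institution browser, annotated with per-institution work counts and
# filtered by the same FullInstitutionForm used in the author-centric view below.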
class FullInstitutionList(LoginRequiredMixin, ListView):
context_object_name = "institution_list"
template_name = "full_institution_list.html"
paginate_by = 10
def get_queryset(self):
annotated_affiliations = Affiliation.objects.annotate(
n_works=Count("asserted_by__work", distinct=True)
)
result_set = (
Institution.objects.annotate(
n_works=Count("affiliations__asserted_by__work", distinct=True)
)
.prefetch_related(
Prefetch("affiliations", annotated_affiliations), "country"
)
.order_by("-n_works")
)
if self.request.GET:
raw_filter_form = FullInstitutionForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
result_set = result_set.annotate(
n_conferences=Count(
"affiliations__asserted_by__work__conference", distinct=True
)
)
department_res = filter_form["department"]
if department_res != "":
result_set = result_set.filter(
affiliations__department__icontains=department_res
)
affiliation_res = filter_form["affiliation"]
if affiliation_res is not None:
result_set = result_set.filter(affiliations=affiliation_res)
institution_res = filter_form["institution"]
if institution_res is not None:
result_set = result_set.filter(pk=institution_res.pk)
country_res = filter_form["country"]
if country_res is not None:
result_set = result_set.filter(country=country_res)
if filter_form["no_department"]:
result_set = result_set.filter(affiliations__department="")
conference_res = filter_form["conference"]
if conference_res is not None:
result_set = result_set.filter(
affiliations__asserted_by__work__conference=conference_res
).distinct()
if filter_form["singleton"]:
result_set = result_set.filter(n_conferences=1)
if filter_form["ordering"] == "n_dsc":
result_set = result_set.order_by(
"-n_works", "affiliations__institution__name"
)
elif filter_form["ordering"] == "n_asc":
result_set = result_set.order_by(
"n_works", "affiliations__institution__name"
)
elif filter_form["ordering"] == "a":
result_set = result_set.order_by("affiliations__institution__name")
else:
for f, e in raw_filter_form.errors.items():
messages.error(self.request, f"{f}: {e}")
return result_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["institution_filter_form"] = FullInstitutionForm(
initial=self.request.GET
)
context["available_institutions_count"] = Institution.objects.count()
context["filtered_institutions_count"] = self.get_queryset().count()
context["redirect_url"] = reverse("full_institution_list")
return context
class AuthorInstitutionList(FullInstitutionList):
template_name = "author_institution_list.html"
def get_queryset(self):
base_result_set = Institution.objects.annotate(
n_authors=Count("affiliations__asserted_by__author", distinct=True),
n_conferences=Count(
"affiliations__asserted_by__work__conference", distinct=True
),
).distinct()
result_set = base_result_set
if self.request.GET:
raw_filter_form = FullInstitutionForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
department_res = filter_form["department"]
if department_res != "":
result_set = result_set.filter(
affiliations__department__icontains=department_res
)
affiliation_res = filter_form["affiliation"]
if affiliation_res is not None:
result_set = result_set.filter(affiliations=affiliation_res)
institution_res = filter_form["institution"]
if institution_res is not None:
result_set = result_set.filter(pk=institution_res.pk)
conference_res = filter_form["conference"]
if conference_res is not None:
result_set = result_set.filter(works__conference=conference_res)
country_res = filter_form["country"]
if country_res is not None:
result_set = result_set.filter(country=country_res)
if filter_form["singleton"]:
result_set = result_set.filter(n_conferences=1)
if filter_form["no_department"]:
result_set = result_set.filter(affiliations__department="")
if filter_form["ordering"] == "n_dsc":
result_set = result_set.order_by("-n_authors")
elif filter_form["ordering"] == "n_asc":
result_set = result_set.order_by("n_authors")
elif filter_form["ordering"] == "a":
result_set = result_set.order_by("affiliations__institution__name")
else:
for f, e in raw_filter_form.errors.items():
messages.error(self.request, f"{f}: {e}")
result_set = base_result_set
else:
# Otherwise default to sorting by n_dsc
result_set = result_set.order_by("-n_authors")
return result_set.distinct()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["redirect_url"] = reverse("author_institution_list")
return context
class InstitutionEdit(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Institution
template_name = "generic_form.html"
fields = ["name", "city", "state_province_region", "country"]
extra_context = {
"form_title": "Edit institution",
"cancel_view": "full_institution_list",
"merge_view": "institution_merge",
}
success_message = "%(name)s updated"
success_url = reverse_lazy("full_institution_list")
def form_valid(self, form):
response = super(InstitutionEdit, self).form_valid(form)
self.object.user_last_updated = self.request.user
self.object.save()
return response
class InstitutionCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Institution
template_name = "generic_form.html"
fields = ["name", "city", "state_province_region", "country"]
extra_context = {
"form_title": "Create institution",
"cancel_view": "full_institution_list",
}
success_message = "%(name)s created"
success_url = reverse_lazy("full_institution_list")
def form_valid(self, form):
response = super(InstitutionCreate, self).form_valid(form)
self.object.user_last_updated = self.request.user
self.object.save()
return response
@user_is_staff
@transaction.atomic
def institution_merge(request, institution_id):
institution = get_object_or_404(Institution, pk=institution_id)
context = {"merging": institution, "institution_merge_form": InstitutionMergeForm}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this institution.
"""
return render(request, "institution_merge.html", context)
elif request.method == "POST":
"""
        Posting the target institution id causes all of the old institution's affiliations to be reassigned before the old institution entry is deleted.
"""
raw_form = InstitutionMergeForm(request.POST)
if raw_form.is_valid():
target_institution = raw_form.cleaned_data["into"]
if institution == target_institution:
"""
If the user chooses the existing institution, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge an institution into itself. Please select a different institution.",
)
return redirect("institution_merge", institution_id=institution_id)
else:
old_institution_id = str(institution)
merge_results = institution.merge(target_institution)
target_institution.user_last_updated = request.user
target_institution.save()
                messages.success(
                    request,
                    f"Institution {old_institution_id} has been merged into {target_institution}, and the old institution entry has been deleted.",
                )
messages.success(
request, f"{merge_results['update_results']} affiliations updated"
)
return redirect("institution_edit", pk=target_institution.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "institution_merge.html", context)
@user_is_staff
@transaction.atomic
def institution_multi_merge(request):
context = {"form": InstitutionMultiMergeForm}
if request.method == "POST":
raw_form = InstitutionMultiMergeForm(request.POST)
if raw_form.is_valid():
target_institution = raw_form.cleaned_data["into"]
source_institutions = raw_form.cleaned_data["sources"].exclude(
pk=target_institution.pk
)
for institution in source_institutions:
old_institution_id = str(institution)
merge_results = institution.merge(target_institution)
target_institution.user_last_updated = request.user
target_institution.save()
messages.success(
request,
f"Institution {old_institution_id} has been merged into {target_institution}, and the old institution entry has been deleted.",
)
messages.success(
                    request, f"{merge_results['update_results']} affiliations updated"
)
return redirect("institution_edit", pk=target_institution.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "institution_multi_merge.html", context)
class AffiliationEdit(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Affiliation
template_name = "generic_form.html"
form_class = AffiliationEditForm
extra_context = {
"form_title": "Edit affiliation",
"cancel_view": "full_institution_list",
"merge_view": "affiliation_merge",
}
success_message = "%(department)s updated"
success_url = reverse_lazy("full_institution_list")
class AffiliationCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Affiliation
template_name = "generic_form.html"
form_class = AffiliationEditForm
extra_context = {
"form_title": "Create affiliation",
"cancel_view": "full_institution_list",
}
success_message = "%(department)s created"
success_url = reverse_lazy("full_institution_list")
def get_initial(self, **kwargs):
super().get_initial(**kwargs)
if "institution" in self.request.GET:
self.initial = {"institution": int(self.request.GET["institution"])}
return self.initial
@login_required
def ajax_affiliation_create(request):
newaff = Affiliation.objects.get_or_create(
department=request.POST["department"],
institution=Institution.objects.get(pk=int(request.POST["institution"])),
)[0]
return JsonResponse({"name": str(newaff), "id": newaff.pk})
@user_is_staff
@transaction.atomic
def affiliation_merge(request, affiliation_id):
affiliation = get_object_or_404(Affiliation, pk=affiliation_id)
context = {"merging": affiliation, "affiliation_merge_form": AffiliationMergeForm}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this affiliation.
"""
return render(request, "affiliation_merge.html", context)
elif request.method == "POST":
"""
        Posting the target affiliation id causes all references to the old affiliation to be reassigned before the old affiliation entry is deleted.
"""
raw_form = AffiliationMergeForm(request.POST)
if raw_form.is_valid():
target_affiliation = raw_form.cleaned_data["into"]
if affiliation == target_affiliation:
"""
If the user chooses the existing affiliation, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge an affiliation into itself. Please select a different affiliation.",
)
return redirect("affiliation_merge", affiliation_id=affiliation_id)
else:
old_affiliation_id = str(affiliation)
merge_results = affiliation.merge(target_affiliation)
messages.success(
request,
f"Affiliation {old_affiliation_id} has been merged into {target_affiliation}, and the old affiliation entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} affiliations updated"
)
return redirect("affiliation_edit", pk=target_affiliation.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "affiliation_merge.html", context)
@user_is_staff
@transaction.atomic
def affiliation_multi_merge(request):
context = {"form": AffiliationMultiMergeForm}
if request.method == "POST":
raw_form = AffiliationMultiMergeForm(request.POST)
if raw_form.is_valid():
target_affiliation = raw_form.cleaned_data["into"]
source_affiliations = raw_form.cleaned_data["sources"].exclude(
pk=target_affiliation.pk
)
for affiliation in source_affiliations:
old_affiliation_id = str(affiliation)
merge_results = affiliation.merge(target_affiliation)
messages.success(
request,
f"Affiliation {old_affiliation_id} has been merged into {target_affiliation}, and the old affiliation entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} affiliations updated"
)
return redirect("affiliation_edit", pk=target_affiliation.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "affiliation_multi_merge.html", context)
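# Staff-only utility: preview and bulk-delete orphaned records (authors,
# affiliations, institutions, keywords, and appellations with no remaining
# references).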
@user_is_staff
@transaction.atomic
def wipe_unused(request):
deletion_dict = {
"Author": Author.objects.exclude(authorships__isnull=False).distinct(),
"Affiliation": Affiliation.objects.exclude(
asserted_by__isnull=False
).distinct(),
"Institution": Institution.objects.exclude(
Q(affiliations__asserted_by__isnull=False) | Q(conferences__isnull=False)
).distinct(),
"Keyword": Keyword.objects.exclude(works__isnull=False).distinct(),
"Appellation": Appellation.objects.exclude(
asserted_by__isnull=False
).distinct(),
}
if request.method == "POST":
for k, v in deletion_dict.items():
res = v.delete()
if res[0] > 0:
messages.success(request, f"{k}: {res[0]} objects deleted")
any_hanging_items = any([v.exists() for k, v in deletion_dict.items()])
context = {"deletions": deletion_dict, "hanging_items": any_hanging_items}
return render(request, "wipe_unused.html", context)
class ConferenceCreate(StaffRequiredMixin, SuccessMessageMixin, CreateView):
model = Conference
template_name = "conference_create.html"
form_class = ConferenceForm
extra_context = {
"form_title": "Create conference",
"cancel_view": "conference_list",
}
success_message = "Conference '%(year)s - %(short_title)s' created"
@transaction.atomic
def post(self, request, *args, **kwargs):
response = super().post(request, *args, **kwargs)
form_instance = self.get_form()
if form_instance.is_valid():
            for organizer in form_instance.cleaned_data["organizers"]:
                self.object.organizers.add(organizer)
            self.object.save()
            # Handle the "go to abstracts" shortcut before the default response,
            # otherwise this redirect is never reached.
            if "goto_abstracts" in request.POST:
                return redirect(reverse("work_list") + f"?conference={self.object.id}")
            return response
else:
for err in form_instance.errors:
messages.error(request, err)
return response
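# Function-based edit view for a conference: updates its own fields, organizer
# and hosting-institution relations, an optional bulk license action on its
# works, and its series memberships via an inline formset.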
@user_is_staff
@transaction.atomic
def ConferenceEdit(request, pk):
conference = get_object_or_404(Conference, pk=pk)
# populate the conference form, including pulling in the related organizers
conference_dict = model_to_dict(conference)
conference_dict["organizers"] = conference.organizers.all()
form = ConferenceForm(initial=conference_dict)
ConferenceSeriesFormSet = formset_factory(
ConferenceSeriesInline, can_delete=True, extra=0
)
initial_series = [
{"series": memb.series, "number": memb.number}
for memb in SeriesMembership.objects.filter(conference=conference).all()
]
context = {
"conference": conference,
"form": form,
# "licenses": License.objects.all(),
"series_membership_form": ConferenceSeriesFormSet(initial=initial_series),
"form_title": "Edit conference",
"cancel_view": "conference_list",
}
if request.method == "POST":
form = ConferenceForm(data=request.POST, instance=conference)
if form.is_valid():
clean_form = form.cleaned_data
conference.year = clean_form["year"]
conference.short_title = clean_form["short_title"]
conference.notes = clean_form["notes"]
conference.url = clean_form["url"]
# Clear existing relations and update according to the form
conference.organizers.clear()
for organizer in clean_form["organizers"]:
conference.organizers.add(organizer)
conference.hosting_institutions.clear()
for hosting_institution in clean_form["hosting_institutions"]:
conference.hosting_institutions.add(hosting_institution)
conference.save()
# License action
license_action = clean_form["license_action"]
if license_action == "":
pass
elif license_action == "clear":
conference.works.all().update(full_text_license=None)
else:
license_object = License.objects.get(id=int(license_action))
conference.works.all().update(full_text_license=license_object)
series_forms = ConferenceSeriesFormSet(data=request.POST)
if series_forms.is_valid():
# Delete memberships first
for d_form in series_forms.deleted_forms:
d_form_data = d_form.cleaned_data
SeriesMembership.objects.filter(
conference=conference,
series=d_form_data["series"],
number=d_form_data["number"],
).delete()
# Then update new ones
for s_form in series_forms.forms:
if s_form not in series_forms.deleted_forms:
s_form_data = s_form.cleaned_data
SeriesMembership.objects.update_or_create(
conference=conference,
series=s_form_data["series"],
defaults={"number": s_form_data["number"]},
)
messages.success(request, f"Conference {conference} updated.")
if "goto_abstracts" in request.POST:
return redirect(
reverse("work_list") + f"?conference={conference.id}"
)
if "goto_series" in request.POST:
first_series = conference.series.first()
if first_series is None:
return redirect("standalone_conferences")
else:
return redirect("conference_series_detail", pk=first_series.id)
return redirect("conference_edit", pk=conference.pk)
else:
for f, e in series_forms.errors.items():
messages.error(request, f"{f}: {e}")
else:
for f, e in form.errors.items():
messages.error(request, f"{f}: {e}")
return render(request, "conference_edit.html", context)
class ConferenceDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Conference
template_name = "conference_delete.html"
extra_context = {
"form_title": "Delete conference",
"cancel_view": "conference_list",
}
success_message = "Conference deleted"
success_url = reverse_lazy("conference_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(ConferenceDelete, self).delete(request, *args, **kwargs)
class ConferenceXMLLoad(StaffRequiredMixin, DetailView):
model = Conference
template_name = "conference_xml_load.html"
extra_context = {"form": ConferenceXMLUploadForm()}
@transaction.atomic
def post(self, request, *args, **kwargs):
raw_form = ConferenceXMLUploadForm(request.POST, request.FILES)
conference = self.get_object()
if raw_form.is_valid():
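            # Stage the uploaded zip in a temporary directory, confirm it really is
            # a zipfile, then extract it and import every TEI XML file it contains.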
with TemporaryDirectory() as upload_dir:
# Write uploaded zip to tempdir
with NamedTemporaryFile(dir=upload_dir, suffix=".zip") as tei_zip:
with open(tei_zip.name, "wb") as upload_zip:
                        for chunk in request.FILES["file"].chunks():
upload_zip.write(chunk)
if not zipfile.is_zipfile(tei_zip.name):
messages.error(request, "That is not a valid zipfile.")
return render(
request,
"conference_xml_load.html",
{
"object": self.get_object(),
"form": ConferenceXMLUploadForm(),
},
)
# Extract all the files within
with zipfile.ZipFile(tei_zip.name) as zip_ref:
zip_ref.extractall(upload_dir)
# Import all XML
import_results = conference.import_xml_directory(upload_dir)
n_success = len(import_results["successful_files"])
n_failed = len(import_results["failed_files"])
messages.info(
request,
f"{n_success} of {n_success + n_failed} files valid.",
)
for err in import_results["failed_files"]:
messages.error(
request, f"{basename(err['filepath'])}: {err['error']}"
)
if n_failed == 0:
messages.success(request, f"All files imported successfully.")
else:
messages.info(
request,
"Please fix errors or remove malformed files, and re-upload zip. All TEI documents must be valid in order to complete the import.",
)
return render(
request,
"conference_xml_load.html",
{"object": self.get_object(), "form": ConferenceXMLUploadForm()},
)
else:
for f, e in raw_form.errors.items():
messages.error(request, f"{f}: {e}")
return render(
request,
"conference_xml_load.html",
{"object": self.get_object(), "form": ConferenceXMLUploadForm()},
)
@login_required
@transaction.atomic
def conference_checkout(request, conference_id):
conference = get_object_or_404(Conference, pk=conference_id)
if request.method == "GET":
"""
Load the current form and display current attached user
"""
context = {
"conference": conference,
"form": ConferenceCheckoutForm(
{"entry_status": conference.entry_status, "editing_user": "self"}
),
}
return render(request, "conference_checkout.html", context)
elif request.method == "POST":
"""
Get the form and update the status if the user has the authority to do so
"""
raw_form = ConferenceCheckoutForm(request.POST)
if raw_form.is_valid():
clean_form = raw_form.cleaned_data
if clean_form["entry_status"] == "c" and not request.user.is_staff:
messages.error(
request,
"Only an administrator can mark this conference as completed.",
)
return redirect("conference_checkout", conference_id=conference.id)
else:
if clean_form["assign_user"] == "self":
conference.entry_status = clean_form["entry_status"]
conference.editing_user = request.user
conference.save()
messages.success(request, "Conference checked out")
elif clean_form["assign_user"] == "clear":
conference.entry_status = clean_form["entry_status"]
conference.editing_user = None
conference.save()
messages.success(request, "Conference cleared")
return redirect(reverse("work_list") + f"?conference={conference.id}")
class SeriesCreate(StaffRequiredMixin, SuccessMessageMixin, CreateView):
model = ConferenceSeries
template_name = "generic_form.html"
extra_context = {
"form_title": "Create conference series",
"cancel_view": "conference_list",
}
fields = ["title", "abbreviation", "notes"]
success_message = "Series '%(title)s' created"
success_url = reverse_lazy("conference_list")
class SeriesEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = ConferenceSeries
template_name = "generic_form.html"
extra_context = {
"form_title": "Update conference series",
"cancel_view": "conference_list",
}
fields = ["title", "abbreviation", "notes"]
success_message = "Series '%(title)s' updated"
success_url = reverse_lazy("conference_list")
class SeriesDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = ConferenceSeries
template_name = "generic_form.html"
extra_context = {
"form_title": "Delete conference series",
"cancel_view": "conference_list",
}
success_message = "Series '%(title)s' deleted"
success_url = reverse_lazy("conference_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(SeriesDelete, self).delete(request, *args, **kwargs)
class OrganizerCreate(StaffRequiredMixin, SuccessMessageMixin, CreateView):
model = Organizer
template_name = "generic_form.html"
extra_context = {
"form_title": "Create conference organizer",
"cancel_view": "full_organizer_list",
}
fields = ["name", "abbreviation", "conferences_organized", "notes", "url"]
success_message = "Organizer '%(name)s' created"
success_url = reverse_lazy("full_organizer_list")
def form_valid(self, form):
response = super(OrganizerCreate, self).form_valid(form)
self.object.user_last_updated = self.request.user
self.object.save()
return response
class OrganizerEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = Organizer
template_name = "generic_form.html"
extra_context = {
"form_title": "Update conference organizer",
"cancel_view": "full_organizer_list",
}
fields = ["name", "abbreviation", "conferences_organized", "notes", "url"]
success_message = "Organizer '%(name)s' updated"
success_url = reverse_lazy("full_organizer_list")
def form_valid(self, form):
response = super(OrganizerEdit, self).form_valid(form)
self.object.user_last_updated = self.request.user
self.object.save()
return response
class OrganizerDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Organizer
template_name = "generic_form.html"
extra_context = {
"form_title": "Delete organizer",
"cancel_view": "full_organizer_list",
}
success_message = "Organizer %(name)s deleted."
success_url = reverse_lazy("full_organizer_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(OrganizerDelete, self).delete(request, *args, **kwargs)
class OrganizerList(LoginRequiredMixin, ListView):
model = Organizer
template_name = "full_organizer_list.html"
context_object_name = "organizer_list"
class KeywordCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Keyword
template_name = "generic_form.html"
extra_context = {"form_title": "Create keyword", "cancel_view": "full_keyword_list"}
fields = ["title"]
success_message = "Keyword '%(title)s' created"
success_url = reverse_lazy("full_keyword_list")
class KeywordDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Keyword
template_name = "generic_form.html"
extra_context = {"form_title": "Delete keyword", "cancel_view": "full_keyword_list"}
success_message = "Keyword '%(title)s' deleted"
success_url = reverse_lazy("full_keyword_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(KeywordDelete, self).delete(request, *args, **kwargs)
class KeywordEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = Keyword
template_name = "generic_form.html"
extra_context = {
"form_title": "Update keyword",
"cancel_view": "full_keyword_list",
"merge_view": "keyword_merge",
"delete_view": "keyword_delete",
}
fields = ["title"]
success_message = "Keyword '%(title)s' updated"
success_url = reverse_lazy("full_keyword_list")
class KeywordList(LoginRequiredMixin, ListView):
model = Keyword
template_name = "tag_list.html"
context_object_name = "tag_list"
extra_context = {
"tag_category": "Keywords",
"tag_edit_view": "keyword_edit",
"tag_create_view": "keyword_create",
"tag_list_view": "full_keyword_list",
"multi_merge": "keyword_multi_merge",
"filter_param_name": "keywords",
}
def get_queryset(self):
base_results_set = Keyword.objects.order_by("title")
results_set = base_results_set.annotate(n_works=Count("works"))
if self.request.GET:
raw_filter_form = TagForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
if filter_form["name"] != "":
results_set = results_set.filter(
title__icontains=filter_form["name"]
)
if filter_form["ordering"] == "a":
results_set = results_set.order_by("title")
elif filter_form["ordering"] == "n_asc":
results_set = results_set.order_by("n_works")
elif filter_form["ordering"] == "n_dsc":
results_set = results_set.order_by("-n_works")
else:
for f, e in raw_filter_form.errors.items():
messages.error(self.request, f"{f}: {e}")
else:
results_set = results_set.order_by("title")
return results_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["tag_filter_form"] = TagForm(initial=self.request.GET)
context["filtered_tags_count"] = self.get_queryset().count()
context["available_tags_count"] = Keyword.objects.count()
return context
@user_is_staff
@transaction.atomic
def keyword_merge(request, keyword_id):
keyword = get_object_or_404(Keyword, pk=keyword_id)
affected_works = Work.objects.filter(keywords=keyword).all()
sample_works = affected_works[:15]
count_elements = affected_works.count() - 15
    context = {
        "merging": keyword,
        "tag_merge_form": KeywordMergeForm,
        "sample_elements": sample_works,
        "count_elements": count_elements,
        "tag_category": "Keyword",
        "merge_view": "keyword_merge",
    }
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this keyword.
"""
return render(request, "tag_merge.html", context)
elif request.method == "POST":
"""
        Posting the target keyword id causes all works tagged with the old keyword to be retagged before the old keyword entry is deleted.
"""
raw_form = KeywordMergeForm(request.POST)
if raw_form.is_valid():
target_keyword = raw_form.cleaned_data["into"]
if keyword == target_keyword:
"""
If the user chooses the existing keyword, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge a keyword into itself. Please select a different keyword.",
)
return redirect("keyword_merge", keyword_id=keyword_id)
else:
old_keyword_id = str(keyword)
merge_results = keyword.merge(target_keyword)
messages.success(
request,
f"Keyword {old_keyword_id} has been merged into {target_keyword}, and the old keyword entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} keywords updated"
)
return redirect("keyword_edit", pk=target_keyword.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_merge.html", context)
@user_is_staff
@transaction.atomic
def keyword_multi_merge(request):
context = {
"tag_merge_form": KeywordMultiMergeForm,
"tag_category": "Keyword",
"multi_merge_view": "keyword_multi_merge",
}
if request.method == "POST":
"""
        Posting a target keyword and a set of source keywords merges each source into the target and deletes the source entries.
"""
raw_form = KeywordMultiMergeForm(request.POST)
if raw_form.is_valid():
target_keyword = raw_form.cleaned_data["into"]
source_keywords = raw_form.cleaned_data["sources"].exclude(
pk=target_keyword.pk
)
for keyword in source_keywords:
old_keyword_id = keyword.title
merge_results = keyword.merge(target_keyword)
messages.success(
request,
f"Keyword {old_keyword_id} has been merged into {target_keyword}, and the old keyword entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} keywords updated"
)
return redirect("keyword_edit", pk=target_keyword.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_multi_merge.html", context)
class TopicCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Topic
template_name = "generic_form.html"
extra_context = {"form_title": "Create topic", "cancel_view": "full_topic_list"}
fields = ["title"]
success_message = "Topic '%(title)s' created"
success_url = reverse_lazy("full_topic_list")
class TopicDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Topic
template_name = "generic_form.html"
extra_context = {"form_title": "Delete topic", "cancel_view": "full_topic_list"}
success_message = "Topic '%(title)s' deleted"
success_url = reverse_lazy("full_topic_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(TopicDelete, self).delete(request, *args, **kwargs)
class TopicEdit(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Topic
template_name = "generic_form.html"
extra_context = {
"form_title": "Update topic",
"cancel_view": "full_topic_list",
"merge_view": "topic_merge",
"delete_view": "topic_delete",
}
fields = ["title"]
success_message = "Topic '%(title)s' updated"
success_url = reverse_lazy("full_topic_list")
class TopicList(LoginRequiredMixin, ListView):
model = Topic
template_name = "tag_list.html"
context_object_name = "tag_list"
extra_context = {
"tag_category": "Topics",
"tag_edit_view": "topic_edit",
"tag_create_view": "topic_create",
"tag_filter_form": TagForm,
"tag_list_view": "full_topic_list",
"multi_merge": "topic_multi_merge",
"filter_param_name": "topics",
}
def get_queryset(self):
base_results_set = Topic.objects.order_by("title")
results_set = base_results_set.annotate(n_works=Count("works"))
raw_filter_form = TagForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
if filter_form["name"] != "":
results_set = results_set.filter(title__icontains=filter_form["name"])
if filter_form["ordering"] == "a":
results_set = results_set.order_by("title")
elif filter_form["ordering"] == "n_asc":
results_set = results_set.order_by("n_works")
elif filter_form["ordering"] == "n_dsc":
results_set = results_set.order_by("-n_works")
return results_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["filtered_tags_count"] = self.get_queryset().count()
context["available_tags_count"] = Topic.objects.count()
return context
@user_is_staff
@transaction.atomic
def topic_merge(request, topic_id):
topic = get_object_or_404(Topic, pk=topic_id)
affected_elements = topic.works.all()
count_elements = affected_elements.count() - 10
sample_elements = affected_elements[:10]
context = {
"merging": topic,
"tag_merge_form": TopicMergeForm,
"tag_category": "Topic",
"merge_view": "topic_merge",
"sample_elements": sample_elements,
"count_elements": count_elements,
}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this topic.
"""
return render(request, "tag_merge.html", context)
elif request.method == "POST":
"""
        Posting the target topic id causes all works tagged with the old topic to be retagged before the old topic entry is deleted.
"""
raw_form = TopicMergeForm(request.POST)
if raw_form.is_valid():
target_topic = raw_form.cleaned_data["into"]
if topic == target_topic:
"""
If the user chooses the existing topic, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge a topic into itself. Please select a different topic.",
)
return redirect("topic_merge", topic_id=topic_id)
else:
old_topic_id = str(topic)
merge_results = topic.merge(target_topic)
messages.success(
request,
f"Topic {old_topic_id} has been merged into {target_topic}, and the old topic entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} topics updated"
)
return redirect("topic_edit", pk=target_topic.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_merge.html", context)
@user_is_staff
@transaction.atomic
def topic_multi_merge(request):
context = {
"tag_merge_form": TopicMultiMergeForm,
"tag_category": "Topic",
"multi_merge_view": "topic_multi_merge",
}
if request.method == "POST":
"""
        Posting a target topic and a set of source topics merges each source into the target and deletes the source entries.
"""
raw_form = TopicMultiMergeForm(request.POST)
if raw_form.is_valid():
target_topic = raw_form.cleaned_data["into"]
source_topics = raw_form.cleaned_data["sources"].exclude(pk=target_topic.pk)
for topic in source_topics:
old_topic_id = topic.title
merge_results = topic.merge(target_topic)
messages.success(
request,
f"Topic {old_topic_id} has been merged into {target_topic}, and the old topic entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} topics updated"
)
return redirect("topic_edit", pk=target_topic.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_multi_merge.html", context)
class LanguageCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Language
template_name = "generic_form.html"
extra_context = {
"form_title": "Create language",
"cancel_view": "full_language_list",
}
fields = ["title", "code"]
success_message = "Language '%(title)s' created"
success_url = reverse_lazy("full_language_list")
class LanguageDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = Language
template_name = "generic_form.html"
extra_context = {
"form_title": "Delete language",
"cancel_view": "full_language_list",
}
success_message = "Language '%(title)s' deleted"
success_url = reverse_lazy("full_language_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(LanguageDelete, self).delete(request, *args, **kwargs)
class LanguageEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = Language
template_name = "generic_form.html"
extra_context = {
"form_title": "Update language",
"cancel_view": "full_language_list",
"merge_view": "language_merge",
"delete_view": "language_delete",
}
fields = ["title", "code"]
success_message = "Language '%(title)s' updated"
success_url = reverse_lazy("full_language_list")
class LanguageList(LoginRequiredMixin, ListView):
model = Language
template_name = "tag_list.html"
context_object_name = "tag_list"
extra_context = {
"tag_category": "Languages",
"tag_edit_view": "language_edit",
"tag_create_view": "language_create",
"tag_filter_form": TagForm,
"tag_list_view": "full_language_list",
"filter_param_name": "languages",
}
def get_queryset(self):
base_results_set = Language.objects.order_by("title")
results_set = base_results_set.annotate(n_works=Count("works"))
raw_filter_form = TagForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
if filter_form["name"] != "":
results_set = results_set.filter(title__icontains=filter_form["name"])
if filter_form["ordering"] == "a":
results_set = results_set.order_by("title")
elif filter_form["ordering"] == "n_asc":
results_set = results_set.order_by("n_works")
elif filter_form["ordering"] == "n_dsc":
results_set = results_set.order_by("-n_works")
return results_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["filtered_tags_count"] = self.get_queryset().count()
context["available_tags_count"] = Language.objects.count()
return context
@user_is_staff
@transaction.atomic
def language_merge(request, language_id):
language = get_object_or_404(Language, pk=language_id)
affected_elements = language.works.all()
count_elements = affected_elements.count() - 10
sample_elements = affected_elements[:10]
context = {
"merging": language,
"tag_merge_form": LanguageMergeForm,
"tag_category": "Language",
"merge_view": "language_merge",
"sample_elements": sample_elements,
"count_elements": count_elements,
}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this language.
"""
return render(request, "tag_merge.html", context)
elif request.method == "POST":
"""
        Posting the target language id causes all works in the old language to be reassigned before the old language entry is deleted.
"""
raw_form = LanguageMergeForm(request.POST)
if raw_form.is_valid():
target_language = raw_form.cleaned_data["into"]
if language == target_language:
"""
If the user chooses the existing language, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge a language into itself. Please select a different language.",
)
return redirect("language_merge", language_id=language_id)
else:
old_language_id = str(language)
merge_results = language.merge(target_language)
messages.success(
request,
f"Language {old_language_id} has been merged into {target_language}, and the old language entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} languages updated"
)
return redirect("language_edit", pk=target_language.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_merge.html", context)
class WorkTypeCreate(StaffRequiredMixin, SuccessMessageMixin, CreateView):
model = WorkType
template_name = "generic_form.html"
extra_context = {
"form_title": "Create work_type",
"cancel_view": "full_work_type_list",
}
fields = ["title", "is_parent"]
success_message = "Abstract type '%(title)s' created"
success_url = reverse_lazy("full_work_type_list")
class WorkTypeDelete(StaffRequiredMixin, SuccessMessageMixin, DeleteView):
model = WorkType
template_name = "generic_form.html"
extra_context = {
"form_title": "Delete work_type",
"cancel_view": "full_work_type_list",
}
success_message = "Abstract type '%(title)s' deleted"
success_url = reverse_lazy("full_work_type_list")
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(WorkTypeDelete, self).delete(request, *args, **kwargs)
class WorkTypeEdit(StaffRequiredMixin, SuccessMessageMixin, UpdateView):
model = WorkType
template_name = "generic_form.html"
extra_context = {
"form_title": "Update abstract type",
"cancel_view": "full_work_type_list",
"merge_view": "work_type_merge",
"delete_view": "work_type_delete",
}
fields = ["title", "is_parent"]
    success_message = "Abstract type '%(title)s' updated"
success_url = reverse_lazy("full_work_type_list")
class WorkTypeList(LoginRequiredMixin, ListView):
model = WorkType
template_name = "tag_list.html"
context_object_name = "tag_list"
extra_context = {
"tag_category": "Abstract Types",
"tag_edit_view": "work_type_edit",
"tag_create_view": "work_type_create",
"tag_filter_form": TagForm,
"tag_list_view": "full_work_type_list",
"filter_param_name": "work_type",
}
def get_queryset(self):
base_results_set = WorkType.objects.order_by("title")
results_set = base_results_set.annotate(n_works=Count("works"))
raw_filter_form = TagForm(self.request.GET)
if raw_filter_form.is_valid():
filter_form = raw_filter_form.cleaned_data
if filter_form["name"] != "":
results_set = results_set.filter(title__icontains=filter_form["name"])
if filter_form["ordering"] == "a":
results_set = results_set.order_by("title")
elif filter_form["ordering"] == "n_asc":
results_set = results_set.order_by("n_works")
elif filter_form["ordering"] == "n_dsc":
results_set = results_set.order_by("-n_works")
return results_set
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["filtered_tags_count"] = self.get_queryset().count()
context["available_tags_count"] = WorkType.objects.count()
return context
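# Merging one work_type into another: GET renders a confirmation page showing up
# to ten affected works (count_elements holds how many more there are), while
# POST reassigns everything to the chosen target inside a single transaction and
# deletes the old work_type, mirroring the language merge view above.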
@user_is_staff
@transaction.atomic
def work_type_merge(request, work_type_id):
work_type = get_object_or_404(WorkType, pk=work_type_id)
affected_elements = work_type.works.all()
count_elements = affected_elements.count() - 10
sample_elements = affected_elements[:10]
context = {
"merging": work_type,
"tag_merge_form": WorkTypeMergeForm,
"tag_category": "Abstract Type",
"merge_view": "work_type_merge",
"sample_elements": sample_elements,
"count_elements": count_elements,
}
if request.method == "GET":
"""
Initial load of the merge form displays all the authors and works associated with this work_type.
"""
return render(request, "tag_merge.html", context)
elif request.method == "POST":
"""
Posting the new author id causes all of the old author's authorships to be reassigned.
"""
raw_form = WorkTypeMergeForm(request.POST)
if raw_form.is_valid():
target_work_type = raw_form.cleaned_data["into"]
if work_type == target_work_type:
"""
If the user chooses the existing work_type, don't merge, but instead error out.
"""
messages.error(
request,
f"You cannot merge a work_type into itself. Please select a different work_type.",
)
return redirect("work_type_merge", work_type_id=work_type_id)
else:
old_work_type_id = str(work_type)
merge_results = work_type.merge(target_work_type)
messages.success(
request,
f"WorkType {old_work_type_id} has been merged into {target_work_type}, and the old work_type entry has been deleted.",
)
messages.success(
request, f"{merge_results['update_results']} work_types updated"
)
return redirect("work_type_edit", pk=target_work_type.pk)
else:
for error in raw_form.errors:
messages.error(request, error)
return render(request, "tag_merge.html", context)
|
import logging
import operator
import time
import traceback
from pathlib import Path
from typing import List, Type, Set, Tuple, Optional
from PyQt5.QtCore import QEvent, Qt, pyqtSignal
from PyQt5.QtGui import QIcon, QWindowStateChangeEvent, QCursor
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QCheckBox, QHeaderView, QToolBar, \
QLabel, QPlainTextEdit, QProgressBar, QPushButton, QComboBox, QApplication, QListView, QSizePolicy, \
QMenu, QHBoxLayout
from bauh.api import user
from bauh.api.abstract.cache import MemoryCache
from bauh.api.abstract.context import ApplicationContext
from bauh.api.abstract.controller import SoftwareManager, SoftwareAction
from bauh.api.abstract.model import SoftwarePackage
from bauh.api.abstract.view import MessageType
from bauh.api.http import HttpClient
from bauh.api.paths import LOGS_DIR
from bauh.commons.html import bold
from bauh.context import set_theme
from bauh.stylesheet import read_all_themes_metadata, ThemeMetadata
from bauh.view.core.config import CoreConfigManager
from bauh.view.core.tray_client import notify_tray
from bauh.view.qt import dialog, commons, qt_utils
from bauh.view.qt.about import AboutDialog
from bauh.view.qt.apps_table import PackagesTable, UpgradeToggleButton
from bauh.view.qt.commons import sum_updates_displayed
from bauh.view.qt.components import new_spacer, IconButton, QtComponentsManager, to_widget, QSearchBar, \
QCustomMenuAction, QCustomToolbar
from bauh.view.qt.dialog import ConfirmationDialog
from bauh.view.qt.history import HistoryDialog
from bauh.view.qt.info import InfoDialog
from bauh.view.qt.root import RootDialog
from bauh.view.qt.screenshots import ScreenshotsDialog
from bauh.view.qt.settings import SettingsWindow
from bauh.view.qt.thread import UpgradeSelected, RefreshApps, UninstallPackage, DowngradePackage, ShowPackageInfo, \
ShowPackageHistory, SearchPackages, InstallPackage, AnimateProgress, NotifyPackagesReady, FindSuggestions, \
ListWarnings, \
AsyncAction, LaunchPackage, ApplyFilters, CustomSoftwareAction, ShowScreenshots, CustomAction, \
NotifyInstalledLoaded, \
IgnorePackageUpdates, SaveTheme, StartAsyncAction
from bauh.view.qt.view_model import PackageView, PackageViewStatus
from bauh.view.util import util, resource
from bauh.view.util.translation import I18n
DARK_ORANGE = '#FF4500'
# action ids
ACTION_APPLY_FILTERS = 1
ACTION_SEARCH = 2
ACTION_INSTALL = 3
ACTION_UNINSTALL = 4
ACTION_INFO = 5
ACTION_HISTORY = 6
ACTION_DOWNGRADE = 7
ACTION_UPGRADE = 8
ACTION_LAUNCH = 9
ACTION_CUSTOM_ACTION = 10
ACTION_SCREENSHOTS = 11
ACTION_IGNORE_UPDATES = 12
# component ids
SEARCH_BAR = 1
BT_INSTALLED = 2
BT_REFRESH = 3
BT_SUGGESTIONS = 4
BT_UPGRADE = 5
CHECK_UPDATES = 6
CHECK_APPS = 7
COMBO_TYPES = 8
COMBO_CATEGORIES = 9
INP_NAME = 10
CHECK_DETAILS = 11
BT_SETTINGS = 12
BT_CUSTOM_ACTIONS = 13
BT_ABOUT = 14
BT_THEMES = 15
# component group ids
GROUP_FILTERS = 1
GROUP_VIEW_INSTALLED = 2
GROUP_VIEW_SEARCH = 3
GROUP_UPPER_BAR = 4
GROUP_LOWER_BTS = 5
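# The integer ids above act as keys for QtComponentsManager: action ids tag UI
# states that are saved and restored around long-running operations, component
# ids register individual widgets, and group ids bundle widgets that are shown,
# hidden or disabled together.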
class ManageWindow(QWidget):
signal_user_res = pyqtSignal(bool)
signal_root_password = pyqtSignal(bool, str)
signal_table_update = pyqtSignal()
signal_stop_notifying = pyqtSignal()
def __init__(self, i18n: I18n, icon_cache: MemoryCache, manager: SoftwareManager, screen_size, config: dict,
context: ApplicationContext, http_client: HttpClient, logger: logging.Logger, icon: QIcon):
super(ManageWindow, self).__init__()
self.setObjectName('manage_window')
self.comp_manager = QtComponentsManager()
self.i18n = i18n
self.logger = logger
self.manager = manager
self.working = False # restrict the number of threaded actions
self.installed_loaded = False # used to control the state when the interface is set to not load the apps on startup
        self.pkgs = []  # packages currently loaded in the table
self.pkgs_available = [] # all packages loaded in memory
self.pkgs_installed = [] # cached installed packages
self.display_limit = config['ui']['table']['max_displayed']
self.icon_cache = icon_cache
self.screen_size = screen_size
self.config = config
self.context = context
self.http_client = http_client
self.icon_app = icon
self.setWindowIcon(self.icon_app)
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.toolbar_status = QToolBar()
self.toolbar_status.setObjectName('toolbar_status')
self.toolbar_status.addWidget(new_spacer())
self.label_status = QLabel()
self.label_status.setObjectName('label_status')
self.label_status.setText('')
self.toolbar_status.addWidget(self.label_status)
self.search_bar = QSearchBar(search_callback=self.search)
self.search_bar.set_placeholder(i18n['window_manage.search_bar.placeholder'] + "...")
self.search_bar.set_tooltip(i18n['window_manage.search_bar.tooltip'])
self.search_bar.set_button_tooltip(i18n['window_manage.search_bar.button_tooltip'])
self.comp_manager.register_component(SEARCH_BAR, self.search_bar, self.toolbar_status.addWidget(self.search_bar))
self.toolbar_status.addWidget(new_spacer())
self.layout.addWidget(self.toolbar_status)
self.toolbar_filters = QWidget()
self.toolbar_filters.setObjectName('table_filters')
self.toolbar_filters.setLayout(QHBoxLayout())
self.toolbar_filters.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.toolbar_filters.setContentsMargins(0, 0, 0, 0)
self.check_updates = QCheckBox()
self.check_updates.setObjectName('check_updates')
self.check_updates.setCursor(QCursor(Qt.PointingHandCursor))
self.check_updates.setText(self.i18n['updates'].capitalize())
self.check_updates.stateChanged.connect(self._handle_updates_filter)
self.check_updates.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.check_updates)
self.comp_manager.register_component(CHECK_UPDATES, self.check_updates)
self.check_apps = QCheckBox()
self.check_apps.setObjectName('check_apps')
self.check_apps.setCursor(QCursor(Qt.PointingHandCursor))
self.check_apps.setText(self.i18n['manage_window.checkbox.only_apps'])
self.check_apps.setChecked(True)
self.check_apps.stateChanged.connect(self._handle_filter_only_apps)
self.check_apps.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.check_apps)
self.comp_manager.register_component(CHECK_APPS, self.check_apps)
self.any_type_filter = 'any'
self.cache_type_filter_icons = {}
self.combo_filter_type = QComboBox()
self.combo_filter_type.setObjectName('combo_types')
self.combo_filter_type.setCursor(QCursor(Qt.PointingHandCursor))
self.combo_filter_type.setView(QListView())
self.combo_filter_type.view().setCursor(QCursor(Qt.PointingHandCursor))
self.combo_filter_type.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.combo_filter_type.setEditable(True)
self.combo_filter_type.lineEdit().setReadOnly(True)
self.combo_filter_type.lineEdit().setAlignment(Qt.AlignCenter)
self.combo_filter_type.activated.connect(self._handle_type_filter)
self.combo_filter_type.addItem('--- {} ---'.format(self.i18n['type'].capitalize()), self.any_type_filter)
self.combo_filter_type.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.combo_filter_type)
self.comp_manager.register_component(COMBO_TYPES, self.combo_filter_type)
self.any_category_filter = 'any'
self.combo_categories = QComboBox()
self.combo_categories.setObjectName('combo_categories')
self.combo_categories.setCursor(QCursor(Qt.PointingHandCursor))
self.combo_categories.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.combo_categories.view().setCursor(QCursor(Qt.PointingHandCursor))
self.combo_categories.setEditable(True)
self.combo_categories.lineEdit().setReadOnly(True)
self.combo_categories.lineEdit().setAlignment(Qt.AlignCenter)
self.combo_categories.activated.connect(self._handle_category_filter)
self.combo_categories.sizePolicy().setRetainSizeWhenHidden(True)
self.combo_categories.addItem('--- {} ---'.format(self.i18n['category'].capitalize()), self.any_category_filter)
self.toolbar_filters.layout().addWidget(self.combo_categories)
self.comp_manager.register_component(COMBO_CATEGORIES, self.combo_categories)
self.input_name = QSearchBar(search_callback=self.begin_apply_filters)
self.input_name.palette().swap(self.combo_categories.palette())
self.input_name.setObjectName('name_filter')
self.input_name.set_placeholder(self.i18n['manage_window.name_filter.placeholder'] + '...')
self.input_name.set_tooltip(self.i18n['manage_window.name_filter.tooltip'])
self.input_name.set_button_tooltip(self.i18n['manage_window.name_filter.button_tooltip'])
self.input_name.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.input_name)
self.comp_manager.register_component(INP_NAME, self.input_name)
self.toolbar_filters.layout().addWidget(new_spacer())
toolbar_bts = []
bt_inst = QPushButton()
bt_inst.setObjectName('bt_installed')
bt_inst.setProperty('root', 'true')
bt_inst.setCursor(QCursor(Qt.PointingHandCursor))
bt_inst.setToolTip(self.i18n['manage_window.bt.installed.tooltip'])
bt_inst.setText(self.i18n['manage_window.bt.installed.text'].capitalize())
bt_inst.clicked.connect(self._begin_loading_installed)
bt_inst.sizePolicy().setRetainSizeWhenHidden(True)
toolbar_bts.append(bt_inst)
self.toolbar_filters.layout().addWidget(bt_inst)
self.comp_manager.register_component(BT_INSTALLED, bt_inst)
bt_ref = QPushButton()
bt_ref.setObjectName('bt_refresh')
bt_ref.setProperty('root', 'true')
bt_ref.setCursor(QCursor(Qt.PointingHandCursor))
bt_ref.setToolTip(i18n['manage_window.bt.refresh.tooltip'])
bt_ref.setText(self.i18n['manage_window.bt.refresh.text'])
bt_ref.clicked.connect(self.begin_refresh_packages)
bt_ref.sizePolicy().setRetainSizeWhenHidden(True)
toolbar_bts.append(bt_ref)
self.toolbar_filters.layout().addWidget(bt_ref)
self.comp_manager.register_component(BT_REFRESH, bt_ref)
self.bt_upgrade = QPushButton()
self.bt_upgrade.setProperty('root', 'true')
self.bt_upgrade.setObjectName('bt_upgrade')
self.bt_upgrade.setCursor(QCursor(Qt.PointingHandCursor))
self.bt_upgrade.setToolTip(i18n['manage_window.bt.upgrade.tooltip'])
self.bt_upgrade.setText(i18n['manage_window.bt.upgrade.text'])
self.bt_upgrade.clicked.connect(self.upgrade_selected)
self.bt_upgrade.sizePolicy().setRetainSizeWhenHidden(True)
toolbar_bts.append(self.bt_upgrade)
self.toolbar_filters.layout().addWidget(self.bt_upgrade)
self.comp_manager.register_component(BT_UPGRADE, self.bt_upgrade)
# setting all buttons to the same size:
bt_biggest_size = 0
for bt in toolbar_bts:
bt_width = bt.sizeHint().width()
if bt_width > bt_biggest_size:
bt_biggest_size = bt_width
for bt in toolbar_bts:
bt_width = bt.sizeHint().width()
if bt_biggest_size > bt_width:
bt.setFixedWidth(bt_biggest_size)
self.layout.addWidget(self.toolbar_filters)
self.table_container = QWidget()
self.table_container.setObjectName('table_container')
self.table_container.setContentsMargins(0, 0, 0, 0)
self.table_container.setLayout(QVBoxLayout())
self.table_container.layout().setContentsMargins(0, 0, 0, 0)
self.table_apps = PackagesTable(self, self.icon_cache, download_icons=bool(self.config['download']['icons']))
self.table_apps.change_headers_policy()
self.table_container.layout().addWidget(self.table_apps)
self.layout.addWidget(self.table_container)
self.toolbar_console = QWidget()
self.toolbar_console.setObjectName('console_toolbar')
self.toolbar_console.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.toolbar_console.setLayout(QHBoxLayout())
self.toolbar_console.setContentsMargins(0, 0, 0, 0)
self.check_details = QCheckBox()
self.check_details.setObjectName('check_details')
self.check_details.setCursor(QCursor(Qt.PointingHandCursor))
self.check_details.setText(self.i18n['manage_window.checkbox.show_details'])
self.check_details.stateChanged.connect(self._handle_console)
self.toolbar_console.layout().addWidget(self.check_details)
self.comp_manager.register_component(CHECK_DETAILS, self.check_details)
self.toolbar_console.layout().addWidget(new_spacer())
self.label_displayed = QLabel()
self.label_displayed.setObjectName('apps_displayed')
self.label_displayed.setCursor(QCursor(Qt.WhatsThisCursor))
self.label_displayed.setToolTip(self.i18n['manage_window.label.apps_displayed.tip'])
self.toolbar_console.layout().addWidget(self.label_displayed)
self.label_displayed.hide()
self.layout.addWidget(self.toolbar_console)
self.textarea_details = QPlainTextEdit(self)
self.textarea_details.setObjectName('textarea_details')
self.textarea_details.setProperty('console', 'true')
self.textarea_details.resize(self.table_apps.size())
self.layout.addWidget(self.textarea_details)
self.textarea_details.setVisible(False)
self.textarea_details.setReadOnly(True)
self.toolbar_substatus = QToolBar()
self.toolbar_substatus.setObjectName('toolbar_substatus')
self.toolbar_substatus.addWidget(new_spacer())
self.label_substatus = QLabel()
self.label_substatus.setObjectName('label_substatus')
self.label_substatus.setCursor(QCursor(Qt.WaitCursor))
self.toolbar_substatus.addWidget(self.label_substatus)
self.toolbar_substatus.addWidget(new_spacer())
self.layout.addWidget(self.toolbar_substatus)
self._change_label_substatus('')
self.thread_update = self._bind_async_action(UpgradeSelected(self.manager, context.internet_checker, self.i18n), finished_call=self._finish_upgrade_selected)
self.thread_refresh = self._bind_async_action(RefreshApps(self.manager), finished_call=self._finish_refresh_packages, only_finished=True)
self.thread_uninstall = self._bind_async_action(UninstallPackage(self.manager, self.icon_cache, self.i18n), finished_call=self._finish_uninstall)
self.thread_show_info = self._bind_async_action(ShowPackageInfo(self.manager), finished_call=self._finish_show_info)
self.thread_show_history = self._bind_async_action(ShowPackageHistory(self.manager, self.i18n), finished_call=self._finish_show_history)
self.thread_search = self._bind_async_action(SearchPackages(self.manager), finished_call=self._finish_search, only_finished=True)
self.thread_downgrade = self._bind_async_action(DowngradePackage(self.manager, self.i18n), finished_call=self._finish_downgrade)
self.thread_suggestions = self._bind_async_action(FindSuggestions(man=self.manager), finished_call=self._finish_load_suggestions, only_finished=True)
self.thread_launch = self._bind_async_action(LaunchPackage(self.manager), finished_call=self._finish_launch_package, only_finished=False)
self.thread_custom_action = self._bind_async_action(CustomAction(manager=self.manager, i18n=self.i18n), finished_call=self._finish_execute_custom_action)
self.thread_screenshots = self._bind_async_action(ShowScreenshots(self.manager), finished_call=self._finish_show_screenshots)
self.thread_apply_filters = ApplyFilters()
self.thread_apply_filters.signal_finished.connect(self._finish_apply_filters)
self.thread_apply_filters.signal_table.connect(self._update_table_and_upgrades)
self.signal_table_update.connect(self.thread_apply_filters.stop_waiting)
self.thread_install = InstallPackage(manager=self.manager, icon_cache=self.icon_cache, i18n=self.i18n)
self._bind_async_action(self.thread_install, finished_call=self._finish_install)
self.thread_animate_progress = AnimateProgress()
self.thread_animate_progress.signal_change.connect(self._update_progress)
self.thread_notify_pkgs_ready = NotifyPackagesReady()
self.thread_notify_pkgs_ready.signal_changed.connect(self._update_package_data)
self.thread_notify_pkgs_ready.signal_finished.connect(self._update_state_when_pkgs_ready)
self.signal_stop_notifying.connect(self.thread_notify_pkgs_ready.stop_working)
self.thread_ignore_updates = IgnorePackageUpdates(manager=self.manager)
self._bind_async_action(self.thread_ignore_updates, finished_call=self.finish_ignore_updates)
self.thread_reload = StartAsyncAction(delay_in_milis=5)
self.thread_reload.signal_start.connect(self._reload)
self.container_bottom = QWidget()
self.container_bottom.setObjectName('container_bottom')
self.container_bottom.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.container_bottom.setLayout(QHBoxLayout())
self.container_bottom.layout().setContentsMargins(0, 0, 0, 0)
self.container_bottom.layout().addWidget(new_spacer())
if config['suggestions']['enabled']:
bt_sugs = IconButton(action=lambda: self._begin_load_suggestions(filter_installed=True),
i18n=i18n,
tooltip=self.i18n['manage_window.bt.suggestions.tooltip'])
bt_sugs.setObjectName('suggestions')
self.container_bottom.layout().addWidget(bt_sugs)
self.comp_manager.register_component(BT_SUGGESTIONS, bt_sugs)
bt_themes = IconButton(self.show_themes,
i18n=self.i18n,
tooltip=self.i18n['manage_window.bt_themes.tip'])
bt_themes.setObjectName('themes')
self.container_bottom.layout().addWidget(bt_themes)
self.comp_manager.register_component(BT_THEMES, bt_themes)
self.custom_actions = [a for a in manager.gen_custom_actions()]
bt_custom_actions = IconButton(action=self.show_custom_actions,
i18n=self.i18n,
tooltip=self.i18n['manage_window.bt_custom_actions.tip'])
bt_custom_actions.setObjectName('custom_actions')
bt_custom_actions.setVisible(bool(self.custom_actions))
self.container_bottom.layout().addWidget(bt_custom_actions)
self.comp_manager.register_component(BT_CUSTOM_ACTIONS, bt_custom_actions)
bt_settings = IconButton(action=self.show_settings,
i18n=self.i18n,
tooltip=self.i18n['manage_window.bt_settings.tooltip'])
bt_settings.setObjectName('settings')
self.container_bottom.layout().addWidget(bt_settings)
self.comp_manager.register_component(BT_SETTINGS, bt_settings)
bt_about = IconButton(action=self._show_about,
i18n=self.i18n,
tooltip=self.i18n['manage_window.settings.about'])
bt_about.setObjectName('about')
self.container_bottom.layout().addWidget(bt_about)
self.comp_manager.register_component(BT_ABOUT, bt_about)
self.layout.addWidget(self.container_bottom)
self.container_progress = QCustomToolbar(spacing=0, policy_height=QSizePolicy.Fixed)
self.container_progress.setObjectName('container_progress')
self.container_progress.add_space()
self.progress_bar = QProgressBar()
self.progress_bar.setObjectName('progress_manage')
self.progress_bar.setCursor(QCursor(Qt.WaitCursor))
self.progress_bar.setTextVisible(False)
self.container_progress.add_widget(self.progress_bar)
self.container_progress.add_space()
self.layout.addWidget(self.container_progress)
qt_utils.centralize(self)
self.filter_only_apps = True
self.type_filter = self.any_type_filter
self.category_filter = self.any_category_filter
self.filter_updates = False
self._maximized = False
self.progress_controll_enabled = True
self.recent_uninstall = False
self.types_changed = False
self.dialog_about = None
self.load_suggestions = bool(config['suggestions']['enabled'])
self.suggestions_requested = False
self.first_refresh = True
self.thread_warnings = ListWarnings(man=manager, i18n=i18n)
self.thread_warnings.signal_warnings.connect(self._show_warnings)
self.settings_window = None
self.search_performed = False
self.thread_save_theme = SaveTheme(theme_key='')
self.thread_load_installed = NotifyInstalledLoaded()
self.thread_load_installed.signal_loaded.connect(self._finish_loading_installed)
self.setMinimumHeight(int(screen_size.height() * 0.5))
self.setMinimumWidth(int(screen_size.width() * 0.6))
self._register_groups()
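    # Widget groups allow related controls to be toggled with a single call:
    # GROUP_FILTERS covers the filter row, GROUP_VIEW_SEARCH/GROUP_VIEW_INSTALLED
    # the controls relevant to search results vs. the installed view,
    # GROUP_UPPER_BAR the whole upper toolbar and GROUP_LOWER_BTS the bottom
    # button row.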
def _register_groups(self):
filters = (CHECK_APPS, CHECK_UPDATES, COMBO_CATEGORIES, COMBO_TYPES, INP_NAME)
self.comp_manager.register_group(GROUP_FILTERS, False, *filters)
self.comp_manager.register_group(GROUP_VIEW_SEARCH, False,
COMBO_CATEGORIES, COMBO_TYPES, INP_NAME, # filters
BT_INSTALLED, BT_SUGGESTIONS) # buttons
self.comp_manager.register_group(GROUP_VIEW_INSTALLED, False,
BT_REFRESH, BT_UPGRADE, # buttons
*filters)
self.comp_manager.register_group(GROUP_UPPER_BAR, False,
CHECK_APPS, CHECK_UPDATES, COMBO_CATEGORIES, COMBO_TYPES, INP_NAME,
BT_INSTALLED, BT_SUGGESTIONS, BT_REFRESH, BT_UPGRADE)
self.comp_manager.register_group(GROUP_LOWER_BTS, False, BT_SUGGESTIONS, BT_THEMES, BT_CUSTOM_ACTIONS, BT_SETTINGS, BT_ABOUT)
def update_custom_actions(self):
self.custom_actions = [a for a in self.manager.gen_custom_actions()]
def _update_process_progress(self, val: int):
if self.progress_controll_enabled:
self.thread_animate_progress.set_progress(val)
def _change_status(self, status: str = None):
if status:
self.label_status.setText(status + '...')
self.label_status.setCursor(QCursor(Qt.WaitCursor))
else:
self.label_status.setText('')
self.label_status.unsetCursor()
def _set_table_enabled(self, enabled: bool):
self.table_apps.setEnabled(enabled)
if enabled:
self.table_container.unsetCursor()
else:
self.table_container.setCursor(QCursor(Qt.WaitCursor))
def begin_apply_filters(self):
self.stop_notifying_package_states()
self._begin_action(action_label=self.i18n['manage_window.status.filtering'],
action_id=ACTION_APPLY_FILTERS)
self.comp_manager.disable_visible_from_groups(GROUP_UPPER_BAR, GROUP_LOWER_BTS)
self.comp_manager.set_component_read_only(INP_NAME, True)
self.thread_apply_filters.filters = self._gen_filters()
self.thread_apply_filters.pkgs = self.pkgs_available
self.thread_apply_filters.start()
self.setFocus(Qt.NoFocusReason)
def _finish_apply_filters(self):
self._finish_action(ACTION_APPLY_FILTERS)
self.update_bt_upgrade()
def stop_notifying_package_states(self):
if self.thread_notify_pkgs_ready.isRunning():
self.signal_stop_notifying.emit()
self.thread_notify_pkgs_ready.wait(1000)
def _update_table_and_upgrades(self, pkgs_info: dict):
self._update_table(pkgs_info=pkgs_info, signal=True)
if self.pkgs:
self._update_state_when_pkgs_ready()
self.stop_notifying_package_states()
self.thread_notify_pkgs_ready.pkgs = self.pkgs
self.thread_notify_pkgs_ready.work = True
self.thread_notify_pkgs_ready.start()
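    # Central wiring point for worker threads: every AsyncAction gets its
    # 'finished' signal connected to a handler and, unless only_finished is set,
    # its confirmation/output/message/status/progress signals are routed to the
    # UI while the window's user-response and root-password signals are routed
    # back into the action.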
def _bind_async_action(self, action: AsyncAction, finished_call, only_finished: bool = False) -> AsyncAction:
action.signal_finished.connect(finished_call)
if not only_finished:
action.signal_confirmation.connect(self._ask_confirmation)
action.signal_output.connect(self._update_action_output)
action.signal_message.connect(self._show_message)
action.signal_status.connect(self._change_label_status)
action.signal_substatus.connect(self._change_label_substatus)
action.signal_progress.connect(self._update_process_progress)
action.signal_progress_control.connect(self.set_progress_controll)
action.signal_root_password.connect(self._pause_and_ask_root_password)
self.signal_user_res.connect(action.confirm)
self.signal_root_password.connect(action.set_root_password)
return action
def _ask_confirmation(self, msg: dict):
self.thread_animate_progress.pause()
extra_widgets = [to_widget(comp=c, i18n=self.i18n) for c in msg['components']] if msg.get('components') else None
diag = ConfirmationDialog(title=msg['title'],
body=msg['body'],
i18n=self.i18n,
widgets=extra_widgets,
confirmation_label=msg['confirmation_label'],
deny_label=msg['deny_label'],
deny_button=msg['deny_button'],
window_cancel=msg['window_cancel'],
confirmation_button=msg.get('confirmation_button', True))
diag.ask()
res = diag.confirmed
self.thread_animate_progress.animate()
self.signal_user_res.emit(res)
def _pause_and_ask_root_password(self):
self.thread_animate_progress.pause()
valid, password = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager)
self.thread_animate_progress.animate()
self.signal_root_password.emit(valid, password)
def _show_message(self, msg: dict):
self.thread_animate_progress.pause()
dialog.show_message(title=msg['title'], body=msg['body'], type_=msg['type'])
self.thread_animate_progress.animate()
def _show_warnings(self, warnings: List[str]):
if warnings:
dialog.show_message(title=self.i18n['warning'].capitalize(), body='<p>{}</p>'.format('<br/><br/>'.join(warnings)), type_=MessageType.WARNING)
def show(self):
super(ManageWindow, self).show()
if not self.thread_warnings.isFinished():
self.thread_warnings.start()
qt_utils.centralize(self)
def verify_warnings(self):
self.thread_warnings.start()
def _begin_loading_installed(self):
if self.installed_loaded:
self.search_bar.clear()
self.input_name.set_text('')
self._begin_action(self.i18n['manage_window.status.installed'])
self._handle_console_option(False)
self.comp_manager.set_components_visible(False)
self.suggestions_requested = False
self.search_performed = False
self.thread_load_installed.start()
else:
self.load_suggestions = False
self.begin_refresh_packages()
def _finish_loading_installed(self):
self._finish_action()
self.comp_manager.set_group_visible(GROUP_VIEW_INSTALLED, True)
self.update_pkgs(new_pkgs=None, as_installed=True)
self._hide_filters_no_packages()
self._update_bts_installed_and_suggestions()
self._set_lower_buttons_visible(True)
self._reorganize()
def _update_bts_installed_and_suggestions(self):
available_types = len(self.manager.get_managed_types())
self.comp_manager.set_component_visible(BT_INSTALLED, available_types > 0 and any([self.suggestions_requested, self.search_performed]))
self.comp_manager.set_component_visible(BT_SUGGESTIONS, available_types > 0)
def _hide_filters_no_packages(self):
if not self.pkgs:
self.comp_manager.set_group_visible(GROUP_FILTERS, False)
def _show_about(self):
if self.dialog_about is None:
self.dialog_about = AboutDialog(self.config)
self.dialog_about.show()
def _handle_updates_filter(self, status: int):
self.filter_updates = status == 2
self.begin_apply_filters()
def _handle_filter_only_apps(self, status: int):
self.filter_only_apps = status == 2
self.begin_apply_filters()
def _handle_type_filter(self, idx: int):
self.type_filter = self.combo_filter_type.itemData(idx)
self.combo_filter_type.adjustSize()
self.begin_apply_filters()
def _handle_category_filter(self, idx: int):
self.category_filter = self.combo_categories.itemData(idx)
self.begin_apply_filters()
def _update_state_when_pkgs_ready(self):
if self.progress_bar.isVisible():
return
self._reload_categories()
self._reorganize()
def _update_package_data(self, idx: int):
if self.table_apps.isEnabled():
pkg = self.pkgs[idx]
pkg.status = PackageViewStatus.READY
self.table_apps.update_package(pkg)
def _reload_categories(self):
categories = set()
for p in self.pkgs_available:
if p.model.categories:
for c in p.model.categories:
if c:
cat = c.strip().lower()
if cat:
categories.add(cat)
if categories:
self._update_categories(categories, keep_selected=True)
def changeEvent(self, e: QEvent):
if isinstance(e, QWindowStateChangeEvent):
self._maximized = self.isMaximized()
self.table_apps.change_headers_policy(maximized=self._maximized)
def _handle_console(self, checked: bool):
if checked:
self.textarea_details.show()
else:
self.textarea_details.hide()
def _handle_console_option(self, enable: bool):
if enable:
self.textarea_details.clear()
self.comp_manager.set_component_visible(CHECK_DETAILS, enable)
self.check_details.setChecked(False)
self.textarea_details.hide()
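    # Refreshing clears the search bar, hides every registered component while
    # the RefreshApps thread reloads the packages, and _finish_refresh_packages
    # then restores either the search view or the installed view before
    # rebuilding the table through update_pkgs.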
def begin_refresh_packages(self, pkg_types: Optional[Set[Type[SoftwarePackage]]] = None):
self.search_bar.clear()
self._begin_action(self.i18n['manage_window.status.refreshing'])
self.comp_manager.set_components_visible(False)
self._handle_console_option(False)
self.suggestions_requested = False
self.search_performed = False
self.thread_refresh.pkg_types = pkg_types
self.thread_refresh.start()
def _finish_refresh_packages(self, res: dict, as_installed: bool = True):
self._finish_action()
self._set_lower_buttons_visible(True)
self.comp_manager.set_component_visible(SEARCH_BAR, True)
if self.search_performed or self.suggestions_requested:
self.comp_manager.set_group_visible(GROUP_VIEW_SEARCH, True)
else:
self.comp_manager.set_group_visible(GROUP_VIEW_INSTALLED, True)
if self.update_pkgs(res['installed'], as_installed=as_installed, types=res['types']):
self._hide_filters_no_packages()
self._update_bts_installed_and_suggestions()
self._reorganize()
self.load_suggestions = False
self.types_changed = False
def load_without_packages(self):
self.load_suggestions = False
self._handle_console_option(False)
self._finish_refresh_packages({'installed': None, 'types': None}, as_installed=False)
def _begin_load_suggestions(self, filter_installed: bool):
self.search_bar.clear()
self._begin_action(self.i18n['manage_window.status.suggestions'])
self._handle_console_option(False)
self.comp_manager.set_components_visible(False)
self.suggestions_requested = True
self.thread_suggestions.filter_installed = filter_installed
self.thread_suggestions.start()
def _finish_load_suggestions(self, res: dict):
self._finish_search(res)
def begin_uninstall(self, pkg: PackageView):
pwd, proceed = self._ask_root_password(SoftwareAction.UNINSTALL, pkg)
if not proceed:
return
self._begin_action(action_label='{} {}'.format(self.i18n['manage_window.status.uninstalling'], pkg.model.name),
action_id=ACTION_UNINSTALL)
self.comp_manager.set_groups_visible(False, GROUP_UPPER_BAR, GROUP_LOWER_BTS)
self._handle_console_option(True)
self.thread_uninstall.pkg = pkg
self.thread_uninstall.root_pwd = pwd
self.thread_uninstall.start()
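    # After a successful uninstall the removed models are purged from the three
    # in-memory lists (available, displayed and installed) and the matching table
    # rows are deleted, keeping the table indexes consistent with self.pkgs.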
def _finish_uninstall(self, res: dict):
self._finish_action(action_id=ACTION_UNINSTALL)
if res['success']:
src_pkg = res['pkg']
if self._can_notify_user():
util.notify_user('{} ({}) {}'.format(src_pkg.model.name, src_pkg.model.get_type(), self.i18n['uninstalled']))
if res['removed']:
for list_idx, pkg_list in enumerate((self.pkgs_available, self.pkgs, self.pkgs_installed)):
if pkg_list:
removed_idxs = []
for pkgv_idx, pkgv in enumerate(pkg_list):
if len(removed_idxs) == len(res['removed']):
break
for model in res['removed']:
if pkgv.model == model:
if list_idx == 0: # updates the model
pkgv.update_model(model)
if not self.search_performed or list_idx == 2: # always from the installed packages
removed_idxs.append(pkgv_idx)
if self.search_performed and list_idx == 1: # only for displayed
self.table_apps.update_package(pkgv, change_update_col=True)
break # as the model has been found, stops the loop
if removed_idxs:
# updating the list
removed_idxs.sort()
for decrement, pkg_idx in enumerate(removed_idxs):
del pkg_list[pkg_idx - decrement]
                            if list_idx == 1:  # updates the rows if the current list represents the displayed packages
for decrement, idx in enumerate(removed_idxs):
self.table_apps.removeRow(idx - decrement)
self._update_table_indexes()
self.update_bt_upgrade()
self.update_custom_actions()
self._show_console_checkbox_if_output()
notify_tray()
else:
self._show_console_errors()
if self._can_notify_user():
util.notify_user('{}: {}'.format(res['pkg'].model.name, self.i18n['notification.uninstall.failed']))
def _update_table_indexes(self):
if self.pkgs:
for new_idx, pkgv in enumerate(self.pkgs): # updating the package indexes
pkgv.table_index = new_idx
def begin_launch_package(self, pkg: PackageView):
self._begin_action(action_label=self.i18n['manage_window.status.running_app'].format(pkg.model.name),
action_id=ACTION_LAUNCH)
self.comp_manager.disable_visible()
self.thread_launch.pkg = pkg
self.thread_launch.start()
def _finish_launch_package(self, success: bool):
self._finish_action(action_id=ACTION_LAUNCH)
def _can_notify_user(self):
return bool(self.config['system']['notifications']) and (self.isHidden() or self.isMinimized())
def _change_label_status(self, status: str):
self.label_status.setText(status)
def _change_label_substatus(self, substatus: str):
self.label_substatus.setText('<p>{}</p>'.format(substatus))
if not substatus:
self.toolbar_substatus.hide()
elif not self.toolbar_substatus.isVisible() and self.progress_bar.isVisible():
self.toolbar_substatus.show()
def _reorganize(self):
if not self._maximized:
self.table_apps.change_headers_policy(QHeaderView.Stretch)
self.table_apps.change_headers_policy()
self._resize(accept_lower_width=len(self.pkgs) > 0)
def _update_table(self, pkgs_info: dict, signal: bool = False):
self.pkgs = pkgs_info['pkgs_displayed']
if pkgs_info['not_installed'] == 0:
update_check = sum_updates_displayed(pkgs_info) > 0
else:
update_check = False
self.table_apps.update_packages(self.pkgs, update_check_enabled=update_check)
if not self._maximized:
self.label_displayed.show()
self.table_apps.change_headers_policy(QHeaderView.Stretch)
self.table_apps.change_headers_policy()
self._resize(accept_lower_width=len(self.pkgs) > 0)
if len(self.pkgs) == 0 and len(self.pkgs_available) == 0:
self.label_displayed.setText('')
else:
self.label_displayed.setText('{} / {}'.format(len(self.pkgs), len(self.pkgs_available)))
else:
self.label_displayed.hide()
if signal:
self.signal_table_update.emit()
def update_bt_upgrade(self, pkgs_info: dict = None):
show_bt_upgrade = False
if not any([self.suggestions_requested, self.search_performed]) and (not pkgs_info or pkgs_info['not_installed'] == 0):
for pkg in (pkgs_info['pkgs_displayed'] if pkgs_info else self.pkgs):
if not pkg.model.is_update_ignored() and pkg.update_checked:
show_bt_upgrade = True
break
self.comp_manager.set_component_visible(BT_UPGRADE, show_bt_upgrade)
if show_bt_upgrade:
self._reorganize()
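    # When updates are available the 'updates' checkbox is shown and may be
    # auto-checked (and the 'only apps' filter relaxed when the updates are not
    # applications); when there are none, the checkbox is unchecked and hidden.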
def change_update_state(self, pkgs_info: dict, trigger_filters: bool = True, keep_selected: bool = False):
self.update_bt_upgrade(pkgs_info)
if pkgs_info['updates'] > 0:
if pkgs_info['not_installed'] == 0:
if not self.comp_manager.is_visible(CHECK_UPDATES):
self.comp_manager.set_component_visible(CHECK_UPDATES, True)
if not self.filter_updates and not keep_selected:
self._change_checkbox(self.check_updates, True, 'filter_updates', trigger_filters)
if pkgs_info['napp_updates'] > 0 and self.filter_only_apps and not keep_selected:
self._change_checkbox(self.check_apps, False, 'filter_only_apps', trigger_filters)
else:
if not keep_selected:
self._change_checkbox(self.check_updates, False, 'filter_updates', trigger_filters)
self.comp_manager.set_component_visible(CHECK_UPDATES, False)
def _change_checkbox(self, checkbox: QCheckBox, checked: bool, attr: str = None, trigger: bool = True):
if not trigger:
checkbox.blockSignals(True)
checkbox.setChecked(checked)
if not trigger:
setattr(self, attr, checked)
checkbox.blockSignals(False)
def _gen_filters(self, ignore_updates: bool = False) -> dict:
return {
'only_apps': False if self.search_performed else self.filter_only_apps,
'type': self.type_filter,
'category': self.category_filter,
'updates': False if ignore_updates else self.filter_updates,
'name': self.input_name.text().lower() if self.input_name.text() else None,
'display_limit': None if self.filter_updates else self.display_limit
}
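    # update_pkgs is the main pipeline for (re)building the package views: it
    # wraps each SoftwarePackage in a PackageView, applies the current filters,
    # refreshes the category/type combos and the update checkbox, redraws the
    # table and starts the thread that fills in late-loading package data.
    # It returns False only when nothing was found and a suggestions load was
    # triggered instead.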
def update_pkgs(self, new_pkgs: Optional[List[SoftwarePackage]], as_installed: bool, types: Optional[Set[type]] = None, ignore_updates: bool = False, keep_filters: bool = False) -> bool:
self.input_name.set_text('')
pkgs_info = commons.new_pkgs_info()
filters = self._gen_filters(ignore_updates=ignore_updates)
if new_pkgs is not None:
old_installed = None
if as_installed:
old_installed = self.pkgs_installed
self.pkgs_installed = []
for pkg in new_pkgs:
app_model = PackageView(model=pkg, i18n=self.i18n)
commons.update_info(app_model, pkgs_info)
commons.apply_filters(app_model, filters, pkgs_info)
if old_installed and types:
for pkgv in old_installed:
if pkgv.model.__class__ not in types:
commons.update_info(pkgv, pkgs_info)
commons.apply_filters(pkgv, filters, pkgs_info)
else: # use installed
for pkgv in self.pkgs_installed:
commons.update_info(pkgv, pkgs_info)
commons.apply_filters(pkgv, filters, pkgs_info)
if pkgs_info['apps_count'] == 0:
if self.load_suggestions or self.types_changed:
if as_installed:
self.pkgs_installed = pkgs_info['pkgs']
self._begin_load_suggestions(filter_installed=False)
self.load_suggestions = False
return False
else:
if not keep_filters:
self._change_checkbox(self.check_apps, False, 'filter_only_apps', trigger=False)
self.check_apps.setCheckable(False)
else:
if not keep_filters:
self.check_apps.setCheckable(True)
self._change_checkbox(self.check_apps, True, 'filter_only_apps', trigger=False)
self.change_update_state(pkgs_info=pkgs_info, trigger_filters=False, keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self._update_categories(pkgs_info['categories'], keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self._update_type_filters(pkgs_info['available_types'], keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self._apply_filters(pkgs_info, ignore_updates=ignore_updates)
self.change_update_state(pkgs_info=pkgs_info, trigger_filters=False, keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self.pkgs_available = pkgs_info['pkgs']
if as_installed:
self.pkgs_installed = pkgs_info['pkgs']
self.pkgs = pkgs_info['pkgs_displayed']
self._update_table(pkgs_info=pkgs_info)
if new_pkgs:
self.stop_notifying_package_states()
self.thread_notify_pkgs_ready.work = True
self.thread_notify_pkgs_ready.pkgs = self.pkgs
self.thread_notify_pkgs_ready.start()
self._resize(accept_lower_width=bool(self.pkgs_installed))
if self.first_refresh:
qt_utils.centralize(self)
self.first_refresh = False
if not self.installed_loaded and as_installed:
self.installed_loaded = True
return True
def _apply_filters(self, pkgs_info: dict, ignore_updates: bool):
pkgs_info['pkgs_displayed'] = []
filters = self._gen_filters(ignore_updates=ignore_updates)
for pkgv in pkgs_info['pkgs']:
commons.apply_filters(pkgv, filters, pkgs_info)
def _clean_combo_types(self):
if self.combo_filter_type.count() > 1:
for _ in range(self.combo_filter_type.count() - 1):
self.combo_filter_type.removeItem(1)
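    # Rebuilds the type filter combo from the currently available package types,
    # caching their icons and restoring the previous selection when the selected
    # type is still present (keep_selected); the combo is hidden when there is
    # at most one type to choose from.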
def _update_type_filters(self, available_types: dict = None, keep_selected: bool = False):
if available_types is None:
self.comp_manager.set_component_visible(COMBO_TYPES, self.combo_filter_type.count() > 2)
else:
keeping_selected = keep_selected and available_types and self.type_filter in available_types
if not keeping_selected:
self.type_filter = self.any_type_filter
if not available_types:
self._clean_combo_types()
if available_types:
self._clean_combo_types()
sel_type = -1
for idx, item in enumerate(available_types.items()):
app_type, icon_path, label = item[0], item[1]['icon'], item[1]['label']
icon = self.cache_type_filter_icons.get(app_type)
if not icon:
icon = QIcon(icon_path)
self.cache_type_filter_icons[app_type] = icon
self.combo_filter_type.addItem(icon, label, app_type)
if keeping_selected and app_type == self.type_filter:
sel_type = idx + 1
self.combo_filter_type.blockSignals(True)
self.combo_filter_type.setCurrentIndex(sel_type if sel_type > -1 else 0)
self.combo_filter_type.blockSignals(False)
self.comp_manager.set_component_visible(COMBO_TYPES, len(available_types) > 1)
else:
self.comp_manager.set_component_visible(COMBO_TYPES, False)
def _update_categories(self, categories: Set[str] = None, keep_selected: bool = False):
if categories is None:
self.comp_manager.set_component_visible(COMBO_CATEGORIES, self.combo_categories.count() > 1)
else:
keeping_selected = keep_selected and categories and self.category_filter in categories
if not keeping_selected:
self.category_filter = self.any_category_filter
if categories:
if self.combo_categories.count() > 1:
for _ in range(self.combo_categories.count() - 1):
self.combo_categories.removeItem(1)
selected_cat = -1
cat_list = list(categories)
cat_list.sort()
for idx, c in enumerate(cat_list):
self.__add_category(c)
if keeping_selected and c == self.category_filter:
selected_cat = idx + 1
self.combo_categories.blockSignals(True)
self.combo_categories.setCurrentIndex(selected_cat if selected_cat > -1 else 0)
self.combo_categories.blockSignals(False)
self.comp_manager.set_component_visible(COMBO_CATEGORIES, True)
else:
self.comp_manager.set_component_visible(COMBO_CATEGORIES, False)
def __add_category(self, category: str):
i18n_cat = self.i18n.get('category.{}'.format(category), self.i18n.get(category, category))
self.combo_categories.addItem(i18n_cat.capitalize(), category)
def _get_current_categories(self) -> Set[str]:
if self.combo_categories.count() > 1:
return {self.combo_categories.itemData(idx) for idx in range(self.combo_categories.count()) if idx > 0}
def _resize(self, accept_lower_width: bool = True):
table_width = self.table_apps.get_width()
toolbar_width = self.toolbar_filters.sizeHint().width()
topbar_width = self.toolbar_status.sizeHint().width()
new_width = max(table_width, toolbar_width, topbar_width)
new_width *= 1.05 # this extra size is not because of the toolbar button, but the table upgrade buttons
if (self.pkgs and accept_lower_width) or new_width > self.width():
self.resize(int(new_width), self.height())
def set_progress_controll(self, enabled: bool):
self.progress_controll_enabled = enabled
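    # 'Upgrade selected' first asks for confirmation through a dialog that embeds
    # an UpgradeToggleButton next to the message, and only then hands the
    # currently displayed packages to the UpgradeSelected thread.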
def upgrade_selected(self):
body = QWidget()
body.setLayout(QHBoxLayout())
body.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Preferred)
body.layout().addWidget(QLabel(self.i18n['manage_window.upgrade_all.popup.body']))
body.layout().addWidget(UpgradeToggleButton(pkg=None, root=self, i18n=self.i18n, clickable=False))
if ConfirmationDialog(title=self.i18n['manage_window.upgrade_all.popup.title'],
i18n=self.i18n, body=None,
widgets=[body]).ask():
self._begin_action(action_label=self.i18n['manage_window.status.upgrading'],
action_id=ACTION_UPGRADE)
self.comp_manager.set_components_visible(False)
self._handle_console_option(True)
self.thread_update.pkgs = self.pkgs
self.thread_update.start()
def _finish_upgrade_selected(self, res: dict):
self._finish_action()
if res.get('id'):
output = self.textarea_details.toPlainText()
if output:
try:
Path(UpgradeSelected.UPGRADE_LOGS_DIR).mkdir(parents=True, exist_ok=True)
logs_path = '{}/{}.log'.format(UpgradeSelected.UPGRADE_LOGS_DIR, res['id'])
with open(logs_path, 'w+') as f:
f.write(output)
self.textarea_details.appendPlainText('\n*Upgrade summary generated at: {}'.format(UpgradeSelected.SUMMARY_FILE.format(res['id'])))
self.textarea_details.appendPlainText('*Upgrade logs generated at: {}'.format(logs_path))
except:
traceback.print_exc()
if res['success']:
self.comp_manager.remove_saved_state(ACTION_UPGRADE)
self.begin_refresh_packages(pkg_types=res['types'])
self._show_console_checkbox_if_output()
if self._can_notify_user():
util.notify_user('{} {}'.format(res['updated'], self.i18n['notification.update_selected.success']))
notify_tray()
else:
self.comp_manager.restore_state(ACTION_UPGRADE)
self._show_console_errors()
if self._can_notify_user():
util.notify_user(self.i18n['notification.update_selected.failed'])
self.update_custom_actions()
def _show_console_errors(self):
if self.textarea_details.toPlainText():
self.check_details.setChecked(True)
else:
self._handle_console_option(False)
self.comp_manager.set_component_visible(CHECK_DETAILS, False)
def _update_action_output(self, output: str):
self.textarea_details.appendPlainText(output)
def _begin_action(self, action_label: str, action_id: int = None):
self.thread_animate_progress.stop = False
self.thread_animate_progress.start()
self.progress_bar.setVisible(True)
if action_id is not None:
self.comp_manager.save_states(action_id, only_visible=True)
self._set_table_enabled(False)
self.comp_manager.set_component_visible(SEARCH_BAR, False)
self._change_status(action_label)
def _set_lower_buttons_visible(self, visible: bool):
self.comp_manager.set_group_visible(GROUP_LOWER_BTS, visible)
if visible:
self.comp_manager.set_component_visible(BT_CUSTOM_ACTIONS, bool(self.custom_actions))
def _finish_action(self, action_id: int = None):
self.thread_animate_progress.stop = True
self.thread_animate_progress.wait(msecs=1000)
self.progress_bar.setVisible(False)
self.progress_bar.setValue(0)
self.progress_bar.setTextVisible(False)
if action_id is not None:
self.comp_manager.restore_state(action_id)
self.comp_manager.set_component_visible(SEARCH_BAR, True)
self._change_status()
self._change_label_substatus('')
self._set_table_enabled(True)
self.progress_controll_enabled = True
def begin_downgrade(self, pkg: PackageView):
pwd, proceed = self._ask_root_password(SoftwareAction.DOWNGRADE, pkg)
if not proceed:
return
self._begin_action(action_label='{} {}'.format(self.i18n['manage_window.status.downgrading'], pkg.model.name),
action_id=ACTION_DOWNGRADE)
self.comp_manager.set_components_visible(False)
self._handle_console_option(True)
self.thread_downgrade.pkg = pkg
self.thread_downgrade.root_pwd = pwd
self.thread_downgrade.start()
def _finish_downgrade(self, res: dict):
self._finish_action()
if res['success']:
self.comp_manager.remove_saved_state(ACTION_DOWNGRADE)
if self._can_notify_user():
util.notify_user('{} {}'.format(res['app'], self.i18n['downgraded']))
self.begin_refresh_packages(pkg_types={res['app'].model.__class__} if len(self.pkgs) > 1 else None)
self._show_console_checkbox_if_output()
self.update_custom_actions()
notify_tray()
else:
self.comp_manager.restore_state(ACTION_DOWNGRADE)
self._show_console_errors()
if self._can_notify_user():
util.notify_user(self.i18n['notification.downgrade.failed'])
def begin_show_info(self, pkg: dict):
self._begin_action(self.i18n['manage_window.status.info'], action_id=ACTION_INFO)
self.comp_manager.disable_visible()
self.thread_show_info.pkg = pkg
self.thread_show_info.start()
def _finish_show_info(self, pkg_info: dict):
self._finish_action(action_id=ACTION_INFO)
if pkg_info:
if len(pkg_info) > 1:
dialog_info = InfoDialog(pkg_info=pkg_info, icon_cache=self.icon_cache,
i18n=self.i18n, screen_size=self.screen_size)
dialog_info.exec_()
else:
dialog.show_message(title=self.i18n['warning'].capitalize(),
body=self.i18n['manage_window.info.no_info'].format(bold(pkg_info['__app__'].model.name)),
type_=MessageType.WARNING)
def begin_show_screenshots(self, pkg: PackageView):
self._begin_action(action_label=self.i18n['manage_window.status.screenshots'].format(bold(pkg.model.name)),
action_id=ACTION_SCREENSHOTS)
self.comp_manager.disable_visible()
self.thread_screenshots.pkg = pkg
self.thread_screenshots.start()
def _finish_show_screenshots(self, res: dict):
self._finish_action(ACTION_SCREENSHOTS)
if res.get('screenshots'):
diag = ScreenshotsDialog(pkg=res['pkg'],
http_client=self.http_client,
icon_cache=self.icon_cache,
logger=self.logger,
i18n=self.i18n,
screenshots=res['screenshots'])
diag.exec_()
else:
dialog.show_message(title=self.i18n['error'],
body=self.i18n['popup.screenshots.no_screenshot.body'].format(bold(res['pkg'].model.name)),
type_=MessageType.ERROR)
def begin_show_history(self, pkg: PackageView):
self._begin_action(self.i18n['manage_window.status.history'], action_id=ACTION_HISTORY)
self.comp_manager.disable_visible()
self.thread_show_history.pkg = pkg
self.thread_show_history.start()
def _finish_show_history(self, res: dict):
self._finish_action(ACTION_HISTORY)
if res.get('error'):
self._handle_console_option(True)
self.textarea_details.appendPlainText(res['error'])
self.check_details.setChecked(True)
elif not res['history'].history:
dialog.show_message(title=self.i18n['action.history.no_history.title'],
body=self.i18n['action.history.no_history.body'].format(bold(res['history'].pkg.name)),
type_=MessageType.WARNING)
else:
dialog_history = HistoryDialog(res['history'], self.icon_cache, self.i18n)
dialog_history.exec_()
def _begin_search(self, word, action_id: int = None):
self.filter_updates = False
self._begin_action('{} {}'.format(self.i18n['manage_window.status.searching'], word if word else ''), action_id=action_id)
def search(self):
word = self.search_bar.text().strip()
if word:
self._handle_console(False)
self._begin_search(word, action_id=ACTION_SEARCH)
self.comp_manager.set_components_visible(False)
self.thread_search.word = word
self.thread_search.start()
def _finish_search(self, res: dict):
self._finish_action()
self.search_performed = True
if not res['error']:
self.comp_manager.set_group_visible(GROUP_VIEW_SEARCH, True)
self.update_pkgs(res['pkgs_found'], as_installed=False, ignore_updates=True)
self._set_lower_buttons_visible(True)
self._update_bts_installed_and_suggestions()
self._hide_filters_no_packages()
self._reorganize()
else:
self.comp_manager.restore_state(ACTION_SEARCH)
dialog.show_message(title=self.i18n['warning'].capitalize(), body=self.i18n[res['error']], type_=MessageType.WARNING)
def _ask_root_password(self, action: SoftwareAction, pkg: PackageView) -> Tuple[Optional[str], bool]:
pwd = None
requires_root = self.manager.requires_root(action, pkg.model)
if not user.is_root() and requires_root:
valid, pwd = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager)
if not valid:
return pwd, False
return pwd, True
def install(self, pkg: PackageView):
pwd, proceed = self._ask_root_password(SoftwareAction.INSTALL, pkg)
if not proceed:
return
self._begin_action('{} {}'.format(self.i18n['manage_window.status.installing'], pkg.model.name), action_id=ACTION_INSTALL)
self.comp_manager.set_groups_visible(False, GROUP_UPPER_BAR, GROUP_LOWER_BTS)
self._handle_console_option(True)
self.thread_install.pkg = pkg
self.thread_install.root_pwd = pwd
self.thread_install.start()
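    # Once an install finishes, any console output is written to a timestamped
    # log under LOGS_DIR, the affected models are refreshed in the cached lists
    # (freshly installed packages are moved to the front of pkgs_available so
    # they appear without filters) and the visible table rows are updated.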
def _finish_install(self, res: dict):
self._finish_action(action_id=ACTION_INSTALL)
console_output = self.textarea_details.toPlainText()
if console_output:
log_path = f"{LOGS_DIR}/install/{res["pkg"].model.get_type()}/{res["pkg"].model.name}"
try:
Path(log_path).mkdir(parents=True, exist_ok=True)
log_file = f'{log_path}/{int(time.time())}.log'
with open(log_file, 'w+') as f:
f.write(console_output)
self.textarea_details.appendPlainText(self.i18n['console.install_logs.path'].format('"{}"'.format(log_file)))
except:
self.textarea_details.appendPlainText("[warning] Could not write install log file to '{}'".format(log_path))
if res['success']:
if self._can_notify_user():
util.notify_user(msg='{} ({}) {}'.format(res['pkg'].model.name, res['pkg'].model.get_type(), self.i18n['installed']))
models_updated = []
for key in ('installed', 'removed'):
if res.get(key):
models_updated.extend(res[key])
if models_updated:
installed_available_idxs = []
for idx, available in enumerate(self.pkgs_available):
for pidx, model in enumerate(models_updated):
if available.model == model:
available.update_model(model)
if model.installed:
installed_available_idxs.append((idx, pidx, available))
                # re-indexing all installed so they will always be displayed when no filters are applied
if installed_available_idxs:
# removing from available
installed_available_idxs.sort(key=operator.itemgetter(0))
for decrement, data in enumerate(installed_available_idxs):
del self.pkgs_available[data[0] - decrement]
# re-inserting into the available
installed_available_idxs.sort(key=operator.itemgetter(1))
for new_idx, data in enumerate(installed_available_idxs):
self.pkgs_available.insert(new_idx, data[2])
# updating the respective table rows:
for displayed in self.pkgs:
for model in models_updated:
if displayed.model == model:
self.table_apps.update_package(displayed, change_update_col=True)
self.update_bt_upgrade()
# updating installed packages
if res['removed'] and self.pkgs_installed:
to_remove = []
for idx, installed in enumerate(self.pkgs_installed):
for removed in res['removed']:
if installed.model == removed:
to_remove.append(idx)
if to_remove:
to_remove.sort()
for decrement, idx in enumerate(to_remove):
del self.pkgs_installed[idx - decrement]
if res['installed']:
for idx, model in enumerate(res['installed']):
self.pkgs_installed.insert(idx, PackageView(model, self.i18n))
self.update_custom_actions()
self.table_apps.change_headers_policy(policy=QHeaderView.Stretch, maximized=self._maximized)
self.table_apps.change_headers_policy(policy=QHeaderView.ResizeToContents, maximized=self._maximized)
self._resize(accept_lower_width=False)
else:
self._show_console_errors()
if self._can_notify_user():
util.notify_user('{}: {}'.format(res['pkg'].model.name, self.i18n['notification.install.failed']))
def _update_progress(self, value: int):
self.progress_bar.setValue(value)
def begin_execute_custom_action(self, pkg: Optional[PackageView], action: CustomSoftwareAction):
if pkg is None and action.requires_confirmation and \
not ConfirmationDialog(title=self.i18n['confirmation'].capitalize(),
body='<p>{}</p>'.format(self.i18n['custom_action.proceed_with'].capitalize().format(bold(self.i18n[action.i18n_label_key]))),
icon=QIcon(action.icon_path) if action.icon_path else QIcon(resource.get_path('img/logo.svg')),
i18n=self.i18n).ask():
return False
pwd = None
if not user.is_root() and action.requires_root:
valid, pwd = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager)
if not valid:
return
self._begin_action(action_label='{}{}'.format(self.i18n[action.i18n_status_key], ' {}'.format(pkg.model.name) if pkg else ''),
action_id=ACTION_CUSTOM_ACTION)
self.comp_manager.set_components_visible(False)
self._handle_console_option(True)
self.thread_custom_action.pkg = pkg
self.thread_custom_action.root_pwd = pwd
self.thread_custom_action.custom_action = action
self.thread_custom_action.start()
def _finish_execute_custom_action(self, res: dict):
self._finish_action()
if res['success']:
if res['action'].refresh:
self.comp_manager.remove_saved_state(ACTION_CUSTOM_ACTION)
self.begin_refresh_packages(pkg_types={res['pkg'].model.__class__} if res['pkg'] else None)
else:
self.comp_manager.restore_state(ACTION_CUSTOM_ACTION)
self._show_console_checkbox_if_output()
else:
self.comp_manager.restore_state(ACTION_CUSTOM_ACTION)
self._show_console_errors()
if res['error']:
dialog.show_message(title=self.i18n['warning' if res['error_type'] == MessageType.WARNING else 'error'].capitalize(),
body=self.i18n[res['error']],
type_=res['error_type'])
def _show_console_checkbox_if_output(self):
if self.textarea_details.toPlainText():
self.comp_manager.set_component_visible(CHECK_DETAILS, True)
else:
self.comp_manager.set_component_visible(CHECK_DETAILS, False)
def show_settings(self):
if self.settings_window:
self.settings_window.handle_display()
else:
self.settings_window = SettingsWindow(self.manager, self.i18n, self.screen_size, self)
self.settings_window.setMinimumWidth(int(self.screen_size.width() / 4))
self.settings_window.resize(self.size())
self.settings_window.adjustSize()
qt_utils.centralize(self.settings_window)
self.settings_window.show()
def _map_custom_action(self, action: CustomSoftwareAction, parent: QWidget) -> QCustomMenuAction:
if action.icon_path:
try:
if action.icon_path.startswith('/'):
icon = QIcon(action.icon_path)
else:
icon = QIcon.fromTheme(action.icon_path)
except:
icon = None
else:
icon = None
return QCustomMenuAction(parent=parent,
label=self.i18n[action.i18n_label_key],
action=lambda: self.begin_execute_custom_action(None, action),
icon=icon)
def show_custom_actions(self):
if self.custom_actions:
menu_row = QMenu()
menu_row.setCursor(QCursor(Qt.PointingHandCursor))
actions = [self._map_custom_action(a, menu_row) for a in self.custom_actions]
menu_row.addActions(actions)
menu_row.adjustSize()
menu_row.popup(QCursor.pos())
menu_row.exec_()
def begin_ignore_updates(self, pkg: PackageView):
status_key = 'ignore_updates' if not pkg.model.is_update_ignored() else 'ignore_updates_reverse'
self._begin_action(action_label=self.i18n['manage_window.status.{}'.format(status_key)].format(pkg.model.name),
action_id=ACTION_IGNORE_UPDATES)
self.comp_manager.disable_visible()
self.thread_ignore_updates.pkg = pkg
self.thread_ignore_updates.start()
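    # Toggling 'ignore updates' either removes the package row (when the current
    # filters would now hide it) or updates it in place, and the change is also
    # propagated to the cached available/installed lists and the category combo.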
def finish_ignore_updates(self, res: dict):
self._finish_action(action_id=ACTION_IGNORE_UPDATES)
if res['success']:
hide_package = commons.is_package_hidden(res['pkg'], self._gen_filters())
if hide_package:
idx_to_remove = None
for pkg in self.pkgs:
if pkg == res['pkg']:
idx_to_remove = pkg.table_index
break
if idx_to_remove is not None:
del self.pkgs[idx_to_remove]
self.table_apps.removeRow(idx_to_remove)
self._update_table_indexes()
self.update_bt_upgrade()
else:
for pkg in self.pkgs:
if pkg == res['pkg']:
pkg.update_model(res['pkg'].model)
self.table_apps.update_package(pkg, change_update_col=not any([self.search_performed, self.suggestions_requested]))
self.update_bt_upgrade()
break
for pkg_list in (self.pkgs_available, self.pkgs_installed):
if pkg_list:
for pkg in pkg_list:
if pkg == res['pkg']:
pkg.update_model(res['pkg'].model)
break
self._add_pkg_categories(res['pkg'])
dialog.show_message(title=self.i18n['success'].capitalize(),
body=self.i18n['action.{}.success'.format(res['action'])].format(bold(res['pkg'].model.name)),
type_=MessageType.INFO)
else:
dialog.show_message(title=self.i18n['fail'].capitalize(),
body=self.i18n['action.{}.fail'.format(res['action'])].format(bold(res['pkg'].model.name)),
type_=MessageType.ERROR)
def _add_pkg_categories(self, pkg: PackageView):
if pkg.model.categories:
pkg_categories = {c.strip().lower() for c in pkg.model.categories if c and c.strip()}
if pkg_categories:
current_categories = self._get_current_categories()
if current_categories:
pkg_categories = {c.strip().lower() for c in pkg.model.categories if c}
if pkg_categories:
categories_to_add = {c for c in pkg_categories if c and c not in current_categories}
if categories_to_add:
for cat in categories_to_add:
self.__add_category(cat)
else:
self._update_categories(pkg_categories)
def _map_theme_action(self, theme: ThemeMetadata, menu: QMenu) -> QCustomMenuAction:
def _change_theme():
set_theme(theme_key=theme.key, app=QApplication.instance(), logger=self.context.logger)
self.thread_save_theme.theme_key = theme.key
self.thread_save_theme.start()
return QCustomMenuAction(label=theme.get_i18n_name(self.i18n),
action=_change_theme,
parent=menu,
tooltip=theme.get_i18n_description(self.i18n))
def show_themes(self):
menu_row = QMenu()
menu_row.setCursor(QCursor(Qt.PointingHandCursor))
menu_row.addActions(self._map_theme_actions(menu_row))
menu_row.adjustSize()
menu_row.popup(QCursor.pos())
menu_row.exec_()
def _map_theme_actions(self, menu: QMenu) -> List[QCustomMenuAction]:
core_config = CoreConfigManager().get_config()
current_theme_key, current_action = core_config['ui']['theme'], None
actions = []
for t in read_all_themes_metadata():
if not t.abstract:
action = self._map_theme_action(t, menu)
if current_action is None and current_theme_key is not None and current_theme_key == t.key:
action.button.setProperty('current', 'true')
current_action = action
else:
actions.append(action)
if not current_action:
invalid_action = QCustomMenuAction(label=self.i18n['manage_window.bt_themes.option.invalid'], parent=menu)
invalid_action.button.setProperty('current', 'true')
current_action = invalid_action
actions.sort(key=lambda a: a.get_label())
actions.insert(0, current_action)
return actions
def reload(self):
self.thread_reload.start()
def _reload(self):
self.update_custom_actions()
self.verify_warnings()
self.types_changed = True
self.begin_refresh_packages()
| import logging
import operator
import time
import traceback
from pathlib import Path
from typing import List, Type, Set, Tuple, Optional
from PyQt5.QtCore import QEvent, Qt, pyqtSignal
from PyQt5.QtGui import QIcon, QWindowStateChangeEvent, QCursor
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QCheckBox, QHeaderView, QToolBar, \
QLabel, QPlainTextEdit, QProgressBar, QPushButton, QComboBox, QApplication, QListView, QSizePolicy, \
QMenu, QHBoxLayout
from bauh.api import user
from bauh.api.abstract.cache import MemoryCache
from bauh.api.abstract.context import ApplicationContext
from bauh.api.abstract.controller import SoftwareManager, SoftwareAction
from bauh.api.abstract.model import SoftwarePackage
from bauh.api.abstract.view import MessageType
from bauh.api.http import HttpClient
from bauh.api.paths import LOGS_DIR
from bauh.commons.html import bold
from bauh.context import set_theme
from bauh.stylesheet import read_all_themes_metadata, ThemeMetadata
from bauh.view.core.config import CoreConfigManager
from bauh.view.core.tray_client import notify_tray
from bauh.view.qt import dialog, commons, qt_utils
from bauh.view.qt.about import AboutDialog
from bauh.view.qt.apps_table import PackagesTable, UpgradeToggleButton
from bauh.view.qt.commons import sum_updates_displayed
from bauh.view.qt.components import new_spacer, IconButton, QtComponentsManager, to_widget, QSearchBar, \
QCustomMenuAction, QCustomToolbar
from bauh.view.qt.dialog import ConfirmationDialog
from bauh.view.qt.history import HistoryDialog
from bauh.view.qt.info import InfoDialog
from bauh.view.qt.root import RootDialog
from bauh.view.qt.screenshots import ScreenshotsDialog
from bauh.view.qt.settings import SettingsWindow
from bauh.view.qt.thread import UpgradeSelected, RefreshApps, UninstallPackage, DowngradePackage, ShowPackageInfo, \
ShowPackageHistory, SearchPackages, InstallPackage, AnimateProgress, NotifyPackagesReady, FindSuggestions, \
ListWarnings, \
AsyncAction, LaunchPackage, ApplyFilters, CustomSoftwareAction, ShowScreenshots, CustomAction, \
NotifyInstalledLoaded, \
IgnorePackageUpdates, SaveTheme, StartAsyncAction
from bauh.view.qt.view_model import PackageView, PackageViewStatus
from bauh.view.util import util, resource
from bauh.view.util.translation import I18n
DARK_ORANGE = '#FF4500'
# action ids
ACTION_APPLY_FILTERS = 1
ACTION_SEARCH = 2
ACTION_INSTALL = 3
ACTION_UNINSTALL = 4
ACTION_INFO = 5
ACTION_HISTORY = 6
ACTION_DOWNGRADE = 7
ACTION_UPGRADE = 8
ACTION_LAUNCH = 9
ACTION_CUSTOM_ACTION = 10
ACTION_SCREENSHOTS = 11
ACTION_IGNORE_UPDATES = 12
# component ids
SEARCH_BAR = 1
BT_INSTALLED = 2
BT_REFRESH = 3
BT_SUGGESTIONS = 4
BT_UPGRADE = 5
CHECK_UPDATES = 6
CHECK_APPS = 7
COMBO_TYPES = 8
COMBO_CATEGORIES = 9
INP_NAME = 10
CHECK_DETAILS = 11
BT_SETTINGS = 12
BT_CUSTOM_ACTIONS = 13
BT_ABOUT = 14
BT_THEMES = 15
# component group ids
GROUP_FILTERS = 1
GROUP_VIEW_INSTALLED = 2
GROUP_VIEW_SEARCH = 3
GROUP_UPPER_BAR = 4
GROUP_LOWER_BTS = 5
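# Main bauh window: lists packages in a table and delegates install/uninstall/upgrade/search and other actions to worker threads.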
class ManageWindow(QWidget):
signal_user_res = pyqtSignal(bool)
signal_root_password = pyqtSignal(bool, str)
signal_table_update = pyqtSignal()
signal_stop_notifying = pyqtSignal()
def __init__(self, i18n: I18n, icon_cache: MemoryCache, manager: SoftwareManager, screen_size, config: dict,
context: ApplicationContext, http_client: HttpClient, logger: logging.Logger, icon: QIcon):
super(ManageWindow, self).__init__()
self.setObjectName('manage_window')
self.comp_manager = QtComponentsManager()
self.i18n = i18n
self.logger = logger
self.manager = manager
self.working = False # restrict the number of threaded actions
self.installed_loaded = False # used to control the state when the interface is set to not load the apps on startup
        self.pkgs = [] # packages currently loaded in the table
self.pkgs_available = [] # all packages loaded in memory
self.pkgs_installed = [] # cached installed packages
self.display_limit = config['ui']['table']['max_displayed']
self.icon_cache = icon_cache
self.screen_size = screen_size
self.config = config
self.context = context
self.http_client = http_client
self.icon_app = icon
self.setWindowIcon(self.icon_app)
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.toolbar_status = QToolBar()
self.toolbar_status.setObjectName('toolbar_status')
self.toolbar_status.addWidget(new_spacer())
self.label_status = QLabel()
self.label_status.setObjectName('label_status')
self.label_status.setText('')
self.toolbar_status.addWidget(self.label_status)
self.search_bar = QSearchBar(search_callback=self.search)
self.search_bar.set_placeholder(i18n['window_manage.search_bar.placeholder'] + "...")
self.search_bar.set_tooltip(i18n['window_manage.search_bar.tooltip'])
self.search_bar.set_button_tooltip(i18n['window_manage.search_bar.button_tooltip'])
self.comp_manager.register_component(SEARCH_BAR, self.search_bar, self.toolbar_status.addWidget(self.search_bar))
self.toolbar_status.addWidget(new_spacer())
self.layout.addWidget(self.toolbar_status)
self.toolbar_filters = QWidget()
self.toolbar_filters.setObjectName('table_filters')
self.toolbar_filters.setLayout(QHBoxLayout())
self.toolbar_filters.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.toolbar_filters.setContentsMargins(0, 0, 0, 0)
self.check_updates = QCheckBox()
self.check_updates.setObjectName('check_updates')
self.check_updates.setCursor(QCursor(Qt.PointingHandCursor))
self.check_updates.setText(self.i18n['updates'].capitalize())
self.check_updates.stateChanged.connect(self._handle_updates_filter)
self.check_updates.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.check_updates)
self.comp_manager.register_component(CHECK_UPDATES, self.check_updates)
self.check_apps = QCheckBox()
self.check_apps.setObjectName('check_apps')
self.check_apps.setCursor(QCursor(Qt.PointingHandCursor))
self.check_apps.setText(self.i18n['manage_window.checkbox.only_apps'])
self.check_apps.setChecked(True)
self.check_apps.stateChanged.connect(self._handle_filter_only_apps)
self.check_apps.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.check_apps)
self.comp_manager.register_component(CHECK_APPS, self.check_apps)
self.any_type_filter = 'any'
self.cache_type_filter_icons = {}
self.combo_filter_type = QComboBox()
self.combo_filter_type.setObjectName('combo_types')
self.combo_filter_type.setCursor(QCursor(Qt.PointingHandCursor))
self.combo_filter_type.setView(QListView())
self.combo_filter_type.view().setCursor(QCursor(Qt.PointingHandCursor))
self.combo_filter_type.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.combo_filter_type.setEditable(True)
self.combo_filter_type.lineEdit().setReadOnly(True)
self.combo_filter_type.lineEdit().setAlignment(Qt.AlignCenter)
self.combo_filter_type.activated.connect(self._handle_type_filter)
self.combo_filter_type.addItem('--- {} ---'.format(self.i18n['type'].capitalize()), self.any_type_filter)
self.combo_filter_type.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.combo_filter_type)
self.comp_manager.register_component(COMBO_TYPES, self.combo_filter_type)
self.any_category_filter = 'any'
self.combo_categories = QComboBox()
self.combo_categories.setObjectName('combo_categories')
self.combo_categories.setCursor(QCursor(Qt.PointingHandCursor))
self.combo_categories.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.combo_categories.view().setCursor(QCursor(Qt.PointingHandCursor))
self.combo_categories.setEditable(True)
self.combo_categories.lineEdit().setReadOnly(True)
self.combo_categories.lineEdit().setAlignment(Qt.AlignCenter)
self.combo_categories.activated.connect(self._handle_category_filter)
self.combo_categories.sizePolicy().setRetainSizeWhenHidden(True)
self.combo_categories.addItem('--- {} ---'.format(self.i18n['category'].capitalize()), self.any_category_filter)
self.toolbar_filters.layout().addWidget(self.combo_categories)
self.comp_manager.register_component(COMBO_CATEGORIES, self.combo_categories)
self.input_name = QSearchBar(search_callback=self.begin_apply_filters)
self.input_name.palette().swap(self.combo_categories.palette())
self.input_name.setObjectName('name_filter')
self.input_name.set_placeholder(self.i18n['manage_window.name_filter.placeholder'] + '...')
self.input_name.set_tooltip(self.i18n['manage_window.name_filter.tooltip'])
self.input_name.set_button_tooltip(self.i18n['manage_window.name_filter.button_tooltip'])
self.input_name.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.input_name)
self.comp_manager.register_component(INP_NAME, self.input_name)
self.toolbar_filters.layout().addWidget(new_spacer())
toolbar_bts = []
bt_inst = QPushButton()
bt_inst.setObjectName('bt_installed')
bt_inst.setProperty('root', 'true')
bt_inst.setCursor(QCursor(Qt.PointingHandCursor))
bt_inst.setToolTip(self.i18n['manage_window.bt.installed.tooltip'])
bt_inst.setText(self.i18n['manage_window.bt.installed.text'].capitalize())
bt_inst.clicked.connect(self._begin_loading_installed)
bt_inst.sizePolicy().setRetainSizeWhenHidden(True)
toolbar_bts.append(bt_inst)
self.toolbar_filters.layout().addWidget(bt_inst)
self.comp_manager.register_component(BT_INSTALLED, bt_inst)
bt_ref = QPushButton()
bt_ref.setObjectName('bt_refresh')
bt_ref.setProperty('root', 'true')
bt_ref.setCursor(QCursor(Qt.PointingHandCursor))
bt_ref.setToolTip(i18n['manage_window.bt.refresh.tooltip'])
bt_ref.setText(self.i18n['manage_window.bt.refresh.text'])
bt_ref.clicked.connect(self.begin_refresh_packages)
bt_ref.sizePolicy().setRetainSizeWhenHidden(True)
toolbar_bts.append(bt_ref)
self.toolbar_filters.layout().addWidget(bt_ref)
self.comp_manager.register_component(BT_REFRESH, bt_ref)
self.bt_upgrade = QPushButton()
self.bt_upgrade.setProperty('root', 'true')
self.bt_upgrade.setObjectName('bt_upgrade')
self.bt_upgrade.setCursor(QCursor(Qt.PointingHandCursor))
self.bt_upgrade.setToolTip(i18n['manage_window.bt.upgrade.tooltip'])
self.bt_upgrade.setText(i18n['manage_window.bt.upgrade.text'])
self.bt_upgrade.clicked.connect(self.upgrade_selected)
self.bt_upgrade.sizePolicy().setRetainSizeWhenHidden(True)
toolbar_bts.append(self.bt_upgrade)
self.toolbar_filters.layout().addWidget(self.bt_upgrade)
self.comp_manager.register_component(BT_UPGRADE, self.bt_upgrade)
# setting all buttons to the same size:
bt_biggest_size = 0
for bt in toolbar_bts:
bt_width = bt.sizeHint().width()
if bt_width > bt_biggest_size:
bt_biggest_size = bt_width
for bt in toolbar_bts:
bt_width = bt.sizeHint().width()
if bt_biggest_size > bt_width:
bt.setFixedWidth(bt_biggest_size)
self.layout.addWidget(self.toolbar_filters)
self.table_container = QWidget()
self.table_container.setObjectName('table_container')
self.table_container.setContentsMargins(0, 0, 0, 0)
self.table_container.setLayout(QVBoxLayout())
self.table_container.layout().setContentsMargins(0, 0, 0, 0)
self.table_apps = PackagesTable(self, self.icon_cache, download_icons=bool(self.config['download']['icons']))
self.table_apps.change_headers_policy()
self.table_container.layout().addWidget(self.table_apps)
self.layout.addWidget(self.table_container)
self.toolbar_console = QWidget()
self.toolbar_console.setObjectName('console_toolbar')
self.toolbar_console.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.toolbar_console.setLayout(QHBoxLayout())
self.toolbar_console.setContentsMargins(0, 0, 0, 0)
self.check_details = QCheckBox()
self.check_details.setObjectName('check_details')
self.check_details.setCursor(QCursor(Qt.PointingHandCursor))
self.check_details.setText(self.i18n['manage_window.checkbox.show_details'])
self.check_details.stateChanged.connect(self._handle_console)
self.toolbar_console.layout().addWidget(self.check_details)
self.comp_manager.register_component(CHECK_DETAILS, self.check_details)
self.toolbar_console.layout().addWidget(new_spacer())
self.label_displayed = QLabel()
self.label_displayed.setObjectName('apps_displayed')
self.label_displayed.setCursor(QCursor(Qt.WhatsThisCursor))
self.label_displayed.setToolTip(self.i18n['manage_window.label.apps_displayed.tip'])
self.toolbar_console.layout().addWidget(self.label_displayed)
self.label_displayed.hide()
self.layout.addWidget(self.toolbar_console)
self.textarea_details = QPlainTextEdit(self)
self.textarea_details.setObjectName('textarea_details')
self.textarea_details.setProperty('console', 'true')
self.textarea_details.resize(self.table_apps.size())
self.layout.addWidget(self.textarea_details)
self.textarea_details.setVisible(False)
self.textarea_details.setReadOnly(True)
self.toolbar_substatus = QToolBar()
self.toolbar_substatus.setObjectName('toolbar_substatus')
self.toolbar_substatus.addWidget(new_spacer())
self.label_substatus = QLabel()
self.label_substatus.setObjectName('label_substatus')
self.label_substatus.setCursor(QCursor(Qt.WaitCursor))
self.toolbar_substatus.addWidget(self.label_substatus)
self.toolbar_substatus.addWidget(new_spacer())
self.layout.addWidget(self.toolbar_substatus)
self._change_label_substatus('')
self.thread_update = self._bind_async_action(UpgradeSelected(self.manager, context.internet_checker, self.i18n), finished_call=self._finish_upgrade_selected)
self.thread_refresh = self._bind_async_action(RefreshApps(self.manager), finished_call=self._finish_refresh_packages, only_finished=True)
self.thread_uninstall = self._bind_async_action(UninstallPackage(self.manager, self.icon_cache, self.i18n), finished_call=self._finish_uninstall)
self.thread_show_info = self._bind_async_action(ShowPackageInfo(self.manager), finished_call=self._finish_show_info)
self.thread_show_history = self._bind_async_action(ShowPackageHistory(self.manager, self.i18n), finished_call=self._finish_show_history)
self.thread_search = self._bind_async_action(SearchPackages(self.manager), finished_call=self._finish_search, only_finished=True)
self.thread_downgrade = self._bind_async_action(DowngradePackage(self.manager, self.i18n), finished_call=self._finish_downgrade)
self.thread_suggestions = self._bind_async_action(FindSuggestions(man=self.manager), finished_call=self._finish_load_suggestions, only_finished=True)
self.thread_launch = self._bind_async_action(LaunchPackage(self.manager), finished_call=self._finish_launch_package, only_finished=False)
self.thread_custom_action = self._bind_async_action(CustomAction(manager=self.manager, i18n=self.i18n), finished_call=self._finish_execute_custom_action)
self.thread_screenshots = self._bind_async_action(ShowScreenshots(self.manager), finished_call=self._finish_show_screenshots)
self.thread_apply_filters = ApplyFilters()
self.thread_apply_filters.signal_finished.connect(self._finish_apply_filters)
self.thread_apply_filters.signal_table.connect(self._update_table_and_upgrades)
self.signal_table_update.connect(self.thread_apply_filters.stop_waiting)
self.thread_install = InstallPackage(manager=self.manager, icon_cache=self.icon_cache, i18n=self.i18n)
self._bind_async_action(self.thread_install, finished_call=self._finish_install)
self.thread_animate_progress = AnimateProgress()
self.thread_animate_progress.signal_change.connect(self._update_progress)
self.thread_notify_pkgs_ready = NotifyPackagesReady()
self.thread_notify_pkgs_ready.signal_changed.connect(self._update_package_data)
self.thread_notify_pkgs_ready.signal_finished.connect(self._update_state_when_pkgs_ready)
self.signal_stop_notifying.connect(self.thread_notify_pkgs_ready.stop_working)
self.thread_ignore_updates = IgnorePackageUpdates(manager=self.manager)
self._bind_async_action(self.thread_ignore_updates, finished_call=self.finish_ignore_updates)
self.thread_reload = StartAsyncAction(delay_in_milis=5)
self.thread_reload.signal_start.connect(self._reload)
self.container_bottom = QWidget()
self.container_bottom.setObjectName('container_bottom')
self.container_bottom.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.container_bottom.setLayout(QHBoxLayout())
self.container_bottom.layout().setContentsMargins(0, 0, 0, 0)
self.container_bottom.layout().addWidget(new_spacer())
if config['suggestions']['enabled']:
bt_sugs = IconButton(action=lambda: self._begin_load_suggestions(filter_installed=True),
i18n=i18n,
tooltip=self.i18n['manage_window.bt.suggestions.tooltip'])
bt_sugs.setObjectName('suggestions')
self.container_bottom.layout().addWidget(bt_sugs)
self.comp_manager.register_component(BT_SUGGESTIONS, bt_sugs)
bt_themes = IconButton(self.show_themes,
i18n=self.i18n,
tooltip=self.i18n['manage_window.bt_themes.tip'])
bt_themes.setObjectName('themes')
self.container_bottom.layout().addWidget(bt_themes)
self.comp_manager.register_component(BT_THEMES, bt_themes)
self.custom_actions = [a for a in manager.gen_custom_actions()]
bt_custom_actions = IconButton(action=self.show_custom_actions,
i18n=self.i18n,
tooltip=self.i18n['manage_window.bt_custom_actions.tip'])
bt_custom_actions.setObjectName('custom_actions')
bt_custom_actions.setVisible(bool(self.custom_actions))
self.container_bottom.layout().addWidget(bt_custom_actions)
self.comp_manager.register_component(BT_CUSTOM_ACTIONS, bt_custom_actions)
bt_settings = IconButton(action=self.show_settings,
i18n=self.i18n,
tooltip=self.i18n['manage_window.bt_settings.tooltip'])
bt_settings.setObjectName('settings')
self.container_bottom.layout().addWidget(bt_settings)
self.comp_manager.register_component(BT_SETTINGS, bt_settings)
bt_about = IconButton(action=self._show_about,
i18n=self.i18n,
tooltip=self.i18n['manage_window.settings.about'])
bt_about.setObjectName('about')
self.container_bottom.layout().addWidget(bt_about)
self.comp_manager.register_component(BT_ABOUT, bt_about)
self.layout.addWidget(self.container_bottom)
self.container_progress = QCustomToolbar(spacing=0, policy_height=QSizePolicy.Fixed)
self.container_progress.setObjectName('container_progress')
self.container_progress.add_space()
self.progress_bar = QProgressBar()
self.progress_bar.setObjectName('progress_manage')
self.progress_bar.setCursor(QCursor(Qt.WaitCursor))
self.progress_bar.setTextVisible(False)
self.container_progress.add_widget(self.progress_bar)
self.container_progress.add_space()
self.layout.addWidget(self.container_progress)
qt_utils.centralize(self)
self.filter_only_apps = True
self.type_filter = self.any_type_filter
self.category_filter = self.any_category_filter
self.filter_updates = False
self._maximized = False
self.progress_controll_enabled = True
self.recent_uninstall = False
self.types_changed = False
self.dialog_about = None
self.load_suggestions = bool(config['suggestions']['enabled'])
self.suggestions_requested = False
self.first_refresh = True
self.thread_warnings = ListWarnings(man=manager, i18n=i18n)
self.thread_warnings.signal_warnings.connect(self._show_warnings)
self.settings_window = None
self.search_performed = False
self.thread_save_theme = SaveTheme(theme_key='')
self.thread_load_installed = NotifyInstalledLoaded()
self.thread_load_installed.signal_loaded.connect(self._finish_loading_installed)
self.setMinimumHeight(int(screen_size.height() * 0.5))
self.setMinimumWidth(int(screen_size.width() * 0.6))
self._register_groups()
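    # Groups widgets so related components can be shown, hidden or disabled together by the QtComponentsManager.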
def _register_groups(self):
filters = (CHECK_APPS, CHECK_UPDATES, COMBO_CATEGORIES, COMBO_TYPES, INP_NAME)
self.comp_manager.register_group(GROUP_FILTERS, False, *filters)
self.comp_manager.register_group(GROUP_VIEW_SEARCH, False,
COMBO_CATEGORIES, COMBO_TYPES, INP_NAME, # filters
BT_INSTALLED, BT_SUGGESTIONS) # buttons
self.comp_manager.register_group(GROUP_VIEW_INSTALLED, False,
BT_REFRESH, BT_UPGRADE, # buttons
*filters)
self.comp_manager.register_group(GROUP_UPPER_BAR, False,
CHECK_APPS, CHECK_UPDATES, COMBO_CATEGORIES, COMBO_TYPES, INP_NAME,
BT_INSTALLED, BT_SUGGESTIONS, BT_REFRESH, BT_UPGRADE)
self.comp_manager.register_group(GROUP_LOWER_BTS, False, BT_SUGGESTIONS, BT_THEMES, BT_CUSTOM_ACTIONS, BT_SETTINGS, BT_ABOUT)
def update_custom_actions(self):
self.custom_actions = [a for a in self.manager.gen_custom_actions()]
def _update_process_progress(self, val: int):
if self.progress_controll_enabled:
self.thread_animate_progress.set_progress(val)
def _change_status(self, status: str = None):
if status:
self.label_status.setText(status + '...')
self.label_status.setCursor(QCursor(Qt.WaitCursor))
else:
self.label_status.setText('')
self.label_status.unsetCursor()
def _set_table_enabled(self, enabled: bool):
self.table_apps.setEnabled(enabled)
if enabled:
self.table_container.unsetCursor()
else:
self.table_container.setCursor(QCursor(Qt.WaitCursor))
def begin_apply_filters(self):
self.stop_notifying_package_states()
self._begin_action(action_label=self.i18n['manage_window.status.filtering'],
action_id=ACTION_APPLY_FILTERS)
self.comp_manager.disable_visible_from_groups(GROUP_UPPER_BAR, GROUP_LOWER_BTS)
self.comp_manager.set_component_read_only(INP_NAME, True)
self.thread_apply_filters.filters = self._gen_filters()
self.thread_apply_filters.pkgs = self.pkgs_available
self.thread_apply_filters.start()
self.setFocus(Qt.NoFocusReason)
def _finish_apply_filters(self):
self._finish_action(ACTION_APPLY_FILTERS)
self.update_bt_upgrade()
def stop_notifying_package_states(self):
if self.thread_notify_pkgs_ready.isRunning():
self.signal_stop_notifying.emit()
self.thread_notify_pkgs_ready.wait(1000)
def _update_table_and_upgrades(self, pkgs_info: dict):
self._update_table(pkgs_info=pkgs_info, signal=True)
if self.pkgs:
self._update_state_when_pkgs_ready()
self.stop_notifying_package_states()
self.thread_notify_pkgs_ready.pkgs = self.pkgs
self.thread_notify_pkgs_ready.work = True
self.thread_notify_pkgs_ready.start()
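    # Connects an AsyncAction's signals (confirmation, output, status, progress, root password) to the window's handlers.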
def _bind_async_action(self, action: AsyncAction, finished_call, only_finished: bool = False) -> AsyncAction:
action.signal_finished.connect(finished_call)
if not only_finished:
action.signal_confirmation.connect(self._ask_confirmation)
action.signal_output.connect(self._update_action_output)
action.signal_message.connect(self._show_message)
action.signal_status.connect(self._change_label_status)
action.signal_substatus.connect(self._change_label_substatus)
action.signal_progress.connect(self._update_process_progress)
action.signal_progress_control.connect(self.set_progress_controll)
action.signal_root_password.connect(self._pause_and_ask_root_password)
self.signal_user_res.connect(action.confirm)
self.signal_root_password.connect(action.set_root_password)
return action
def _ask_confirmation(self, msg: dict):
self.thread_animate_progress.pause()
extra_widgets = [to_widget(comp=c, i18n=self.i18n) for c in msg['components']] if msg.get('components') else None
diag = ConfirmationDialog(title=msg['title'],
body=msg['body'],
i18n=self.i18n,
widgets=extra_widgets,
confirmation_label=msg['confirmation_label'],
deny_label=msg['deny_label'],
deny_button=msg['deny_button'],
window_cancel=msg['window_cancel'],
confirmation_button=msg.get('confirmation_button', True))
diag.ask()
res = diag.confirmed
self.thread_animate_progress.animate()
self.signal_user_res.emit(res)
def _pause_and_ask_root_password(self):
self.thread_animate_progress.pause()
valid, password = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager)
self.thread_animate_progress.animate()
self.signal_root_password.emit(valid, password)
def _show_message(self, msg: dict):
self.thread_animate_progress.pause()
dialog.show_message(title=msg['title'], body=msg['body'], type_=msg['type'])
self.thread_animate_progress.animate()
def _show_warnings(self, warnings: List[str]):
if warnings:
dialog.show_message(title=self.i18n['warning'].capitalize(), body='<p>{}</p>'.format('<br/><br/>'.join(warnings)), type_=MessageType.WARNING)
def show(self):
super(ManageWindow, self).show()
if not self.thread_warnings.isFinished():
self.thread_warnings.start()
qt_utils.centralize(self)
def verify_warnings(self):
self.thread_warnings.start()
def _begin_loading_installed(self):
if self.installed_loaded:
self.search_bar.clear()
self.input_name.set_text('')
self._begin_action(self.i18n['manage_window.status.installed'])
self._handle_console_option(False)
self.comp_manager.set_components_visible(False)
self.suggestions_requested = False
self.search_performed = False
self.thread_load_installed.start()
else:
self.load_suggestions = False
self.begin_refresh_packages()
def _finish_loading_installed(self):
self._finish_action()
self.comp_manager.set_group_visible(GROUP_VIEW_INSTALLED, True)
self.update_pkgs(new_pkgs=None, as_installed=True)
self._hide_filters_no_packages()
self._update_bts_installed_and_suggestions()
self._set_lower_buttons_visible(True)
self._reorganize()
def _update_bts_installed_and_suggestions(self):
available_types = len(self.manager.get_managed_types())
self.comp_manager.set_component_visible(BT_INSTALLED, available_types > 0 and any([self.suggestions_requested, self.search_performed]))
self.comp_manager.set_component_visible(BT_SUGGESTIONS, available_types > 0)
def _hide_filters_no_packages(self):
if not self.pkgs:
self.comp_manager.set_group_visible(GROUP_FILTERS, False)
def _show_about(self):
if self.dialog_about is None:
self.dialog_about = AboutDialog(self.config)
self.dialog_about.show()
def _handle_updates_filter(self, status: int):
self.filter_updates = status == 2
self.begin_apply_filters()
def _handle_filter_only_apps(self, status: int):
self.filter_only_apps = status == 2
self.begin_apply_filters()
def _handle_type_filter(self, idx: int):
self.type_filter = self.combo_filter_type.itemData(idx)
self.combo_filter_type.adjustSize()
self.begin_apply_filters()
def _handle_category_filter(self, idx: int):
self.category_filter = self.combo_categories.itemData(idx)
self.begin_apply_filters()
def _update_state_when_pkgs_ready(self):
if self.progress_bar.isVisible():
return
self._reload_categories()
self._reorganize()
def _update_package_data(self, idx: int):
if self.table_apps.isEnabled():
pkg = self.pkgs[idx]
pkg.status = PackageViewStatus.READY
self.table_apps.update_package(pkg)
def _reload_categories(self):
categories = set()
for p in self.pkgs_available:
if p.model.categories:
for c in p.model.categories:
if c:
cat = c.strip().lower()
if cat:
categories.add(cat)
if categories:
self._update_categories(categories, keep_selected=True)
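    # Readjusts the table header policy whenever the window is maximized or restored.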
def changeEvent(self, e: QEvent):
if isinstance(e, QWindowStateChangeEvent):
self._maximized = self.isMaximized()
self.table_apps.change_headers_policy(maximized=self._maximized)
def _handle_console(self, checked: bool):
if checked:
self.textarea_details.show()
else:
self.textarea_details.hide()
def _handle_console_option(self, enable: bool):
if enable:
self.textarea_details.clear()
self.comp_manager.set_component_visible(CHECK_DETAILS, enable)
self.check_details.setChecked(False)
self.textarea_details.hide()
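    # Hides the current components and starts the refresh thread to reload the installed packages.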
def begin_refresh_packages(self, pkg_types: Optional[Set[Type[SoftwarePackage]]] = None):
self.search_bar.clear()
self._begin_action(self.i18n['manage_window.status.refreshing'])
self.comp_manager.set_components_visible(False)
self._handle_console_option(False)
self.suggestions_requested = False
self.search_performed = False
self.thread_refresh.pkg_types = pkg_types
self.thread_refresh.start()
def _finish_refresh_packages(self, res: dict, as_installed: bool = True):
self._finish_action()
self._set_lower_buttons_visible(True)
self.comp_manager.set_component_visible(SEARCH_BAR, True)
if self.search_performed or self.suggestions_requested:
self.comp_manager.set_group_visible(GROUP_VIEW_SEARCH, True)
else:
self.comp_manager.set_group_visible(GROUP_VIEW_INSTALLED, True)
if self.update_pkgs(res['installed'], as_installed=as_installed, types=res['types']):
self._hide_filters_no_packages()
self._update_bts_installed_and_suggestions()
self._reorganize()
self.load_suggestions = False
self.types_changed = False
def load_without_packages(self):
self.load_suggestions = False
self._handle_console_option(False)
self._finish_refresh_packages({'installed': None, 'types': None}, as_installed=False)
def _begin_load_suggestions(self, filter_installed: bool):
self.search_bar.clear()
self._begin_action(self.i18n['manage_window.status.suggestions'])
self._handle_console_option(False)
self.comp_manager.set_components_visible(False)
self.suggestions_requested = True
self.thread_suggestions.filter_installed = filter_installed
self.thread_suggestions.start()
def _finish_load_suggestions(self, res: dict):
self._finish_search(res)
def begin_uninstall(self, pkg: PackageView):
pwd, proceed = self._ask_root_password(SoftwareAction.UNINSTALL, pkg)
if not proceed:
return
self._begin_action(action_label='{} {}'.format(self.i18n['manage_window.status.uninstalling'], pkg.model.name),
action_id=ACTION_UNINSTALL)
self.comp_manager.set_groups_visible(False, GROUP_UPPER_BAR, GROUP_LOWER_BTS)
self._handle_console_option(True)
self.thread_uninstall.pkg = pkg
self.thread_uninstall.root_pwd = pwd
self.thread_uninstall.start()
def _finish_uninstall(self, res: dict):
self._finish_action(action_id=ACTION_UNINSTALL)
if res['success']:
src_pkg = res['pkg']
if self._can_notify_user():
util.notify_user('{} ({}) {}'.format(src_pkg.model.name, src_pkg.model.get_type(), self.i18n['uninstalled']))
if res['removed']:
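                # removes the uninstalled models from the available, displayed and installed package lists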
for list_idx, pkg_list in enumerate((self.pkgs_available, self.pkgs, self.pkgs_installed)):
if pkg_list:
removed_idxs = []
for pkgv_idx, pkgv in enumerate(pkg_list):
if len(removed_idxs) == len(res['removed']):
break
for model in res['removed']:
if pkgv.model == model:
if list_idx == 0: # updates the model
pkgv.update_model(model)
if not self.search_performed or list_idx == 2: # always from the installed packages
removed_idxs.append(pkgv_idx)
if self.search_performed and list_idx == 1: # only for displayed
self.table_apps.update_package(pkgv, change_update_col=True)
                                    break # the model has been found, so stop the loop
if removed_idxs:
# updating the list
removed_idxs.sort()
for decrement, pkg_idx in enumerate(removed_idxs):
del pkg_list[pkg_idx - decrement]
                            if list_idx == 1: # updates the rows when the current list represents the displayed packages
for decrement, idx in enumerate(removed_idxs):
self.table_apps.removeRow(idx - decrement)
self._update_table_indexes()
self.update_bt_upgrade()
self.update_custom_actions()
self._show_console_checkbox_if_output()
notify_tray()
else:
self._show_console_errors()
if self._can_notify_user():
util.notify_user('{}: {}'.format(res['pkg'].model.name, self.i18n['notification.uninstall.failed']))
def _update_table_indexes(self):
if self.pkgs:
for new_idx, pkgv in enumerate(self.pkgs): # updating the package indexes
pkgv.table_index = new_idx
def begin_launch_package(self, pkg: PackageView):
self._begin_action(action_label=self.i18n['manage_window.status.running_app'].format(pkg.model.name),
action_id=ACTION_LAUNCH)
self.comp_manager.disable_visible()
self.thread_launch.pkg = pkg
self.thread_launch.start()
def _finish_launch_package(self, success: bool):
self._finish_action(action_id=ACTION_LAUNCH)
def _can_notify_user(self):
return bool(self.config['system']['notifications']) and (self.isHidden() or self.isMinimized())
def _change_label_status(self, status: str):
self.label_status.setText(status)
def _change_label_substatus(self, substatus: str):
self.label_substatus.setText('<p>{}</p>'.format(substatus))
if not substatus:
self.toolbar_substatus.hide()
elif not self.toolbar_substatus.isVisible() and self.progress_bar.isVisible():
self.toolbar_substatus.show()
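    # Readjusts the table headers and the window width after the displayed content changes.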
def _reorganize(self):
if not self._maximized:
self.table_apps.change_headers_policy(QHeaderView.Stretch)
self.table_apps.change_headers_policy()
self._resize(accept_lower_width=len(self.pkgs) > 0)
def _update_table(self, pkgs_info: dict, signal: bool = False):
self.pkgs = pkgs_info['pkgs_displayed']
if pkgs_info['not_installed'] == 0:
update_check = sum_updates_displayed(pkgs_info) > 0
else:
update_check = False
self.table_apps.update_packages(self.pkgs, update_check_enabled=update_check)
if not self._maximized:
self.label_displayed.show()
self.table_apps.change_headers_policy(QHeaderView.Stretch)
self.table_apps.change_headers_policy()
self._resize(accept_lower_width=len(self.pkgs) > 0)
if len(self.pkgs) == 0 and len(self.pkgs_available) == 0:
self.label_displayed.setText('')
else:
self.label_displayed.setText('{} / {}'.format(len(self.pkgs), len(self.pkgs_available)))
else:
self.label_displayed.hide()
if signal:
self.signal_table_update.emit()
def update_bt_upgrade(self, pkgs_info: dict = None):
show_bt_upgrade = False
if not any([self.suggestions_requested, self.search_performed]) and (not pkgs_info or pkgs_info['not_installed'] == 0):
for pkg in (pkgs_info['pkgs_displayed'] if pkgs_info else self.pkgs):
if not pkg.model.is_update_ignored() and pkg.update_checked:
show_bt_upgrade = True
break
self.comp_manager.set_component_visible(BT_UPGRADE, show_bt_upgrade)
if show_bt_upgrade:
self._reorganize()
def change_update_state(self, pkgs_info: dict, trigger_filters: bool = True, keep_selected: bool = False):
self.update_bt_upgrade(pkgs_info)
if pkgs_info['updates'] > 0:
if pkgs_info['not_installed'] == 0:
if not self.comp_manager.is_visible(CHECK_UPDATES):
self.comp_manager.set_component_visible(CHECK_UPDATES, True)
if not self.filter_updates and not keep_selected:
self._change_checkbox(self.check_updates, True, 'filter_updates', trigger_filters)
if pkgs_info['napp_updates'] > 0 and self.filter_only_apps and not keep_selected:
self._change_checkbox(self.check_apps, False, 'filter_only_apps', trigger_filters)
else:
if not keep_selected:
self._change_checkbox(self.check_updates, False, 'filter_updates', trigger_filters)
self.comp_manager.set_component_visible(CHECK_UPDATES, False)
def _change_checkbox(self, checkbox: QCheckBox, checked: bool, attr: str = None, trigger: bool = True):
if not trigger:
checkbox.blockSignals(True)
checkbox.setChecked(checked)
if not trigger:
setattr(self, attr, checked)
checkbox.blockSignals(False)
def _gen_filters(self, ignore_updates: bool = False) -> dict:
return {
'only_apps': False if self.search_performed else self.filter_only_apps,
'type': self.type_filter,
'category': self.category_filter,
'updates': False if ignore_updates else self.filter_updates,
'name': self.input_name.text().lower() if self.input_name.text() else None,
'display_limit': None if self.filter_updates else self.display_limit
}
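    # Rebuilds the package views, applies the current filters and refreshes the table. Returns False when it falls back to loading suggestions.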
def update_pkgs(self, new_pkgs: Optional[List[SoftwarePackage]], as_installed: bool, types: Optional[Set[type]] = None, ignore_updates: bool = False, keep_filters: bool = False) -> bool:
self.input_name.set_text('')
pkgs_info = commons.new_pkgs_info()
filters = self._gen_filters(ignore_updates=ignore_updates)
if new_pkgs is not None:
old_installed = None
if as_installed:
old_installed = self.pkgs_installed
self.pkgs_installed = []
for pkg in new_pkgs:
app_model = PackageView(model=pkg, i18n=self.i18n)
commons.update_info(app_model, pkgs_info)
commons.apply_filters(app_model, filters, pkgs_info)
if old_installed and types:
for pkgv in old_installed:
if pkgv.model.__class__ not in types:
commons.update_info(pkgv, pkgs_info)
commons.apply_filters(pkgv, filters, pkgs_info)
else: # use installed
for pkgv in self.pkgs_installed:
commons.update_info(pkgv, pkgs_info)
commons.apply_filters(pkgv, filters, pkgs_info)
if pkgs_info['apps_count'] == 0:
if self.load_suggestions or self.types_changed:
if as_installed:
self.pkgs_installed = pkgs_info['pkgs']
self._begin_load_suggestions(filter_installed=False)
self.load_suggestions = False
return False
else:
if not keep_filters:
self._change_checkbox(self.check_apps, False, 'filter_only_apps', trigger=False)
self.check_apps.setCheckable(False)
else:
if not keep_filters:
self.check_apps.setCheckable(True)
self._change_checkbox(self.check_apps, True, 'filter_only_apps', trigger=False)
self.change_update_state(pkgs_info=pkgs_info, trigger_filters=False, keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self._update_categories(pkgs_info['categories'], keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self._update_type_filters(pkgs_info['available_types'], keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self._apply_filters(pkgs_info, ignore_updates=ignore_updates)
self.change_update_state(pkgs_info=pkgs_info, trigger_filters=False, keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self.pkgs_available = pkgs_info['pkgs']
if as_installed:
self.pkgs_installed = pkgs_info['pkgs']
self.pkgs = pkgs_info['pkgs_displayed']
self._update_table(pkgs_info=pkgs_info)
if new_pkgs:
self.stop_notifying_package_states()
self.thread_notify_pkgs_ready.work = True
self.thread_notify_pkgs_ready.pkgs = self.pkgs
self.thread_notify_pkgs_ready.start()
self._resize(accept_lower_width=bool(self.pkgs_installed))
if self.first_refresh:
qt_utils.centralize(self)
self.first_refresh = False
if not self.installed_loaded and as_installed:
self.installed_loaded = True
return True
def _apply_filters(self, pkgs_info: dict, ignore_updates: bool):
pkgs_info['pkgs_displayed'] = []
filters = self._gen_filters(ignore_updates=ignore_updates)
for pkgv in pkgs_info['pkgs']:
commons.apply_filters(pkgv, filters, pkgs_info)
def _clean_combo_types(self):
if self.combo_filter_type.count() > 1:
for _ in range(self.combo_filter_type.count() - 1):
self.combo_filter_type.removeItem(1)
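    # Repopulates the type filter combo with the available package types, keeping the current selection when requested.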
def _update_type_filters(self, available_types: dict = None, keep_selected: bool = False):
if available_types is None:
self.comp_manager.set_component_visible(COMBO_TYPES, self.combo_filter_type.count() > 2)
else:
keeping_selected = keep_selected and available_types and self.type_filter in available_types
if not keeping_selected:
self.type_filter = self.any_type_filter
if not available_types:
self._clean_combo_types()
if available_types:
self._clean_combo_types()
sel_type = -1
for idx, item in enumerate(available_types.items()):
app_type, icon_path, label = item[0], item[1]['icon'], item[1]['label']
icon = self.cache_type_filter_icons.get(app_type)
if not icon:
icon = QIcon(icon_path)
self.cache_type_filter_icons[app_type] = icon
self.combo_filter_type.addItem(icon, label, app_type)
if keeping_selected and app_type == self.type_filter:
sel_type = idx + 1
self.combo_filter_type.blockSignals(True)
self.combo_filter_type.setCurrentIndex(sel_type if sel_type > -1 else 0)
self.combo_filter_type.blockSignals(False)
self.comp_manager.set_component_visible(COMBO_TYPES, len(available_types) > 1)
else:
self.comp_manager.set_component_visible(COMBO_TYPES, False)
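    # Repopulates the category filter combo, keeping the current selection when requested.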
def _update_categories(self, categories: Set[str] = None, keep_selected: bool = False):
if categories is None:
self.comp_manager.set_component_visible(COMBO_CATEGORIES, self.combo_categories.count() > 1)
else:
keeping_selected = keep_selected and categories and self.category_filter in categories
if not keeping_selected:
self.category_filter = self.any_category_filter
if categories:
if self.combo_categories.count() > 1:
for _ in range(self.combo_categories.count() - 1):
self.combo_categories.removeItem(1)
selected_cat = -1
cat_list = list(categories)
cat_list.sort()
for idx, c in enumerate(cat_list):
self.__add_category(c)
if keeping_selected and c == self.category_filter:
selected_cat = idx + 1
self.combo_categories.blockSignals(True)
self.combo_categories.setCurrentIndex(selected_cat if selected_cat > -1 else 0)
self.combo_categories.blockSignals(False)
self.comp_manager.set_component_visible(COMBO_CATEGORIES, True)
else:
self.comp_manager.set_component_visible(COMBO_CATEGORIES, False)
def __add_category(self, category: str):
i18n_cat = self.i18n.get('category.{}'.format(category), self.i18n.get(category, category))
self.combo_categories.addItem(i18n_cat.capitalize(), category)
def _get_current_categories(self) -> Set[str]:
if self.combo_categories.count() > 1:
return {self.combo_categories.itemData(idx) for idx in range(self.combo_categories.count()) if idx > 0}
def _resize(self, accept_lower_width: bool = True):
table_width = self.table_apps.get_width()
toolbar_width = self.toolbar_filters.sizeHint().width()
topbar_width = self.toolbar_status.sizeHint().width()
new_width = max(table_width, toolbar_width, topbar_width)
        new_width *= 1.05 # extra width to accommodate the table's upgrade toggle buttons (not the toolbar buttons)
if (self.pkgs and accept_lower_width) or new_width > self.width():
self.resize(int(new_width), self.height())
def set_progress_controll(self, enabled: bool):
self.progress_controll_enabled = enabled
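    # Asks for confirmation and dispatches the currently displayed packages to the upgrade thread.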
def upgrade_selected(self):
body = QWidget()
body.setLayout(QHBoxLayout())
body.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Preferred)
body.layout().addWidget(QLabel(self.i18n['manage_window.upgrade_all.popup.body']))
body.layout().addWidget(UpgradeToggleButton(pkg=None, root=self, i18n=self.i18n, clickable=False))
if ConfirmationDialog(title=self.i18n['manage_window.upgrade_all.popup.title'],
i18n=self.i18n, body=None,
widgets=[body]).ask():
self._begin_action(action_label=self.i18n['manage_window.status.upgrading'],
action_id=ACTION_UPGRADE)
self.comp_manager.set_components_visible(False)
self._handle_console_option(True)
self.thread_update.pkgs = self.pkgs
self.thread_update.start()
def _finish_upgrade_selected(self, res: dict):
self._finish_action()
if res.get('id'):
output = self.textarea_details.toPlainText()
if output:
try:
Path(UpgradeSelected.UPGRADE_LOGS_DIR).mkdir(parents=True, exist_ok=True)
logs_path = '{}/{}.log'.format(UpgradeSelected.UPGRADE_LOGS_DIR, res['id'])
with open(logs_path, 'w+') as f:
f.write(output)
self.textarea_details.appendPlainText('\n*Upgrade summary generated at: {}'.format(UpgradeSelected.SUMMARY_FILE.format(res['id'])))
self.textarea_details.appendPlainText('*Upgrade logs generated at: {}'.format(logs_path))
                except Exception:
traceback.print_exc()
if res['success']:
self.comp_manager.remove_saved_state(ACTION_UPGRADE)
self.begin_refresh_packages(pkg_types=res['types'])
self._show_console_checkbox_if_output()
if self._can_notify_user():
util.notify_user('{} {}'.format(res['updated'], self.i18n['notification.update_selected.success']))
notify_tray()
else:
self.comp_manager.restore_state(ACTION_UPGRADE)
self._show_console_errors()
if self._can_notify_user():
util.notify_user(self.i18n['notification.update_selected.failed'])
self.update_custom_actions()
def _show_console_errors(self):
if self.textarea_details.toPlainText():
self.check_details.setChecked(True)
else:
self._handle_console_option(False)
self.comp_manager.set_component_visible(CHECK_DETAILS, False)
def _update_action_output(self, output: str):
self.textarea_details.appendPlainText(output)
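    # Starts the progress animation, saves the visible component states (when an action id is given) and disables the table while an action runs.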
def _begin_action(self, action_label: str, action_id: int = None):
self.thread_animate_progress.stop = False
self.thread_animate_progress.start()
self.progress_bar.setVisible(True)
if action_id is not None:
self.comp_manager.save_states(action_id, only_visible=True)
self._set_table_enabled(False)
self.comp_manager.set_component_visible(SEARCH_BAR, False)
self._change_status(action_label)
def _set_lower_buttons_visible(self, visible: bool):
self.comp_manager.set_group_visible(GROUP_LOWER_BTS, visible)
if visible:
self.comp_manager.set_component_visible(BT_CUSTOM_ACTIONS, bool(self.custom_actions))
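    # Stops the progress animation, restores the saved component states (when an action id is given) and re-enables the table.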
def _finish_action(self, action_id: int = None):
self.thread_animate_progress.stop = True
self.thread_animate_progress.wait(msecs=1000)
self.progress_bar.setVisible(False)
self.progress_bar.setValue(0)
self.progress_bar.setTextVisible(False)
if action_id is not None:
self.comp_manager.restore_state(action_id)
self.comp_manager.set_component_visible(SEARCH_BAR, True)
self._change_status()
self._change_label_substatus('')
self._set_table_enabled(True)
self.progress_controll_enabled = True
def begin_downgrade(self, pkg: PackageView):
pwd, proceed = self._ask_root_password(SoftwareAction.DOWNGRADE, pkg)
if not proceed:
return
self._begin_action(action_label='{} {}'.format(self.i18n['manage_window.status.downgrading'], pkg.model.name),
action_id=ACTION_DOWNGRADE)
self.comp_manager.set_components_visible(False)
self._handle_console_option(True)
self.thread_downgrade.pkg = pkg
self.thread_downgrade.root_pwd = pwd
self.thread_downgrade.start()
def _finish_downgrade(self, res: dict):
self._finish_action()
if res['success']:
self.comp_manager.remove_saved_state(ACTION_DOWNGRADE)
if self._can_notify_user():
util.notify_user('{} {}'.format(res['app'], self.i18n['downgraded']))
self.begin_refresh_packages(pkg_types={res['app'].model.__class__} if len(self.pkgs) > 1 else None)
self._show_console_checkbox_if_output()
self.update_custom_actions()
notify_tray()
else:
self.comp_manager.restore_state(ACTION_DOWNGRADE)
self._show_console_errors()
if self._can_notify_user():
util.notify_user(self.i18n['notification.downgrade.failed'])
def begin_show_info(self, pkg: dict):
self._begin_action(self.i18n['manage_window.status.info'], action_id=ACTION_INFO)
self.comp_manager.disable_visible()
self.thread_show_info.pkg = pkg
self.thread_show_info.start()
def _finish_show_info(self, pkg_info: dict):
self._finish_action(action_id=ACTION_INFO)
if pkg_info:
if len(pkg_info) > 1:
dialog_info = InfoDialog(pkg_info=pkg_info, icon_cache=self.icon_cache,
i18n=self.i18n, screen_size=self.screen_size)
dialog_info.exec_()
else:
dialog.show_message(title=self.i18n['warning'].capitalize(),
body=self.i18n['manage_window.info.no_info'].format(bold(pkg_info['__app__'].model.name)),
type_=MessageType.WARNING)
def begin_show_screenshots(self, pkg: PackageView):
self._begin_action(action_label=self.i18n['manage_window.status.screenshots'].format(bold(pkg.model.name)),
action_id=ACTION_SCREENSHOTS)
self.comp_manager.disable_visible()
self.thread_screenshots.pkg = pkg
self.thread_screenshots.start()
def _finish_show_screenshots(self, res: dict):
self._finish_action(ACTION_SCREENSHOTS)
if res.get('screenshots'):
diag = ScreenshotsDialog(pkg=res['pkg'],
http_client=self.http_client,
icon_cache=self.icon_cache,
logger=self.logger,
i18n=self.i18n,
screenshots=res['screenshots'])
diag.exec_()
else:
dialog.show_message(title=self.i18n['error'],
body=self.i18n['popup.screenshots.no_screenshot.body'].format(bold(res['pkg'].model.name)),
type_=MessageType.ERROR)
def begin_show_history(self, pkg: PackageView):
self._begin_action(self.i18n['manage_window.status.history'], action_id=ACTION_HISTORY)
self.comp_manager.disable_visible()
self.thread_show_history.pkg = pkg
self.thread_show_history.start()
def _finish_show_history(self, res: dict):
self._finish_action(ACTION_HISTORY)
if res.get('error'):
self._handle_console_option(True)
self.textarea_details.appendPlainText(res['error'])
self.check_details.setChecked(True)
elif not res['history'].history:
dialog.show_message(title=self.i18n['action.history.no_history.title'],
body=self.i18n['action.history.no_history.body'].format(bold(res['history'].pkg.name)),
type_=MessageType.WARNING)
else:
dialog_history = HistoryDialog(res['history'], self.icon_cache, self.i18n)
dialog_history.exec_()
def _begin_search(self, word, action_id: int = None):
self.filter_updates = False
self._begin_action('{} {}'.format(self.i18n['manage_window.status.searching'], word if word else ''), action_id=action_id)
def search(self):
word = self.search_bar.text().strip()
if word:
self._handle_console(False)
self._begin_search(word, action_id=ACTION_SEARCH)
self.comp_manager.set_components_visible(False)
self.thread_search.word = word
self.thread_search.start()
def _finish_search(self, res: dict):
self._finish_action()
self.search_performed = True
if not res['error']:
self.comp_manager.set_group_visible(GROUP_VIEW_SEARCH, True)
self.update_pkgs(res['pkgs_found'], as_installed=False, ignore_updates=True)
self._set_lower_buttons_visible(True)
self._update_bts_installed_and_suggestions()
self._hide_filters_no_packages()
self._reorganize()
else:
self.comp_manager.restore_state(ACTION_SEARCH)
dialog.show_message(title=self.i18n['warning'].capitalize(), body=self.i18n[res['error']], type_=MessageType.WARNING)
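    # Prompts for the root password when the given action requires it and the current user is not root. Returns (password, proceed).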
def _ask_root_password(self, action: SoftwareAction, pkg: PackageView) -> Tuple[Optional[str], bool]:
pwd = None
requires_root = self.manager.requires_root(action, pkg.model)
if not user.is_root() and requires_root:
valid, pwd = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager)
if not valid:
return pwd, False
return pwd, True
def install(self, pkg: PackageView):
pwd, proceed = self._ask_root_password(SoftwareAction.INSTALL, pkg)
if not proceed:
return
self._begin_action('{} {}'.format(self.i18n['manage_window.status.installing'], pkg.model.name), action_id=ACTION_INSTALL)
self.comp_manager.set_groups_visible(False, GROUP_UPPER_BAR, GROUP_LOWER_BTS)
self._handle_console_option(True)
self.thread_install.pkg = pkg
self.thread_install.root_pwd = pwd
self.thread_install.start()
def _finish_install(self, res: dict):
self._finish_action(action_id=ACTION_INSTALL)
console_output = self.textarea_details.toPlainText()
if console_output:
log_path = f"{LOGS_DIR}/install/{res['pkg'].model.get_type()}/{res['pkg'].model.name}"
try:
Path(log_path).mkdir(parents=True, exist_ok=True)
log_file = f'{log_path}/{int(time.time())}.log'
with open(log_file, 'w+') as f:
f.write(console_output)
self.textarea_details.appendPlainText(self.i18n['console.install_logs.path'].format('"{}"'.format(log_file)))
            except Exception:
self.textarea_details.appendPlainText("[warning] Could not write install log file to '{}'".format(log_path))
if res['success']:
if self._can_notify_user():
util.notify_user(msg='{} ({}) {}'.format(res['pkg'].model.name, res['pkg'].model.get_type(), self.i18n['installed']))
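            # gathers every model affected by the installation (installed and removed) to refresh the cached views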
models_updated = []
for key in ('installed', 'removed'):
if res.get(key):
models_updated.extend(res[key])
if models_updated:
installed_available_idxs = []
for idx, available in enumerate(self.pkgs_available):
for pidx, model in enumerate(models_updated):
if available.model == model:
available.update_model(model)
if model.installed:
installed_available_idxs.append((idx, pidx, available))
                # re-indexing all installed packages so they are always displayed when no filters are applied
if installed_available_idxs:
# removing from available
installed_available_idxs.sort(key=operator.itemgetter(0))
for decrement, data in enumerate(installed_available_idxs):
del self.pkgs_available[data[0] - decrement]
# re-inserting into the available
installed_available_idxs.sort(key=operator.itemgetter(1))
for new_idx, data in enumerate(installed_available_idxs):
self.pkgs_available.insert(new_idx, data[2])
# updating the respective table rows:
for displayed in self.pkgs:
for model in models_updated:
if displayed.model == model:
self.table_apps.update_package(displayed, change_update_col=True)
self.update_bt_upgrade()
# updating installed packages
if res['removed'] and self.pkgs_installed:
to_remove = []
for idx, installed in enumerate(self.pkgs_installed):
for removed in res['removed']:
if installed.model == removed:
to_remove.append(idx)
if to_remove:
to_remove.sort()
for decrement, idx in enumerate(to_remove):
del self.pkgs_installed[idx - decrement]
if res['installed']:
for idx, model in enumerate(res['installed']):
self.pkgs_installed.insert(idx, PackageView(model, self.i18n))
self.update_custom_actions()
self.table_apps.change_headers_policy(policy=QHeaderView.Stretch, maximized=self._maximized)
self.table_apps.change_headers_policy(policy=QHeaderView.ResizeToContents, maximized=self._maximized)
self._resize(accept_lower_width=False)
else:
self._show_console_errors()
if self._can_notify_user():
util.notify_user('{}: {}'.format(res['pkg'].model.name, self.i18n['notification.install.failed']))
def _update_progress(self, value: int):
self.progress_bar.setValue(value)
def begin_execute_custom_action(self, pkg: Optional[PackageView], action: CustomSoftwareAction):
if pkg is None and action.requires_confirmation and \
not ConfirmationDialog(title=self.i18n['confirmation'].capitalize(),
body='<p>{}</p>'.format(self.i18n['custom_action.proceed_with'].capitalize().format(bold(self.i18n[action.i18n_label_key]))),
icon=QIcon(action.icon_path) if action.icon_path else QIcon(resource.get_path('img/logo.svg')),
i18n=self.i18n).ask():
return False
pwd = None
if not user.is_root() and action.requires_root:
valid, pwd = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager)
if not valid:
return
self._begin_action(action_label='{}{}'.format(self.i18n[action.i18n_status_key], ' {}'.format(pkg.model.name) if pkg else ''),
action_id=ACTION_CUSTOM_ACTION)
self.comp_manager.set_components_visible(False)
self._handle_console_option(True)
self.thread_custom_action.pkg = pkg
self.thread_custom_action.root_pwd = pwd
self.thread_custom_action.custom_action = action
self.thread_custom_action.start()
def _finish_execute_custom_action(self, res: dict):
self._finish_action()
if res['success']:
if res['action'].refresh:
self.comp_manager.remove_saved_state(ACTION_CUSTOM_ACTION)
self.begin_refresh_packages(pkg_types={res['pkg'].model.__class__} if res['pkg'] else None)
else:
self.comp_manager.restore_state(ACTION_CUSTOM_ACTION)
self._show_console_checkbox_if_output()
else:
self.comp_manager.restore_state(ACTION_CUSTOM_ACTION)
self._show_console_errors()
if res['error']:
dialog.show_message(title=self.i18n['warning' if res['error_type'] == MessageType.WARNING else 'error'].capitalize(),
body=self.i18n[res['error']],
type_=res['error_type'])
def _show_console_checkbox_if_output(self):
if self.textarea_details.toPlainText():
self.comp_manager.set_component_visible(CHECK_DETAILS, True)
else:
self.comp_manager.set_component_visible(CHECK_DETAILS, False)
def show_settings(self):
if self.settings_window:
self.settings_window.handle_display()
else:
self.settings_window = SettingsWindow(self.manager, self.i18n, self.screen_size, self)
self.settings_window.setMinimumWidth(int(self.screen_size.width() / 4))
self.settings_window.resize(self.size())
self.settings_window.adjustSize()
qt_utils.centralize(self.settings_window)
self.settings_window.show()
def _map_custom_action(self, action: CustomSoftwareAction, parent: QWidget) -> QCustomMenuAction:
if action.icon_path:
try:
if action.icon_path.startswith('/'):
icon = QIcon(action.icon_path)
else:
icon = QIcon.fromTheme(action.icon_path)
except:
icon = None
else:
icon = None
return QCustomMenuAction(parent=parent,
label=self.i18n[action.i18n_label_key],
action=lambda: self.begin_execute_custom_action(None, action),
icon=icon)
def show_custom_actions(self):
if self.custom_actions:
menu_row = QMenu()
menu_row.setCursor(QCursor(Qt.PointingHandCursor))
actions = [self._map_custom_action(a, menu_row) for a in self.custom_actions]
menu_row.addActions(actions)
menu_row.adjustSize()
menu_row.popup(QCursor.pos())
menu_row.exec_()
def begin_ignore_updates(self, pkg: PackageView):
status_key = 'ignore_updates' if not pkg.model.is_update_ignored() else 'ignore_updates_reverse'
self._begin_action(action_label=self.i18n['manage_window.status.{}'.format(status_key)].format(pkg.model.name),
action_id=ACTION_IGNORE_UPDATES)
self.comp_manager.disable_visible()
self.thread_ignore_updates.pkg = pkg
self.thread_ignore_updates.start()
def finish_ignore_updates(self, res: dict):
self._finish_action(action_id=ACTION_IGNORE_UPDATES)
if res['success']:
hide_package = commons.is_package_hidden(res['pkg'], self._gen_filters())
if hide_package:
idx_to_remove = None
for pkg in self.pkgs:
if pkg == res['pkg']:
idx_to_remove = pkg.table_index
break
if idx_to_remove is not None:
del self.pkgs[idx_to_remove]
self.table_apps.removeRow(idx_to_remove)
self._update_table_indexes()
self.update_bt_upgrade()
else:
for pkg in self.pkgs:
if pkg == res['pkg']:
pkg.update_model(res['pkg'].model)
self.table_apps.update_package(pkg, change_update_col=not any([self.search_performed, self.suggestions_requested]))
self.update_bt_upgrade()
break
for pkg_list in (self.pkgs_available, self.pkgs_installed):
if pkg_list:
for pkg in pkg_list:
if pkg == res['pkg']:
pkg.update_model(res['pkg'].model)
break
self._add_pkg_categories(res['pkg'])
dialog.show_message(title=self.i18n['success'].capitalize(),
body=self.i18n['action.{}.success'.format(res['action'])].format(bold(res['pkg'].model.name)),
type_=MessageType.INFO)
else:
dialog.show_message(title=self.i18n['fail'].capitalize(),
body=self.i18n['action.{}.fail'.format(res['action'])].format(bold(res['pkg'].model.name)),
type_=MessageType.ERROR)
def _add_pkg_categories(self, pkg: PackageView):
if pkg.model.categories:
pkg_categories = {c.strip().lower() for c in pkg.model.categories if c and c.strip()}
if pkg_categories:
current_categories = self._get_current_categories()
if current_categories:
pkg_categories = {c.strip().lower() for c in pkg.model.categories if c}
if pkg_categories:
categories_to_add = {c for c in pkg_categories if c and c not in current_categories}
if categories_to_add:
for cat in categories_to_add:
self.__add_category(cat)
else:
self._update_categories(pkg_categories)
def _map_theme_action(self, theme: ThemeMetadata, menu: QMenu) -> QCustomMenuAction:
def _change_theme():
set_theme(theme_key=theme.key, app=QApplication.instance(), logger=self.context.logger)
self.thread_save_theme.theme_key = theme.key
self.thread_save_theme.start()
return QCustomMenuAction(label=theme.get_i18n_name(self.i18n),
action=_change_theme,
parent=menu,
tooltip=theme.get_i18n_description(self.i18n))
def show_themes(self):
menu_row = QMenu()
menu_row.setCursor(QCursor(Qt.PointingHandCursor))
menu_row.addActions(self._map_theme_actions(menu_row))
menu_row.adjustSize()
menu_row.popup(QCursor.pos())
menu_row.exec_()
def _map_theme_actions(self, menu: QMenu) -> List[QCustomMenuAction]:
core_config = CoreConfigManager().get_config()
current_theme_key, current_action = core_config['ui']['theme'], None
actions = []
for t in read_all_themes_metadata():
if not t.abstract:
action = self._map_theme_action(t, menu)
if current_action is None and current_theme_key is not None and current_theme_key == t.key:
action.button.setProperty('current', 'true')
current_action = action
else:
actions.append(action)
if not current_action:
invalid_action = QCustomMenuAction(label=self.i18n['manage_window.bt_themes.option.invalid'], parent=menu)
invalid_action.button.setProperty('current', 'true')
current_action = invalid_action
actions.sort(key=lambda a: a.get_label())
actions.insert(0, current_action)
return actions
def reload(self):
self.thread_reload.start()
def _reload(self):
self.update_custom_actions()
self.verify_warnings()
self.types_changed = True
self.begin_refresh_packages()
|
import os
import time
import socket
from mmdet.apis import init_detector, inference_detector, show_result_pyplot, show_result_ins
import mmcv
# map
# config_file = '../configs/solo/decoupled_solo_r50_fpn_8gpu_3x.py'
# # download the checkpoint from model zoo and put it in `checkpoints/`
# checkpoint_file = '../checkpoints/DECOUPLED_SOLO_R50_3x.pth'
# config_file = '../configs/solo/solo_r50_fpn_8gpu_1x.py'
# checkpoint_file = '../checkpoints/SOLO_R50_1x.pth'
#
# config_file = '../configs/solo/solo_r50_fpn_8gpu_3x.py'
# checkpoint_file = '../checkpoints/SOLO_R50_3x.pth'
## AP
#
# config_file = './configs/solo/solo_r101_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLO_R101_3x.pth'
# config_file = '../configs/solo/decoupled_solo_r101_fpn_8gpu_3x.py'
# checkpoint_file = '../checkpoints/DECOUPLED_SOLO_R101_3x.pth'
# config_file = './configs/solov2/solov2_r101_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLOv2_R101_3x.pth'
# config_file = './configs/solov2/solov2_r101_dcn_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLOv2_R101_DCN_3x.pth'
# config_file = './configs/solov2/solov2_x101_dcn_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLOv2_X101_DCN_3x.pth'
## speed
# config_file = '../configs/solo/decoupled_solo_light_dcn_r50_fpn_8gpu_3x.py'
# checkpoint_file = '../checkpoints/DECOUPLED_SOLO_LIGHT_DCN_R50_3x.pth'
# config_file = './configs/solov2/solov2_light_512_dcn_r50_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLOv2_LIGHT_512_DCN_R50_3x.pth'
config_file = 'configs/solov2/solov2_light_448_r18_fpn_8gpu_3x.py'
checkpoint_file = './work_dir/0602/ps-X10DRG/solov2_light_448_r18_fpn_8gpu_3x/epoch_36.pth'
print(config_file)
# build the model from a config file and a checkpoint file
cuda_n = 0
print('gpu:', cuda_n)
os.environ['CUDA_VISIBLE_DEVICES'] = f'{cuda_n}'
model = init_detector(config_file, checkpoint_file, device='cuda')
#
# # test a single image
#
#
# for video_name in ['1', '2', '3']:
score_thr = 0.25
# for video_name in ['coco_72']:
# for video_name in ['Yotube-vos-3rd']:
# for video_name in ['transformed']:
save_dir = f'result/{socket.gethostname()}0530/'
# for video_name in ['cityscape_100', 'GTA5_99']:
for video_name in ['coco_72']:
# for video_name in ['Yotube-vos-3rd_rotate180']:
data_dir = f'data/{video_name}/'
out_img_dir = f"{save_dir}{config_file.split("/")[-1].split(".")[0]}/{video_name}_score_thr_{score_thr}/"
if not os.path.exists(out_img_dir):
os.makedirs(out_img_dir)
print('save', save_dir, os.path.abspath(save_dir), out_img_dir)
n = len(os.listdir(data_dir))
start = time.time()
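    # NOTE: the timing below covers the whole per-image loop (disk read, inference,
    # rendering and writing of the result), so the reported fps is end-to-end,
    # not pure model inference speed.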
# for i in range(1, 141):
for img in os.listdir(data_dir):
# img = f'{i}.jpg'
result = inference_detector(model, f'{data_dir}{img}')
show_result_ins(f'{data_dir}{img}', result, model.CLASSES, score_thr=score_thr, out_file=f"./{out_img_dir}{img}")
# print('save', os.path.abspath(f"../{out_img_dir}{img}"))
end = time.time()
# print()
# for img in os.listdir(directory):
# # print(f'{directory}{img}')
# # result = inference_detector(model, f'{directory}{img}')
# # show_result_ins(f'{directory}{img}', result, model.CLASSES, score_thr=0.25, out_file=f"../data/out/{img}")
# break
print('fps:', n/(end - start), 'n:', n)
| import os
import time
import socket
from mmdet.apis import init_detector, inference_detector, show_result_pyplot, show_result_ins
import mmcv
# map
# config_file = '../configs/solo/decoupled_solo_r50_fpn_8gpu_3x.py'
# # download the checkpoint from model zoo and put it in `checkpoints/`
# checkpoint_file = '../checkpoints/DECOUPLED_SOLO_R50_3x.pth'
# config_file = '../configs/solo/solo_r50_fpn_8gpu_1x.py'
# checkpoint_file = '../checkpoints/SOLO_R50_1x.pth'
#
# config_file = '../configs/solo/solo_r50_fpn_8gpu_3x.py'
# checkpoint_file = '../checkpoints/SOLO_R50_3x.pth'
## AP
#
# config_file = './configs/solo/solo_r101_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLO_R101_3x.pth'
# config_file = '../configs/solo/decoupled_solo_r101_fpn_8gpu_3x.py'
# checkpoint_file = '../checkpoints/DECOUPLED_SOLO_R101_3x.pth'
# config_file = './configs/solov2/solov2_r101_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLOv2_R101_3x.pth'
# config_file = './configs/solov2/solov2_r101_dcn_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLOv2_R101_DCN_3x.pth'
# config_file = './configs/solov2/solov2_x101_dcn_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLOv2_X101_DCN_3x.pth'
## speed
# config_file = '../configs/solo/decoupled_solo_light_dcn_r50_fpn_8gpu_3x.py'
# checkpoint_file = '../checkpoints/DECOUPLED_SOLO_LIGHT_DCN_R50_3x.pth'
# config_file = './configs/solov2/solov2_light_512_dcn_r50_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLOv2_LIGHT_512_DCN_R50_3x.pth'
config_file = 'configs/solov2/solov2_light_448_r18_fpn_8gpu_3x.py'
checkpoint_file = './work_dir/0602/ps-X10DRG/solov2_light_448_r18_fpn_8gpu_3x/epoch_36.pth'
print(config_file)
# build the model from a config file and a checkpoint file
cuda_n = 0
print('gpu:', cuda_n)
os.environ['CUDA_VISIBLE_DEVICES'] = f'{cuda_n}'
model = init_detector(config_file, checkpoint_file, device=f'cuda')
#
# # test a single image
#
#
# for video_name in ['1', '2', '3']:
score_thr = 0.25
# for video_name in ['coco_72']:
# for video_name in ['Yotube-vos-3rd']:
# for video_name in ['transformed']:
save_dir = f'result/{socket.gethostname()}0530/'
# for video_name in ['cityscape_100', 'GTA5_99']:
for video_name in ['coco_72']:
# for video_name in ['Yotube-vos-3rd_rotate180']:
data_dir = f'data/{video_name}/'
out_img_dir = f"{save_dir}{config_file.split('/')[-1].split('.')[0]}/{video_name}_score_thr_{score_thr}/"
if not os.path.exists(out_img_dir):
os.makedirs(out_img_dir)
print('save', save_dir, os.path.abspath(save_dir), out_img_dir)
n = len(os.listdir(data_dir))
start = time.time()
# for i in range(1, 141):
for img in os.listdir(data_dir):
# img = f'{i}.jpg'
result = inference_detector(model, f'{data_dir}{img}')
show_result_ins(f'{data_dir}{img}', result, model.CLASSES, score_thr=score_thr, out_file=f"./{out_img_dir}{img}")
# print('save', os.path.abspath(f"../{out_img_dir}{img}"))
end = time.time()
# print()
# for img in os.listdir(directory):
# # print(f'{directory}{img}')
# # result = inference_detector(model, f'{directory}{img}')
# # show_result_ins(f'{directory}{img}', result, model.CLASSES, score_thr=0.25, out_file=f"../data/out/{img}")
# break
print('fps:', n/(end - start), 'n:', n)
|
import re
import pickle
import tempfile
import pytest
from _pytest.config import Config
from _pytest._io.terminalwriter import TerminalWriter
from _pytest.reports import TestReport
from pytest_fold.tui_pytermtk import main as tuitk
from pytest_fold.tui_textual1 import main as tuitxt1
from pytest_fold.tui_textual2 import main as tuitxt2
from pytest_fold.utils import (
test_session_starts_matcher,
errors_section_matcher,
failures_section_matcher,
warnings_summary_matcher,
passes_section_matcher,
short_test_summary_matcher,
lastline_matcher,
MARKERS,
REPORTFILE,
MARKEDTERMINALOUTPUTFILE,
UNMARKEDTERMINALOUTPUTFILE,
)
# Don't collect tests from any of these files
collect_ignore = [
"setup.py",
"plugin.py",
]
# A list of TestReport objects generated by Pytest during test run.
# Each TestReport represents a single test's operation during one of
# Pytest's three phases: setup | call | teardown
reports = []
def pytest_addoption(parser):
"""Define the plugin's option flags as presented by Pytest"""
group = parser.getgroup("fold")
group.addoption(
"--fold",
action="store_true",
help="fold failed test output sections",
)
group.addoption(
"--fold-tui",
"--ft",
action="store",
default="pytermtk",
help="specify user interface ('pytermtk' ' k' | 'textual1' 't1' | 'textual2' 't2' | 'none' 'n')",
choices=["pytermtk", "k", "textual1", "t1", "textual2", "t2", "none", "n"],
)
def pytest_report_teststatus(report: TestReport, config: Config):
"""Construct list(s) of individial TestReport instances"""
reports.append(report)
@pytest.hookimpl(trylast=True)
def pytest_configure(config: Config) -> None:
"""
Write console output to a file for use by TUI
This code works by looking at every line sent by Pytest to the terminal,
and based on its category, marking or not marking it
"""
config.option.verbose = (
1 # force verbose mode for easier parsing of final test results
)
config.option.reportchars = (
"A" # force "display all" mode so all results can be shown
)
if config.option.fold:
tr = config.pluginmanager.getplugin("terminalreporter")
if tr is not None:
# identify and mark the very first line of terminal output
try:
config._pyfoldfirsttime
except AttributeError:
config._pyfoldfirsttime = True
config._pyfold_unmarked_outputfile = tempfile.TemporaryFile("wb+")
config._pyfold_marked_outputfile = tempfile.TemporaryFile("wb+")
oldwrite = tr._tw.write
# identify and mark each results section
def tee_write(s, **kwargs):
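                # Before echoing each terminal line, scan it against the section
                # regexes below; when one matches, write a MARKER line into the
                # marked tempfile so the TUI can later split the log into sections.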
if re.search(test_session_starts_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_test_session_starts"] + "\n").encode(
"utf-8"
)
)
if re.search(errors_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_errors_section"] + "\n").encode("utf-8")
)
if re.search(failures_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_failures_section"] + "\n").encode("utf-8")
)
if re.search(warnings_summary_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_warnings_summary"] + "\n").encode("utf-8")
)
if re.search(passes_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_passes_section"] + "\n").encode("utf-8")
)
if re.search(short_test_summary_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_short_test_summary"] + "\n").encode(
"utf-8"
)
)
if re.search(lastline_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_last_line"] + "\n").encode("utf-8")
)
# Write this line's text along with its markup info to console
oldwrite(s, **kwargs)
# Mark up this line's text by passing it to an instance of TerminalWriter's
# 'markup' method. Do not pass "flush" to the method or it will throw an error.
s1 = s
kwargs.pop("flush") if "flush" in kwargs.keys() else None
s1 = TerminalWriter().markup(s, **kwargs)
# Encode the marked up line so it can be written to the config object.
                # The Pytest config object can be used by plugins for conveying stateful
# info across an entire test run session.
if isinstance(s1, str):
marked_up = s1.encode("utf-8")
config._pyfold_marked_outputfile.write(marked_up)
# Write this line's original (unmarked) text to unmarked file
s_orig = s
kwargs.pop("flush") if "flush" in kwargs.keys() else None
s_orig = TerminalWriter().markup(s, **kwargs)
if isinstance(s_orig, str):
unmarked_up = s_orig.encode("utf-8")
config._pyfold_unmarked_outputfile.write(unmarked_up)
# Write to both terminal/console and tempfiles:
# _pyfold_marked_outputfile, _pyfold_unmarked_outputfile
tr._tw.write = tee_write
def pytest_unconfigure(config: Config):
"""
Write terminal and test results info to files for use by TUI
"""
# Write terminal output to file
if hasattr(config, "_pyfold_marked_outputfile"):
# get terminal contents, then write file
config._pyfold_marked_outputfile.seek(0)
markedsessionlog = config._pyfold_marked_outputfile.read()
config._pyfold_marked_outputfile.close()
if hasattr(config, "_pyfold_unmarked_outputfile"):
# get terminal contents, then write file
config._pyfold_unmarked_outputfile.seek(0)
unmarkedsessionlog = config._pyfold_unmarked_outputfile.read()
config._pyfold_unmarked_outputfile.close()
# Undo our patching in the terminal reporter
config.pluginmanager.getplugin("terminalreporter")
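    # NOTE: the lookup result above is not used, so the tee_write patch applied in
    # pytest_configure is not actually restored here.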
# Write marked-up results to file
with open(MARKEDTERMINALOUTPUTFILE, "wb") as marked_file:
marked_file.write(markedsessionlog)
# Write un-marked-up results to file
with open(UNMARKEDTERMINALOUTPUTFILE, "wb") as unmarked_file:
unmarked_file.write(unmarkedsessionlog)
# Write the reports list to file
with open(REPORTFILE, "wb") as report_file:
pickle.dump(reports, report_file)
# Launch the TUI
if config.getoption("--fold") == True:
pyfold_tui(config)
def pyfold_tui(config: Config) -> None:
"""
Final code invocation after Pytest run has completed.
This method calls the Pyfold TUI to display final results.
"""
# disable capturing while TUI runs to avoid error `redirected stdin is pseudofile, has
# no fileno()`; adapted from https://githubmemory.com/repo/jsbueno/terminedia/issues/25
if not config.getoption("--fold"):
return
capmanager = config.pluginmanager.getplugin("capturemanager")
try:
capmanager.suspend_global_capture(in_=True)
finally:
if config.getoption("--ft") in ["k", "pytermtk"]:
tuitk()
elif config.getoption("--ft") in ["t1", "textual1"]:
tuitxt1()
elif config.getoption("--ft") in ["t2", "textual2"]:
tuitxt2()
elif config.getoption("--ft") not in ["n", "none"]:
print(f"Incorrect choice for fold-tui: {config.getoption("--ft")}")
capmanager.resume_global_capture()
| import re
import pickle
import tempfile
import pytest
from _pytest.config import Config
from _pytest._io.terminalwriter import TerminalWriter
from _pytest.reports import TestReport
from pytest_fold.tui_pytermtk import main as tuitk
from pytest_fold.tui_textual1 import main as tuitxt1
from pytest_fold.tui_textual2 import main as tuitxt2
from pytest_fold.utils import (
test_session_starts_matcher,
errors_section_matcher,
failures_section_matcher,
warnings_summary_matcher,
passes_section_matcher,
short_test_summary_matcher,
lastline_matcher,
MARKERS,
REPORTFILE,
MARKEDTERMINALOUTPUTFILE,
UNMARKEDTERMINALOUTPUTFILE,
)
# Don't collect tests from any of these files
collect_ignore = [
"setup.py",
"plugin.py",
]
# A list of TestReport objects generated by Pytest during test run.
# Each TestReport represents a single test's operation during one of
# Pytest's three phases: setup | call | teardown
reports = []
def pytest_addoption(parser):
"""Define the plugin's option flags as presented by Pytest"""
group = parser.getgroup("fold")
group.addoption(
"--fold",
action="store_true",
help="fold failed test output sections",
)
group.addoption(
"--fold-tui",
"--ft",
action="store",
default="pytermtk",
help="specify user interface ('pytermtk' ' k' | 'textual1' 't1' | 'textual2' 't2' | 'none' 'n')",
choices=["pytermtk", "k", "textual1", "t1", "textual2", "t2", "none", "n"],
)
def pytest_report_teststatus(report: TestReport, config: Config):
"""Construct list(s) of individial TestReport instances"""
reports.append(report)
@pytest.hookimpl(trylast=True)
def pytest_configure(config: Config) -> None:
"""
Write console output to a file for use by TUI
This code works by looking at every line sent by Pytest to the terminal,
and based on its category, marking or not marking it
"""
config.option.verbose = (
1 # force verbose mode for easier parsing of final test results
)
config.option.reportchars = (
"A" # force "display all" mode so all results can be shown
)
if config.option.fold:
tr = config.pluginmanager.getplugin("terminalreporter")
if tr is not None:
# identify and mark the very first line of terminal output
try:
config._pyfoldfirsttime
except AttributeError:
config._pyfoldfirsttime = True
config._pyfold_unmarked_outputfile = tempfile.TemporaryFile("wb+")
config._pyfold_marked_outputfile = tempfile.TemporaryFile("wb+")
oldwrite = tr._tw.write
# identify and mark each results section
def tee_write(s, **kwargs):
if re.search(test_session_starts_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_test_session_starts"] + "\n").encode(
"utf-8"
)
)
if re.search(errors_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_errors_section"] + "\n").encode("utf-8")
)
if re.search(failures_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_failures_section"] + "\n").encode("utf-8")
)
if re.search(warnings_summary_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_warnings_summary"] + "\n").encode("utf-8")
)
if re.search(passes_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_passes_section"] + "\n").encode("utf-8")
)
if re.search(short_test_summary_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_short_test_summary"] + "\n").encode(
"utf-8"
)
)
if re.search(lastline_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_last_line"] + "\n").encode("utf-8")
)
# Write this line's text along with its markup info to console
oldwrite(s, **kwargs)
# Mark up this line's text by passing it to an instance of TerminalWriter's
# 'markup' method. Do not pass "flush" to the method or it will throw an error.
s1 = s
kwargs.pop("flush") if "flush" in kwargs.keys() else None
s1 = TerminalWriter().markup(s, **kwargs)
# Encode the marked up line so it can be written to the config object.
                # The Pytest config object can be used by plugins for conveying stateful
# info across an entire test run session.
if isinstance(s1, str):
marked_up = s1.encode("utf-8")
config._pyfold_marked_outputfile.write(marked_up)
# Write this line's original (unmarked) text to unmarked file
s_orig = s
kwargs.pop("flush") if "flush" in kwargs.keys() else None
s_orig = TerminalWriter().markup(s, **kwargs)
if isinstance(s_orig, str):
unmarked_up = s_orig.encode("utf-8")
config._pyfold_unmarked_outputfile.write(unmarked_up)
# Write to both terminal/console and tempfiles:
# _pyfold_marked_outputfile, _pyfold_unmarked_outputfile
tr._tw.write = tee_write
def pytest_unconfigure(config: Config):
"""
Write terminal and test results info to files for use by TUI
"""
# Write terminal output to file
if hasattr(config, "_pyfold_marked_outputfile"):
# get terminal contents, then write file
config._pyfold_marked_outputfile.seek(0)
markedsessionlog = config._pyfold_marked_outputfile.read()
config._pyfold_marked_outputfile.close()
if hasattr(config, "_pyfold_unmarked_outputfile"):
# get terminal contents, then write file
config._pyfold_unmarked_outputfile.seek(0)
unmarkedsessionlog = config._pyfold_unmarked_outputfile.read()
config._pyfold_unmarked_outputfile.close()
# Undo our patching in the terminal reporter
config.pluginmanager.getplugin("terminalreporter")
# Write marked-up results to file
with open(MARKEDTERMINALOUTPUTFILE, "wb") as marked_file:
marked_file.write(markedsessionlog)
# Write un-marked-up results to file
with open(UNMARKEDTERMINALOUTPUTFILE, "wb") as unmarked_file:
unmarked_file.write(unmarkedsessionlog)
# Write the reports list to file
with open(REPORTFILE, "wb") as report_file:
pickle.dump(reports, report_file)
# Launch the TUI
if config.getoption("--fold") == True:
pyfold_tui(config)
def pyfold_tui(config: Config) -> None:
"""
Final code invocation after Pytest run has completed.
This method calls the Pyfold TUI to display final results.
"""
# disable capturing while TUI runs to avoid error `redirected stdin is pseudofile, has
# no fileno()`; adapted from https://githubmemory.com/repo/jsbueno/terminedia/issues/25
if not config.getoption("--fold"):
return
capmanager = config.pluginmanager.getplugin("capturemanager")
try:
capmanager.suspend_global_capture(in_=True)
finally:
if config.getoption("--ft") in ["k", "pytermtk"]:
tuitk()
elif config.getoption("--ft") in ["t1", "textual1"]:
tuitxt1()
elif config.getoption("--ft") in ["t2", "textual2"]:
tuitxt2()
elif config.getoption("--ft") not in ["n", "none"]:
print(f"Incorrect choice for fold-tui: {config.getoption('--ft')}")
capmanager.resume_global_capture()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019, 2020 Matt Post <post@cs.jhu.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ingests data into the Anthology. It takes a list of one or more
ACLPUB proceedings/ directories and does the following:
- executes some basic sanity checks
- applies normalization to names and titles (e.g., fixed-case protection)
- generates the nested XML in the Anthology repository
- copies the PDFs and attachments into place for rsyncing to the server
Updated in March 2020, this script replaces:
- the old ingest.py (which converted the old ACLPUB flat XML format)
- anthologize.pl in ACLPUB
- anthology_xml.py in ACLPUB
"""
import argparse
import iso639
import os
import re
import readline
import shutil
import sys
import lxml.etree as etree
from collections import defaultdict, OrderedDict
from datetime import datetime
from normalize_anth import normalize
from anthology.bibtex import read_bibtex
from anthology.index import AnthologyIndex
from anthology.people import PersonName
from anthology.sigs import SIGIndex
from anthology.utils import (
make_simple_element,
build_anthology_id,
deconstruct_anthology_id,
indent,
compute_hash_from_file,
)
from anthology.venues import VenueIndex
from itertools import chain
from typing import Dict, Any
from slugify import slugify
def log(text: str, fake: bool = False):
message = "[DRY RUN] " if fake else ""
print(f"{message}{text}", file=sys.stderr)
def read_meta(path: str) -> Dict[str, Any]:
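    # The meta file is parsed as one "key value" pair per line; any key starting
    # with "chair" is accumulated into the "chairs" list. Example lines
    # (hypothetical values):
    #   abbrev naacl
    #   year 2021
    #   volume main
    #   chair1 Jane Doe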
meta = {"chairs": []}
with open(path) as instream:
for line in instream:
if re.match(r"^\s*$", line):
continue
key, value = line.rstrip().split(" ", maxsplit=1)
if key.startswith("chair"):
meta["chairs"].append(value)
else:
meta[key] = value
if "volume" in meta and re.match(rf"^[a-z0-1]+$", meta["volume"]) is None:
raise Exception(f"Invalid volume key '{meta["volume"]}' in {path}")
return meta
def maybe_copy(source_path, dest_path):
"""Copies the file if it's different from the target."""
if not os.path.exists(dest_path) or compute_hash_from_file(
source_path
) != compute_hash_from_file(dest_path):
log(f"Copying {source_path} -> {dest_path}", args.dry_run)
shutil.copyfile(source_path, dest_path)
def bib2xml(bibfilename, anthology_id):
"""
Moved here from ACLPUB's anthology_xml.py script.
"""
fields = [
'title',
'author',
'editor',
'booktitle',
'month',
'year',
'address',
'publisher',
'pages',
'abstract',
'url',
'doi',
'language',
]
try:
collection_id, volume_name, paper_no = deconstruct_anthology_id(anthology_id)
except ValueError:
print(f"Couldn't split {anthology_id}", file=sys.stderr)
sys.exit(1)
if paper_no == '':
return # skip the master bib file; we only process the individual files
bibdata = read_bibtex(bibfilename)
if len(bibdata.entries) != 1:
log(f"more than one entry in {bibfilename}")
bibkey, bibentry = bibdata.entries.items()[0]
if len(bibentry.fields) == 0:
log(f"parsing bib of paper {paper_no} failed")
sys.exit(1)
paper = make_simple_element("paper", attrib={"id": paper_no})
for field in list(bibentry.fields) + list(bibentry.persons):
if field not in fields:
log(f"unknown field {field}")
for field in fields:
if field in ['author', 'editor']:
if field in bibentry.persons:
for person in bibentry.persons[field]:
first_text = ' '.join(person.bibtex_first_names)
last_text = ' '.join(person.prelast_names + person.last_names)
if person.lineage_names:
last_text += ', ' + ' '.join(person.lineage_names)
# Don't distinguish between authors that have only a first name
# vs. authors that have only a last name; always make it a last name.
if last_text.strip() in [
'',
'-',
]: # Some START users have '-' for null
last_text = first_text
first_text = ''
name_node = make_simple_element(field, parent=paper)
make_simple_element("first", first_text, parent=name_node)
make_simple_element("last", last_text, parent=name_node)
else:
if field == 'url':
value = f"{anthology_id}"
elif field in bibentry.fields:
value = bibentry.fields[field]
elif field == 'bibtype':
value = bibentry.type
elif field == 'bibkey':
value = bibkey
else:
continue
try:
make_simple_element(field, text=value, parent=paper)
except:
print(
f"Couldn't process {bibfilename} for {anthology_id}", file=sys.stderr
)
sys.exit(2)
return paper
def main(args):
collections = defaultdict(OrderedDict)
volumes = {}
anthology_datadir = os.path.join(os.path.dirname(sys.argv[0]), "..", "data")
venue_index = VenueIndex(srcdir=anthology_datadir)
venue_keys = [venue["slug"].lower() for _, venue in venue_index.items()]
sig_index = SIGIndex(srcdir=anthology_datadir)
# Build list of volumes, confirm uniqueness
unseen_venues = []
for proceedings in args.proceedings:
meta = read_meta(os.path.join(proceedings, "meta"))
venue_abbrev = meta["abbrev"]
venue_slug = venue_index.get_slug(venue_abbrev)
if str(datetime.now().year) in venue_abbrev:
print(f"Fatal: Venue assembler put year in acronym: '{venue_abbrev}'")
sys.exit(1)
if re.match(r".*\d$", venue_abbrev) is not None:
print(
f"WARNING: Venue {venue_abbrev} ends in a number, this is probably a mistake"
)
if venue_slug not in venue_keys:
unseen_venues.append((venue_slug, venue_abbrev, meta["title"]))
meta["path"] = proceedings
meta["collection_id"] = collection_id = meta["year"] + "." + venue_slug
volume_name = meta["volume"].lower()
volume_full_id = f"{collection_id}-{volume_name}"
if volume_full_id in volumes:
print("Error: ")
collections[collection_id][volume_name] = {}
volumes[volume_full_id] = meta
if "sig" in meta:
print(
f"Add this line to {anthology_datadir}/sigs/{meta["sig"].lower()}.yaml:"
)
print(f" - {meta["year"]}:")
print(f" - {volume_full_id} # {meta["booktitle"]}")
# Make sure all venues exist
if len(unseen_venues) > 0:
for venue in unseen_venues:
slug, abbrev, title = venue
print(f"Creating venue '{abbrev}' ({title})")
venue_index.add_venue(abbrev, title)
venue_index.dump(directory=anthology_datadir)
# Copy over the PDFs and attachments
for volume, meta in volumes.items():
root_path = os.path.join(meta["path"], "cdrom")
collection_id = meta["collection_id"]
venue_name = meta["abbrev"].lower()
volume_name = meta["volume"].lower()
year = meta["year"]
pdfs_dest_dir = os.path.join(args.pdfs_dir, venue_name)
if not os.path.exists(pdfs_dest_dir):
os.makedirs(pdfs_dest_dir)
# copy the book
book_dest_path = (
os.path.join(pdfs_dest_dir, f"{collection_id}-{volume_name}") + ".pdf"
)
# try the standard filename, e.g., 2021.naacl-main.pdf
        book_src_filename = f'{year}.{meta["abbrev"]}-{volume_name}.pdf'
book_src_path = os.path.join(root_path, book_src_filename)
if not os.path.exists(book_src_path):
# try a different filename, e.g., "NLP4CALL-2021.pdf"
            book_src_filename = f'{meta["abbrev"]}-{year}.pdf'
book_src_path = os.path.join(root_path, book_src_filename)
if os.path.exists(book_src_path) and not args.dry_run:
maybe_copy(book_src_path, book_dest_path)
# copy the paper PDFs
pdf_src_dir = os.path.join(root_path, "pdf")
for pdf_file in os.listdir(pdf_src_dir):
# Skip . files
if os.path.basename(pdf_file).startswith("."):
continue
# names are {abbrev}{number}.pdf
match = re.match(rf".*\.(\d+)\.pdf", pdf_file)
if match is not None:
paper_num = int(match[1])
paper_id_full = f"{collection_id}-{volume_name}.{paper_num}"
bib_path = os.path.join(
root_path,
"bib",
pdf_file.replace("/pdf", "/bib/").replace(".pdf", ".bib"),
)
pdf_src_path = os.path.join(pdf_src_dir, pdf_file)
pdf_dest_path = os.path.join(
pdfs_dest_dir, f"{collection_id}-{volume_name}.{paper_num}.pdf"
)
if not args.dry_run:
maybe_copy(pdf_src_path, pdf_dest_path)
collections[collection_id][volume_name][paper_num] = {
"anthology_id": paper_id_full,
"bib": bib_path,
"pdf": pdf_dest_path,
"attachments": [],
}
# copy the attachments
if os.path.exists(os.path.join(root_path, "additional")):
attachments_dest_dir = os.path.join(args.attachments_dir, venue_name)
if not os.path.exists(attachments_dest_dir):
os.makedirs(attachments_dest_dir)
for attachment_file in os.listdir(os.path.join(root_path, "additional")):
if os.path.basename(attachment_file).startswith("."):
continue
attachment_file_path = os.path.join(
root_path, "additional", attachment_file
)
match = re.match(
rf"{year}\.{venue_name}-\w+\.(\d+)_?(\w+)\.(\w+)$", attachment_file
)
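                # e.g. (illustrative) with year=2021 and venue_name=naacl, the file
                # "2021.naacl-main.13_software.zip" yields paper 13, type "software",
                # extension "zip".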
if match is None:
print(
f"* Warning: no attachment match for {attachment_file}",
file=sys.stderr,
)
sys.exit(2)
paper_num, type_, ext = match.groups()
paper_num = int(paper_num)
file_name = f"{collection_id}-{volume_name}.{paper_num}.{type_}.{ext}"
dest_path = os.path.join(attachments_dest_dir, file_name)
if not args.dry_run and not os.path.exists(dest_path):
log(f"Copying {attachment_file} -> {dest_path}", args.dry_run)
shutil.copyfile(attachment_file_path, dest_path)
collections[collection_id][volume_name][paper_num]["attachments"].append(
(dest_path, type_)
)
people = AnthologyIndex(None, srcdir=anthology_datadir)
def correct_caps(person, name_node, anth_id):
"""
Many people submit their names in "ALL CAPS" or "all lowercase".
Correct this with heuristics.
"""
name = name_node.text
if name.islower() or name.isupper():
# capitalize all parts
corrected = " ".join(list(map(lambda x: x.capitalize(), name.split())))
print(
f"-> Correcting capitalization of '{name}' to '{corrected}'",
file=sys.stderr,
)
name_node.text = corrected
def disambiguate_name(node, anth_id):
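        # If the parsed name maps to more than one Anthology person id, prompt on
        # stdin until a valid choice is made and pin that id on the XML node.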
name = PersonName.from_element(node)
ids = people.get_ids(name)
if len(ids) > 1:
choice = -1
while choice < 0 or choice >= len(ids):
print(
f"({anth_id}): ambiguous author {name}; Please choose from the following:"
)
for i, id_ in enumerate(ids):
print(f"[{i}] {id_} ({people.get_comment(id_)})")
choice = int(input("--> "))
node.attrib["id"] = ids[choice]
for collection_id, collection in collections.items():
# Newly added volumes, so we can normalize and name-disambig later
newly_added_volumes = []
collection_file = os.path.join(
args.anthology_dir, "data", "xml", f"{collection_id}.xml"
)
if os.path.exists(collection_file):
root_node = etree.parse(collection_file).getroot()
else:
root_node = make_simple_element("collection", attrib={"id": collection_id})
for volume_id, volume in collection.items():
volume_node = make_simple_element(
"volume",
attrib={"id": volume_id, "ingest-date": args.ingest_date},
)
# Replace the existing one if present
existing_volume_node = root_node.find(f"./volume[@id='{volume_id}']")
for i, child in enumerate(root_node):
if child.attrib["id"] == volume_id:
root_node[i] = volume_node
break
else:
root_node.append(volume_node)
meta_node = None
for paper_num, paper in sorted(volume.items()):
paper_id_full = paper["anthology_id"]
bibfile = paper["bib"]
paper_node = bib2xml(bibfile, paper_id_full)
if paper_node.attrib["id"] == "0":
# create metadata subtree
meta_node = make_simple_element("meta", parent=volume_node)
title_node = paper_node.find("title")
title_node.tag = "booktitle"
meta_node.append(title_node)
for author_or_editor in chain(
paper_node.findall("./author"), paper_node.findall("./editor")
):
meta_node.append(author_or_editor)
author_or_editor.tag = "editor"
meta_node.append(paper_node.find("publisher"))
meta_node.append(paper_node.find("address"))
meta_node.append(paper_node.find("month"))
meta_node.append(paper_node.find("year"))
if book_dest_path is not None:
make_simple_element(
"url",
text=f"{collection_id}-{volume_name}",
attrib={"hash": compute_hash_from_file(book_dest_path)},
parent=meta_node,
)
# modify frontmatter tag
paper_node.tag = "frontmatter"
del paper_node.attrib["id"]
else:
# remove unneeded fields
for child in paper_node:
if child.tag in [
"editor",
"address",
"booktitle",
"publisher",
"year",
"month",
]:
paper_node.remove(child)
url = paper_node.find("./url")
if url is not None:
url.attrib["hash"] = compute_hash_from_file(paper["pdf"])
for path, type_ in paper["attachments"]:
make_simple_element(
"attachment",
text=os.path.basename(path),
attrib={
"type": type_,
"hash": compute_hash_from_file(path),
},
parent=paper_node,
)
if len(paper_node) > 0:
volume_node.append(paper_node)
# Normalize
for oldnode in paper_node:
normalize(oldnode, informat="latex")
# Adjust the language tag
language_node = paper_node.find("./language")
if language_node is not None:
try:
lang = iso639.languages.get(name=language_node.text)
except KeyError:
raise Exception(f"Can't find language '{language_node.text}'")
language_node.text = lang.part3
print(language_node.text)
# Fix author names
for name_node in chain(
paper_node.findall("./author"), paper_node.findall("./editor")
):
disambiguate_name(name_node, paper_id_full)
person = PersonName.from_element(name_node)
for name_part in name_node:
correct_caps(person, name_part, paper_id_full)
# Other data from the meta file
if "isbn" in meta:
make_simple_element("isbn", meta["isbn"], parent=meta_node)
indent(root_node)
tree = etree.ElementTree(root_node)
tree.write(
collection_file, encoding="UTF-8", xml_declaration=True, with_tail=True
)
if __name__ == "__main__":
now = datetime.now()
today = f"{now.year}-{now.month:02d}-{now.day:02d}"
parser = argparse.ArgumentParser()
parser.add_argument(
"proceedings", nargs="+", help="List of paths to ACLPUB proceedings/ directories."
)
parser.add_argument(
"--ingest-date",
"-d",
type=str,
default=today,
help="Ingestion date as YYYY-MM-DD. Default: %(default)s.",
)
anthology_path = os.path.join(os.path.dirname(sys.argv[0]), "..")
parser.add_argument(
"--anthology-dir",
"-r",
default=anthology_path,
help="Root path of ACL Anthology Github repo. Default: %(default)s.",
)
pdfs_path = os.path.join(os.environ["HOME"], "anthology-files", "pdf")
parser.add_argument(
"--pdfs-dir", "-p", default=pdfs_path, help="Root path for placement of PDF files"
)
attachments_path = os.path.join(os.environ["HOME"], "anthology-files", "attachments")
parser.add_argument(
"--attachments-dir",
"-a",
default=attachments_path,
help="Root path for placement of PDF files",
)
parser.add_argument(
"--dry-run", "-n", action="store_true", help="Don't actually copy anything."
)
args = parser.parse_args()
main(args)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019, 2020 Matt Post <post@cs.jhu.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ingests data into the Anthology. It takes a list of one or more
ACLPUB proceedings/ directories and does the following:
- executes some basic sanity checks
- applies normalization to names and titles (e.g., fixed-case protection)
- generates the nested XML in the Anthology repository
- copies the PDFs and attachments into place for rsyncing to the server
Updated in March 2020, this script replaces:
- the old ingest.py (which converted the old ACLPUB flat XML format)
- anthologize.pl in ACLPUB
- anthology_xml.py in ACLPUB
"""
import argparse
import iso639
import os
import re
import readline
import shutil
import sys
import lxml.etree as etree
from collections import defaultdict, OrderedDict
from datetime import datetime
from normalize_anth import normalize
from anthology.bibtex import read_bibtex
from anthology.index import AnthologyIndex
from anthology.people import PersonName
from anthology.sigs import SIGIndex
from anthology.utils import (
make_simple_element,
build_anthology_id,
deconstruct_anthology_id,
indent,
compute_hash_from_file,
)
from anthology.venues import VenueIndex
from itertools import chain
from typing import Dict, Any
from slugify import slugify
def log(text: str, fake: bool = False):
message = "[DRY RUN] " if fake else ""
print(f"{message}{text}", file=sys.stderr)
def read_meta(path: str) -> Dict[str, Any]:
meta = {"chairs": []}
with open(path) as instream:
for line in instream:
if re.match(r"^\s*$", line):
continue
key, value = line.rstrip().split(" ", maxsplit=1)
if key.startswith("chair"):
meta["chairs"].append(value)
else:
meta[key] = value
if "volume" in meta and re.match(rf"^[a-z0-1]+$", meta["volume"]) is None:
raise Exception(f"Invalid volume key '{meta['volume']}' in {path}")
return meta
def maybe_copy(source_path, dest_path):
"""Copies the file if it's different from the target."""
if not os.path.exists(dest_path) or compute_hash_from_file(
source_path
) != compute_hash_from_file(dest_path):
log(f"Copying {source_path} -> {dest_path}", args.dry_run)
shutil.copyfile(source_path, dest_path)
def bib2xml(bibfilename, anthology_id):
"""
Moved here from ACLPUB's anthology_xml.py script.
"""
fields = [
'title',
'author',
'editor',
'booktitle',
'month',
'year',
'address',
'publisher',
'pages',
'abstract',
'url',
'doi',
'language',
]
try:
collection_id, volume_name, paper_no = deconstruct_anthology_id(anthology_id)
except ValueError:
print(f"Couldn't split {anthology_id}", file=sys.stderr)
sys.exit(1)
if paper_no == '':
return # skip the master bib file; we only process the individual files
bibdata = read_bibtex(bibfilename)
if len(bibdata.entries) != 1:
log(f"more than one entry in {bibfilename}")
bibkey, bibentry = bibdata.entries.items()[0]
if len(bibentry.fields) == 0:
log(f"parsing bib of paper {paper_no} failed")
sys.exit(1)
paper = make_simple_element("paper", attrib={"id": paper_no})
for field in list(bibentry.fields) + list(bibentry.persons):
if field not in fields:
log(f"unknown field {field}")
for field in fields:
if field in ['author', 'editor']:
if field in bibentry.persons:
for person in bibentry.persons[field]:
first_text = ' '.join(person.bibtex_first_names)
last_text = ' '.join(person.prelast_names + person.last_names)
if person.lineage_names:
last_text += ', ' + ' '.join(person.lineage_names)
# Don't distinguish between authors that have only a first name
# vs. authors that have only a last name; always make it a last name.
if last_text.strip() in [
'',
'-',
]: # Some START users have '-' for null
last_text = first_text
first_text = ''
name_node = make_simple_element(field, parent=paper)
make_simple_element("first", first_text, parent=name_node)
make_simple_element("last", last_text, parent=name_node)
else:
if field == 'url':
value = f"{anthology_id}"
elif field in bibentry.fields:
value = bibentry.fields[field]
elif field == 'bibtype':
value = bibentry.type
elif field == 'bibkey':
value = bibkey
else:
continue
try:
make_simple_element(field, text=value, parent=paper)
except:
print(
f"Couldn't process {bibfilename} for {anthology_id}", file=sys.stderr
)
sys.exit(2)
return paper
def main(args):
collections = defaultdict(OrderedDict)
volumes = {}
anthology_datadir = os.path.join(os.path.dirname(sys.argv[0]), "..", "data")
venue_index = VenueIndex(srcdir=anthology_datadir)
venue_keys = [venue["slug"].lower() for _, venue in venue_index.items()]
sig_index = SIGIndex(srcdir=anthology_datadir)
# Build list of volumes, confirm uniqueness
unseen_venues = []
for proceedings in args.proceedings:
meta = read_meta(os.path.join(proceedings, "meta"))
venue_abbrev = meta["abbrev"]
venue_slug = venue_index.get_slug(venue_abbrev)
if str(datetime.now().year) in venue_abbrev:
print(f"Fatal: Venue assembler put year in acronym: '{venue_abbrev}'")
sys.exit(1)
if re.match(r".*\d$", venue_abbrev) is not None:
print(
f"WARNING: Venue {venue_abbrev} ends in a number, this is probably a mistake"
)
if venue_slug not in venue_keys:
unseen_venues.append((venue_slug, venue_abbrev, meta["title"]))
meta["path"] = proceedings
meta["collection_id"] = collection_id = meta["year"] + "." + venue_slug
volume_name = meta["volume"].lower()
volume_full_id = f"{collection_id}-{volume_name}"
if volume_full_id in volumes:
print("Error: ")
collections[collection_id][volume_name] = {}
volumes[volume_full_id] = meta
if "sig" in meta:
print(
f"Add this line to {anthology_datadir}/sigs/{meta['sig'].lower()}.yaml:"
)
print(f" - {meta['year']}:")
print(f" - {volume_full_id} # {meta['booktitle']}")
# Make sure all venues exist
if len(unseen_venues) > 0:
for venue in unseen_venues:
slug, abbrev, title = venue
print(f"Creating venue '{abbrev}' ({title})")
venue_index.add_venue(abbrev, title)
venue_index.dump(directory=anthology_datadir)
# Copy over the PDFs and attachments
for volume, meta in volumes.items():
root_path = os.path.join(meta["path"], "cdrom")
collection_id = meta["collection_id"]
venue_name = meta["abbrev"].lower()
volume_name = meta["volume"].lower()
year = meta["year"]
pdfs_dest_dir = os.path.join(args.pdfs_dir, venue_name)
if not os.path.exists(pdfs_dest_dir):
os.makedirs(pdfs_dest_dir)
# copy the book
book_dest_path = (
os.path.join(pdfs_dest_dir, f"{collection_id}-{volume_name}") + ".pdf"
)
# try the standard filename, e.g., 2021.naacl-main.pdf
book_src_filename = f'{year}.{meta["abbrev"]}-{volume_name}.pdf'
book_src_path = os.path.join(root_path, book_src_filename)
if not os.path.exists(book_src_path):
# try a different filename, e.g., "NLP4CALL-2021.pdf"
book_src_filename = f'{meta["abbrev"]}-{year}.pdf'
book_src_path = os.path.join(root_path, book_src_filename)
if os.path.exists(book_src_path) and not args.dry_run:
maybe_copy(book_src_path, book_dest_path)
# copy the paper PDFs
pdf_src_dir = os.path.join(root_path, "pdf")
for pdf_file in os.listdir(pdf_src_dir):
# Skip . files
if os.path.basename(pdf_file).startswith("."):
continue
# names are {abbrev}{number}.pdf
match = re.match(rf".*\.(\d+)\.pdf", pdf_file)
if match is not None:
paper_num = int(match[1])
paper_id_full = f"{collection_id}-{volume_name}.{paper_num}"
bib_path = os.path.join(
root_path,
"bib",
pdf_file.replace("/pdf", "/bib/").replace(".pdf", ".bib"),
)
pdf_src_path = os.path.join(pdf_src_dir, pdf_file)
pdf_dest_path = os.path.join(
pdfs_dest_dir, f"{collection_id}-{volume_name}.{paper_num}.pdf"
)
if not args.dry_run:
maybe_copy(pdf_src_path, pdf_dest_path)
collections[collection_id][volume_name][paper_num] = {
"anthology_id": paper_id_full,
"bib": bib_path,
"pdf": pdf_dest_path,
"attachments": [],
}
# copy the attachments
if os.path.exists(os.path.join(root_path, "additional")):
attachments_dest_dir = os.path.join(args.attachments_dir, venue_name)
if not os.path.exists(attachments_dest_dir):
os.makedirs(attachments_dest_dir)
for attachment_file in os.listdir(os.path.join(root_path, "additional")):
if os.path.basename(attachment_file).startswith("."):
continue
attachment_file_path = os.path.join(
root_path, "additional", attachment_file
)
match = re.match(
rf"{year}\.{venue_name}-\w+\.(\d+)_?(\w+)\.(\w+)$", attachment_file
)
if match is None:
print(
f"* Warning: no attachment match for {attachment_file}",
file=sys.stderr,
)
sys.exit(2)
paper_num, type_, ext = match.groups()
paper_num = int(paper_num)
file_name = f"{collection_id}-{volume_name}.{paper_num}.{type_}.{ext}"
dest_path = os.path.join(attachments_dest_dir, file_name)
if not args.dry_run and not os.path.exists(dest_path):
log(f"Copying {attachment_file} -> {dest_path}", args.dry_run)
shutil.copyfile(attachment_file_path, dest_path)
collections[collection_id][volume_name][paper_num]["attachments"].append(
(dest_path, type_)
)
people = AnthologyIndex(None, srcdir=anthology_datadir)
def correct_caps(person, name_node, anth_id):
"""
Many people submit their names in "ALL CAPS" or "all lowercase".
Correct this with heuristics.
"""
name = name_node.text
if name.islower() or name.isupper():
# capitalize all parts
corrected = " ".join(list(map(lambda x: x.capitalize(), name.split())))
print(
f"-> Correcting capitalization of '{name}' to '{corrected}'",
file=sys.stderr,
)
name_node.text = corrected
def disambiguate_name(node, anth_id):
name = PersonName.from_element(node)
ids = people.get_ids(name)
if len(ids) > 1:
choice = -1
while choice < 0 or choice >= len(ids):
print(
f"({anth_id}): ambiguous author {name}; Please choose from the following:"
)
for i, id_ in enumerate(ids):
print(f"[{i}] {id_} ({people.get_comment(id_)})")
choice = int(input("--> "))
node.attrib["id"] = ids[choice]
for collection_id, collection in collections.items():
# Newly added volumes, so we can normalize and name-disambig later
newly_added_volumes = []
collection_file = os.path.join(
args.anthology_dir, "data", "xml", f"{collection_id}.xml"
)
if os.path.exists(collection_file):
root_node = etree.parse(collection_file).getroot()
else:
root_node = make_simple_element("collection", attrib={"id": collection_id})
for volume_id, volume in collection.items():
volume_node = make_simple_element(
"volume",
attrib={"id": volume_id, "ingest-date": args.ingest_date},
)
# Replace the existing one if present
existing_volume_node = root_node.find(f"./volume[@id='{volume_id}']")
for i, child in enumerate(root_node):
if child.attrib["id"] == volume_id:
root_node[i] = volume_node
break
else:
root_node.append(volume_node)
meta_node = None
for paper_num, paper in sorted(volume.items()):
paper_id_full = paper["anthology_id"]
bibfile = paper["bib"]
paper_node = bib2xml(bibfile, paper_id_full)
if paper_node.attrib["id"] == "0":
# create metadata subtree
meta_node = make_simple_element("meta", parent=volume_node)
title_node = paper_node.find("title")
title_node.tag = "booktitle"
meta_node.append(title_node)
for author_or_editor in chain(
paper_node.findall("./author"), paper_node.findall("./editor")
):
meta_node.append(author_or_editor)
author_or_editor.tag = "editor"
meta_node.append(paper_node.find("publisher"))
meta_node.append(paper_node.find("address"))
meta_node.append(paper_node.find("month"))
meta_node.append(paper_node.find("year"))
if book_dest_path is not None:
make_simple_element(
"url",
text=f"{collection_id}-{volume_name}",
attrib={"hash": compute_hash_from_file(book_dest_path)},
parent=meta_node,
)
# modify frontmatter tag
paper_node.tag = "frontmatter"
del paper_node.attrib["id"]
else:
# remove unneeded fields
for child in paper_node:
if child.tag in [
"editor",
"address",
"booktitle",
"publisher",
"year",
"month",
]:
paper_node.remove(child)
url = paper_node.find("./url")
if url is not None:
url.attrib["hash"] = compute_hash_from_file(paper["pdf"])
for path, type_ in paper["attachments"]:
make_simple_element(
"attachment",
text=os.path.basename(path),
attrib={
"type": type_,
"hash": compute_hash_from_file(path),
},
parent=paper_node,
)
if len(paper_node) > 0:
volume_node.append(paper_node)
# Normalize
for oldnode in paper_node:
normalize(oldnode, informat="latex")
# Adjust the language tag
language_node = paper_node.find("./language")
if language_node is not None:
try:
lang = iso639.languages.get(name=language_node.text)
except KeyError:
raise Exception(f"Can't find language '{language_node.text}'")
language_node.text = lang.part3
print(language_node.text)
# Fix author names
for name_node in chain(
paper_node.findall("./author"), paper_node.findall("./editor")
):
disambiguate_name(name_node, paper_id_full)
person = PersonName.from_element(name_node)
for name_part in name_node:
correct_caps(person, name_part, paper_id_full)
# Other data from the meta file
if "isbn" in meta:
make_simple_element("isbn", meta["isbn"], parent=meta_node)
indent(root_node)
tree = etree.ElementTree(root_node)
tree.write(
collection_file, encoding="UTF-8", xml_declaration=True, with_tail=True
)
if __name__ == "__main__":
now = datetime.now()
today = f"{now.year}-{now.month:02d}-{now.day:02d}"
parser = argparse.ArgumentParser()
parser.add_argument(
"proceedings", nargs="+", help="List of paths to ACLPUB proceedings/ directories."
)
parser.add_argument(
"--ingest-date",
"-d",
type=str,
default=today,
help="Ingestion date as YYYY-MM-DD. Default: %(default)s.",
)
anthology_path = os.path.join(os.path.dirname(sys.argv[0]), "..")
parser.add_argument(
"--anthology-dir",
"-r",
default=anthology_path,
help="Root path of ACL Anthology Github repo. Default: %(default)s.",
)
pdfs_path = os.path.join(os.environ["HOME"], "anthology-files", "pdf")
parser.add_argument(
"--pdfs-dir", "-p", default=pdfs_path, help="Root path for placement of PDF files"
)
attachments_path = os.path.join(os.environ["HOME"], "anthology-files", "attachments")
parser.add_argument(
"--attachments-dir",
"-a",
default=attachments_path,
help="Root path for placement of PDF files",
)
parser.add_argument(
"--dry-run", "-n", action="store_true", help="Don't actually copy anything."
)
args = parser.parse_args()
main(args)
|
"""
This module contains shared fixtures.
"""
import json
import pytest
import selenium.webdriver
@pytest.fixture(scope='session')
def config():
# Read JSON file
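    # Expected keys: 'browser' and 'implicit_wait'. Example file (hypothetical values):
    #   {"browser": "Headless Chrome", "implicit_wait": 10}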
with open('config.json') as config_file:
config = json.load(config_file)
# Assert values are acceptable
assert config['browser'] in ['Firefox', 'Chrome', 'Headless Chrome']
    assert isinstance(config['implicit_wait'], int)
    assert config['implicit_wait'] > 0
    # Return config
return config
@pytest.fixture
def browser(config):
# Initialize the ChromeDriver instance
if config['browser'] == 'Firefox':
b = selenium.webdriver.Firefox()
elif config['browser'] == 'Chrome':
b = selenium.webdriver.Chrome()
elif config['browser'] == 'Headless Chrome':
opts = selenium.webdriver.ChromeOptions()
opts.add_argument('headless')
b = selenium.webdriver.Chrome(options=opts)
else:
        raise Exception(f'Browser "{config["browser"]}" is not supported')
    # Make its calls wait up to config['implicit_wait'] seconds for elements to appear
b.implicitly_wait(config['implicit_wait'])
# Return the WebDriver instance for the setup
yield b
# Quit the WebDriver instance for the cleanup
b.quit()
| """
This module contains shared fixtures.
"""
import json
import pytest
import selenium.webdriver
@pytest.fixture
def config(scope='session'):
# Read JSON file
with open('config.json') as config_file:
config = json.load(config_file)
# Assert values are acceptable
assert config['browser'] in ['Firefox', 'Chrome', 'Headless Chrome']
assert isinstance(config['implicit_wait'],int)
assert config['implicit_wait']>0
#Return config
return config
@pytest.fixture
def browser(config):
# Initialize the ChromeDriver instance
if config['browser'] == 'Firefox':
b = selenium.webdriver.Firefox()
elif config['browser'] == 'Chrome':
b = selenium.webdriver.Chrome()
elif config['browser'] == 'Headless Chrome':
opts = selenium.webdriver.ChromeOptions()
opts.add_argument('headless')
b = selenium.webdriver.Chrome(options=opts)
else:
raise Exception(f'Browser "{config["browser"]}" is not supported')
# Make its calls wait up to 10 seconds for elements to appear
b.implicitly_wait(config['implicit_wait'])
# Return the WebDriver instance for the setup
yield b
# Quit the WebDriver instance for the cleanup
b.quit()
|
# Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import logging
import math
import os
import random
import string
import tempfile
import time
from collections import OrderedDict
import pytest
import sqlalchemy
import datetime
from streamsets.sdk.utils import Version
from streamsets.testframework.environments.databases import Db2Database, OracleDatabase, SQLServerDatabase, PostgreSqlDatabase
from streamsets.testframework.markers import credentialstore, database, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
ROWS_IN_DATABASE = [
{'id': 1, 'name': 'Dima'},
{'id': 2, 'name': 'Jarcec'},
{'id': 3, 'name': 'Arvind'}
]
ROWS_TO_UPDATE = [
{'id': 2, 'name': 'Eddie'},
{'id': 4, 'name': 'Jarcec'}
]
LOOKUP_RAW_DATA = ['id'] + [str(row['id']) for row in ROWS_IN_DATABASE]
RAW_DATA = ['name'] + [row['name'] for row in ROWS_IN_DATABASE]
DEFAULT_DB2_SCHEMA = 'DB2INST1'
@database
def test_jdbc_multitable_consumer_origin_simple(sdc_builder, sdc_executor, database):
"""
Check if Jdbc Multi-table Origin can retrieve any records from a table.
Destination is Trash.
Verify input and output (via snapshot).
"""
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, 20))
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{src_table_prefix}%'}])
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
# Column names are converted to lower case since Oracle database column names are in upper case.
tuples_to_lower_name = lambda tup: (tup[0].lower(), tup[1])
rows_from_snapshot = [tuples_to_lower_name(list(record.field.items())[1])
for record in snapshot[pipeline[0].instance_name].output]
assert rows_from_snapshot == [('name', row['name']) for row in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_consumer_offset_resume(sdc_builder, sdc_executor, database):
"""Ensure that the Query consumer can resume where it ended and stop the pipeline when it reads all the data."""
if isinstance(database, OracleDatabase):
pytest.skip('This test does not support oracle and its upper casing of column names.')
metadata = sqlalchemy.MetaData()
table_name = get_random_string(string.ascii_lowercase, 20)
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Query Consumer')
origin.incremental_mode = True
origin.sql_query = 'SELECT * FROM {0} WHERE '.format(table_name) + 'id > ${OFFSET} ORDER BY id'
origin.initial_offset = '0'
origin.offset_column = 'id'
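    # ${OFFSET} is resolved by the JDBC Query Consumer itself: in incremental mode it substitutes the last
    # stored value of the offset column, so every run resumes where the previous one stopped.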
trash = pipeline_builder.add_stage('Trash')
origin >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
origin >= finisher
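    # In the SDK pipeline builder, `>>` wires data lanes while `>=` wires event lanes, so the finisher
    # above is only triggered by the origin's events (here: the no-more-data event).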
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
connection = database.engine.connect()
for i in range(len(ROWS_IN_DATABASE)):
# Insert one row to the database
connection.execute(table.insert(), [ROWS_IN_DATABASE[i]])
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
assert len(snapshot[origin].output) == 1
assert snapshot[origin].output[0].get_field_data('/id') == i + 1
# TLKT-249: Add wait_for_finished to get_status object
sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_consumer_non_incremental_mode(sdc_builder, sdc_executor, database):
"""Ensure that the Query consumer works properly in non-incremental mode."""
if database.type == 'Oracle':
pytest.skip("This test depends on proper case for column names that Oracle auto-uppers.")
metadata = sqlalchemy.MetaData()
table_name = get_random_string(string.ascii_lowercase, 20)
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Query Consumer')
origin.incremental_mode = False
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
trash = pipeline_builder.add_stage('Trash')
origin >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
origin >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
# Run the pipeline N times, it should always read the same
for i in range(3):
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
assert len(snapshot[origin].output) == len(ROWS_IN_DATABASE)
assert snapshot[origin].output[0].get_field_data('/id') == 1
assert snapshot[origin].output[1].get_field_data('/id') == 2
assert snapshot[origin].output[2].get_field_data('/id') == 3
# TLKT-249: Add wait_for_finished to get_status object
sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
finally:
logger.info('Jdbc No More Data: Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_multitable_consumer_with_finisher(sdc_builder, sdc_executor, database):
"""
Test reading with Multi-table JDBC, output to trash.
Test some table names that start with numbers (SDC-5381).
    Check if Pipeline Finisher Executor works correctly.
"""
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{src_table_prefix}%'}])
finisher = pipeline_builder.add_stage('Pipeline Finisher Executor')
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >= finisher
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
random.seed()
tables = []
metadata = sqlalchemy.MetaData()
try:
connection = database.engine.connect()
num_letters = 10
num_recs = 10
num_tables = 3
for i in range(0, num_tables):
if i % 2 == 1:
# table name starts with a number, contains mixed-case letters.
input_name = '{}_{}_{}'.format(str(i), src_table_prefix,
get_random_string(string.ascii_lowercase, num_letters))
else:
# table name comprised of mixed-case letters only.
input_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, num_letters))
tables.append(sqlalchemy.Table(
input_name,
metadata,
sqlalchemy.Column('serial', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('data', sqlalchemy.Integer)
))
tables[i].create(database.engine)
rows = [{'serial': j, 'data': random.randint(0, 2100000000)} for j in range(1, num_recs + 1)]
connection.execute(tables[i].insert(), rows)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
finally:
for table in tables:
table.drop(database.engine)
# SDC-11009: Run away pipeline runners in JDBC Multithread origins when no-more-data generation delay is configured
@database
@sdc_min_version('3.2.0')
def test_jdbc_multitable_consumer_with_no_more_data_event_generation_delay(sdc_builder, sdc_executor, database):
"""
Make sure that when a delayed no-more-data is being processed, the pipeline properly waits on the processing to
finish before stopping.
source >> trash
>= delay (only for no-more-data) >> trash
"""
src_table = get_random_string(string.ascii_lowercase, 6)
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.no_more_data_event_generation_delay_in_seconds = 1
jdbc_multitable_consumer.table_configs = [{"tablePattern": f'%{src_table}%'}]
trash = pipeline_builder.add_stage('Trash')
delay = pipeline_builder.add_stage('Delay')
delay.delay_between_batches = 10 * 1000
delay.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
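    # delay_between_batches is configured in milliseconds (10 seconds here); the batch-processing timer
    # assertion below relies on this. The precondition limits the Delay stage to the no-more-data event,
    # so only that single event reaches the event-side Trash (see the metric assertions below).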
trash_event = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
jdbc_multitable_consumer >= delay
delay >> trash_event
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
    metadata = sqlalchemy.MetaData()
    table = None
    try:
connection = database.engine.connect()
table = sqlalchemy.Table(
src_table,
metadata,
sqlalchemy.Column('serial', sqlalchemy.Integer, primary_key=True)
)
table.create(database.engine)
rows = [{'serial': 1}]
connection.execute(table.insert(), rows)
# We start the pipeline
sdc_executor.start_pipeline(pipeline)
# We wait three seconds - one second for the no-more-data to be generated and then some buffer time
time.sleep(3)
        # Then we try to stop the pipeline; it should not stop immediately and should in fact wait
sdc_executor.stop_pipeline(pipeline).wait_for_stopped()
current_status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
assert current_status == 'STOPPED'
# Validate expected metrics
history = sdc_executor.get_pipeline_history(pipeline)
# Total number of input records
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1
# 1 record, 1 no-more-data (rest of events is discarded)
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 2
# The table itself contained only one record
assert history.latest.metrics.counter('stage.Trash_01.inputRecords.counter').count == 1
# Only no-more-data event should reach the destination
assert history.latest.metrics.counter('stage.Trash_02.inputRecords.counter').count == 1
        # The max batch time should be slightly more than 10 seconds (the delayed batch that we caused)
# TODO: TLKT-167: Add access methods to metric objects
assert history.latest.metrics.timer('pipeline.batchProcessing.timer')._data.get('max') >= 10
finally:
if table is not None:
table.drop(database.engine)
def _get_random_name(database, prefix='', length=5):
"""Generate a random string to use as a database object name.
It handles letter case according to the database type, forcing upper-case (e.g. Oracle) or lower-case
(e.g. Postgres).
Args:
database: a :obj:`streamsets.testframework.environment.Database` object.
prefix: (:obj:`str`) add a prefix to the generated name. Default: ''.
length: (:obj:`int`) number of characters of the generated name (without counting ``prefix``).
"""
if isinstance(database, OracleDatabase):
name = '{}{}'.format(prefix.upper(), get_random_string(string.ascii_uppercase))
else:
name = '{}{}'.format(prefix.lower(), get_random_string(string.ascii_lowercase))
return name
def _create_table(table_name, database, schema_name=None):
"""Helper function to create a table with two columns: id (int, PK) and name (str).
Args:
table_name: (:obj:`str`) the name for the new table.
database: a :obj:`streamsets.testframework.environment.Database` object.
schema_name: (:obj:`str`, optional) when provided, create the new table in a specific schema; otherwise,
            the default schema for the engine's database connection is used.
Return:
The new table as a sqlalchemy.Table object.
"""
metadata = sqlalchemy.MetaData()
if type(database) == SQLServerDatabase:
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('name', sqlalchemy.String(32)),
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
autoincrement=False),
schema=schema_name)
else:
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('name', sqlalchemy.String(32)),
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
schema=schema_name)
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
return table
def _create_schema(schema_name, database):
"""Create a new schema in the database.
For RDBMs with no distinction between schema and database (e.g. MySQL), it creates a new database. For Oracle, it
creates a new user. For databases with schema objects, it creates a new schema.
Use ``_drop_schema()`` to remove schemas created by this function, to handle properly each case.
Args:
schema_name: (:obj:`str`) the schema name.
database: a :obj:`streamsets.testframework.environment.Database` object.
"""
if isinstance(database, OracleDatabase):
database.engine.execute('CREATE USER {user} IDENTIFIED BY {pwd}'.format(user=schema_name, pwd=schema_name))
database.engine.execute('GRANT CONNECT, RESOURCE TO {user}'.format(user=schema_name))
else:
schema = sqlalchemy.schema.CreateSchema(schema_name)
database.engine.execute(schema)
def _drop_schema(schema_name, database):
"""Remove a schema from the given database.
Args:
schema_name: (:obj:`str`) name of the schema to remove.
database: a :obj:`streamsets.testframework.environment.Database` object.
"""
if isinstance(database, OracleDatabase):
database.engine.execute('DROP USER {user} CASCADE'.format(user=schema_name))
else:
        database.engine.execute(sqlalchemy.schema.DropSchema(schema_name))
@credentialstore
@database
def test_jdbc_lookup_processor(sdc_builder, sdc_executor, database):
"""Simple JDBC Lookup processor test.
Pipeline will enrich records with the 'name' by adding a field as 'FirstName'.
The pipeline looks like:
dev_raw_data_source >> jdbc_lookup >> trash
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(LOOKUP_RAW_DATA))
jdbc_lookup = pipeline_builder.add_stage('JDBC Lookup')
query_str = f"SELECT name FROM {table_name} WHERE id = "${{record:value("/id")}}'"
column_mappings = [dict(dataType='USE_COLUMN_TYPE',
columnName='name',
field='/FirstName')]
jdbc_lookup.set_attributes(sql_query=query_str,
column_mappings=column_mappings)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> jdbc_lookup >> trash
pipeline = pipeline_builder.build(title='JDBC Lookup').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
LOOKUP_EXPECTED_DATA = copy.deepcopy(ROWS_IN_DATABASE)
for record in LOOKUP_EXPECTED_DATA:
record.pop('id')
record['FirstName'] = record.pop('name')
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
rows_from_snapshot = [{list(record.field.keys())[1]: list(record.field.values())[1].value}
for record in snapshot[jdbc_lookup].output]
assert rows_from_snapshot == LOOKUP_EXPECTED_DATA
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_tee_processor(sdc_builder, sdc_executor, database):
"""Simple JDBC Tee processor test.
Pipeline will insert records into database and then pass generated database column 'id' to fields.
The pipeline looks like:
dev_raw_data_source >> jdbc_tee >> trash
"""
if isinstance(database, OracleDatabase):
pytest.skip('JDBC Tee Processor does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('JDBC Tee Processor does not support SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(RAW_DATA))
jdbc_tee = pipeline_builder.add_stage('JDBC Tee')
# Note that here ids are not inserted. Database generates them automatically.
field_to_column_mapping = [dict(columnName='name',
dataType='USE_COLUMN_TYPE',
field='/name',
paramValue='?')]
generated_column_mappings = [dict(columnName='id',
dataType='USE_COLUMN_TYPE',
field='/id')]
jdbc_tee.set_attributes(default_operation='INSERT',
field_to_column_mapping=field_to_column_mapping,
generated_column_mappings=generated_column_mappings,
table_name=table_name)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> jdbc_tee >> trash
pipeline = pipeline_builder.build(title='JDBC Tee').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
# Verify the JDBC Tee processor has got new ids which were generated by database.
rows_from_snapshot = [{list(item.field.keys())[0]: list(item.field.values())[0].value,
list(item.field.keys())[1]: int(list(item.field.values())[1].value)}
for item in snapshot[jdbc_tee].output]
assert rows_from_snapshot == ROWS_IN_DATABASE
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@pytest.mark.parametrize('use_multi_row', [True, False])
@sdc_min_version('3.0.0.0') # stop_after_first_batch
def test_jdbc_tee_processor_multi_ops(sdc_builder, sdc_executor, database, use_multi_row):
"""JDBC Tee processor with multiple operations
Pipeline will delete/update/insert records into database with one batch and then update 'id'
field if it is inserted. The 'operation' field is used for the record header sdc.operation.type
which defines the CRUD operation (1: Insert, 2: Delete, 3: Update). The pipeline looks like:
dev_raw_data_source >> expression evaluator >> jdbc_tee >> trash
"""
if isinstance(database, OracleDatabase):
pytest.skip('JDBC Tee Processor does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('JDBC Tee Processor does not support SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
pipeline_builder = sdc_builder.get_pipeline_builder()
DATA = [
{'operation': 2, 'name': 'Jarcec', 'id': 2}, # delete
{'operation': 3, 'name': 'Hari', 'id': 3}, # update
{'operation': 1, 'name': 'Eddie'} # insert, id will be added by JDBC Tee
]
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON',
raw_data='\n'.join(json.dumps(rec) for rec in DATA),
stop_after_first_batch=True)
HEADER_EXPRESSIONS = [dict(attributeToSet='sdc.operation.type',
headerAttributeExpression="${record:value('/operation')}")]
expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
expression_evaluator.header_attribute_expressions = HEADER_EXPRESSIONS
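    # The Expression Evaluator copies each record's /operation value into the sdc.operation.type header,
    # which JDBC Tee uses to pick the CRUD operation per record (1: INSERT, 2: DELETE, 3: UPDATE).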
FIELD_TO_COLUMN = [dict(columnName='name', field='/name', paramValue='?')]
jdbc_tee = pipeline_builder.add_stage('JDBC Tee')
jdbc_tee.set_attributes(default_operation='INSERT',
field_to_column_mapping=FIELD_TO_COLUMN,
generated_column_mappings=[dict(columnName='id', field='/id')],
table_name=table_name,
use_multi_row_operation=use_multi_row)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> expression_evaluator >> jdbc_tee >> trash
pipeline_title = 'JDBC Tee MultiOps MultiRow' if use_multi_row else 'JDBC Tee MultiOps SingleRow'
pipeline = pipeline_builder.build(title=pipeline_title).configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
table = _create_table(table_name, database)
try:
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
# Passing only names to get the correct sequence numbers esp. PostgreSQL
if type(database) == SQLServerDatabase:
connection.execute(table.insert(), [{'id': row['id'], 'name': row['name']} for row in ROWS_IN_DATABASE])
else:
connection.execute(table.insert(), [{'name': row['name']} for row in ROWS_IN_DATABASE])
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sequence_id = len(ROWS_IN_DATABASE)
# Verify the database is updated.
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
expected_data = [(row['name'], row['id']) for row in ROWS_IN_DATABASE]
for record in DATA:
if record['operation'] == 1: # insert
sequence_id += 1
expected_data.append((record['name'], sequence_id))
elif record['operation'] == 2: # delete
expected_data = [row for row in expected_data if row[1] != record['id']]
elif record['operation'] == 3: # update
expected_data = [row if row[1] != record['id'] else (record['name'], row[1]) for row in expected_data]
assert data_from_database == expected_data
# Verify the JDBC Tee processor has the new ID which were generated by database.
jdbc_tee_output = snapshot[jdbc_tee].output
name_id_from_output = [(record.field['name'], record.field['id']) for record in jdbc_tee_output]
assert name_id_from_output == [('Jarcec', 2), ('Hari', 3), ('Eddie', sequence_id)]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_query_executor(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
query_str = f"INSERT INTO {table_name} (name, id) VALUES ("${{record:value("/name")}}', '${{record:value("/id")}}')"
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
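    # SDC 3.14.0 replaced the executor's single sql_query attribute with the sql_queries list, hence the
    # version branch above (and in the tests below).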
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(RAW_DATA) - 1)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.14.0') # multiple queries execution
def test_jdbc_query_executor_multiple_queries(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
"""
table_name = f'stf_{get_random_string(string.ascii_lowercase, 20)}'
table = _create_table(table_name, database)
ROWS_IN_DATABASE_UPDATED = [
{'id': 1, 'name': 'Alex'},
{'id': 2, 'name': 'Alex'},
{'id': 3, 'name': 'Alex'}
]
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
    query_str1 = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
    query_str2 = f"UPDATE {table_name} SET name = 'Alex' WHERE name = '${{record:value('/name')}}'"
jdbc_query_executor.set_attributes(sql_queries=[query_str1, query_str2])
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(RAW_DATA) - 1)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE_UPDATED]
finally:
logger.info(f'Dropping table {table_name} in {database.type} database ...')
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_successful_query_event(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database. Event records are verified for successful-query event type.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str = f"INSERT INTO {table_name} (name, id) VALUES ("${{record:value("/name")}}', '${{record:value("/id")}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_insert_query_result_count(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type and query result count enabled.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database. Event records are verified for successful-query event type
and query-result field for the insert query.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str = f"INSERT INTO {table_name} (name, id) VALUES ("${{record:value("/name")}}', '${{record:value("/id")}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
jdbc_query_executor.set_attributes(include_query_result_count_in_events=True)
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
assert '1 row(s) affected' == event_records[0].value['value']['query-result']['value']
assert '1 row(s) affected' == event_records[1].value['value']['query-result']['value']
assert '1 row(s) affected' == event_records[2].value['value']['query-result']['value']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.0.0.0')
def test_jdbc_query_executor_lifecycle_events(sdc_builder, sdc_executor, database):
"""Verify that the JDBC Query Executor will work properly when used inside pipeline lifecycle stages."""
if isinstance(database, OracleDatabase):
pytest.skip('This test does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('This test does not support SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('user', sqlalchemy.String(50)),
sqlalchemy.Column('event', sqlalchemy.String(50)))
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
query = f"INSERT INTO {table_name} VALUES ("${{record:value("/user")}}', '${{record:attribute("sdc.event.type")}}')"
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'TEXT'
    source.raw_data = 'SOMETHING'
trash = builder.add_stage('Trash')
start_stage = builder.add_start_event_stage('JDBC Query')
if Version(sdc_builder.version) < Version('3.14.0'):
start_stage.set_attributes(sql_query=query)
else:
start_stage.set_attributes(sql_queries=[query])
stop_stage = builder.add_stop_event_stage('JDBC Query')
if Version(sdc_builder.version) < Version('3.14.0'):
stop_stage.set_attributes(sql_query=query)
else:
stop_stage.set_attributes(sql_queries=[query])
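    # Start/stop event stages sit outside the regular data flow: each one receives a single lifecycle
    # event record when the pipeline starts or stops, which the query above turns into one table row.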
source >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[1])
result.close()
assert db[0][0] == 'admin'
assert db[0][1] == 'pipeline-start'
assert db[1][0] == ''
assert db[1][1] == 'pipeline-stop'
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_query_executor_failure_state(sdc_builder, sdc_executor, database):
"""Verify that the executor is properly called with the proper state on pipeline initialization failure."""
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('reason', sqlalchemy.String(50)))
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
query = f"INSERT INTO {table_name} VALUES ("${{record:value("/reason")}}')"
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('JDBC Multitable Consumer')
    source.table_configs = [{"tablePattern": 'this_table_do_not_exists'}]
trash = builder.add_stage('Trash')
stop_stage = builder.add_stop_event_stage('JDBC Query')
if Version(sdc_builder.version) < Version('3.14.0'):
stop_stage.set_attributes(sql_query=query)
else:
stop_stage.set_attributes(sql_queries=[query])
source >> trash
pipeline = builder.build().configure_for_environment(database)
    # Injecting failure - this URL doesn't exist, so the pipeline won't be able to start properly
source.jdbc_connection_string = "jdbc:mysql://this-do-not-exists:3306/awesome-db"
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline, wait=False).wait_for_status('START_ERROR', ignore_errors=True)
result = database.engine.execute(table.select())
db = result.fetchall()
result.close()
assert db[0][0] == 'FAILURE'
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_select_query_result_count(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type and query result count enabled.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database and then the same data is queried. Event records are
verified for successful-query event type and query-result field for the select query.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor1 >= jdbc_query_executor2 >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
    query_str1 = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
query_str2 = f"SELECT * FROM {table_name}"
jdbc_query_executor1 = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor1.set_attributes(sql_query=query_str1)
else:
jdbc_query_executor1.set_attributes(sql_queries=[query_str1])
jdbc_query_executor2 = pipeline_builder.add_stage('JDBC Query', type='executor')
jdbc_query_executor2.set_attributes(include_query_result_count_in_events=True)
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor2.set_attributes(sql_query=query_str2)
else:
jdbc_query_executor2.set_attributes(sql_queries=[query_str2])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor1 >= jdbc_query_executor2 >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor2.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
assert '3 row(s) returned' == event_records[0].value['value']['query-result']['value']
assert '3 row(s) returned' == event_records[1].value['value']['query-result']['value']
assert '3 row(s) returned' == event_records[2].value['value']['query-result']['value']
result = database.engine.execute(table.select())
result.close()
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_failed_query_event(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for failed-query event type.
Pipeline will try to insert records into a non-existing table and the query would fail.
Event records are verified for failed-query event type.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
invalid_table = "INVALID_TABLE"
query_str = f"INSERT INTO {invalid_table} (name, id) VALUES ("${{record:value("/name")}}', '${{record:value("/id")}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'failed-query' == event_records[0].header['values']['sdc.event.type']
assert 'failed-query' == event_records[1].header['values']['sdc.event.type']
assert 'failed-query' == event_records[2].header['values']['sdc.event.type']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == []
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.10.0')
@pytest.mark.parametrize('enable_parallel_execution', [True, False])
def test_jdbc_query_executor_parallel_query_execution(sdc_builder, sdc_executor, database, enable_parallel_execution):
"""Test JDBC Query Executor's parallel query execution mode.
Pipeline will insert records into database, then update the records.
Using sqlalchemy, we verify that correct data was inserted (and updated) in the database.
Pipeline configuration:
dev_raw_data_source >> jdbc_query_executor
"""
table_name = get_random_string(string.ascii_uppercase, 20)
table = _create_table(table_name, database)
    # Make sure that we properly escape the table name. Ideally we would escape it for all databases, but since
    # we know that all except PostgreSQL are passing, we only escape it for PostgreSQL for now.
enclosed_table = f'"{table_name}"' if type(database) == PostgreSqlDatabase else table_name
# first, the inserts - they will run in parallel,
# then all the updates will run sequentially
# net result is all records should get updated to the (last) new value.
# otherwise we've failed.
statements = []
for rec in ROWS_IN_DATABASE:
        statements.extend([f"INSERT INTO {enclosed_table} (name, id) VALUES ('{rec['name']}', {rec['id']})",
                           f"UPDATE {enclosed_table} SET name = 'bob' WHERE id = {rec['id']}",
                           f"UPDATE {enclosed_table} SET name = 'MERRICK' WHERE id = {rec['id']}"])
# convert to string - Dev Raw Data Source Data Format tab does not seem
# to "unroll" the array into newline-terminated records.
statements = "\n".join(statements)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=statements)
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
query_str = "${record:value('/text')}"
jdbc_query_executor.set_attributes(enable_parallel_queries=enable_parallel_execution,
maximum_pool_size=2,
minimum_idle_connections=2)
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
dev_raw_data_source >> jdbc_query_executor
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE)*3)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [('MERRICK', record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
def _create_jdbc_producer_pipeline(pipeline_builder, pipeline_title, raw_data, table_name, operation):
"""Helper function to create and return a pipeline with JDBC Producer
    The Deduplicator assures there is only one ingest to the database. The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
FIELD_MAPPINGS = [dict(field='/id', columnName='id'),
dict(field='/name', columnName='name')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation=operation,
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
return pipeline_builder.build(title=pipeline_title)
@database
def test_jdbc_producer_insert(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with INSERT operation.
    The pipeline inserts records into the database and verifies that the correct data is in the database.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Insert', DATA, table_name, 'INSERT')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database('mysql', 'postgresql')
def test_jdbc_producer_insert_type_err(sdc_builder, sdc_executor, database):
"""This test covers invalid type coersion - writing string into int column. As different databases works differently,
we can't assert this across all supported databases. MySQL and PostgreSQL behaves the same way and we can properly
catch and generate JDBC_23. Other databases report coercion issues much later in the query cycle, sometimes even
in a way where we can't understand what and why has happened.
"""
ROWS_IN_DATABASE = [
{'id': 1, 'name': 'Dima'},
{'id': 'X', 'name': 'Jarcec'},
{'id': 3, 'name': 'Arvind'}
]
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON', raw_data=DATA, stop_after_first_batch=True)
FIELD_MAPPINGS = [dict(field='/id', columnName='id', dataType='INTEGER'),
dict(field='/name', columnName='name', dataType='STRING')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation='INSERT',
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='TO_ERROR')
dev_raw_data_source >> jdbc_producer
pipeline = pipeline_builder.build(title="JDBC producer with error")
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE
if record['id'] != 'X']
stage = snapshot[jdbc_producer.instance_name]
assert 'JDBC_23' == stage.error_records[0].header['errorCode']
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_insert_multiple_types(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with INSERT operation.
    The pipeline inserts a batch of records of multiple types.
The pipeline should look like:
dev_data_generator >> jdbc_producer
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
dev_data_generator.fields_to_generate = [
{'field': 'field1', 'type': 'STRING'},
{'field': 'field2', 'type': 'DATETIME'},
{'field': 'field3', 'type': 'INTEGER'},
{'field': 'field4', 'precision': 10, 'scale': 2, 'type': 'DECIMAL'},
{'field': 'field5', 'type': 'DOUBLE'}
]
batch_size = 10000
dev_data_generator.set_attributes(delay_between_batches=0, batch_size=batch_size)
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('field1', sqlalchemy.String(50)),
sqlalchemy.Column('field2', sqlalchemy.DateTime),
sqlalchemy.Column('field3', sqlalchemy.Integer),
sqlalchemy.Column('field4', sqlalchemy.DECIMAL(10, 2)),
sqlalchemy.Column('field5', sqlalchemy.Float),
schema=None)
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
FIELD_MAPPINGS = [dict(field='/field1', columnName='field1', dataType='STRING'),
dict(field='/field2', columnName='field2', dataType='DATETIME'),
dict(field='/field3', columnName='field3', dataType='INTEGER'),
dict(field='/field4', columnName='field4', dataType='DECIMAL'),
dict(field='/field5', columnName='field5', dataType='FLOAT')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation='INSERT',
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='TO_ERROR')
dev_data_generator >> jdbc_producer
pipeline = pipeline_builder.build(title="JDBC producer multiple types")
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(batch_size, timeout_sec=3600)
snapshot = sdc_executor.capture_snapshot(pipeline).snapshot
sdc_executor.stop_pipeline(pipeline).wait_for_stopped()
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert len(data_from_database) > batch_size
stage = snapshot[jdbc_producer.instance_name]
assert len(stage.error_records) == 0
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
# SDC-10786: This test intends to cover the case really precise decimals being inserted into a Float column in MSSQL
@database('sqlserver')
def test_mssql_producer_bigdecimal(sdc_builder, sdc_executor, database):
"""
Insert a Decimal value with up to 38 decimals into a Float column in MSSQL.
This will look like:
dev_data_generator >> jdbc_producer
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = sqlalchemy.Table(
table_name,
sqlalchemy.MetaData(),
sqlalchemy.Column('a_value', sqlalchemy.Float()),
sqlalchemy.Column('b_value', sqlalchemy.Float()),
sqlalchemy.Column('c_value', sqlalchemy.Float()),
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, autoincrement=False)
)
table.create(database.engine)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
dev_data_generator.fields_to_generate = [{'field': 'id', 'type': 'INTEGER'},
{'field': 'a_value', 'precision': 50, 'scale': 40, 'type': 'DECIMAL'},
{'field': 'b_value', 'precision': 5, 'scale': 2, 'type': 'DECIMAL'},
{'field': 'c_value', 'type': 'DECIMAL'}]
dev_data_generator.batch_size = 1
FIELD_MAPPINGS = [dict(field='/id', columnName='id'),
dict(field='/a_value', columnName='a_value'),
dict(field='/b_value', columnName='b_value'),
dict(field='/c_value', columnName='c_value')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation='INSERT',
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
dev_data_generator >> jdbc_producer
pipeline = pipeline_builder.build('MSSQL BigDecimal')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True, wait=True).snapshot
sdc_executor.stop_pipeline(pipeline)
records = [record.field for record in snapshot[dev_data_generator.instance_name].output]
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(data_from_database) == 1
assert math.isclose(float(str(records[0]['a_value'])), data_from_database[0][0], rel_tol=0.02)
assert math.isclose(float(str(records[0]['b_value'])), data_from_database[0][1], rel_tol=0.02)
assert math.isclose(float(str(records[0]['c_value'])), data_from_database[0][2], rel_tol=0.02)
assert math.isclose(float(str(records[0]['id'])), data_from_database[0][3], rel_tol=0.02)
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_coerced_insert(sdc_builder, sdc_executor, database):
"""Extension of the Simple JDBC Producer test with INSERT operation.
The pipeline inserts records into the database.
In one record, data is represented as type String, where column is type Integer.
This should be passed to the database to coerce.
Verify that correct data is in the database.
Please note the use of local COERCE_ROWS_IN_DATABASE to insert
and global ROWS_IN_DATABASE to verify.
COERCE_ has id (integer) set to string.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
COERCE_ROWS_IN_DATABASE = [
{'id': '1', 'name': 'Dima'},
{'id': '2', 'name': 'Jarcec'},
{'id': '3', 'name': 'Arvind'}
]
DATA = '\n'.join(json.dumps(rec) for rec in COERCE_ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Insert', DATA, table_name, 'INSERT')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_delete(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with DELETE operation.
    The pipeline deletes records from the database and verifies that the correct data remains in the database.
    Records are deleted when the primary key matches, irrespective of other column values.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_TO_UPDATE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Delete', DATA, table_name, 'DELETE')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_TO_UPDATE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = result.fetchall()
result.close()
removed_ids = [record['id'] for record in ROWS_TO_UPDATE]
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE if
record['id'] not in removed_ids]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_update(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with UPDATE operation.
    The pipeline updates records in the database and verifies that the correct data is in the database.
    Records with a matching primary key are updated; unmatched records are left unchanged.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_TO_UPDATE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Update', DATA, table_name, 'UPDATE')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_TO_UPDATE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
updated_names = {record['id']: record['name'] for record in ROWS_IN_DATABASE}
updated_names.update({record['id']: record['name'] for record in ROWS_TO_UPDATE})
assert data_from_database == [(updated_names[record['id']], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
# SDC-10987: JDBC Multitable Consumer multiple offset columns with initial offset
@database
def test_jdbc_multitable_consumer_initial_offset_at_the_end(sdc_builder, sdc_executor, database):
"""
Set initial offset at the end of the table and verify that no records were read.
"""
table_name = get_random_string(string.ascii_lowercase, 10)
builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.table_configs = [{
"tablePattern": table_name,
"overrideDefaultOffsetColumns": True,
"offsetColumns": ["id"],
"offsetColumnToInitialOffsetValue": [{
"key": "id",
"value": "5"
}]
}]
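# Illustrative: overriding the offset column to "id" with an initial offset of "5" means the origin
# only picks up rows whose id is greater than 5; the ROWS_IN_DATABASE rows inserted below (ids assumed
# to be 1-3) all fall below that offset, so nothing should be read.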
trash = builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True),
sqlalchemy.Column('name', sqlalchemy.String(32), quote=True)
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline)
# Since the pipeline is not meant to read anything, we 'simply' wait
time.sleep(5)
sdc_executor.stop_pipeline(pipeline)
# There must be no records read
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 0
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
# SDC-10562: Row-level stage errors not being caught at pipeline
@sdc_min_version('3.0.0.0')
@database
def test_jdbc_producer_multirow_with_duplicates(sdc_builder, sdc_executor, database):
"""
Make sure that when using multi-row insert, data-related errors are sent to the error stream.
"""
if type(database) == SQLServerDatabase:
pytest.skip('This test is trying to insert explicit value to identity column which is not supported on SQL Server')
table_name = get_random_string(string.ascii_lowercase, 15)
builder = sdc_builder.get_pipeline_builder()
# Generate batch that will repeat the same primary key in the middle of the batch (on third row)
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = """{"id" : 1}\n{"id" : 2}\n{"id" : 1}\n{"id" : 3}"""
producer = builder.add_stage('JDBC Producer')
producer.table_name = table_name
producer.field_to_column_mapping = []
producer.default_operation = 'INSERT'
producer.use_multi_row_operation = True
if database.type == 'Oracle':
producer.enclose_object_names = True
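# Because the producer runs with multi-row operation enabled, the four records above are expected to be
# grouped into a single multi-row INSERT; the duplicate id=1 should make that whole statement fail, so all
# four records are expected to end up in the error stream and the table should remain empty (verified below).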
source >> producer
pipeline = builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True)
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
# Since we are inserting duplicate primary key, the batch should fail
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 4
assert history.latest.metrics.counter('pipeline.batchErrorRecords.counter').count == 4
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
# And similarly the database side should be empty as well
result = database.engine.execute(table.select())
data_from_database = result.fetchall()
result.close()
assert len(data_from_database) == 0
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_multitable(sdc_builder, sdc_executor, database):
"""Test for JDBC Producer with multiple destination table. We create 3 tables in the default schema and use an EL
expression to insert records according to the /table record field.
Pipeline:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
table1_name = _get_random_name(database, prefix='stf_table_')
table2_name = _get_random_name(database, prefix='stf_table_')
table3_name = _get_random_name(database, prefix='stf_table_')
table1 = _create_table(table1_name, database)
table2 = _create_table(table2_name, database)
table3 = _create_table(table3_name, database)
ROWS = [{'table': table1_name, 'id': 1, 'name': 'Roger Federer'},
{'table': table2_name, 'id': 2, 'name': 'Rafael Nadal'},
{'table': table3_name, 'id': 3, 'name': 'Dominic Thiem'}]
INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multitable Insert', INPUT_DATA,
"${record:value('/table')}", 'INSERT')
# JDBC Producer's "Table Name" property is converted to uppercase through the configure_for_environment() method
# when database is Oracle. However EL function names are case-sensitive; we overwrite it afterwards to avoid an EL
# error.
pipeline.configure_for_environment(database)
pipeline[2].set_attributes(table_name="${record:value('/table')}")
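# Illustrative: at runtime the EL "${record:value('/table')}" resolves per record, e.g. to table1_name
# for the first record above, so each record is routed to the table named in its /table field.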
# For Oracle, the default value of JDBC Producer's "Schema Name" property in the database environment is the
# database name, but it should be the username instead.
if isinstance(database, OracleDatabase):
pipeline[2].set_attributes(schema_name=database.username.upper())
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
sdc_executor.stop_pipeline(pipeline)
result1 = database.engine.execute(table1.select())
result2 = database.engine.execute(table2.select())
result3 = database.engine.execute(table3.select())
data1 = result1.fetchall()
data2 = result2.fetchall()
data3 = result3.fetchall()
assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
result1.close()
result2.close()
result3.close()
finally:
logger.info('Dropping tables %s, %s, %s in %s database...', table1_name, table2_name, table3_name,
database.type)
table1.drop(database.engine)
table2.drop(database.engine)
table3.drop(database.engine)
# Test SDC-10719
@database
@sdc_min_version('3.8.0')
def test_jdbc_producer_multischema(sdc_builder, sdc_executor, database):
"""Test for JDBC Producer in a multischema scenario with a single destination table for each schema. We create 3
schemas with one table for each, with the same name. Then we use an EL expression to insert records according to
the /schema record field.
Pipeline:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
schema1_name = _get_random_name(database, prefix='stf_schema_')
schema2_name = _get_random_name(database, prefix='stf_schema_')
schema3_name = _get_random_name(database, prefix='stf_schema_')
table_name = _get_random_name(database, prefix='stf_table_')
_create_schema(schema1_name, database)
_create_schema(schema2_name, database)
_create_schema(schema3_name, database)
table1 = _create_table(table_name, database, schema_name=schema1_name)
table2 = _create_table(table_name, database, schema_name=schema2_name)
table3 = _create_table(table_name, database, schema_name=schema3_name)
ROWS = [{'schema': schema1_name, 'id': 1, 'name': 'Roger Federer'},
{'schema': schema2_name, 'id': 2, 'name': 'Rafael Nadal'},
{'schema': schema3_name, 'id': 3, 'name': 'Dominic Thiem'}]
INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multischema Insert', INPUT_DATA,
table_name, 'INSERT')
# JDBC Producer's "Schema Name" property is set through the `database` environment under some circumstances
# (e.g. Sql Server database). We overwrite it afterwards for the test.
pipeline.configure_for_environment(database)
pipeline[2].set_attributes(schema_name="${record:value('/schema')}")
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
sdc_executor.stop_pipeline(pipeline)
result1 = database.engine.execute(table1.select())
result2 = database.engine.execute(table2.select())
result3 = database.engine.execute(table3.select())
data1 = result1.fetchall()
data2 = result2.fetchall()
data3 = result3.fetchall()
assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
result1.close()
result2.close()
result3.close()
finally:
logger.info('Dropping table %s in schemas...', table_name)
table1.drop(database.engine)
table2.drop(database.engine)
table3.drop(database.engine)
logger.info('Dropping schemas %s, %s, %s...', schema1_name, schema2_name, schema3_name)
_drop_schema(schema1_name, database)
_drop_schema(schema2_name, database)
_drop_schema(schema3_name, database)
# Test SDC-10719
@database
@sdc_min_version('3.8.0')
def test_jdbc_producer_multischema_multitable(sdc_builder, sdc_executor, database):
"""Test a JDBC Producer in a multischema scenario with different destination tables for each schema. We create 3
schemas with one table for each, with different names. Then we use an EL expressions to insert records according to
the /schema and /table record fields.
There were a limitation in previous versions that affected to MySQL and MemSQL. These RDBMs do not differentiate
between schema and database. SDC used the database configured in the JDBC connection string, and looked for database
metadata filtering by database+schema. If the schema were other than the database of the connection string, metadata
could not be retrieved. This was a problem in a multischema scenario, where several schemas are employed.
Pipeline:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
schema1_name = _get_random_name(database, prefix='stf_schema_')
schema2_name = _get_random_name(database, prefix='stf_schema_')
schema3_name = _get_random_name(database, prefix='stf_schema_')
table1_name = _get_random_name(database, prefix='stf_table_')
table2_name = _get_random_name(database, prefix='stf_table_')
table3_name = _get_random_name(database, prefix='stf_table_')
_create_schema(schema1_name, database)
_create_schema(schema2_name, database)
_create_schema(schema3_name, database)
table1 = _create_table(table1_name, database, schema_name=schema1_name)
table2 = _create_table(table2_name, database, schema_name=schema2_name)
table3 = _create_table(table3_name, database, schema_name=schema3_name)
ROWS = [{'schema': schema1_name, 'table': table1_name, 'id': 1, 'name': 'Roger Federer'},
{'schema': schema2_name, 'table': table2_name, 'id': 2, 'name': 'Rafael Nadal'},
{'schema': schema3_name, 'table': table3_name, 'id': 3, 'name': 'Dominic Thiem'}]
INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multischema and Multitable Insert',
INPUT_DATA, "${record:value('/table')}", 'INSERT')
# JDBC Producer's "Schema Name" property is set through the `database` environment under some circumstances
# (e.g. Sql Server database). We overwrite it afterwards for the test.
pipeline.configure_for_environment(database)
pipeline[2].set_attributes(schema_name="${record:value('/schema')}")
# JDBC Producer's "Table Name" property is converted to uppercase through the configure_for_environment() method
# when database is Oracle. However EL function names are case-sensitive; we overwrite it afterwards to avoid an EL
# error.
pipeline[2].set_attributes(table_name="${record:value('/table')}")
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
sdc_executor.stop_pipeline(pipeline)
result1 = database.engine.execute(table1.select())
result2 = database.engine.execute(table2.select())
result3 = database.engine.execute(table3.select())
data1 = result1.fetchall()
data2 = result2.fetchall()
data3 = result3.fetchall()
assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
result1.close()
result2.close()
result3.close()
finally:
logger.info('Dropping tables %s, %s, %s...', table1_name, table2_name, table3_name)
table1.drop(database.engine)
table2.drop(database.engine)
table3.drop(database.engine)
logger.info('Dropping schemas %s, %s, %s...', schema1_name, schema2_name, schema3_name)
_drop_schema(schema1_name, database)
_drop_schema(schema2_name, database)
_drop_schema(schema3_name, database)
# SDC-11063: Do not reorder update statements in JDBC destination
@sdc_min_version('3.0.0.0')
@pytest.mark.parametrize('multi_row', [True, False])
@database
def test_jdbc_producer_ordering(sdc_builder, sdc_executor, multi_row, database):
"""Ensure that variously intertwined operations won't be executed out of order in harmful way."""
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True, autoincrement=False),
sqlalchemy.Column('a', sqlalchemy.Integer, quote=True),
sqlalchemy.Column('b', sqlalchemy.Integer, quote=True)
)
RAW_DATA = [
# Update id=5
{"op": 3, "id": 5, "a": 2, "b": 2},
# Insert id=4
{"op": 1, "id": 4, "a": 1, "b": 1},
# Update id=4
{"op": 3, "id": 4, "a": 2, "b": 2},
# Delete id=5
{"op": 2, "id": 5},
# Insert id=1
{"op": 1, "id": 1, "a": 1, "b": 1},
# Update id=1
{"op": 3, "id": 1, "a": 2},
# Insert id=2
{"op": 1, "id": 2, "a": 1, "b": 1},
# Delete id=2
{"op": 2, "id": 2},
# Update id=1
{"op": 3, "id": 1, "a": 2, "b": 2},
# Insert id=3
{"op": 1, "id": 3, "a": 1, "b": 1},
# Update id=1
{"op": 3, "id": 1, "a": 3},
# Update id=3
{"op": 3, "id": 3, "a": 5},
# Delete id=3
{"op": 2, "id": 3}
]
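# The 'op' values above map to the sdc.operation.type header set by the Expression Evaluator below:
# 1 = INSERT, 2 = DELETE, 3 = UPDATE (as implied by the per-record comments). Replaying the operations
# by hand, only two rows should survive: id=1 with (a=3, b=2) and id=4 with (a=2, b=2), which is what
# the assertions at the end of the test verify.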
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = '\n'.join(json.dumps(rec) for rec in RAW_DATA)
expression = builder.add_stage('Expression Evaluator')
expression.header_attribute_expressions = [
{'attributeToSet': 'sdc.operation.type', 'headerAttributeExpression': '${record:value("/op")}'}
]
remover = builder.add_stage('Field Remover')
remover.set_attributes(fields=['/op'], action='REMOVE')
producer = builder.add_stage('JDBC Producer')
producer.field_to_column_mapping = []
producer.default_operation = 'UPDATE'
producer.table_name = table_name
producer.use_multi_row_operation = multi_row
if database.type == 'Oracle':
producer.enclose_object_names = True
source >> expression >> remover >> producer
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
# The table will start with single row (id=5)
logger.info('Inserting rows into %s in %s database', table_name, database.type)
connection = database.engine.connect()
connection.execute(table.insert(), {'id': 5, 'a': 1, 'b': 1})
# Finally run the pipeline and verify it's outcome
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 2
# id=1
assert 1 == db[0][0]
assert 3 == db[0][1]
assert 2 == db[0][2]
# id=4
assert 4 == db[1][0]
assert 2 == db[1][1]
assert 2 == db[1][2]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@sdc_min_version('3.0.0.0')
@database
def test_jdbc_multitable_events(sdc_builder, sdc_executor, database):
"""
Validate that we properly generate events
"""
if database.type == 'Oracle':
pytest.skip("This test depends on auto-created ID that doesn't work properly on Oracle")
table_prefix = get_random_string(string.ascii_lowercase, 20)
table_a = '{}_a'.format(table_prefix)
table_b = '{}_b'.format(table_prefix)
table_events = '{}_events'.format(table_prefix)
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('JDBC Multitable Consumer')
source.transaction_isolation = 'TRANSACTION_READ_COMMITTED'
source.table_configs = [{
'tablePattern': f'{table_prefix}%',
"enableNonIncremental": True,
'tableExclusionPattern': table_events
}]
trash = builder.add_stage('Trash')
expression = builder.add_stage('Expression Evaluator')
expression.field_expressions = [{
'fieldToSet': '/tbl',
'expression': '${record:value("/table")}${record:value("/tables[0]")}'
}, {
'fieldToSet': '/tbls',
'expression': '${record:value("/tables[0]")},${record:value("/tables[1]")}'
}, {
'fieldToSet': '/event',
'expression': '${record:eventType()}'
}
]
producer = builder.add_stage('JDBC Producer')
producer.table_name = table_events
producer.default_operation = 'INSERT'
producer.field_to_column_mapping = [
dict(field='/event', columnName='event'),
dict(field='/tbl', columnName='tbl'),
dict(field='/tbls', columnName='tbls')
]
source >> trash
source >= expression
expression >> producer
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
# We need three tables for this test
metadata = sqlalchemy.MetaData()
a = sqlalchemy.Table(
table_a,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True)
)
b = sqlalchemy.Table(
table_b,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=False)
)
events = sqlalchemy.Table(
table_events,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('event', sqlalchemy.String(50)),
sqlalchemy.Column('tbl', sqlalchemy.String(150)),
sqlalchemy.Column('tbls', sqlalchemy.String(150))
)
try:
logger.info('Creating tables %s, %s and %s in %s database ...', table_a, table_b, table_events, database.type)
a.create(database.engine)
b.create(database.engine)
events.create(database.engine)
logger.info('Inserting rows into %s and %s', table_a, table_b)
connection = database.engine.connect()
connection.execute(a.insert(), {'id': 1})
connection.execute(b.insert(), {'id': 1})
# Start the pipeline
status = sdc_executor.start_pipeline(pipeline)
# Read two records, generate 4 events, 6 records
status.wait_for_pipeline_output_records_count(6)
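# Accounting for the 6 output records (illustrative): 2 data records (one per table) plus 4 events --
# one 'table-finished' per table, one 'schema-finished' and one 'no-more-data', which is what the
# events table assertions below check.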
result = database.engine.execute(events.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by auto-created id (insertion order)
result.close()
assert len(db) == 4
tbls = set()
assert 'table-finished' == db[0][1]
tbls.add(db[0][2])
assert 'table-finished' == db[1][1]
tbls.add(db[1][2])
assert table_a in tbls
assert table_b in tbls
assert 'schema-finished' == db[2][1]
tbls = set(db[2][3].split(","))
assert table_a in tbls
assert table_b in tbls
assert 'no-more-data' == db[3][1]
# Portable truncate
events.drop(database.engine)
events.create(database.engine)
# Second iteration - insert one new row
logger.info('Inserting rows into %s', table_a)
connection = database.engine.connect()
connection.execute(a.insert(), {'id': 2})
# 1 record, 3 events more
status.wait_for_pipeline_output_records_count(10)
result = database.engine.execute(events.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by auto-created id (insertion order)
result.close()
assert len(db) == 3
assert 'table-finished' == db[0][1]
assert table_a == db[0][2]
assert 'schema-finished' == db[1][1]
tbls = set(db[1][3].split(","))
assert table_a in tbls
assert table_b in tbls
assert 'no-more-data' == db[2][1]
# Now let's stop the pipeline and start it again
# SDC-10022: Multitable JDBC Origin with non-incremental table does not properly trigger 'no-more-data' event
sdc_executor.stop_pipeline(pipeline)
# Portable truncate
events.drop(database.engine)
events.create(database.engine)
# Start the pipeline and wait for it to read three records (3 events)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(3)
# Re-read the events table so the assertions below run against the rows produced after the restart
result = database.engine.execute(events.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by auto-created id (insertion order)
result.close()
assert len(db) == 3
assert 'table-finished' == db[0][1]
assert table_a == db[0][2]
assert 'schema-finished' == db[1][1]
tbls = set(db[1][3].split(","))
assert table_a in tbls
assert table_b in tbls
assert 'no-more-data' == db[2][1]
finally:
sdc_executor.stop_pipeline(pipeline)
logger.info('Dropping tables %s, %s and %s in %s database...', table_a, table_b, table_events, database.type)
a.drop(database.engine)
b.drop(database.engine)
events.drop(database.engine)
# SDC-11092: Improve the ability of JDBC Destination to cover non-standard Data related SQL Error codes
@sdc_min_version('3.0.0.0')
@pytest.mark.parametrize('multi_row', [True, False])
@database('oracle')
def test_jdbc_producer_oracle_data_errors(sdc_builder, sdc_executor, multi_row, database):
"""Ensure that data related error in Oracle will be sent to eror stream rather then shutting the pipeline down."""
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('ID', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('STR', sqlalchemy.String(2)),
)
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = '{"ID" : 1, "STR": "Longer then 2 characters"}'
producer = builder.add_stage('JDBC Producer')
producer.field_to_column_mapping = []
producer.default_operation = 'INSERT'
producer.table_name = table_name
producer.use_multi_row_operation = multi_row
source >> producer
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
# The table in the database needs to be empty
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 0
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1
assert history.latest.metrics.counter('pipeline.batchErrorRecords.counter').count == 1
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
# SDC-11082: Extend support for TIMESTAMP WITH TIMEZONE Datatypes
@sdc_min_version('3.0.0.0')
@database('oracle')
# https://docs.oracle.com/cd/B28359_01/server.111/b28318/datatype.htm#CNCPT1821
# We don't support UriType (requires difficult workaround in JDBC)
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('number', '1', 'DECIMAL', '1'),
('char(2)', "'AB'", 'STRING', 'AB'),
('varchar(4)', "'ABCD'", 'STRING', 'ABCD'),
('varchar2(4)', "'NVAR'", 'STRING', 'NVAR'),
('nchar(3)', "'NCH'", 'STRING', 'NCH'),
('nvarchar2(4)', "'NVAR'", 'STRING', 'NVAR'),
('binary_float', '1.0', 'FLOAT', '1.0'),
('binary_double', '2.0', 'DOUBLE', '2.0'),
('date', "TO_DATE('1998-1-1 6:22:33', 'YYYY-MM-DD HH24:MI:SS')", 'DATETIME', 883635753000),
('timestamp', "TIMESTAMP'1998-1-2 6:00:00'", 'DATETIME', 883720800000),
('timestamp with time zone', "TIMESTAMP'1998-1-3 6:00:00-5:00'", 'ZONED_DATETIME', '1998-01-03T06:00:00-05:00'),
('timestamp with local time zone', "TIMESTAMP'1998-1-4 6:00:00-5:00'", 'ZONED_DATETIME', '1998-01-04T11:00:00Z'),
('long', "'LONG'", 'STRING', 'LONG'),
('blob', "utl_raw.cast_to_raw('BLOB')", 'BYTE_ARRAY', 'QkxPQg=='),
('clob', "'CLOB'", 'STRING', 'CLOB'),
('nclob', "'NCLOB'", 'STRING', 'NCLOB'),
('XMLType', "xmltype('<a></a>')", 'STRING', '<a></a>')
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_multitable_oracle_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible Oracle types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id number primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.on_unknown_type = 'CONVERT_TO_STRING'
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
origin.on_unknown_type = 'CONVERT_TO_STRING'
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
# Since we are controlling types, we want to check explicit values inside the record rather than the Python
# wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['DATA_COLUMN'].type == expected_type
assert null_record.field['DATA_COLUMN'].type == expected_type
assert record.field['DATA_COLUMN']._data['value'] == expected_value
assert null_record.field['DATA_COLUMN'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
# SDC-11324: JDBC MultiTable origin can create duplicate offsets
@database('mysql')
def test_jdbc_multitable_duplicate_offsets(sdc_builder, sdc_executor, database):
"""Validate that we will not create duplicate offsets. """
table_name = get_random_string(string.ascii_lowercase, 10)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": table_name}]
origin.max_batch_size_in_records = 1
trash = pipeline_builder.add_stage('Trash')
origin >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
# We should have read and written all ROWS_IN_DATABASE records
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == len(ROWS_IN_DATABASE)
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == len(ROWS_IN_DATABASE)
# And most importantly, validate offset
offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
assert offset is not None
assert offset['offsets'] is not None
expected_offset = {
f"tableName={table_name};;;partitioned=false;;;partitionSequence=-1;;;partitionStartOffsets=;;;partitionMaxOffsets=;;;usingNonIncrementalLoad=false": "id=3",
"$com.streamsets.pipeline.stage.origin.jdbc.table.TableJdbcSource.offset.version$": "2"
}
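# Illustrative reading of the expected offset: the key encodes the table name together with its
# partitioning settings, and the value "id=3" is the last offset-column value committed, i.e. the
# highest id among the ROWS_IN_DATABASE rows (assumed to be 1-3).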
assert offset['offsets'] == expected_offset
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
# SDC-11326: JDBC MultiTable origin forgets offset of non-incremental table on consecutive execution
@database('mysql')
@sdc_min_version('3.0.0.0')
def test_jdbc_multitable_lost_nonincremental_offset(sdc_builder, sdc_executor, database):
"""Validate the origin does not loose non-incremental offset on various runs."""
table_name = get_random_string(string.ascii_lowercase, 10)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": table_name, "enableNonIncremental": True}]
origin.max_batch_size_in_records = 1
trash = pipeline_builder.add_stage('Trash')
origin >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=False),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
# We should have read all the records
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == len(ROWS_IN_DATABASE)
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == len(ROWS_IN_DATABASE)
# And most importantly, validate offset
offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
assert offset is not None
assert offset['offsets'] is not None
expected_offset = {
f"tableName={table_name};;;partitioned=false;;;partitionSequence=-1;;;partitionStartOffsets=;;;partitionMaxOffsets=;;;usingNonIncrementalLoad=true": "completed=true",
"$com.streamsets.pipeline.stage.origin.jdbc.table.TableJdbcSource.offset.version$": "2"
}
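# Here the committed value is "completed=true" rather than a column offset because the table is read
# with a non-incremental load (the id column is not a primary key), as also flagged by
# usingNonIncrementalLoad=true in the offset key.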
assert offset['offsets'] == expected_offset
for _ in range(5):
sdc_executor.start_pipeline(pipeline)
# Since the pipeline won't read anything, give it a few seconds to "idle"
time.sleep(2)
sdc_executor.stop_pipeline(pipeline)
# And it really should not have read anything!
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 0
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
# And offset should not have changed
offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
assert offset is not None
assert offset['offsets'] is not None
assert offset['offsets'] == expected_offset
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@sdc_min_version('3.9.0')
@database('oracle')
def test_jdbc_multitable_oracle_split_by_timestamp_with_timezone(sdc_builder, sdc_executor, database):
"""Make sure that we can properly partition TIMESTAMP WITH TIMEZONE type."""
table_name = get_random_string(string.ascii_uppercase, 20)
table_name_dest = get_random_string(string.ascii_uppercase, 20)
connection = database.engine.connect()
comparing_query = f"""(
select * from {table_name}
minus
select * from {table_name_dest}
) union (
select * from {table_name_dest}
minus
select * from {table_name}
)"""
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
ID number primary key,
TZ timestamp(6) with time zone
)
""")
# Create destination table
connection.execute(f"""CREATE TABLE {table_name_dest} AS SELECT * FROM {table_name} WHERE 1=0""")
# Insert a few rows
for m in range(0, 5):
for s in range(0, 59):
connection.execute(f"INSERT INTO {table_name} VALUES({m*100+s}, TIMESTAMP'2019-01-01 10:{m}:{s}-5:00')")
connection.execute("commit")
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{
"tablePattern": f'%{table_name}%',
"overrideDefaultOffsetColumns": True,
"offsetColumns": ["TZ"],
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "30",
"maxNumActivePartitions": -1
}]
origin.number_of_threads = 2
origin.maximum_pool_size = 2
origin.max_batch_size_in_records = 30
finisher = builder.add_stage('Pipeline Finisher Executor')
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
FIELD_MAPPINGS = [dict(field='/ID', columnName='ID'),
dict(field='/TZ', columnName='TZ')]
destination = builder.add_stage('JDBC Producer')
destination.set_attributes(default_operation='INSERT',
table_name=table_name_dest,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
origin >> destination
origin >= finisher
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
# Insert few more rows and validate the outcome again
for m in range(6, 8):
for s in range(0, 59):
connection.execute(f"INSERT INTO {table_name} VALUES({m*100+s}, TIMESTAMP'2019-01-01 10:{m}:{s}-5:00')")
connection.execute("commit")
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
finally:
logger.info('Dropping table %s and %s in %s database ...', table_name, table_name_dest, database.type)
connection.execute(f"DROP TABLE {table_name}")
connection.execute(f"DROP TABLE {table_name_dest}")
def _get_date_from_days(d):
return datetime.date(1970, 1, 1) + datetime.timedelta(days=d)
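# Example (illustrative): _get_date_from_days(0) -> datetime.date(1970, 1, 1) and
# _get_date_from_days(31) -> datetime.date(1970, 2, 1); the helper spreads the numeric ids used below
# over a wide range of distinct DATE values.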
@database('oracle')
def test_jdbc_multitable_oracle_split_by_date(sdc_builder, sdc_executor, database):
"""Make sure that we can properly partition DATE type.
More precisely, we want to run this pipeline:
multitable >> jdbc
multitable >= finisher
With more than one thread and using a DATE column as an offset column.
This feature was not available until version 3.11.0; the issue was detected and
solved in ESC-513.
"""
table_name = get_random_string(string.ascii_uppercase, 20)
table_name_dest = get_random_string(string.ascii_uppercase, 20)
connection = database.engine.connect()
comparing_query = f"""(
select * from {table_name}
minus
select * from {table_name_dest}
) union (
select * from {table_name_dest}
minus
select * from {table_name}
)"""
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
ID number primary key,
DT date
)
""")
# Create destination table
connection.execute(f"""CREATE TABLE {table_name_dest} AS SELECT * FROM {table_name} WHERE 1=0""")
# Insert a few rows
for m in range(0, 5):
for s in range(0, 59):
identifier = 100 * m + s
connection.execute(
f"INSERT INTO {table_name} VALUES({identifier}, DATE'{_get_date_from_days(identifier)}')"
)
connection.execute("commit")
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('JDBC Multitable Consumer')
# Partition size is set to 259200000 ms, which corresponds to 3 days (3*24*60*60*1000),
# since dates are translated to timestamps
origin.table_configs = [{
"tablePattern": f'%{table_name}%',
"overrideDefaultOffsetColumns": True,
"offsetColumns": ["DT"], # Should cause SDC < 3.11.0 to throw an UnsupportedOperationException
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "259200000", # 30 days = 30*24*60*60*1000 (259200000)ms
"maxNumActivePartitions": 2
}]
origin.number_of_threads = 2
origin.maximum_pool_size = 2
finisher = builder.add_stage('Pipeline Finisher Executor')
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
FIELD_MAPPINGS = [dict(field='/ID', columnName='ID'),
dict(field='/DT', columnName='DT')]
destination = builder.add_stage('JDBC Producer')
destination.set_attributes(default_operation='INSERT',
table_name=table_name_dest,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
origin >> destination
origin >= finisher
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
# Insert few more rows and validate the outcome again
for m in range(6, 8):
for s in range(0, 59):
identifier = 100 * m + s
connection.execute(
f"INSERT INTO {table_name} VALUES({identifier}, DATE'{_get_date_from_days(identifier)}')"
)
connection.execute("commit")
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
finally:
logger.info('Dropping table %s and %s in %s database ...', table_name, table_name_dest, database.type)
connection.execute(f"DROP TABLE {table_name}")
connection.execute(f"DROP TABLE {table_name_dest}")
@sdc_min_version('3.9.0')
@database('mysql')
def test_jdbc_multitable_consumer_origin_high_resolution_timestamp_offset(sdc_builder, sdc_executor, database):
"""
Check that the JDBC Multitable origin can retrieve records from a table using a high-resolution
(sub-second precision) timestamp as the offset. Verify that the records read have a timestamp greater than the
timestamp used as the initial offset.
Pipeline looks like:
jdbc_multitable_consumer >> trash
"""
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = f'{src_table_prefix}_{get_random_string(string.ascii_lowercase, 20)}'
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{'tablePattern': f'%{src_table_prefix}%',
'overrideDefaultOffsetColumns': True,
'offsetColumns': ['added'],
'offsetColumnToInitialOffsetValue': [{
'key': 'added',
'value': '${time:extractNanosecondsFromString(' +
'"1996-12-02 00:00:00.020111000")}'
}]
}])
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
connection = database.engine.connect()
# Create table
logger.info('Creating table %s in %s database ...', table_name, database.type)
connection.execute(f"""
CREATE TABLE {table_name}(
id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
name varchar(100) NOT NULL,
age INT UNSIGNED NOT NULL,
added TIMESTAMP(6) NOT NULL
)
""")
# Insert rows
logger.info('Adding four rows into %s database ...', database.type)
connection.execute(f'INSERT INTO {table_name} VALUES(1, "Charly", 14, "2005-02-08 14:00:00.100105002")')
connection.execute(f'INSERT INTO {table_name} VALUES(2, "Paco", 28, "1992-05-25 11:00:00.000201010")')
connection.execute(f'INSERT INTO {table_name} VALUES(3, "Eugenio", 21, "1996-12-01 23:00:00.020111")')
connection.execute(f'INSERT INTO {table_name} VALUES(4, "Romualdo", 19, "2000-06-15 18:30:00.10523121")')
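# With the initial offset at 1996-12-02 00:00:00.020111000, only rows whose 'added' timestamp is later
# should be read: Romualdo (2000) and Charly (2005). Paco (1992) and Eugenio (1996-12-01) fall before the
# offset and are skipped, which matches the assertion below (records come back in ascending 'added' order).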
try:
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
name_id_from_output = [(record.field['name'], record.field['id'])
for record in snapshot[jdbc_multitable_consumer].output]
assert len(name_id_from_output) == 2
assert name_id_from_output == [('Romualdo', 4), ('Charly', 1)]
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
connection.execute(f'DROP TABLE {table_name}')
@database
@sdc_min_version('3.0.0.0')
def test_jdbc_multitable_consumer_partitioned_large_offset_gaps(sdc_builder, sdc_executor, database):
"""
Ensure that the multi-table JDBC origin can handle large gaps between offset column values in partitioned mode.
The destination is trash, and there is a finisher waiting for the no-more-data event.
The pipeline will be started, and we will capture a snapshot spanning two batches (to ensure all expected rows
are covered), then assert that the captured rows match the expected data.
This is a test for SDC-10053
"""
if database.type == 'Oracle':
pytest.skip("This test depends on proper case for column names that Oracle auto-uppers.")
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, 20))
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{
"tablePattern": f'{table_name}',
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "1000000",
"maxNumActivePartitions": -1
}])
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
jdbc_multitable_consumer >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding four rows into %s table, with a large gap in the primary keys ...', table_name)
connection = database.engine.connect()
rows_with_gap = ROWS_IN_DATABASE + [{'id': 5000000, 'name': 'Evil Jeff'}]
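# Illustrative: with partitionSize set to 1000000 above, the id=5000000 row sits several empty partitions
# beyond ids 1-3; the origin must still reach it and emit 'Evil Jeff' in the second captured batch (SDC-10053).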
connection.execute(table.insert(), rows_with_gap)
connection.close()
sdc_executor.add_pipeline(pipeline)
# need to capture two batches, one for row IDs 1-3, and one for the last row after the large gap
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, batches=2, start_pipeline=True).snapshot
rows_from_snapshot = [(record.get_field_data('/name').value, record.get_field_data('/id').value)
for batch in snapshot.snapshot_batches
for record in batch.stage_outputs[jdbc_multitable_consumer.instance_name].output]
expected_data = [(row['name'], row['id']) for row in rows_with_gap]
logger.info('Actual %s expected %s', rows_from_snapshot, expected_data)
assert rows_from_snapshot == expected_data
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@sdc_min_version('3.0.0.0')
@database('mysql')
# https://dev.mysql.com/doc/refman/8.0/en/data-types.html
# We don't support BIT generally (the driver is doing funky 'random' mappings on certain versions)
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('TINYINT', '-128', 'SHORT', -128),
('TINYINT UNSIGNED', '255', 'SHORT', 255),
('SMALLINT', '-32768', 'SHORT', -32768),
('SMALLINT UNSIGNED', '65535', 'SHORT', -1), # Support for unsigned isn't entirely correct!
('MEDIUMINT', '-8388608', 'INTEGER', '-8388608'),
('MEDIUMINT UNSIGNED', '16777215', 'INTEGER', '16777215'),
('INT', '-2147483648', 'INTEGER', '-2147483648'),
('INT UNSIGNED', '4294967295', 'INTEGER', '-1'), # Support for unsigned isn't entirely correct!
('BIGINT', '-9223372036854775807', 'LONG', '-9223372036854775807'),
('BIGINT UNSIGNED', '18446744073709551615', 'LONG', '-1'), # Support for unsigned isn't entirely correct!
('DECIMAL(5, 2)', '5.20', 'DECIMAL', '5.20'),
('NUMERIC(5, 2)', '5.20', 'DECIMAL', '5.20'),
('FLOAT', '5.2', 'FLOAT', '5.2'),
('DOUBLE', '5.2', 'DOUBLE', '5.2'),
# ('BIT(8)',"b'01010101'", 'BYTE_ARRAY', 'VQ=='),
('DATE', "'2019-01-01'", 'DATE', 1546300800000),
('DATETIME', "'2019-01-01 5:00:00'", 'DATETIME', 1546318800000),
('TIMESTAMP', "'2019-01-01 5:00:00'", 'DATETIME', 1546318800000),
('TIME', "'5:00:00'", 'TIME', 18000000),
('YEAR', "'2019'", 'DATE', 1546300800000),
('CHAR(5)', "'Hello'", 'STRING', 'Hello'),
('VARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('BINARY(5)', "'Hello'", 'BYTE_ARRAY', 'SGVsbG8='),
('VARBINARY(5)', "'Hello'", 'BYTE_ARRAY', 'SGVsbG8='),
('BLOB', "'Hello'", 'BYTE_ARRAY', 'SGVsbG8='),
('TEXT', "'Hello'", 'STRING', 'Hello'),
("ENUM('a', 'b')", "'a'", 'STRING', 'a'),
("set('a', 'b')", "'a,b'", 'STRING', 'a,b'),
("POINT", "POINT(1, 1)", 'BYTE_ARRAY', 'AAAAAAEBAAAAAAAAAAAA8D8AAAAAAADwPw=='),
("LINESTRING", "LineString(Point(0,0), Point(10,10), Point(20,25), Point(50,60))", 'BYTE_ARRAY',
'AAAAAAECAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAkQAAAAAAAACRAAAAAAAAANEAAAAAAAAA5QAAAAAAAAElAAAAAAAAATkA='),
("POLYGON",
"Polygon(LineString(Point(0,0),Point(10,0),Point(10,10),Point(0,10),Point(0,0)),LineString(Point(5,5),Point(7,5),Point(7,7),Point(5,7),Point(5,5)))",
'BYTE_ARRAY',
'AAAAAAEDAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJEAAAAAAAAAAAAAAAAAAACRAAAAAAAAAJEAAAAAAAAAAAAAAAAAAACRAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAUQAAAAAAAABRAAAAAAAAAHEAAAAAAAAAUQAAAAAAAABxAAAAAAAAAHEAAAAAAAAAUQAAAAAAAABxAAAAAAAAAFEAAAAAAAAAUQA=='),
("JSON", "'{\"a\":\"b\"}'", 'STRING', '{\"a\": \"b\"}'),
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_multitable_mysql_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible Mysql types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.on_unknown_type = 'CONVERT_TO_STRING'
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
origin.on_unknown_type = 'CONVERT_TO_STRING'
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build(f"MySQL Type {sql_type} with value {insert_fragment}").configure_for_environment(
database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
# Since we are controlling types, we want to check explicit values inside the record rather than the Python
# wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['data_column'].type == expected_type
assert null_record.field['data_column'].type == expected_type
assert record.field['data_column']._data['value'] == expected_value
assert null_record.field['data_column'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.0.0.0')
@database('postgresql')
# https://www.postgresql.org/docs/11/datatype.html
# Not testing 'serial' family explicitly as that is just an alias
# Not supporting tsvector tsquery as that doesn't seem fit for us
# bit(n) is not supported
# xml is not supported
# domain types (as a category are not supported)
# pg_lsn not supported
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('smallint', '-32768', 'SHORT', -32768),
('integer', '2147483647', 'INTEGER', '2147483647'),
('bigint', '-9223372036854775808', 'LONG', '-9223372036854775808'),
('decimal(5,2)', '5.20', 'DECIMAL', '5.20'),
('numeric(5,2)', '5.20', 'DECIMAL', '5.20'),
('real', '5.20', 'FLOAT', '5.2'),
('double precision', '5.20', 'DOUBLE', '5.2'),
('money', '12.34', 'DOUBLE', '12.34'),
('char(5)', "'Hello'", 'STRING', 'Hello'),
('varchar(5)', "'Hello'", 'STRING', 'Hello'),
('text', "'Hello'", 'STRING', 'Hello'),
('bytea', "'\\xDEADBEEF'", 'BYTE_ARRAY', '3q2+7w=='),
('timestamp', "'2003-04-12 04:05:06'", 'DATETIME', 1050120306000),
('timestamp with time zone', "'2003-04-12 04:05:06 America/New_York'", 'DATETIME', 1050134706000),
# For PostgreSQL, we don't create ZONED_DATETIME
('date', "'2019-01-01'", 'DATE', 1546300800000),
('time', "'5:00:00'", 'TIME', 18000000),
('time with time zone', "'04:05:06-08:00'", 'TIME', 43506000),
('interval', "INTERVAL '1' YEAR", 'STRING', '1 years 0 mons 0 days 0 hours 0 mins 0.00 secs'),
('boolean', "true", 'BOOLEAN', True),
('ai', "'sad'", 'STRING', 'sad'),
('point', "'(1, 1)'", 'STRING', '(1.0,1.0)'),
('line', "'{1, 1, 1}'", 'STRING', '{1.0,1.0,1.0}'),
('lseg', "'((1,1)(2,2))'", 'STRING', '[(1.0,1.0),(2.0,2.0)]'),
('box', "'(1,1)(2,2)'", 'STRING', '(2.0,2.0),(1.0,1.0)'),
('path', "'((1,1),(2,2))'", 'STRING', '((1.0,1.0),(2.0,2.0))'),
('polygon', "'((1,1),(2,2))'", 'STRING', '((1.0,1.0),(2.0,2.0))'),
('circle', "'<(1,1),5>'", 'STRING', '<(1.0,1.0),5.0>'),
('inet', "'127.0.0.1/16'", 'STRING', '127.0.0.1/16'),
('cidr', "'127.0.0.0/16'", 'STRING', '127.0.0.0/16'),
('macaddr', "'08:00:2b:01:02:03'", 'STRING', '08:00:2b:01:02:03'),
# ('macaddr8', "'08:00:2b:01:02:03'", 'STRING', '08:00:2b:ff:fe:01:02:03'),
# ('bit(8)', "b'10101010'", 'BYTE_ARRAY', '08:00:2b:ff:fe:01:02:03'), # Doesn't work at all today
('bit varying(3)', "b'101'", 'STRING', '101'),
('uuid', "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'", 'STRING', 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'),
# ('xml', "'<foo>bar</foo>'", 'STRING', ''), # Doesn't work properly today
("json", "'{\"a\":\"b\"}'", 'STRING', '{"a":"b"}'),
("jsonb", "'{\"a\":\"b\"}'", 'STRING', '{"a": "b"}'),
("integer[3][3]", "'{{1,2,3},{4,5,6},{7,8,9}}'", 'STRING', '{{1,2,3},{4,5,6},{7,8,9}}'),
("ct", "ROW(1, 2)", 'STRING', '(1,2)'),
("int4range", "'[1,2)'", 'STRING', '[1,2)'),
("int8range", "'[1,2)'", 'STRING', '[1,2)'),
("numrange", "'[1,2)'", 'STRING', '[1,2)'),
("tsrange", "'[2010-01-01 14:30, 2010-01-01 15:30)'", 'STRING', '["2010-01-01 14:30:00","2010-01-01 15:30:00")'),
("tstzrange", "'[2010-01-01 14:30 America/New_York, 2010-01-01 15:30 America/New_York)'", 'STRING',
'["2010-01-01 19:30:00+00","2010-01-01 20:30:00+00")'),
("daterange", "'[2010-01-01, 2010-01-02)'", 'STRING', '[2010-01-01,2010-01-02)'),
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_postgresql_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible PostgreSQL types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create enum type conditionally
connection.execute(f"""
DO
$$
BEGIN
IF NOT EXISTS (SELECT * FROM pg_type typ
INNER JOIN pg_namespace nsp ON nsp.oid = typ.typnamespace
WHERE nsp.nspname = current_schema() AND typ.typname = 'ai') THEN
CREATE TYPE ai AS ENUM ('sad', 'ok', 'happy');
END IF;
END;
$$
LANGUAGE plpgsql;
""")
# Create enum complex type conditionally
connection.execute(f"""
DO
$$
BEGIN
IF NOT EXISTS (SELECT * FROM pg_type typ
INNER JOIN pg_namespace nsp ON nsp.oid = typ.typnamespace
WHERE nsp.nspname = current_schema() AND typ.typname = 'ct') THEN
CREATE TYPE ct AS (a int, b int);
END IF;
END;
$$
LANGUAGE plpgsql;
""")
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.on_unknown_type = 'CONVERT_TO_STRING'
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
origin.on_unknown_type = 'CONVERT_TO_STRING'
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
# Since we are controlling types, we want to check explicit values inside the record rather than the Python
# wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['data_column'].type == expected_type
assert null_record.field['data_column'].type == expected_type
assert record.field['data_column']._data['value'] == expected_value
assert null_record.field['data_column'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.0.0.0')
@database('sqlserver')
# https://docs.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-2017
# hierarchyid types are not supported
# Geometry and geography not supported
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('DATE', "'2019-01-01'", 'DATE', 1546300800000),
('DATETIME', "'2004-05-23T14:25:10'", 'DATETIME', 1085322310000),
('DATETIME2', "'2004-05-23T14:25:10'", 'DATETIME', 1085322310000),
('DATETIMEOFFSET', "'2004-05-23 14:25:10.3456 -08:00'", 'DEPENDS_ON_VERSION', 'depends_on_version'),
('SMALLDATETIME', "'2004-05-23T14:25:10'", 'DATETIME', 1085322300000),
('TIME', "'14:25:10'", 'TIME', 51910000),
('BIT', "1", 'BOOLEAN', True),
('DECIMAL(5,2)', '5.20', 'DECIMAL', '5.20'),
('NUMERIC(5,2)', '5.20', 'DECIMAL', '5.20'),
('REAL', '5.20', 'FLOAT', '5.2'),
('FLOAT', '5.20', 'DOUBLE', '5.2'),
('TINYINT', '255', 'SHORT', 255),
('SMALLINT', '-32768', 'SHORT', -32768),
('INT', '-2147483648', 'INTEGER', '-2147483648'),
('BIGINT', '-9223372036854775807', 'LONG', '-9223372036854775807'),
('MONEY', '255.60', 'DECIMAL', '255.6000'),
('SMALLMONEY', '255.60', 'DECIMAL', '255.6000'),
('BINARY(5)', "CAST('Hello' AS BINARY(5))", 'BYTE_ARRAY', 'SGVsbG8='),
('VARBINARY(5)', "CAST('Hello' AS VARBINARY(5))", 'BYTE_ARRAY', 'SGVsbG8='),
('CHAR(5)', "'Hello'", 'STRING', 'Hello'),
('VARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('NCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('NVARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('TEXT', "'Hello'", 'STRING', 'Hello'),
('NTEXT', "'Hello'", 'STRING', 'Hello'),
('IMAGE', "CAST('Hello' AS IMAGE)", 'BYTE_ARRAY', 'SGVsbG8='),
# ('GEOGRAPHY',"geography::STGeomFromText('LINESTRING(-122.360 47.656, -122.343 47.656 )', 4326)", 'BYTE_ARRAY', '5hAAAAEUhxbZzvfTR0DXo3A9CpdewIcW2c7300dAy6FFtvOVXsA='),
# ('GEOMETRY',"geometry::STGeomFromText('LINESTRING (100 100, 20 180, 180 180)', 0)", 'BYTE_ARRAY', 'AAAAAAEEAwAAAAAAAAAAAFlAAAAAAAAAWUAAAAAAAAA0QAAAAAAAgGZAAAAAAACAZkAAAAAAAIBmQAEAAAABAAAAAAEAAAD/////AAAAAAI='),
('XML', "'<a></a>'", 'STRING', '<a/>')
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_sqlserver_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible SQL Server types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
trash = builder.add_stage('Trash')
# As a part of SDC-10125, DATETIMEOFFSET is natively supported in SDC, and is converted into ZONED_DATETIME
if sql_type == 'DATETIMEOFFSET':
if Version(sdc_builder.version) >= Version('3.14.0'):
expected_type = 'ZONED_DATETIME'
expected_value = '2004-05-23T14:25:10.3456-08:00'
else:
expected_type = 'STRING'
expected_value = '2004-05-23 14:25:10.3456 -08:00'
# This unknown_type_action setting is required, otherwise DATETIMEOFFSET tests for SDC < 3.14 will fail.
origin.on_unknown_type = 'CONVERT_TO_STRING'
origin >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
        # Since we are controlling types, we want to check explicit values inside the record rather than the Python
        # wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['data_column'].type == expected_type
assert null_record.field['data_column'].type == expected_type
assert record.field['data_column']._data['value'] == expected_value
assert null_record.field['data_column'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.12.0')
@database('sqlserver')
@pytest.mark.parametrize('on_unknown_type_action', ['CONVERT_TO_STRING', 'STOP_PIPELINE'])
def test_jdbc_sqlserver_on_unknown_type_action(sdc_builder, sdc_executor, database, on_unknown_type_action):
"""Test JDBC Multitable Consumer with MS-SQL server for the on_unknown_type action.
This is to verify SDC-12764.
    When the 'On Unknown Type' action is set to STOP_PIPELINE, the pipeline should stop with a StageException since it cannot convert the DATETIMEOFFSET field.
    When the 'On Unknown Type' action is set to CONVERT_TO_STRING, the pipeline should convert the unknown type to string and process the next record.
The pipeline will look like:
JDBC_Multitable_Consumer >> trash
"""
if Version(sdc_builder.version) >= Version('3.14.0'):
pytest.skip("Skipping SQLServer Unknown Type action check, since DATETIMEOFFSET field is now natively supported from SDC Version 3.14.0")
column_type = 'DATETIMEOFFSET'
INPUT_DATE = "'2004-05-23T14:25:10'"
EXPECTED_OUTCOME = OrderedDict(id=1, date_offset='2004-05-23 14:25:10 +00:00')
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
pipeline_builder = sdc_builder.get_pipeline_builder()
# Setup Origin with specified unknown type action
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{table_name}%'}],
on_unknown_type=on_unknown_type_action)
# Setup destination
    trash = pipeline_builder.add_stage('Trash')
# Connect the pipeline stages
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
# Create table and add a row
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
date_offset {column_type} NOT NULL
)
""")
connection.execute(f"INSERT INTO {table_name} VALUES(1, {INPUT_DATE})")
try:
if on_unknown_type_action == 'STOP_PIPELINE':
# Pipeline should stop with StageException
with pytest.raises(Exception):
sdc_executor.start_pipeline(pipeline)
sdc_executor.stop_pipeline(pipeline)
status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
assert 'RUN_ERROR' == status
else:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
output_records = snapshot[jdbc_multitable_consumer].output
assert len(output_records) == 1
assert output_records[0].field == EXPECTED_OUTCOME
finally:
status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
if status == 'RUNNING':
sdc_executor.stop_pipeline(pipeline)
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.14.0')
@database('sqlserver')
def test_jdbc_sqlserver_datetimeoffset_as_primary_key(sdc_builder, sdc_executor, database):
"""Test JDBC Multitable Consumer with SQLServer table configured with DATETIMEOFFSET column as primary key.
The pipeline will look like:
JDBC_Multitable_Consumer >> trash
"""
INPUT_COLUMN_TYPE, INPUT_DATE = 'DATETIMEOFFSET', "'2004-05-23 14:25:10.3456 -08:00'"
EXPECTED_TYPE, EXPECTED_VALUE = 'ZONED_DATETIME', '2004-05-23T14:25:10.3456-08:00'
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{table_name}%'}])
    trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
connection.execute(f"""
CREATE TABLE {table_name}(
dto {INPUT_COLUMN_TYPE} NOT NULL PRIMARY KEY
)
""")
connection.execute(f"INSERT INTO {table_name} VALUES({INPUT_DATE})")
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[jdbc_multitable_consumer].output) == 1
record = snapshot[jdbc_multitable_consumer].output[0]
assert record.field['dto'].type == EXPECTED_TYPE
assert record.field['dto'].value == EXPECTED_VALUE
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
# Test for SDC-13288
@database('db2')
def test_jdbc_producer_db2_long_record(sdc_builder, sdc_executor, database):
"""Test that JDBC Producer correctly sends record when setting Custom Data SQLSTATE for db2 database instead of
throwing StageException. The pipelines reads a file with 5 records 1 by 1 having the last record being biggest
than the db2 table column size. That throws an error with an specific SQL Code (22001). Having that code in Custom
Data SQLSTATE sends the last record to error.
The pipeline looks like:
directory_origin >> jdbc_producer
In order to create the file read by directory origin another pipeline is used that looks like:
dev_raw_data_source >> local_fs
"""
# Insert data into file.
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
csv_records = ['1,hello', '2,hello', '3,hello', '4,hello', '5,hellolargerword']
_setup_delimited_file(sdc_executor, tmp_directory, csv_records)
# Create directory origin.
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED',
file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
file_post_processing='DELETE', files_directory=tmp_directory,
batch_size_in_recs=1)
# Create jdbc producer destination.
    # Create table. DB2 internally stores table names in uppercase, so use ASCII uppercase directly.
table_name = get_random_string(string.ascii_uppercase, 20)
database.engine.execute(f'CREATE TABLE {table_name} (id VARCHAR(20) NOT NULL PRIMARY KEY, a VARCHAR(10));')
field_to_column_mapping = [dict(columnName='ID',
dataType='USE_COLUMN_TYPE',
field='/0',
paramValue='?'),
dict(columnName='A',
dataType='USE_COLUMN_TYPE',
field='/1',
paramValue='?')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation="INSERT",
schema_name=DEFAULT_DB2_SCHEMA,
table_name=table_name,
field_to_column_mapping=field_to_column_mapping,
stage_on_record_error='TO_ERROR',
data_sqlstate_codes=["22001"])
directory >> jdbc_producer
directory_jdbc_producer_pipeline = pipeline_builder.build(
title='Directory - JDBC Producer. Test DB2 sql code error').configure_for_environment(database)
sdc_executor.add_pipeline(directory_jdbc_producer_pipeline)
try:
snapshot = sdc_executor.capture_snapshot(directory_jdbc_producer_pipeline, start_pipeline=True, batch_size=1,
batches=5).snapshot
sdc_executor.stop_pipeline(directory_jdbc_producer_pipeline)
assert 5 == len(snapshot.snapshot_batches)
result = database.engine.execute(f'SELECT ID,A FROM {table_name};')
        data_from_database = sorted(result.fetchall(), key=lambda row: row[0])  # Order by id.
result.close()
        # Assert the database contains records with id=1 through id=4, excluding id=5. Columns => record[0] = id, record[1] = a.
assert data_from_database == [(record[0], record[1]) for record in
[unified_record.split(',') for unified_record in csv_records[:-1]]]
stage = snapshot.snapshot_batches[4][jdbc_producer.instance_name]
assert 1 == len(stage.error_records)
error_record = stage.error_records[0]
assert 'hellolargerword' == error_record.field['1']
assert 'JDBC_14' == error_record.header['errorCode']
assert 'SQLSTATE=22001' in error_record.header['errorMessage']
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
database.engine.execute(f'DROP TABLE {table_name}')
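# For reference (illustrative only): SQLSTATE 22001 is the standard "string data, right truncation"
# error, which is what the oversized last record ('hellolargerword' into VARCHAR(10)) triggers above.
# The expected-data expression in the test unpacks each CSV record into an (id, a) tuple,
# e.g. '1,hello' -> ('1', 'hello').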
def _setup_delimited_file(sdc_executor, tmp_directory, csv_records):
"""Setup csv records and save in local system. The pipelines looks like:
dev_raw_data_source >> local_fs
"""
raw_data = "\n".join(csv_records)
pipeline_builder = sdc_executor.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=raw_data, stop_after_first_batch=True)
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=tmp_directory,
files_prefix='sdc-${sdc:id()}', files_suffix='csv')
dev_raw_data_source >> local_fs
files_pipeline = pipeline_builder.build('Generate files pipeline')
sdc_executor.add_pipeline(files_pipeline)
# Generate some batches/files.
sdc_executor.start_pipeline(files_pipeline).wait_for_finished(timeout_sec=5)
return csv_records
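# Illustrative usage of _setup_delimited_file (values are examples only, not used by any test):
#
#     tmp_dir = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
#     _setup_delimited_file(sdc_executor, tmp_dir, ['1,alpha', '2,beta'])
#
# The helper writes the records via a Dev Raw Data Source >> Local FS pipeline so that a Directory
# origin in the test under scrutiny can consume them as delimited files.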
# SDC-13556: Do not spin JDBC Destination and Tee Processor machinery for empty batches
@sdc_min_version('3.14.0')
@database('mysql')
@pytest.mark.parametrize('use_multi_row', [True, False])
def test_jdbc_tee_commits_on_empty_batches(use_multi_row, sdc_builder, sdc_executor, database):
"""Ensure that the JDBC Tee processor won't generate commits on empty batches. Since it's generally difficult
    to create empty batches in SDC, we use a scripting origin to generate them and then check the commit timer (which
    also contains a count) to ensure that we don't generate excessive commits on the database."""
builder = sdc_builder.get_pipeline_builder()
table_name = get_random_string(string.ascii_lowercase, 20)
script = """
// First batch contains exactly one record
var batch = sdc.createBatch();
var record = sdc.createRecord('generated data');
record.value = {'name': 'A'};
batch.add(record);
batch.process("batch", "non-empty");
// Sent 1000 batches that will be empty
var step;
for (step = 0; step < 1000; step++) {
batch = sdc.createBatch();
batch.process("whatever", "batch-" + step);
}
"""
origin = builder.add_stage('JavaScript Scripting')
    origin.record_type = 'NATIVE_OBJECTS'
    origin.user_script = script
tee = builder.add_stage('JDBC Tee')
tee.default_operation = 'INSERT'
tee.field_to_column_mapping = [dict(columnName='name', field='/name', paramValue='?')]
tee.generated_column_mappings = [dict(columnName='id', field='/id')]
tee.table_name = table_name
tee.use_multi_row_operation = use_multi_row
trash = builder.add_stage('Trash')
origin >> tee >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
table = _create_table(table_name, database)
try:
sdc_executor.start_pipeline(pipeline).wait_for_finished()
        # First of all, verify that the table has exactly one record with the expected values
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert len(db) == 1
assert db[0][0] == 'A'
assert db[0][1] == 1
# Second of all, we should see exactly 1001 batches generated by our scripting origin
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchCount.counter').count == 1001
        # Then let's check how many commits we have generated to ensure that we don't have 1001 commits
expected_commits = 1 if use_multi_row else 2
assert history.latest.metrics.timer('custom.JDBCTee_01.Commit Timer.0.timer').count == expected_commits
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.15.0')
def test_multitable_quote_column_names(sdc_builder, sdc_executor, database):
"""
Ensure that we properly quote all table and column names when querying the database.
"""
table_name = "table_" + get_random_string(string.ascii_letters, 10)
offset_name = "column_" + get_random_string(string.ascii_letters, 10)
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('JDBC Multitable Consumer')
    origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.max_batch_size_in_records = 10
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build().configure_for_environment(database)
    # Working around STF behavior of upper-casing the table name configuration
origin.table_configs[0]["tablePattern"] = f'%{table_name}%'
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column(offset_name, sqlalchemy.Integer, primary_key=True, quote=True),
        quote=True
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
        logger.info('Adding one row into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), [{offset_name: 1}])
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
        # We want to run for a few seconds to see if any errors show up (like they did in previous versions)
time.sleep(10)
sdc_executor.stop_pipeline(pipeline)
# There should be no errors reported
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('stage.JDBCMultitableConsumer_01.errorRecords.counter').count == 0
assert history.latest.metrics.counter('stage.JDBCMultitableConsumer_01.stageErrors.counter').count == 0
# And verify that we properly read that one record
assert len(snapshot[origin].output) == 1
assert snapshot[origin].output[0].get_field_data('/' + offset_name) == 1
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.0.0.0')
def test_jdbc_multitable_consumer_duplicates_read_when_initial_offset_configured(sdc_builder, sdc_executor, database):
"""
SDC-13625 Integration test for SDC-13624 - MT Consumer ingests duplicates when initial offset is specified
Setup origin as follows:
partitioning enabled + num_threads and num partitions > 1 + override offset column set
+ initial value specified for offset
Verify that origin does not ingest the records more than once (duplicates) when initial value for offset is set
Pipeline:
JDBC MT Consumer >> Trash
>= Pipeline Finisher (no-more-data)
"""
if database.type == 'Oracle':
pytest.skip("This test depends on proper case for column names that Oracle auto-uppers.")
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, 20))
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{
"tablePattern": f'{table_name}',
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "100000",
"maxNumActivePartitions": 5,
'overrideDefaultOffsetColumns': True,
'offsetColumns': ['created'],
'offsetColumnToInitialOffsetValue': [{
'key': 'created',
'value': '0'
}]
}])
jdbc_multitable_consumer.number_of_threads = 2
jdbc_multitable_consumer.maximum_pool_size = 2
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
jdbc_multitable_consumer >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
ONE_MILLION = 1000000
rows_in_table = [{'id': i, 'name': get_random_string(string.ascii_lowercase, 5), 'created': i + ONE_MILLION}
for i in range(1, 21)]
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(5)),
sqlalchemy.Column('created', sqlalchemy.Integer)
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding 20 rows into %s table', table_name)
connection = database.engine.connect()
connection.execute(table.insert(), rows_in_table)
connection.close()
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, batches=2, start_pipeline=True).snapshot
rows_from_snapshot = [(record.get_field_data('/name').value,
record.get_field_data('/id').value,
record.get_field_data('/created').value)
for batch in snapshot.snapshot_batches
for record in batch.stage_outputs[jdbc_multitable_consumer.instance_name].output]
expected_data = [(row['name'], row['id'], row['created']) for row in rows_in_table]
assert rows_from_snapshot == expected_data
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
# Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import logging
import math
import os
import random
import string
import tempfile
import time
from collections import OrderedDict
import pytest
import sqlalchemy
import datetime
from streamsets.sdk.utils import Version
from streamsets.testframework.environments.databases import Db2Database, OracleDatabase, SQLServerDatabase, PostgreSqlDatabase
from streamsets.testframework.markers import credentialstore, database, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
ROWS_IN_DATABASE = [
{'id': 1, 'name': 'Dima'},
{'id': 2, 'name': 'Jarcec'},
{'id': 3, 'name': 'Arvind'}
]
ROWS_TO_UPDATE = [
{'id': 2, 'name': 'Eddie'},
{'id': 4, 'name': 'Jarcec'}
]
LOOKUP_RAW_DATA = ['id'] + [str(row['id']) for row in ROWS_IN_DATABASE]
RAW_DATA = ['name'] + [row['name'] for row in ROWS_IN_DATABASE]
DEFAULT_DB2_SCHEMA = 'DB2INST1'
@database
def test_jdbc_multitable_consumer_origin_simple(sdc_builder, sdc_executor, database):
"""
Check if Jdbc Multi-table Origin can retrieve any records from a table.
Destination is Trash.
Verify input and output (via snapshot).
"""
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, 20))
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{src_table_prefix}%'}])
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
# Column names are converted to lower case since Oracle database column names are in upper case.
tuples_to_lower_name = lambda tup: (tup[0].lower(), tup[1])
rows_from_snapshot = [tuples_to_lower_name(list(record.field.items())[1])
for record in snapshot[pipeline[0].instance_name].output]
assert rows_from_snapshot == [('name', row['name']) for row in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_consumer_offset_resume(sdc_builder, sdc_executor, database):
"""Ensure that the Query consumer can resume where it ended and stop the pipeline when it reads all the data."""
if isinstance(database, OracleDatabase):
pytest.skip('This test does not support oracle and its upper casing of column names.')
metadata = sqlalchemy.MetaData()
table_name = get_random_string(string.ascii_lowercase, 20)
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Query Consumer')
origin.incremental_mode = True
origin.sql_query = 'SELECT * FROM {0} WHERE '.format(table_name) + 'id > ${OFFSET} ORDER BY id'
origin.initial_offset = '0'
origin.offset_column = 'id'
trash = pipeline_builder.add_stage('Trash')
origin >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
origin >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
connection = database.engine.connect()
for i in range(len(ROWS_IN_DATABASE)):
# Insert one row to the database
connection.execute(table.insert(), [ROWS_IN_DATABASE[i]])
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
assert len(snapshot[origin].output) == 1
assert snapshot[origin].output[0].get_field_data('/id') == i + 1
# TLKT-249: Add wait_for_finished to get_status object
sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_consumer_non_incremental_mode(sdc_builder, sdc_executor, database):
"""Ensure that the Query consumer works properly in non-incremental mode."""
if database.type == 'Oracle':
pytest.skip("This test depends on proper case for column names that Oracle auto-uppers.")
metadata = sqlalchemy.MetaData()
table_name = get_random_string(string.ascii_lowercase, 20)
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Query Consumer')
origin.incremental_mode = False
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
trash = pipeline_builder.add_stage('Trash')
origin >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
origin >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
        # Run the pipeline N times; it should always read the same data
for i in range(3):
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
assert len(snapshot[origin].output) == len(ROWS_IN_DATABASE)
assert snapshot[origin].output[0].get_field_data('/id') == 1
assert snapshot[origin].output[1].get_field_data('/id') == 2
assert snapshot[origin].output[2].get_field_data('/id') == 3
# TLKT-249: Add wait_for_finished to get_status object
sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
finally:
        logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_multitable_consumer_with_finisher(sdc_builder, sdc_executor, database):
"""
Test reading with Multi-table JDBC, output to trash.
Test some table names that start with numbers (SDC-5381).
    Check if the Pipeline Finisher Executor works correctly.
"""
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{src_table_prefix}%'}])
finisher = pipeline_builder.add_stage('Pipeline Finisher Executor')
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >= finisher
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
random.seed()
tables = []
metadata = sqlalchemy.MetaData()
try:
connection = database.engine.connect()
num_letters = 10
num_recs = 10
num_tables = 3
for i in range(0, num_tables):
if i % 2 == 1:
                # table name starts with a number and contains lowercase letters.
input_name = '{}_{}_{}'.format(str(i), src_table_prefix,
get_random_string(string.ascii_lowercase, num_letters))
else:
                # table name comprised of lowercase letters only.
input_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, num_letters))
tables.append(sqlalchemy.Table(
input_name,
metadata,
sqlalchemy.Column('serial', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('data', sqlalchemy.Integer)
))
tables[i].create(database.engine)
rows = [{'serial': j, 'data': random.randint(0, 2100000000)} for j in range(1, num_recs + 1)]
connection.execute(tables[i].insert(), rows)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
finally:
for table in tables:
table.drop(database.engine)
# SDC-11009: Run away pipeline runners in JDBC Multithread origins when no-more-data generation delay is configured
@database
@sdc_min_version('3.2.0')
def test_jdbc_multitable_consumer_with_no_more_data_event_generation_delay(sdc_builder, sdc_executor, database):
"""
Make sure that when a delayed no-more-data is being processed, the pipeline properly waits on the processing to
finish before stopping.
source >> trash
>= delay (only for no-more-data) >> trash
"""
src_table = get_random_string(string.ascii_lowercase, 6)
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.no_more_data_event_generation_delay_in_seconds = 1
jdbc_multitable_consumer.table_configs = [{"tablePattern": f'%{src_table}%'}]
trash = pipeline_builder.add_stage('Trash')
delay = pipeline_builder.add_stage('Delay')
delay.delay_between_batches = 10 * 1000
delay.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
trash_event = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
jdbc_multitable_consumer >= delay
delay >> trash_event
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
    metadata = sqlalchemy.MetaData()
    table = None
    try:
connection = database.engine.connect()
table = sqlalchemy.Table(
src_table,
metadata,
sqlalchemy.Column('serial', sqlalchemy.Integer, primary_key=True)
)
table.create(database.engine)
rows = [{'serial': 1}]
connection.execute(table.insert(), rows)
# We start the pipeline
sdc_executor.start_pipeline(pipeline)
# We wait three seconds - one second for the no-more-data to be generated and then some buffer time
time.sleep(3)
        # Then we try to stop the pipeline; it should not stop immediately and should in fact wait
sdc_executor.stop_pipeline(pipeline).wait_for_stopped()
current_status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
assert current_status == 'STOPPED'
# Validate expected metrics
history = sdc_executor.get_pipeline_history(pipeline)
# Total number of input records
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1
        # 1 record, 1 no-more-data (the rest of the events are discarded)
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 2
# The table itself contained only one record
assert history.latest.metrics.counter('stage.Trash_01.inputRecords.counter').count == 1
# Only no-more-data event should reach the destination
assert history.latest.metrics.counter('stage.Trash_02.inputRecords.counter').count == 1
        # The max batch time should be slightly more than 10 (the delayed batch that we caused)
# TODO: TLKT-167: Add access methods to metric objects
assert history.latest.metrics.timer('pipeline.batchProcessing.timer')._data.get('max') >= 10
finally:
if table is not None:
table.drop(database.engine)
def _get_random_name(database, prefix='', length=5):
"""Generate a random string to use as a database object name.
It handles letter case according to the database type, forcing upper-case (e.g. Oracle) or lower-case
(e.g. Postgres).
Args:
database: a :obj:`streamsets.testframework.environment.Database` object.
prefix: (:obj:`str`) add a prefix to the generated name. Default: ''.
length: (:obj:`int`) number of characters of the generated name (without counting ``prefix``).
"""
    if isinstance(database, OracleDatabase):
        name = '{}{}'.format(prefix.upper(), get_random_string(string.ascii_uppercase, length))
    else:
        name = '{}{}'.format(prefix.lower(), get_random_string(string.ascii_lowercase, length))
return name
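# Illustrative usage of _get_random_name (values are random; only the shape is shown):
#
#     table_name = _get_random_name(database, prefix='stf_', length=8)
#     # -> e.g. 'stf_abcdefgh' on most databases, 'STF_ABCDEFGH' on Oracle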
def _create_table(table_name, database, schema_name=None):
"""Helper function to create a table with two columns: id (int, PK) and name (str).
Args:
table_name: (:obj:`str`) the name for the new table.
database: a :obj:`streamsets.testframework.environment.Database` object.
schema_name: (:obj:`str`, optional) when provided, create the new table in a specific schema; otherwise,
the default schema for the engine’s database connection is used.
Return:
The new table as a sqlalchemy.Table object.
"""
metadata = sqlalchemy.MetaData()
if type(database) == SQLServerDatabase:
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('name', sqlalchemy.String(32)),
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
autoincrement=False),
schema=schema_name)
else:
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('name', sqlalchemy.String(32)),
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
schema=schema_name)
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
return table
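# Illustrative usage of _create_table, mirroring the pattern used throughout these tests
# (the variable names are examples only):
#
#     table_name = get_random_string(string.ascii_lowercase, 20)
#     table = _create_table(table_name, database)
#     try:
#         database.engine.connect().execute(table.insert(), ROWS_IN_DATABASE)
#         ...  # run the pipeline under test
#     finally:
#         table.drop(database.engine)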
def _create_schema(schema_name, database):
"""Create a new schema in the database.
    For RDBMSs with no distinction between schema and database (e.g. MySQL), it creates a new database. For Oracle, it
    creates a new user. For databases with schema objects, it creates a new schema.
    Use ``_drop_schema()`` to properly remove schemas created by this function in each case.
Args:
schema_name: (:obj:`str`) the schema name.
database: a :obj:`streamsets.testframework.environment.Database` object.
"""
if isinstance(database, OracleDatabase):
database.engine.execute('CREATE USER {user} IDENTIFIED BY {pwd}'.format(user=schema_name, pwd=schema_name))
database.engine.execute('GRANT CONNECT, RESOURCE TO {user}'.format(user=schema_name))
else:
schema = sqlalchemy.schema.CreateSchema(schema_name)
database.engine.execute(schema)
def _drop_schema(schema_name, database):
"""Remove a schema from the given database.
Args:
schema_name: (:obj:`str`) name of the schema to remove.
database: a :obj:`streamsets.testframework.environment.Database` object.
"""
if isinstance(database, OracleDatabase):
database.engine.execute('DROP USER {user} CASCADE'.format(user=schema_name))
else:
        database.engine.execute(sqlalchemy.schema.DropSchema(schema_name))
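# Illustrative pairing of _create_schema/_drop_schema (the schema name is an example only):
#
#     schema_name = _get_random_name(database, prefix='stf_schema_')
#     _create_schema(schema_name, database)
#     try:
#         ...  # create tables inside schema_name and run the pipeline under test
#     finally:
#         _drop_schema(schema_name, database)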
@credentialstore
@database
def test_jdbc_lookup_processor(sdc_builder, sdc_executor, database):
"""Simple JDBC Lookup processor test.
Pipeline will enrich records with the 'name' by adding a field as 'FirstName'.
The pipeline looks like:
dev_raw_data_source >> jdbc_lookup >> trash
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(LOOKUP_RAW_DATA))
jdbc_lookup = pipeline_builder.add_stage('JDBC Lookup')
query_str = f"SELECT name FROM {table_name} WHERE id = '${{record:value('/id')}}'"
column_mappings = [dict(dataType='USE_COLUMN_TYPE',
columnName='name',
field='/FirstName')]
jdbc_lookup.set_attributes(sql_query=query_str,
column_mappings=column_mappings)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> jdbc_lookup >> trash
pipeline = pipeline_builder.build(title='JDBC Lookup').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
LOOKUP_EXPECTED_DATA = copy.deepcopy(ROWS_IN_DATABASE)
for record in LOOKUP_EXPECTED_DATA:
record.pop('id')
record['FirstName'] = record.pop('name')
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
rows_from_snapshot = [{list(record.field.keys())[1]: list(record.field.values())[1].value}
for record in snapshot[jdbc_lookup].output]
assert rows_from_snapshot == LOOKUP_EXPECTED_DATA
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_tee_processor(sdc_builder, sdc_executor, database):
"""Simple JDBC Tee processor test.
Pipeline will insert records into database and then pass generated database column 'id' to fields.
The pipeline looks like:
dev_raw_data_source >> jdbc_tee >> trash
"""
if isinstance(database, OracleDatabase):
pytest.skip('JDBC Tee Processor does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('JDBC Tee Processor does not support SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(RAW_DATA))
jdbc_tee = pipeline_builder.add_stage('JDBC Tee')
    # Note that ids are not inserted here; the database generates them automatically.
field_to_column_mapping = [dict(columnName='name',
dataType='USE_COLUMN_TYPE',
field='/name',
paramValue='?')]
generated_column_mappings = [dict(columnName='id',
dataType='USE_COLUMN_TYPE',
field='/id')]
jdbc_tee.set_attributes(default_operation='INSERT',
field_to_column_mapping=field_to_column_mapping,
generated_column_mappings=generated_column_mappings,
table_name=table_name)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> jdbc_tee >> trash
pipeline = pipeline_builder.build(title='JDBC Tee').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
# Verify the JDBC Tee processor has got new ids which were generated by database.
rows_from_snapshot = [{list(item.field.keys())[0]: list(item.field.values())[0].value,
list(item.field.keys())[1]: int(list(item.field.values())[1].value)}
for item in snapshot[jdbc_tee].output]
assert rows_from_snapshot == ROWS_IN_DATABASE
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@pytest.mark.parametrize('use_multi_row', [True, False])
@sdc_min_version('3.0.0.0') # stop_after_first_batch
def test_jdbc_tee_processor_multi_ops(sdc_builder, sdc_executor, database, use_multi_row):
"""JDBC Tee processor with multiple operations
Pipeline will delete/update/insert records into database with one batch and then update 'id'
field if it is inserted. The 'operation' field is used for the record header sdc.operation.type
which defines the CRUD operation (1: Insert, 2: Delete, 3: Update). The pipeline looks like:
dev_raw_data_source >> expression evaluator >> jdbc_tee >> trash
"""
if isinstance(database, OracleDatabase):
pytest.skip('JDBC Tee Processor does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('JDBC Tee Processor does not support SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
pipeline_builder = sdc_builder.get_pipeline_builder()
DATA = [
{'operation': 2, 'name': 'Jarcec', 'id': 2}, # delete
{'operation': 3, 'name': 'Hari', 'id': 3}, # update
{'operation': 1, 'name': 'Eddie'} # insert, id will be added by JDBC Tee
]
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON',
raw_data='\n'.join(json.dumps(rec) for rec in DATA),
stop_after_first_batch=True)
HEADER_EXPRESSIONS = [dict(attributeToSet='sdc.operation.type',
headerAttributeExpression="${record:value('/operation')}")]
expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
expression_evaluator.header_attribute_expressions = HEADER_EXPRESSIONS
FIELD_TO_COLUMN = [dict(columnName='name', field='/name', paramValue='?')]
jdbc_tee = pipeline_builder.add_stage('JDBC Tee')
jdbc_tee.set_attributes(default_operation='INSERT',
field_to_column_mapping=FIELD_TO_COLUMN,
generated_column_mappings=[dict(columnName='id', field='/id')],
table_name=table_name,
use_multi_row_operation=use_multi_row)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> expression_evaluator >> jdbc_tee >> trash
pipeline_title = 'JDBC Tee MultiOps MultiRow' if use_multi_row else 'JDBC Tee MultiOps SingleRow'
pipeline = pipeline_builder.build(title=pipeline_title).configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
table = _create_table(table_name, database)
try:
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
        # Passing only names to get the correct sequence numbers, especially for PostgreSQL
if type(database) == SQLServerDatabase:
connection.execute(table.insert(), [{'id': row['id'], 'name': row['name']} for row in ROWS_IN_DATABASE])
else:
connection.execute(table.insert(), [{'name': row['name']} for row in ROWS_IN_DATABASE])
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sequence_id = len(ROWS_IN_DATABASE)
# Verify the database is updated.
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
expected_data = [(row['name'], row['id']) for row in ROWS_IN_DATABASE]
for record in DATA:
if record['operation'] == 1: # insert
sequence_id += 1
expected_data.append((record['name'], sequence_id))
elif record['operation'] == 2: # delete
expected_data = [row for row in expected_data if row[1] != record['id']]
elif record['operation'] == 3: # update
expected_data = [row if row[1] != record['id'] else (record['name'], row[1]) for row in expected_data]
assert data_from_database == expected_data
# Verify the JDBC Tee processor has the new ID which were generated by database.
jdbc_tee_output = snapshot[jdbc_tee].output
name_id_from_output = [(record.field['name'], record.field['id']) for record in jdbc_tee_output]
assert name_id_from_output == [('Jarcec', 2), ('Hari', 3), ('Eddie', sequence_id)]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_query_executor(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test.
    The pipeline will insert records into the database, and then the verification that the correct data
    was inserted happens via sqlalchemy.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
query_str = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(RAW_DATA) - 1)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.14.0') # multiple queries execution
def test_jdbc_query_executor_multiple_queries(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test.
    The pipeline will insert records into the database, and then the verification that the correct data
    was inserted happens via sqlalchemy.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
"""
table_name = f'stf_{get_random_string(string.ascii_lowercase, 20)}'
table = _create_table(table_name, database)
ROWS_IN_DATABASE_UPDATED = [
{'id': 1, 'name': 'Alex'},
{'id': 2, 'name': 'Alex'},
{'id': 3, 'name': 'Alex'}
]
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
query_str1 = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
query_str2 = f"UPDATE {table_name} SET name = 'Alex' WHERE name = '${{record:value('/name')}}'"
jdbc_query_executor.set_attributes(sql_queries=[query_str1, query_str2])
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(RAW_DATA) - 1)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE_UPDATED]
finally:
logger.info(f'Dropping table {table_name} in {database.type} database ...')
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_successful_query_event(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type.
    The pipeline will insert records into the database, and then the verification that the correct data
    was inserted happens via sqlalchemy. Event records are verified for the successful-query event type.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_insert_query_result_count(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type and query result count enabled.
    The pipeline will insert records into the database, and then the verification that the correct data
    was inserted happens via sqlalchemy. Event records are verified for the successful-query event type
    and the query-result field for the insert query.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
jdbc_query_executor.set_attributes(include_query_result_count_in_events=True)
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
assert '1 row(s) affected' == event_records[0].value['value']['query-result']['value']
assert '1 row(s) affected' == event_records[1].value['value']['query-result']['value']
assert '1 row(s) affected' == event_records[2].value['value']['query-result']['value']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.0.0.0')
def test_jdbc_query_executor_lifecycle_events(sdc_builder, sdc_executor, database):
"""Verify that the JDBC Query Executor will work properly when used inside pipeline lifecycle stages."""
if isinstance(database, OracleDatabase):
pytest.skip('This test does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('This test does not support SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('user', sqlalchemy.String(50)),
sqlalchemy.Column('event', sqlalchemy.String(50)))
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
query = f"INSERT INTO {table_name} VALUES ('${{record:value('/user')}}', '${{record:attribute('sdc.event.type')}}')"
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'TEXT'
    source.raw_data = 'SOMETHING'
trash = builder.add_stage('Trash')
start_stage = builder.add_start_event_stage('JDBC Query')
if Version(sdc_builder.version) < Version('3.14.0'):
start_stage.set_attributes(sql_query=query)
else:
start_stage.set_attributes(sql_queries=[query])
stop_stage = builder.add_stop_event_stage('JDBC Query')
if Version(sdc_builder.version) < Version('3.14.0'):
stop_stage.set_attributes(sql_query=query)
else:
stop_stage.set_attributes(sql_queries=[query])
source >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[1])
result.close()
assert db[0][0] == 'admin'
assert db[0][1] == 'pipeline-start'
assert db[1][0] == ''
assert db[1][1] == 'pipeline-stop'
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_query_executor_failure_state(sdc_builder, sdc_executor, database):
"""Verify that the executor is properly called with the proper state on pipeline initialization failure."""
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('reason', sqlalchemy.String(50)))
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
query = f"INSERT INTO {table_name} VALUES ('${{record:value('/reason')}}')"
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('JDBC Multitable Consumer')
source.table_configs=[{"tablePattern": 'this_table_do_not_exists'}]
trash = builder.add_stage('Trash')
stop_stage = builder.add_stop_event_stage('JDBC Query')
if Version(sdc_builder.version) < Version('3.14.0'):
stop_stage.set_attributes(sql_query=query)
else:
stop_stage.set_attributes(sql_queries=[query])
source >> trash
pipeline = builder.build().configure_for_environment(database)
    # Injecting failure - this URL won't exist, so the pipeline won't be able to start properly
source.jdbc_connection_string = "jdbc:mysql://this-do-not-exists:3306/awesome-db"
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline, wait=False).wait_for_status('START_ERROR', ignore_errors=True)
result = database.engine.execute(table.select())
db = result.fetchall()
result.close()
assert db[0][0] == 'FAILURE'
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_select_query_result_count(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type and query result count enabled.
    The pipeline will insert records into the database, and then the verification that the correct data
    was inserted happens via sqlalchemy; the same data is then queried. Event records are
    verified for the successful-query event type and the query-result field for the select query.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor1 >= jdbc_query_executor2 >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str1 = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
query_str2 = f"SELECT * FROM {table_name}"
jdbc_query_executor1 = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor1.set_attributes(sql_query=query_str1)
else:
jdbc_query_executor1.set_attributes(sql_queries=[query_str1])
jdbc_query_executor2 = pipeline_builder.add_stage('JDBC Query', type='executor')
jdbc_query_executor2.set_attributes(include_query_result_count_in_events=True)
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor2.set_attributes(sql_query=query_str2)
else:
jdbc_query_executor2.set_attributes(sql_queries=[query_str2])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
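# Wiring below: '>>' connects the data (output) lane and '>=' connects the event lane,
# so the events produced by jdbc_query_executor1 feed jdbc_query_executor2, whose
# events in turn go to trash1.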
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor1 >= jdbc_query_executor2 >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor2.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
assert '3 row(s) returned' == event_records[0].value['value']['query-result']['value']
assert '3 row(s) returned' == event_records[1].value['value']['query-result']['value']
assert '3 row(s) returned' == event_records[2].value['value']['query-result']['value']
result = database.engine.execute(table.select())
result.close()
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_failed_query_event(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for failed-query event type.
The pipeline tries to insert records into a non-existent table, so the query fails.
Event records are verified for failed-query event type.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
invalid_table = "INVALID_TABLE"
query_str = f"INSERT INTO {invalid_table} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'failed-query' == event_records[0].header['values']['sdc.event.type']
assert 'failed-query' == event_records[1].header['values']['sdc.event.type']
assert 'failed-query' == event_records[2].header['values']['sdc.event.type']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == []
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.10.0')
@pytest.mark.parametrize('enable_parallel_execution', [True, False])
def test_jdbc_query_executor_parallel_query_execution(sdc_builder, sdc_executor, database, enable_parallel_execution):
"""Test JDBC Query Executor's parallel query execution mode.
The pipeline inserts records into the database and then updates them.
Using sqlalchemy, we verify that the correct data was inserted (and updated) in the database.
Pipeline configuration:
dev_raw_data_source >> jdbc_query_executor
"""
table_name = get_random_string(string.ascii_uppercase, 20)
table = _create_table(table_name, database)
# Make sure that we properly escape the table name. Ideally we would escape it for all databases, but since we
# know that all except PostgreSQL are passing, we only escape for PostgreSQL for now.
enclosed_table = f'"{table_name}"' if type(database) == PostgreSqlDatabase else table_name
# First the inserts - they will run in parallel, then all the updates will run sequentially.
# The net result is that all records should end up updated to the (last) new value;
# otherwise we've failed.
statements = []
for rec in ROWS_IN_DATABASE:
statements.extend([f"INSERT INTO {enclosed_table} (name, id) VALUES ('{rec['name']}', {rec['id']})",
f"UPDATE {enclosed_table} SET name = 'bob' WHERE id = {rec['id']}",
f"UPDATE {enclosed_table} SET name = 'MERRICK' WHERE id = {rec['id']}"])
# convert to string - Dev Raw Data Source Data Format tab does not seem
# to "unroll" the array into newline-terminated records.
statements = "\n".join(statements)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=statements)
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
query_str = "${record:value('/text')}"
jdbc_query_executor.set_attributes(enable_parallel_queries=enable_parallel_execution,
maximum_pool_size=2,
minimum_idle_connections=2)
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
dev_raw_data_source >> jdbc_query_executor
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE)*3)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [('MERRICK', record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
def _create_jdbc_producer_pipeline(pipeline_builder, pipeline_title, raw_data, table_name, operation):
"""Helper function to create and return a pipeline with JDBC Producer
The Deduplicator assures there is only one ingest to the database. The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
FIELD_MAPPINGS = [dict(field='/id', columnName='id'),
dict(field='/name', columnName='name')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation=operation,
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
return pipeline_builder.build(title=pipeline_title)
@database
def test_jdbc_producer_insert(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with INSERT operation.
The pipeline inserts records into the database and verifies that the correct data is in the database.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Insert', DATA, table_name, 'INSERT')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database('mysql', 'postgresql')
def test_jdbc_producer_insert_type_err(sdc_builder, sdc_executor, database):
"""This test covers invalid type coersion - writing string into int column. As different databases works differently,
we can't assert this across all supported databases. MySQL and PostgreSQL behaves the same way and we can properly
catch and generate JDBC_23. Other databases report coercion issues much later in the query cycle, sometimes even
in a way where we can't understand what and why has happened.
"""
ROWS_IN_DATABASE = [
{'id': 1, 'name': 'Dima'},
{'id': 'X', 'name': 'Jarcec'},
{'id': 3, 'name': 'Arvind'}
]
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON', raw_data=DATA, stop_after_first_batch=True)
FIELD_MAPPINGS = [dict(field='/id', columnName='id', dataType='INTEGER'),
dict(field='/name', columnName='name', dataType='STRING')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation='INSERT',
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='TO_ERROR')
dev_raw_data_source >> jdbc_producer
pipeline = pipeline_builder.build(title="JDBC producer with error")
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE
if record['id'] != 'X']
stage = snapshot[jdbc_producer.instance_name]
assert 'JDBC_23' == stage.error_records[0].header['errorCode']
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_insert_multiple_types(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with INSERT operation.
The pipeline inserts records of multiple types in batches of 10,000.
The pipeline should look like:
dev_data_generator >> jdbc_producer
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
dev_data_generator.fields_to_generate = [
{'field': 'field1', 'type': 'STRING'},
{'field': 'field2', 'type': 'DATETIME'},
{'field': 'field3', 'type': 'INTEGER'},
{'field': 'field4', 'precision': 10, 'scale': 2, 'type': 'DECIMAL'},
{'field': 'field5', 'type': 'DOUBLE'}
]
batch_size = 10000
dev_data_generator.set_attributes(delay_between_batches=0, batch_size=batch_size)
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('field1', sqlalchemy.String(50)),
sqlalchemy.Column('field2', sqlalchemy.DateTime),
sqlalchemy.Column('field3', sqlalchemy.Integer),
sqlalchemy.Column('field4', sqlalchemy.DECIMAL(10, 2)),
sqlalchemy.Column('field5', sqlalchemy.Float),
schema=None)
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
FIELD_MAPPINGS = [dict(field='/field1', columnName='field1', dataType='STRING'),
dict(field='/field2', columnName='field2', dataType='DATETIME'),
dict(field='/field3', columnName='field3', dataType='INTEGER'),
dict(field='/field4', columnName='field4', dataType='DECIMAL'),
dict(field='/field5', columnName='field5', dataType='FLOAT')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation='INSERT',
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='TO_ERROR')
dev_data_generator >> jdbc_producer
pipeline = pipeline_builder.build(title="JDBC producer multiple types")
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(batch_size, timeout_sec=3600)
snapshot = sdc_executor.capture_snapshot(pipeline).snapshot
sdc_executor.stop_pipeline(pipeline).wait_for_stopped()
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by field2
result.close()
assert len(data_from_database) > batch_size
stage = snapshot[jdbc_producer.instance_name]
assert len(stage.error_records) == 0
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
# SDC-10786: This test intends to cover the case really precise decimals being inserted into a Float column in MSSQL
@database('sqlserver')
def test_mssql_producer_bigdecimal(sdc_builder, sdc_executor, database):
"""
Insert a Decimal value with up to 38 decimals into a Float column in MSSQL.
This will look like:
dev_data_generator >> jdbc_producer
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = sqlalchemy.Table(
table_name,
sqlalchemy.MetaData(),
sqlalchemy.Column('a_value', sqlalchemy.Float()),
sqlalchemy.Column('b_value', sqlalchemy.Float()),
sqlalchemy.Column('c_value', sqlalchemy.Float()),
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, autoincrement=False)
)
table.create(database.engine)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
dev_data_generator.fields_to_generate = [{'field': 'id', 'type': 'INTEGER'},
{'field': 'a_value', 'precision': 50, 'scale': 40, 'type': 'DECIMAL'},
{'field': 'b_value', 'precision': 5, 'scale': 2, 'type': 'DECIMAL'},
{'field': 'c_value', 'type': 'DECIMAL'}]
dev_data_generator.batch_size = 1
FIELD_MAPPINGS = [dict(field='/id', columnName='id'),
dict(field='/a_value', columnName='a_value'),
dict(field='/b_value', columnName='b_value'),
dict(field='/c_value', columnName='c_value')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation='INSERT',
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
dev_data_generator >> jdbc_producer
pipeline = pipeline_builder.build('MSSQL BigDecimal')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True, wait=True).snapshot
sdc_executor.stop_pipeline(pipeline)
records = [record.field for record in snapshot[dev_data_generator.instance_name].output]
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[0]) # order by a_value
result.close()
assert len(data_from_database) == 1
assert math.isclose(float(str(records[0]['a_value'])), data_from_database[0][0], rel_tol=0.02)
assert math.isclose(float(str(records[0]['b_value'])), data_from_database[0][1], rel_tol=0.02)
assert math.isclose(float(str(records[0]['c_value'])), data_from_database[0][2], rel_tol=0.02)
assert math.isclose(float(str(records[0]['id'])), data_from_database[0][3], rel_tol=0.02)
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_coerced_insert(sdc_builder, sdc_executor, database):
"""Extension of the Simple JDBC Producer test with INSERT operation.
The pipeline inserts records into the database.
In one record, the data is represented as type String, while the column is of type Integer.
This should be passed to the database to coerce.
Verify that correct data is in the database.
Please note the use of the local COERCE_ROWS_IN_DATABASE to insert
and the global ROWS_IN_DATABASE to verify.
COERCE_ROWS_IN_DATABASE has id (an integer column) given as a string.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
COERCE_ROWS_IN_DATABASE = [
{'id': '1', 'name': 'Dima'},
{'id': '2', 'name': 'Jarcec'},
{'id': '3', 'name': 'Arvind'}
]
DATA = '\n'.join(json.dumps(rec) for rec in COERCE_ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Insert', DATA, table_name, 'INSERT')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_delete(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with DELETE operation.
The pipeline deletes records from the database and verifies that the correct data remains in the database.
Records are deleted if the primary key matches, irrespective of other column values.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_TO_UPDATE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Delete', DATA, table_name, 'DELETE')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_TO_UPDATE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = result.fetchall()
result.close()
removed_ids = [record['id'] for record in ROWS_TO_UPDATE]
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE if
record['id'] not in removed_ids]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_update(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with UPDATE operation.
The pipeline updates records in the database and verifies that the correct data is in the database.
Records with a matching primary key are updated; no action is taken for unmatched records.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_TO_UPDATE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Update', DATA, table_name, 'UPDATE')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_TO_UPDATE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
updated_names = {record['id']: record['name'] for record in ROWS_IN_DATABASE}
updated_names.update({record['id']: record['name'] for record in ROWS_TO_UPDATE})
assert data_from_database == [(updated_names[record['id']], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
# SDC-10987: JDBC Multitable Consumer multiple offset columns with initial offset
@database
def test_jdbc_multitable_consumer_initial_offset_at_the_end(sdc_builder, sdc_executor, database):
"""
Set initial offset at the end of the table and verify that no records were read.
"""
table_name = get_random_string(string.ascii_lowercase, 10)
builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.table_configs = [{
"tablePattern": table_name,
"overrideDefaultOffsetColumns": True,
"offsetColumns": ["id"],
"offsetColumnToInitialOffsetValue": [{
"key": "id",
"value": "5"
}]
}]
trash = builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True),
sqlalchemy.Column('name', sqlalchemy.String(32), quote=True)
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline)
# Since the pipeline is not meant to read anything, we 'simply' wait
time.sleep(5)
sdc_executor.stop_pipeline(pipeline)
# There must be no records read
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 0
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
# SDC-10562: Row-level stage errors not being caught at pipeline
@sdc_min_version('3.0.0.0')
@database
def test_jdbc_producer_multirow_with_duplicates(sdc_builder, sdc_executor, database):
"""
Make sure that when using multi-row insert, data-related errors are sent to the error stream.
"""
if type(database) == SQLServerDatabase:
pytest.skip('This test is trying to insert explicit value to identity column which is not supported on SQL Server')
table_name = get_random_string(string.ascii_lowercase, 15)
builder = sdc_builder.get_pipeline_builder()
# Generate batch that will repeat the same primary key in the middle of the batch (on third row)
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = """{"id" : 1}\n{"id" : 2}\n{"id" : 1}\n{"id" : 3}"""
producer = builder.add_stage('JDBC Producer')
producer.table_name = table_name
producer.field_to_column_mapping = []
producer.default_operation = 'INSERT'
producer.use_multi_row_operation = True
if database.type == 'Oracle':
producer.enclose_object_names = True
source >> producer
pipeline = builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True)
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
# Since we are inserting duplicate primary key, the batch should fail
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 4
assert history.latest.metrics.counter('pipeline.batchErrorRecords.counter').count == 4
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
# And similarly the database side should be empty as well
result = database.engine.execute(table.select())
data_from_database = result.fetchall()
result.close()
assert len(data_from_database) == 0
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_multitable(sdc_builder, sdc_executor, database):
"""Test for JDBC Producer with multiple destination table. We create 3 tables in the default schema and use an EL
expression to insert records according to the /table record field.
Pipeline:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
table1_name = _get_random_name(database, prefix='stf_table_')
table2_name = _get_random_name(database, prefix='stf_table_')
table3_name = _get_random_name(database, prefix='stf_table_')
table1 = _create_table(table1_name, database)
table2 = _create_table(table2_name, database)
table3 = _create_table(table3_name, database)
ROWS = [{'table': table1_name, 'id': 1, 'name': 'Roger Federer'},
{'table': table2_name, 'id': 2, 'name': 'Rafael Nadal'},
{'table': table3_name, 'id': 3, 'name': 'Dominic Thiem'}]
INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multitable Insert', INPUT_DATA,
"${record:value('/table')}", 'INSERT')
# JDBC Producer's "Table Name" property is converted to uppercase through the configure_for_environment() method
# when database is Oracle. However EL function names are case-sensitive; we overwrite it afterwards to avoid an EL
# error.
pipeline.configure_for_environment(database)
pipeline[2].set_attributes(table_name="${record:value('/table')}")
# For Oracle, the default value of JDBC Producer's "Schema Name" property in the database environment is the
# database name, but it should be the username instead.
if isinstance(database, OracleDatabase):
pipeline[2].set_attributes(schema_name=database.username.upper())
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
sdc_executor.stop_pipeline(pipeline)
result1 = database.engine.execute(table1.select())
result2 = database.engine.execute(table2.select())
result3 = database.engine.execute(table3.select())
data1 = result1.fetchall()
data2 = result2.fetchall()
data3 = result3.fetchall()
assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
result1.close()
result2.close()
result3.close()
finally:
logger.info('Dropping tables %s, %s, %s in %s database...', table1_name, table2_name, table3_name,
database.type)
table1.drop(database.engine)
table2.drop(database.engine)
table3.drop(database.engine)
# Test SDC-10719
@database
@sdc_min_version('3.8.0')
def test_jdbc_producer_multischema(sdc_builder, sdc_executor, database):
"""Test for JDBC Producer in a multischema scenario with a single destination table for each schema. We create 3
schemas with one table for each, with the same name. Then we use an EL expression to insert records according to
the /schema record field.
Pipeline:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
schema1_name = _get_random_name(database, prefix='stf_schema_')
schema2_name = _get_random_name(database, prefix='stf_schema_')
schema3_name = _get_random_name(database, prefix='stf_schema_')
table_name = _get_random_name(database, prefix='stf_table_')
_create_schema(schema1_name, database)
_create_schema(schema2_name, database)
_create_schema(schema3_name, database)
table1 = _create_table(table_name, database, schema_name=schema1_name)
table2 = _create_table(table_name, database, schema_name=schema2_name)
table3 = _create_table(table_name, database, schema_name=schema3_name)
ROWS = [{'schema': schema1_name, 'id': 1, 'name': 'Roger Federer'},
{'schema': schema2_name, 'id': 2, 'name': 'Rafael Nadal'},
{'schema': schema3_name, 'id': 3, 'name': 'Dominic Thiem'}]
INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multischema Insert', INPUT_DATA,
table_name, 'INSERT')
# JDBC Producer's "Schema Name" property is set through the `database` environment under some circumstances
# (e.g. Sql Server database). We overwrite it afterwards for the test.
pipeline.configure_for_environment(database)
pipeline[2].set_attributes(schema_name="${record:value('/schema')}")
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
sdc_executor.stop_pipeline(pipeline)
result1 = database.engine.execute(table1.select())
result2 = database.engine.execute(table2.select())
result3 = database.engine.execute(table3.select())
data1 = result1.fetchall()
data2 = result2.fetchall()
data3 = result3.fetchall()
assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
result1.close()
result2.close()
result3.close()
finally:
logger.info('Dropping table %s in schemas...', table_name)
table1.drop(database.engine)
table2.drop(database.engine)
table3.drop(database.engine)
logger.info('Dropping schemas %s, %s, %s...', schema1_name, schema2_name, schema3_name)
_drop_schema(schema1_name, database)
_drop_schema(schema2_name, database)
_drop_schema(schema3_name, database)
# Test SDC-10719
@database
@sdc_min_version('3.8.0')
def test_jdbc_producer_multischema_multitable(sdc_builder, sdc_executor, database):
"""Test a JDBC Producer in a multischema scenario with different destination tables for each schema. We create 3
schemas, each with one table, using different table names. Then we use EL expressions to insert records according to
the /schema and /table record fields.
There was a limitation in previous versions that affected MySQL and MemSQL. These RDBMSs do not differentiate
between schema and database. SDC used the database configured in the JDBC connection string, and looked up database
metadata filtering by database+schema. If the schema was different from the database in the connection string, metadata
could not be retrieved. This was a problem in multischema scenarios, where several schemas are employed.
Pipeline:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
schema1_name = _get_random_name(database, prefix='stf_schema_')
schema2_name = _get_random_name(database, prefix='stf_schema_')
schema3_name = _get_random_name(database, prefix='stf_schema_')
table1_name = _get_random_name(database, prefix='stf_table_')
table2_name = _get_random_name(database, prefix='stf_table_')
table3_name = _get_random_name(database, prefix='stf_table_')
_create_schema(schema1_name, database)
_create_schema(schema2_name, database)
_create_schema(schema3_name, database)
table1 = _create_table(table1_name, database, schema_name=schema1_name)
table2 = _create_table(table2_name, database, schema_name=schema2_name)
table3 = _create_table(table3_name, database, schema_name=schema3_name)
ROWS = [{'schema': schema1_name, 'table': table1_name, 'id': 1, 'name': 'Roger Federer'},
{'schema': schema2_name, 'table': table2_name, 'id': 2, 'name': 'Rafael Nadal'},
{'schema': schema3_name, 'table': table3_name, 'id': 3, 'name': 'Dominic Thiem'}]
INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multischema and Multitable Insert',
INPUT_DATA, "${record:value('/table')}", 'INSERT')
# JDBC Producer's "Schema Name" property is set through the `database` environment under some circumstances
# (e.g. Sql Server database). We overwrite it afterwards for the test.
pipeline.configure_for_environment(database)
pipeline[2].set_attributes(schema_name="${record:value('/schema')}")
# JDBC Producer's "Table Name" property is converted to uppercase through the configure_for_environment() method
# when database is Oracle. However EL function names are case-sensitive; we overwrite it afterwards to avoid an EL
# error.
pipeline[2].set_attributes(table_name="${record:value('/table')}")
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
sdc_executor.stop_pipeline(pipeline)
result1 = database.engine.execute(table1.select())
result2 = database.engine.execute(table2.select())
result3 = database.engine.execute(table3.select())
data1 = result1.fetchall()
data2 = result2.fetchall()
data3 = result3.fetchall()
assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
result1.close()
result2.close()
result3.close()
finally:
logger.info('Dropping tables %s, %s, %s...', table1_name, table2_name, table3_name)
table1.drop(database.engine)
table2.drop(database.engine)
table3.drop(database.engine)
logger.info('Dropping schemas %s, %s, %s...', schema1_name, schema2_name, schema3_name)
_drop_schema(schema1_name, database)
_drop_schema(schema2_name, database)
_drop_schema(schema3_name, database)
# SDC-11063: Do not reorder update statements in JDBC destination
@sdc_min_version('3.0.0.0')
@pytest.mark.parametrize('multi_row', [True, False])
@database
def test_jdbc_producer_ordering(sdc_builder, sdc_executor, multi_row, database):
"""Ensure that variously intertwined operations won't be executed out of order in harmful way."""
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True, autoincrement=False),
sqlalchemy.Column('a', sqlalchemy.Integer, quote=True),
sqlalchemy.Column('b', sqlalchemy.Integer, quote=True)
)
RAW_DATA = [
# Update id=5
{"op": 3, "id": 5, "a": 2, "b": 2},
# Insert id=4
{"op": 1, "id": 4, "a": 1, "b": 1},
# Update id=4
{"op": 3, "id": 4, "a": 2, "b": 2},
# Delete id=5
{"op": 2, "id": 5},
# Insert id=1
{"op": 1, "id": 1, "a": 1, "b": 1},
# Update id=1
{"op": 3, "id": 1, "a": 2},
# Insert id=2
{"op": 1, "id": 2, "a": 1, "b": 1},
# Delete id=2
{"op": 2, "id": 2},
# Update id=1
{"op": 3, "id": 1, "a": 2, "b": 2},
# Insert id=3
{"op": 1, "id": 3, "a": 1, "b": 1},
# Update id=1
{"op": 3, "id": 1, "a": 3},
# Update id=3
{"op": 3, "id": 3, "a": 5},
# Delete id=3
{"op": 2, "id": 3}
]
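# The /op values above use the CRUD operation codes that the Expression Evaluator
# below writes into the sdc.operation.type header: 1 = INSERT, 2 = DELETE, 3 = UPDATE.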
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = '\n'.join(json.dumps(rec) for rec in RAW_DATA)
expression = builder.add_stage('Expression Evaluator')
expression.header_attribute_expressions = [
{'attributeToSet': 'sdc.operation.type', 'headerAttributeExpression': '${record:value("/op")}'}
]
remover = builder.add_stage('Field Remover')
remover.set_attributes(fields=['/op'], action='REMOVE')
producer = builder.add_stage('JDBC Producer')
producer.field_to_column_mapping = []
producer.default_operation = 'UPDATE'
producer.table_name = table_name
producer.use_multi_row_operation = multi_row
if database.type == 'Oracle':
producer.enclose_object_names = True
source >> expression >> remover >> producer
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
# The table will start with a single row (id=5)
logger.info('Inserting rows into %s in %s database', table_name, database.type)
connection = database.engine.connect()
connection.execute(table.insert(), {'id': 5, 'a': 1, 'b': 1})
# Finally run the pipeline and verify its outcome
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 2
# id=1
assert 1 == db[0][0]
assert 3 == db[0][1]
assert 2 == db[0][2]
# id=4
assert 4 == db[1][0]
assert 2 == db[1][1]
assert 2 == db[1][2]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@sdc_min_version('3.0.0.0')
@database
def test_jdbc_multitable_events(sdc_builder, sdc_executor, database):
"""
Validate that we properly generate events
"""
if database.type == 'Oracle':
pytest.skip("This test depends on auto-created ID that doesn't work properly on Oracle")
table_prefix = get_random_string(string.ascii_lowercase, 20)
table_a = '{}_a'.format(table_prefix)
table_b = '{}_b'.format(table_prefix)
table_events = '{}_events'.format(table_prefix)
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('JDBC Multitable Consumer')
source.transaction_isolation = 'TRANSACTION_READ_COMMITTED'
source.table_configs = [{
'tablePattern': f'{table_prefix}%',
"enableNonIncremental": True,
'tableExclusionPattern': table_events
}]
trash = builder.add_stage('Trash')
expression = builder.add_stage('Expression Evaluator')
expression.field_expressions = [{
'fieldToSet': '/tbl',
'expression': '${record:value("/table")}${record:value("/tables[0]")}'
}, {
'fieldToSet': '/tbls',
'expression': '${record:value("/tables[0]")},${record:value("/tables[1]")}'
}, {
'fieldToSet': '/event',
'expression': '${record:eventType()}'
}
]
producer = builder.add_stage('JDBC Producer')
producer.table_name = table_events
producer.default_operation = 'INSERT'
producer.field_to_column_mapping = [
dict(field='/event', columnName='event'),
dict(field='/tbl', columnName='tbl'),
dict(field='/tbls', columnName='tbls')
]
source >> trash
source >= expression
expression >> producer
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
# We need three tables for this test
metadata = sqlalchemy.MetaData()
a = sqlalchemy.Table(
table_a,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True)
)
b = sqlalchemy.Table(
table_b,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=False)
)
events = sqlalchemy.Table(
table_events,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('event', sqlalchemy.String(50)),
sqlalchemy.Column('tbl', sqlalchemy.String(150)),
sqlalchemy.Column('tbls', sqlalchemy.String(150))
)
try:
logger.info('Creating tables %s, %s and %s in %s database ...', table_a, table_b, table_events, database.type)
a.create(database.engine)
b.create(database.engine)
events.create(database.engine)
logger.info('Inserting rows into %s and %s', table_a, table_b)
connection = database.engine.connect()
connection.execute(a.insert(), {'id': 1})
connection.execute(b.insert(), {'id': 1})
# Start the pipeline
status = sdc_executor.start_pipeline(pipeline)
# Read two records and generate 4 events - 6 output records in total
status.wait_for_pipeline_output_records_count(6)
result = database.engine.execute(events.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 4
tbls = set()
assert 'table-finished' == db[0][1]
tbls.add(db[0][2])
assert 'table-finished' == db[1][1]
tbls.add(db[1][2])
assert table_a in tbls
assert table_b in tbls
assert 'schema-finished' == db[2][1]
tbls = set(db[2][3].split(","))
assert table_a in tbls
assert table_b in tbls
assert 'no-more-data' == db[3][1]
# Portable truncate
events.drop(database.engine)
events.create(database.engine)
# Second iteration - insert one new row
logger.info('Inserting rows into %s', table_a)
connection = database.engine.connect()
connection.execute(a.insert(), {'id': 2})
# 1 record, 3 events more
status.wait_for_pipeline_output_records_count(10)
result = database.engine.execute(events.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 3
assert 'table-finished' == db[0][1]
assert table_a == db[0][2]
assert 'schema-finished' == db[1][1]
tbls = set(db[1][3].split(","))
assert table_a in tbls
assert table_b in tbls
assert 'no-more-data' == db[2][1]
# Now let's stop the pipeline and start it again
# SDC-10022: Multitable JDBC Origin with non-incremental table does not properly trigger 'no-more-data' event
sdc_executor.stop_pipeline(pipeline)
# Portable truncate
events.drop(database.engine)
events.create(database.engine)
# Start the pipeline and wait for it to read three records (3 events)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(3)
assert 'table-finished' == db[0][1]
assert table_a == db[0][2]
assert 'schema-finished' == db[1][1]
tbls = set(db[1][3].split(","))
assert table_a in tbls
assert table_b in tbls
assert 'no-more-data' == db[2][1]
finally:
sdc_executor.stop_pipeline(pipeline)
logger.info('Dropping tables %s, %s and %s in %s database...', table_a, table_b, table_events, database.type)
a.drop(database.engine)
b.drop(database.engine)
events.drop(database.engine)
# SDC-11092: Improve the ability of JDBC Destination to cover non-standard Data related SQL Error codes
@sdc_min_version('3.0.0.0')
@pytest.mark.parametrize('multi_row', [True, False])
@database('oracle')
def test_jdbc_producer_oracle_data_errors(sdc_builder, sdc_executor, multi_row, database):
"""Ensure that data related error in Oracle will be sent to eror stream rather then shutting the pipeline down."""
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('ID', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('STR', sqlalchemy.String(2)),
)
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = '{"ID" : 1, "STR": "Longer then 2 characters"}'
producer = builder.add_stage('JDBC Producer')
producer.field_to_column_mapping = []
producer.default_operation = 'INSERT'
producer.table_name = table_name
producer.use_multi_row_operation = multi_row
source >> producer
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
# The table in database needs to be empty
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 0
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1
assert history.latest.metrics.counter('pipeline.batchErrorRecords.counter').count == 1
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
# SDC-11082: Extend support for TIMESTAMP WITH TIMEZONE Datatypes
@sdc_min_version('3.0.0.0')
@database('oracle')
# https://docs.oracle.com/cd/B28359_01/server.111/b28318/datatype.htm#CNCPT1821
# We don't support UriType (requires difficult workaround in JDBC)
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('number', '1', 'DECIMAL', '1'),
('char(2)', "'AB'", 'STRING', 'AB'),
('varchar(4)', "'ABCD'", 'STRING', 'ABCD'),
('varchar2(4)', "'NVAR'", 'STRING', 'NVAR'),
('nchar(3)', "'NCH'", 'STRING', 'NCH'),
('nvarchar2(4)', "'NVAR'", 'STRING', 'NVAR'),
('binary_float', '1.0', 'FLOAT', '1.0'),
('binary_double', '2.0', 'DOUBLE', '2.0'),
('date', "TO_DATE('1998-1-1 6:22:33', 'YYYY-MM-DD HH24:MI:SS')", 'DATETIME', 883635753000),
('timestamp', "TIMESTAMP'1998-1-2 6:00:00'", 'DATETIME', 883720800000),
('timestamp with time zone', "TIMESTAMP'1998-1-3 6:00:00-5:00'", 'ZONED_DATETIME', '1998-01-03T06:00:00-05:00'),
('timestamp with local time zone', "TIMESTAMP'1998-1-4 6:00:00-5:00'", 'ZONED_DATETIME', '1998-01-04T11:00:00Z'),
('long', "'LONG'", 'STRING', 'LONG'),
('blob', "utl_raw.cast_to_raw('BLOB')", 'BYTE_ARRAY', 'QkxPQg=='),
('clob', "'CLOB'", 'STRING', 'CLOB'),
('nclob', "'NCLOB'", 'STRING', 'NCLOB'),
('XMLType', "xmltype('<a></a>')", 'STRING', '<a></a>')
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_multitable_oracle_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible Oracle types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id number primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.on_unknown_type = 'CONVERT_TO_STRING'
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
origin.on_unknown_type = 'CONVERT_TO_STRING'
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
# Since we are controlling types, we want to check explicit values inside the record rather than the Python
# wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['DATA_COLUMN'].type == expected_type
assert null_record.field['DATA_COLUMN'].type == expected_type
assert record.field['DATA_COLUMN']._data['value'] == expected_value
assert null_record.field['DATA_COLUMN'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
# SDC-11324: JDBC MultiTable origin can create duplicate offsets
@database('mysql')
def test_jdbc_multitable_duplicate_offsets(sdc_builder, sdc_executor, database):
"""Validate that we will not create duplicate offsets. """
table_name = get_random_string(string.ascii_lowercase, 10)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": table_name}]
origin.max_batch_size_in_records = 1
trash = pipeline_builder.add_stage('Trash')
origin >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
# We should have processed each of the records exactly once
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == len(ROWS_IN_DATABASE)
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == len(ROWS_IN_DATABASE)
# And most importantly, validate offset
offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
assert offset is not None
assert offset['offsets'] is not None
expected_offset = {
f"tableName={table_name};;;partitioned=false;;;partitionSequence=-1;;;partitionStartOffsets=;;;partitionMaxOffsets=;;;usingNonIncrementalLoad=false": "id=3",
"$com.streamsets.pipeline.stage.origin.jdbc.table.TableJdbcSource.offset.version$": "2"
}
assert offset['offsets'] == expected_offset
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
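# A minimal sketch (not used by any test) illustrating the layout of the offset key
# asserted above: the key is a ';;;'-separated list of 'name=value' pairs describing
# the table and its partitioning state. The helper name here is ours, purely illustrative.
def _parse_multitable_offset_key(key):
    """Split a JDBC Multitable Consumer offset key into a dict of its parts."""
    return dict(part.split('=', 1) for part in key.split(';;;'))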
# SDC-11326: JDBC MultiTable origin forgets offset of non-incremental table on consecutive execution
@database('mysql')
@sdc_min_version('3.0.0.0')
def test_jdbc_multitable_lost_nonincremental_offset(sdc_builder, sdc_executor, database):
"""Validate the origin does not loose non-incremental offset on various runs."""
table_name = get_random_string(string.ascii_lowercase, 10)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": table_name, "enableNonIncremental": True}]
origin.max_batch_size_in_records = 1
trash = pipeline_builder.add_stage('Trash')
origin >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=False),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
# We should have read all the records
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == len(ROWS_IN_DATABASE)
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == len(ROWS_IN_DATABASE)
# And most importantly, validate offset
offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
assert offset is not None
assert offset['offsets'] is not None
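# For a non-incremental table the offset value only records whether the table was
# fully read ('completed=true'), rather than a column position.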
expected_offset = {
f"tableName={table_name};;;partitioned=false;;;partitionSequence=-1;;;partitionStartOffsets=;;;partitionMaxOffsets=;;;usingNonIncrementalLoad=true": "completed=true",
"$com.streamsets.pipeline.stage.origin.jdbc.table.TableJdbcSource.offset.version$": "2"
}
assert offset['offsets'] == expected_offset
for _ in range(5):
sdc_executor.start_pipeline(pipeline)
# Since the pipeline won't read anything, give it a few seconds to "idle"
time.sleep(2)
sdc_executor.stop_pipeline(pipeline)
# And it really should not have read anything!
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 0
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
# And offset should not have changed
offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
assert offset is not None
assert offset['offsets'] is not None
assert offset['offsets'] == expected_offset
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@sdc_min_version('3.9.0')
@database('oracle')
def test_jdbc_multitable_oracle_split_by_timestamp_with_timezone(sdc_builder, sdc_executor, database):
"""Make sure that we can properly partition TIMESTAMP WITH TIMEZONE type."""
table_name = get_random_string(string.ascii_uppercase, 20)
table_name_dest = get_random_string(string.ascii_uppercase, 20)
connection = database.engine.connect()
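# The query below computes the symmetric difference between the source and destination
# tables; an empty result means both tables contain exactly the same rows.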
comparing_query = f"""(
select * from {table_name}
minus
select * from {table_name_dest}
) union (
select * from {table_name_dest}
minus
select * from {table_name}
)"""
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
ID number primary key,
TZ timestamp(6) with time zone
)
""")
# Create destination table
connection.execute(f"""CREATE TABLE {table_name_dest} AS SELECT * FROM {table_name} WHERE 1=0""")
# Insert a few rows
for m in range(0, 5):
for s in range(0, 59):
connection.execute(f"INSERT INTO {table_name} VALUES({m*100+s}, TIMESTAMP'2019-01-01 10:{m}:{s}-5:00')")
connection.execute("commit")
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{
"tablePattern": f'%{table_name}%',
"overrideDefaultOffsetColumns": True,
"offsetColumns": ["TZ"],
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "30",
"maxNumActivePartitions": -1
}]
origin.number_of_threads = 2
origin.maximum_pool_size = 2
origin.max_batch_size_in_records = 30
finisher = builder.add_stage('Pipeline Finisher Executor')
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
FIELD_MAPPINGS = [dict(field='/ID', columnName='ID'),
dict(field='/TZ', columnName='TZ')]
destination = builder.add_stage('JDBC Producer')
destination.set_attributes(default_operation='INSERT',
table_name=table_name_dest,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
origin >> destination
origin >= finisher
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
# Insert few more rows and validate the outcome again
for m in range(6, 8):
for s in range(0, 59):
connection.execute(f"INSERT INTO {table_name} VALUES({m*100+s}, TIMESTAMP'2019-01-01 10:{m}:{s}-5:00')")
connection.execute("commit")
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
finally:
logger.info('Dropping table %s and %s in %s database ...', table_name, table_name_dest, database.type)
connection.execute(f"DROP TABLE {table_name}")
connection.execute(f"DROP TABLE {table_name_dest}")
def _get_date_from_days(d):
return datetime.date(1970, 1, 1) + datetime.timedelta(days=d)
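# Quick reference examples for the helper above (illustrative only):
#   _get_date_from_days(0)   -> datetime.date(1970, 1, 1)
#   _get_date_from_days(100) -> datetime.date(1970, 4, 11)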
@database('oracle')
def test_jdbc_multitable_oracle_split_by_date(sdc_builder, sdc_executor, database):
"""Make sure that we can properly partition DATE type.
More precisely, we want to run this pipeline:
multitable >> jdbc
multitable >= finisher
With more than one thread and using a DATE column as an offset column.
This feature was not available until version 3.11.0; the issue was detected and
fixed in ESC-513.
"""
table_name = get_random_string(string.ascii_uppercase, 20)
table_name_dest = get_random_string(string.ascii_uppercase, 20)
connection = database.engine.connect()
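# Same symmetric-difference check as in the TIMESTAMP WITH TIMEZONE test above:
# an empty result means the source and destination tables match.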
comparing_query = f"""(
select * from {table_name}
minus
select * from {table_name_dest}
) union (
select * from {table_name_dest}
minus
select * from {table_name}
)"""
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
ID number primary key,
DT date
)
""")
# Create destination table
connection.execute(f"""CREATE TABLE {table_name_dest} AS SELECT * FROM {table_name} WHERE 1=0""")
# Insert a few rows
for m in range(0, 5):
for s in range(0, 59):
identifier = 100 * m + s
connection.execute(
f"INSERT INTO {table_name} VALUES({identifier}, DATE'{_get_date_from_days(identifier)}')"
)
connection.execute("commit")
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('JDBC Multitable Consumer')
# Partition size is set to 259200000 which corresponds to 30 days in ms,
# since dates are translated to timestamps
origin.table_configs = [{
"tablePattern": f'%{table_name}%',
"overrideDefaultOffsetColumns": True,
"offsetColumns": ["DT"], # Should cause SDC < 3.11.0 to throw an UnsupportedOperationException
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "259200000", # 30 days = 30*24*60*60*1000 (259200000)ms
"maxNumActivePartitions": 2
}]
origin.number_of_threads = 2
origin.maximum_pool_size = 2
finisher = builder.add_stage('Pipeline Finisher Executor')
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
FIELD_MAPPINGS = [dict(field='/ID', columnName='ID'),
dict(field='/DT', columnName='DT')]
destination = builder.add_stage('JDBC Producer')
destination.set_attributes(default_operation='INSERT',
table_name=table_name_dest,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
origin >> destination
origin >= finisher
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
# Insert a few more rows and validate the outcome again
for m in range(6, 8):
for s in range(0, 59):
identifier = 100 * m + s
connection.execute(
f"INSERT INTO {table_name} VALUES({identifier}, DATE'{_get_date_from_days(identifier)}')"
)
connection.execute("commit")
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
finally:
logger.info('Dropping table %s and %s in %s database ...', table_name, table_name_dest, database.type)
connection.execute(f"DROP TABLE {table_name}")
connection.execute(f"DROP TABLE {table_name_dest}")
@sdc_min_version('3.9.0')
@database('mysql')
def test_jdbc_multitable_consumer_origin_high_resolution_timestamp_offset(sdc_builder, sdc_executor, database):
"""
Check that the JDBC Multitable Consumer origin can retrieve records from a table using a high-resolution
(millisecond-order) timestamp as the offset column. Verify that the records read have a timestamp greater than
the timestamp used as the initial offset.
Pipeline looks like:
jdbc_multitable_consumer >> trash
"""
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = f'{src_table_prefix}_{get_random_string(string.ascii_lowercase, 20)}'
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{'tablePattern': f'%{src_table_prefix}%',
'overrideDefaultOffsetColumns': True,
'offsetColumns': ['added'],
'offsetColumnToInitialOffsetValue': [{
'key': 'added',
'value': '${time:extractNanosecondsFromString(' +
'"1996-12-02 00:00:00.020111000")}'
}]
}])
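# Note: the time:extractNanosecondsFromString EL call above is assumed to convert the literal timestamp into the numeric initial offset; per the docstring, only rows with a later timestamp should be read.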
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
connection = database.engine.connect()
# Create table
logger.info('Creating table %s in %s database ...', table_name, database.type)
connection.execute(f"""
CREATE TABLE {table_name}(
id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
name varchar(100) NOT NULL,
age INT UNSIGNED NOT NULL,
added TIMESTAMP(6) NOT NULL
)
""")
# Insert rows
logger.info('Adding four rows into %s database ...', database.type)
connection.execute(f'INSERT INTO {table_name} VALUES(1, "Charly", 14, "2005-02-08 14:00:00.100105002")')
connection.execute(f'INSERT INTO {table_name} VALUES(2, "Paco", 28, "1992-05-25 11:00:00.000201010")')
connection.execute(f'INSERT INTO {table_name} VALUES(3, "Eugenio", 21, "1996-12-01 23:00:00.020111")')
connection.execute(f'INSERT INTO {table_name} VALUES(4, "Romualdo", 19, "2000-06-15 18:30:00.10523121")')
try:
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
name_id_from_output = [(record.field['name'], record.field['id'])
for record in snapshot[jdbc_multitable_consumer].output]
assert len(name_id_from_output) == 2
assert name_id_from_output == [('Romualdo', 4), ('Charly', 1)]
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
connection.execute(f'DROP TABLE {table_name}')
@database
@sdc_min_version('3.0.0.0')
def test_jdbc_multitable_consumer_partitioned_large_offset_gaps(sdc_builder, sdc_executor, database):
"""
Ensure that the multi-table JDBC origin can handle large gaps between offset column values in partitioned mode.
The destination is trash, and there is a finisher waiting for the no-more-data event.
The pipeline will be started, a snapshot spanning two batches will be captured (to ensure all expected rows
are covered), and the captured rows will then be asserted against the expected data.
This is a test for SDC-10053
"""
if database.type == 'Oracle':
pytest.skip("This test depends on proper case for column names that Oracle auto-uppers.")
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, 20))
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{
"tablePattern": f'{table_name}',
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "1000000",
"maxNumActivePartitions": -1
}])
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
jdbc_multitable_consumer >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding four rows into %s table, with a large gap in the primary keys ...', table_name)
connection = database.engine.connect()
rows_with_gap = ROWS_IN_DATABASE + [{'id': 5000000, 'name': 'Evil Jeff'}]
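# The jump from id 3 to id 5000000, with partitionSize set to 1000000 above, forces the origin to work through several partitions that contain no rows before it reaches 'Evil Jeff'.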
connection.execute(table.insert(), rows_with_gap)
connection.close()
sdc_executor.add_pipeline(pipeline)
# need to capture two batches, one for row IDs 1-3, and one for the last row after the large gap
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, batches=2, start_pipeline=True).snapshot
rows_from_snapshot = [(record.get_field_data('/name').value, record.get_field_data('/id').value)
for batch in snapshot.snapshot_batches
for record in batch.stage_outputs[jdbc_multitable_consumer.instance_name].output]
expected_data = [(row['name'], row['id']) for row in rows_with_gap]
logger.info('Actual %s expected %s', rows_from_snapshot, expected_data)
assert rows_from_snapshot == expected_data
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@sdc_min_version('3.0.0.0')
@database('mysql')
# https://dev.mysql.com/doc/refman/8.0/en/data-types.html
# We don't support BIT generally (the driver is doing funky 'random' mappings on certain versions)
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('TINYINT', '-128', 'SHORT', -128),
('TINYINT UNSIGNED', '255', 'SHORT', 255),
('SMALLINT', '-32768', 'SHORT', -32768),
('SMALLINT UNSIGNED', '65535', 'SHORT', -1), # Support for unsigned isn't entirely correct!
('MEDIUMINT', '-8388608', 'INTEGER', '-8388608'),
('MEDIUMINT UNSIGNED', '16777215', 'INTEGER', '16777215'),
('INT', '-2147483648', 'INTEGER', '-2147483648'),
('INT UNSIGNED', '4294967295', 'INTEGER', '-1'), # Support for unsigned isn't entirely correct!
('BIGINT', '-9223372036854775807', 'LONG', '-9223372036854775807'),
('BIGINT UNSIGNED', '18446744073709551615', 'LONG', '-1'), # Support for unsigned isn't entirely correct!
('DECIMAL(5, 2)', '5.20', 'DECIMAL', '5.20'),
('NUMERIC(5, 2)', '5.20', 'DECIMAL', '5.20'),
('FLOAT', '5.2', 'FLOAT', '5.2'),
('DOUBLE', '5.2', 'DOUBLE', '5.2'),
# ('BIT(8)',"b'01010101'", 'BYTE_ARRAY', 'VQ=='),
('DATE', "'2019-01-01'", 'DATE', 1546300800000),
('DATETIME', "'2019-01-01 5:00:00'", 'DATETIME', 1546318800000),
('TIMESTAMP', "'2019-01-01 5:00:00'", 'DATETIME', 1546318800000),
('TIME', "'5:00:00'", 'TIME', 18000000),
('YEAR', "'2019'", 'DATE', 1546300800000),
('CHAR(5)', "'Hello'", 'STRING', 'Hello'),
('VARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('BINARY(5)', "'Hello'", 'BYTE_ARRAY', 'SGVsbG8='),
('VARBINARY(5)', "'Hello'", 'BYTE_ARRAY', 'SGVsbG8='),
('BLOB', "'Hello'", 'BYTE_ARRAY', 'SGVsbG8='),
('TEXT', "'Hello'", 'STRING', 'Hello'),
("ENUM('a', 'b')", "'a'", 'STRING', 'a'),
("set('a', 'b')", "'a,b'", 'STRING', 'a,b'),
("POINT", "POINT(1, 1)", 'BYTE_ARRAY', 'AAAAAAEBAAAAAAAAAAAA8D8AAAAAAADwPw=='),
("LINESTRING", "LineString(Point(0,0), Point(10,10), Point(20,25), Point(50,60))", 'BYTE_ARRAY',
'AAAAAAECAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAkQAAAAAAAACRAAAAAAAAANEAAAAAAAAA5QAAAAAAAAElAAAAAAAAATkA='),
("POLYGON",
"Polygon(LineString(Point(0,0),Point(10,0),Point(10,10),Point(0,10),Point(0,0)),LineString(Point(5,5),Point(7,5),Point(7,7),Point(5,7),Point(5,5)))",
'BYTE_ARRAY',
'AAAAAAEDAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJEAAAAAAAAAAAAAAAAAAACRAAAAAAAAAJEAAAAAAAAAAAAAAAAAAACRAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAUQAAAAAAAABRAAAAAAAAAHEAAAAAAAAAUQAAAAAAAABxAAAAAAAAAHEAAAAAAAAAUQAAAAAAAABxAAAAAAAAAFEAAAAAAAAAUQA=='),
("JSON", "'{\"a\":\"b\"}'", 'STRING', '{\"a\": \"b\"}'),
])
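# The expected DATE/DATETIME/TIME values above are epoch-based milliseconds, e.g. 1546300800000 corresponds to 2019-01-01T00:00:00 UTC.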
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_multitable_mysql_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible Mysql types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.on_unknown_type = 'CONVERT_TO_STRING'
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
origin.on_unknown_type = 'CONVERT_TO_STRING'
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build(f"MySQL Type {sql_type} with value {insert_fragment}").configure_for_environment(
database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
# Since we are controlling types, we want to check explicit values inside the record rather than the Python
# wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['data_column'].type == expected_type
assert null_record.field['data_column'].type == expected_type
assert record.field['data_column']._data['value'] == expected_value
assert null_record.field['data_column'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.0.0.0')
@database('postgresql')
# https://www.postgresql.org/docs/11/datatype.html
# Not testing 'serial' family explicitly as that is just an alias
# Not supporting tsvector/tsquery as they don't seem a good fit for us
# bit(n) is not supported
# xml is not supported
# domain types (as a category) are not supported
# pg_lsn not supported
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('smallint', '-32768', 'SHORT', -32768),
('integer', '2147483647', 'INTEGER', '2147483647'),
('bigint', '-9223372036854775808', 'LONG', '-9223372036854775808'),
('decimal(5,2)', '5.20', 'DECIMAL', '5.20'),
('numeric(5,2)', '5.20', 'DECIMAL', '5.20'),
('real', '5.20', 'FLOAT', '5.2'),
('double precision', '5.20', 'DOUBLE', '5.2'),
('money', '12.34', 'DOUBLE', '12.34'),
('char(5)', "'Hello'", 'STRING', 'Hello'),
('varchar(5)', "'Hello'", 'STRING', 'Hello'),
('text', "'Hello'", 'STRING', 'Hello'),
('bytea', "'\\xDEADBEEF'", 'BYTE_ARRAY', '3q2+7w=='),
('timestamp', "'2003-04-12 04:05:06'", 'DATETIME', 1050120306000),
('timestamp with time zone', "'2003-04-12 04:05:06 America/New_York'", 'DATETIME', 1050134706000),
# For PostgreSQL, we don't create ZONED_DATETIME
('date', "'2019-01-01'", 'DATE', 1546300800000),
('time', "'5:00:00'", 'TIME', 18000000),
('time with time zone', "'04:05:06-08:00'", 'TIME', 43506000),
('interval', "INTERVAL '1' YEAR", 'STRING', '1 years 0 mons 0 days 0 hours 0 mins 0.00 secs'),
('boolean', "true", 'BOOLEAN', True),
('ai', "'sad'", 'STRING', 'sad'),
('point', "'(1, 1)'", 'STRING', '(1.0,1.0)'),
('line', "'{1, 1, 1}'", 'STRING', '{1.0,1.0,1.0}'),
('lseg', "'((1,1)(2,2))'", 'STRING', '[(1.0,1.0),(2.0,2.0)]'),
('box', "'(1,1)(2,2)'", 'STRING', '(2.0,2.0),(1.0,1.0)'),
('path', "'((1,1),(2,2))'", 'STRING', '((1.0,1.0),(2.0,2.0))'),
('polygon', "'((1,1),(2,2))'", 'STRING', '((1.0,1.0),(2.0,2.0))'),
('circle', "'<(1,1),5>'", 'STRING', '<(1.0,1.0),5.0>'),
('inet', "'127.0.0.1/16'", 'STRING', '127.0.0.1/16'),
('cidr', "'127.0.0.0/16'", 'STRING', '127.0.0.0/16'),
('macaddr', "'08:00:2b:01:02:03'", 'STRING', '08:00:2b:01:02:03'),
# ('macaddr8', "'08:00:2b:01:02:03'", 'STRING', '08:00:2b:ff:fe:01:02:03'),
# ('bit(8)', "b'10101010'", 'BYTE_ARRAY', '08:00:2b:ff:fe:01:02:03'), # Doesn't work at all today
('bit varying(3)', "b'101'", 'STRING', '101'),
('uuid', "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'", 'STRING', 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'),
# ('xml', "'<foo>bar</foo>'", 'STRING', ''), # Doesn't work properly today
("json", "'{\"a\":\"b\"}'", 'STRING', '{"a":"b"}'),
("jsonb", "'{\"a\":\"b\"}'", 'STRING', '{"a": "b"}'),
("integer[3][3]", "'{{1,2,3},{4,5,6},{7,8,9}}'", 'STRING', '{{1,2,3},{4,5,6},{7,8,9}}'),
("ct", "ROW(1, 2)", 'STRING', '(1,2)'),
("int4range", "'[1,2)'", 'STRING', '[1,2)'),
("int8range", "'[1,2)'", 'STRING', '[1,2)'),
("numrange", "'[1,2)'", 'STRING', '[1,2)'),
("tsrange", "'[2010-01-01 14:30, 2010-01-01 15:30)'", 'STRING', '["2010-01-01 14:30:00","2010-01-01 15:30:00")'),
("tstzrange", "'[2010-01-01 14:30 America/New_York, 2010-01-01 15:30 America/New_York)'", 'STRING',
'["2010-01-01 19:30:00+00","2010-01-01 20:30:00+00")'),
("daterange", "'[2010-01-01, 2010-01-02)'", 'STRING', '[2010-01-01,2010-01-02)'),
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_postgresql_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible PostgreSQL types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create enum type conditionally
connection.execute(f"""
DO
$$
BEGIN
IF NOT EXISTS (SELECT * FROM pg_type typ
INNER JOIN pg_namespace nsp ON nsp.oid = typ.typnamespace
WHERE nsp.nspname = current_schema() AND typ.typname = 'ai') THEN
CREATE TYPE ai AS ENUM ('sad', 'ok', 'happy');
END IF;
END;
$$
LANGUAGE plpgsql;
""")
# Create enum complex type conditionally
connection.execute(f"""
DO
$$
BEGIN
IF NOT EXISTS (SELECT * FROM pg_type typ
INNER JOIN pg_namespace nsp ON nsp.oid = typ.typnamespace
WHERE nsp.nspname = current_schema() AND typ.typname = 'ct') THEN
CREATE TYPE ct AS (a int, b int);
END IF;
END;
$$
LANGUAGE plpgsql;
""")
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.on_unknown_type = 'CONVERT_TO_STRING'
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
origin.on_unknown_type = 'CONVERT_TO_STRING'
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
# Since we are controlling types, we want to check explicit values inside the record rather than the Python
# wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['data_column'].type == expected_type
assert null_record.field['data_column'].type == expected_type
assert record.field['data_column']._data['value'] == expected_value
assert null_record.field['data_column'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.0.0.0')
@database('sqlserver')
# https://docs.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-2017
# hierarchyid types not supported
# Geometry and geography not supported
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('DATE', "'2019-01-01'", 'DATE', 1546300800000),
('DATETIME', "'2004-05-23T14:25:10'", 'DATETIME', 1085322310000),
('DATETIME2', "'2004-05-23T14:25:10'", 'DATETIME', 1085322310000),
('DATETIMEOFFSET', "'2004-05-23 14:25:10.3456 -08:00'", 'DEPENDS_ON_VERSION', 'depends_on_version'),
('SMALLDATETIME', "'2004-05-23T14:25:10'", 'DATETIME', 1085322300000),
('TIME', "'14:25:10'", 'TIME', 51910000),
('BIT', "1", 'BOOLEAN', True),
('DECIMAL(5,2)', '5.20', 'DECIMAL', '5.20'),
('NUMERIC(5,2)', '5.20', 'DECIMAL', '5.20'),
('REAL', '5.20', 'FLOAT', '5.2'),
('FLOAT', '5.20', 'DOUBLE', '5.2'),
('TINYINT', '255', 'SHORT', 255),
('SMALLINT', '-32768', 'SHORT', -32768),
('INT', '-2147483648', 'INTEGER', '-2147483648'),
('BIGINT', '-9223372036854775807', 'LONG', '-9223372036854775807'),
('MONEY', '255.60', 'DECIMAL', '255.6000'),
('SMALLMONEY', '255.60', 'DECIMAL', '255.6000'),
('BINARY(5)', "CAST('Hello' AS BINARY(5))", 'BYTE_ARRAY', 'SGVsbG8='),
('VARBINARY(5)', "CAST('Hello' AS VARBINARY(5))", 'BYTE_ARRAY', 'SGVsbG8='),
('CHAR(5)', "'Hello'", 'STRING', 'Hello'),
('VARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('NCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('NVARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('TEXT', "'Hello'", 'STRING', 'Hello'),
('NTEXT', "'Hello'", 'STRING', 'Hello'),
('IMAGE', "CAST('Hello' AS IMAGE)", 'BYTE_ARRAY', 'SGVsbG8='),
# ('GEOGRAPHY',"geography::STGeomFromText('LINESTRING(-122.360 47.656, -122.343 47.656 )', 4326)", 'BYTE_ARRAY', '5hAAAAEUhxbZzvfTR0DXo3A9CpdewIcW2c7300dAy6FFtvOVXsA='),
# ('GEOMETRY',"geometry::STGeomFromText('LINESTRING (100 100, 20 180, 180 180)', 0)", 'BYTE_ARRAY', 'AAAAAAEEAwAAAAAAAAAAAFlAAAAAAAAAWUAAAAAAAAA0QAAAAAAAgGZAAAAAAACAZkAAAAAAAIBmQAEAAAABAAAAAAEAAAD/////AAAAAAI='),
('XML', "'<a></a>'", 'STRING', '<a/>')
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_sqlserver_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible SQL Server types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
trash = builder.add_stage('Trash')
# As a part of SDC-10125, DATETIMEOFFSET is natively supported in SDC, and is converted into ZONED_DATETIME
if sql_type == 'DATETIMEOFFSET':
if Version(sdc_builder.version) >= Version('3.14.0'):
expected_type = 'ZONED_DATETIME'
expected_value = '2004-05-23T14:25:10.3456-08:00'
else:
expected_type = 'STRING'
expected_value = '2004-05-23 14:25:10.3456 -08:00'
# This unknown_type_action setting is required, otherwise DATETIMEOFFSET tests for SDC < 3.14 will fail.
origin.on_unknown_type = 'CONVERT_TO_STRING'
origin >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
# Since we are controlling types, we want to check explicit values inside the record rather than the Python
# wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['data_column'].type == expected_type
assert null_record.field['data_column'].type == expected_type
assert record.field['data_column']._data['value'] == expected_value
assert null_record.field['data_column'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.12.0')
@database('sqlserver')
@pytest.mark.parametrize('on_unknown_type_action', ['CONVERT_TO_STRING', 'STOP_PIPELINE'])
def test_jdbc_sqlserver_on_unknown_type_action(sdc_builder, sdc_executor, database, on_unknown_type_action):
"""Test JDBC Multitable Consumer with MS-SQL server for the on_unknown_type action.
This is to verify SDC-12764.
When the 'On Unknown Type' action is set to STOP_PIPELINE, the pipeline should stop with a StageException error since it cannot convert the DATETIMEOFFSET field.
When the 'On Unknown Type' action is set to CONVERT_TO_STRING, the pipeline should convert the unknown type to string and process the next record.
The pipeline will look like:
JDBC_Multitable_Consumer >> trash
"""
if Version(sdc_builder.version) >= Version('3.14.0'):
pytest.skip("Skipping SQLServer Unknown Type action check, since DATETIMEOFFSET field is now natively supported from SDC Version 3.14.0")
column_type = 'DATETIMEOFFSET'
INPUT_DATE = "'2004-05-23T14:25:10'"
EXPECTED_OUTCOME = OrderedDict(id=1, date_offset='2004-05-23 14:25:10 +00:00')
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
pipeline_builder = sdc_builder.get_pipeline_builder()
# Setup Origin with specified unknown type action
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{table_name}%'}],
on_unknown_type=on_unknown_type_action)
# Setup destination
trash=pipeline_builder.add_stage('Trash')
# Connect the pipeline stages
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
# Create table and add a row
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
date_offset {column_type} NOT NULL
)
""")
connection.execute(f"INSERT INTO {table_name} VALUES(1, {INPUT_DATE})")
try:
if on_unknown_type_action == 'STOP_PIPELINE':
# Pipeline should stop with StageException
with pytest.raises(Exception):
sdc_executor.start_pipeline(pipeline)
sdc_executor.stop_pipeline(pipeline)
status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
assert 'RUN_ERROR' == status
else:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
output_records = snapshot[jdbc_multitable_consumer].output
assert len(output_records) == 1
assert output_records[0].field == EXPECTED_OUTCOME
finally:
status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
if status == 'RUNNING':
sdc_executor.stop_pipeline(pipeline)
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.14.0')
@database('sqlserver')
def test_jdbc_sqlserver_datetimeoffset_as_primary_key(sdc_builder, sdc_executor, database):
"""Test JDBC Multitable Consumer with SQLServer table configured with DATETIMEOFFSET column as primary key.
The pipeline will look like:
JDBC_Multitable_Consumer >> trash
"""
INPUT_COLUMN_TYPE, INPUT_DATE = 'DATETIMEOFFSET', "'2004-05-23 14:25:10.3456 -08:00'"
EXPECTED_TYPE, EXPECTED_VALUE = 'ZONED_DATETIME', '2004-05-23T14:25:10.3456-08:00'
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{table_name}%'}])
trash=pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
connection.execute(f"""
CREATE TABLE {table_name}(
dto {INPUT_COLUMN_TYPE} NOT NULL PRIMARY KEY
)
""")
connection.execute(f"INSERT INTO {table_name} VALUES({INPUT_DATE})")
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[jdbc_multitable_consumer].output) == 1
record = snapshot[jdbc_multitable_consumer].output[0]
assert record.field['dto'].type == EXPECTED_TYPE
assert record.field['dto'].value == EXPECTED_VALUE
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
# Test for SDC-13288
@database('db2')
def test_jdbc_producer_db2_long_record(sdc_builder, sdc_executor, database):
"""Test that JDBC Producer correctly sends record when setting Custom Data SQLSTATE for db2 database instead of
throwing StageException. The pipelines reads a file with 5 records 1 by 1 having the last record being biggest
than the db2 table column size. That throws an error with an specific SQL Code (22001). Having that code in Custom
Data SQLSTATE sends the last record to error.
The pipeline looks like:
directory_origin >> jdbc_producer
In order to create the file read by directory origin another pipeline is used that looks like:
dev_raw_data_source >> local_fs
"""
# Insert data into file.
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
csv_records = ['1,hello', '2,hello', '3,hello', '4,hello', '5,hellolargerword']
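# The last value ('hellolargerword', 15 characters) is longer than the VARCHAR(10) column A created below, which is what triggers SQL code 22001.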
_setup_delimited_file(sdc_executor, tmp_directory, csv_records)
# Create directory origin.
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED',
file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
file_post_processing='DELETE', files_directory=tmp_directory,
batch_size_in_recs=1)
# Create jdbc producer destination.
# Create table. DB2 internally stores table names in uppercase, hence the ASCII-uppercase random name.
table_name = get_random_string(string.ascii_uppercase, 20)
database.engine.execute(f'CREATE TABLE {table_name} (id VARCHAR(20) NOT NULL PRIMARY KEY, a VARCHAR(10));')
field_to_column_mapping = [dict(columnName='ID',
dataType='USE_COLUMN_TYPE',
field='/0',
paramValue='?'),
dict(columnName='A',
dataType='USE_COLUMN_TYPE',
field='/1',
paramValue='?')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation="INSERT",
schema_name=DEFAULT_DB2_SCHEMA,
table_name=table_name,
field_to_column_mapping=field_to_column_mapping,
stage_on_record_error='TO_ERROR',
data_sqlstate_codes=["22001"])
directory >> jdbc_producer
directory_jdbc_producer_pipeline = pipeline_builder.build(
title='Directory - JDBC Producer. Test DB2 sql code error').configure_for_environment(database)
sdc_executor.add_pipeline(directory_jdbc_producer_pipeline)
try:
snapshot = sdc_executor.capture_snapshot(directory_jdbc_producer_pipeline, start_pipeline=True, batch_size=1,
batches=5).snapshot
sdc_executor.stop_pipeline(directory_jdbc_producer_pipeline)
assert 5 == len(snapshot.snapshot_batches)
result = database.engine.execute(f'SELECT ID,A FROM {table_name};')
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # Order by id.
result.close()
# Assert that the database contains records id=1 through id=4 but not id=5. Columns => record[0] = id, record[1] = a.
assert data_from_database == [(record[0], record[1]) for record in
[unified_record.split(',') for unified_record in csv_records[:-1]]]
stage = snapshot.snapshot_batches[4][jdbc_producer.instance_name]
assert 1 == len(stage.error_records)
error_record = stage.error_records[0]
assert 'hellolargerword' == error_record.field['1']
assert 'JDBC_14' == error_record.header['errorCode']
assert 'SQLSTATE=22001' in error_record.header['errorMessage']
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
database.engine.execute(f'DROP TABLE {table_name}')
def _setup_delimited_file(sdc_executor, tmp_directory, csv_records):
"""Setup csv records and save in local system. The pipelines looks like:
dev_raw_data_source >> local_fs
"""
raw_data = "\n".join(csv_records)
pipeline_builder = sdc_executor.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=raw_data, stop_after_first_batch=True)
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=tmp_directory,
files_prefix='sdc-${sdc:id()}', files_suffix='csv')
dev_raw_data_source >> local_fs
files_pipeline = pipeline_builder.build('Generate files pipeline')
sdc_executor.add_pipeline(files_pipeline)
# Generate some batches/files.
sdc_executor.start_pipeline(files_pipeline).wait_for_finished(timeout_sec=5)
return csv_records
# SDC-13556: Do not spin JDBC Destination and Tee Processor machinery for empty batches
@sdc_min_version('3.14.0')
@database('mysql')
@pytest.mark.parametrize('use_multi_row', [True, False])
def test_jdbc_tee_commits_on_empty_batches(use_multi_row, sdc_builder, sdc_executor, database):
"""Ensure that the JDBC Tee processor won't generate commits on empty batches. Since it's generally difficult
to create empty batches in SDC, we use scripting origin to generate them and then check commit timer (which also
contains count) to ensure that we don't generate excessive commits on the database."""
builder = sdc_builder.get_pipeline_builder()
table_name = get_random_string(string.ascii_lowercase, 20)
script = """
// First batch contains exactly one record
var batch = sdc.createBatch();
var record = sdc.createRecord('generated data');
record.value = {'name': 'A'};
batch.add(record);
batch.process("batch", "non-empty");
// Send 1000 batches that will be empty
var step;
for (step = 0; step < 1000; step++) {
batch = sdc.createBatch();
batch.process("whatever", "batch-" + step);
}
"""
origin = builder.add_stage('JavaScript Scripting')
origin.record_type='NATIVE_OBJECTS'
origin.user_script=script
tee = builder.add_stage('JDBC Tee')
tee.default_operation = 'INSERT'
tee.field_to_column_mapping = [dict(columnName='name', field='/name', paramValue='?')]
tee.generated_column_mappings = [dict(columnName='id', field='/id')]
tee.table_name = table_name
tee.use_multi_row_operation = use_multi_row
trash = builder.add_stage('Trash')
origin >> tee >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
table = _create_table(table_name, database)
try:
sdc_executor.start_pipeline(pipeline).wait_for_finished()
# First of all, verify that the table has exactly one record with the expected values
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert len(db) == 1
assert db[0][0] == 'A'
assert db[0][1] == 1
# Second of all, we should see exactly 1001 batches generated by our scripting origin
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchCount.counter').count == 1001
# Then check how many commits we have generated to ensure that we don't have 1001 of them
expected_commits = 1 if use_multi_row else 2
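# Only the single non-empty batch should result in commits; the exact count differs between multi-row and single-row mode, but the 1000 empty batches must not add any on top of it.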
assert history.latest.metrics.timer('custom.JDBCTee_01.Commit Timer.0.timer').count == expected_commits
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.15.0')
def test_multitable_quote_column_names(sdc_builder, sdc_executor, database):
"""
Ensure that we properly quote all table and column names when querying the database.
"""
table_name = "table_" + get_random_string(string.ascii_letters, 10)
offset_name = "column_" + get_random_string(string.ascii_letters, 10)
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs=[{"tablePattern": f'%{table_name}%'}]
origin.max_batch_size_in_records = 10
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build().configure_for_environment(database)
# Working around the STF behavior of upper-casing the table name configuration
origin.table_configs[0]["tablePattern"] = f'%{table_name}%'
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column(offset_name, sqlalchemy.Integer, primary_key=True, quote=True),
quote = True
)
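# quote=True forces SQLAlchemy to emit the table and column names as quoted, case-sensitive identifiers, which is exactly what the origin has to cope with here.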
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding a row into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), [{offset_name: 1}])
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
# We want to run for a few seconds to see if any errors show up (like they did in previous versions)
time.sleep(10)
sdc_executor.stop_pipeline(pipeline)
# There should be no errors reported
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('stage.JDBCMultitableConsumer_01.errorRecords.counter').count == 0
assert history.latest.metrics.counter('stage.JDBCMultitableConsumer_01.stageErrors.counter').count == 0
# And verify that we properly read that one record
assert len(snapshot[origin].output) == 1
assert snapshot[origin].output[0].get_field_data('/' + offset_name) == 1
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.0.0.0')
def test_jdbc_multitable_consumer_duplicates_read_when_initial_offset_configured(sdc_builder, sdc_executor, database):
"""
SDC-13625 Integration test for SDC-13624 - MT Consumer ingests duplicates when initial offset is specified
Set up the origin as follows:
partitioning enabled + num_threads and num partitions > 1 + override offset column set
+ initial value specified for offset
Verify that the origin does not ingest records more than once (i.e. no duplicates) when an initial offset value is set
Pipeline:
JDBC MT Consumer >> Trash
>= Pipeline Finisher (no-more-data)
"""
if database.type == 'Oracle':
pytest.skip("This test depends on proper case for column names that Oracle auto-uppers.")
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, 20))
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{
"tablePattern": f'{table_name}',
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "100000",
"maxNumActivePartitions": 5,
'overrideDefaultOffsetColumns': True,
'offsetColumns': ['created'],
'offsetColumnToInitialOffsetValue': [{
'key': 'created',
'value': '0'
}]
}])
jdbc_multitable_consumer.number_of_threads = 2
jdbc_multitable_consumer.maximum_pool_size = 2
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
jdbc_multitable_consumer >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
ONE_MILLION = 1000000
rows_in_table = [{'id': i, 'name': get_random_string(string.ascii_lowercase, 5), 'created': i + ONE_MILLION}
for i in range(1, 21)]
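# The 'created' offsets start at 1,000,001 while the configured initial offset is 0, recreating the setup under which SDC-13624 produced duplicate reads.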
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(5)),
sqlalchemy.Column('created', sqlalchemy.Integer)
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding 20 rows into %s table', table_name)
connection = database.engine.connect()
connection.execute(table.insert(), rows_in_table)
connection.close()
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, batches=2, start_pipeline=True).snapshot
rows_from_snapshot = [(record.get_field_data('/name').value,
record.get_field_data('/id').value,
record.get_field_data('/created').value)
for batch in snapshot.snapshot_batches
for record in batch.stage_outputs[jdbc_multitable_consumer.instance_name].output]
expected_data = [(row['name'], row['id'], row['created']) for row in rows_in_table]
assert rows_from_snapshot == expected_data
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
class source:
def __init__(self):
self.domains = ['m4ufree.info']
self.base_link = 'http://m4ufree.info'
self.include_link = '/include/autocomplete.php?q='
self.search_link = '/tag/%s'
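# movie() resolves a title/year to a site-relative URL via the tag search page; sources() then walks the per-server demo.php embeds looking for direct video links.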
def movie(self, imdb, title, year):
try:
t = cleantitle.get(title)
#r = cache.get(self.mfree_mvcache, 170)
#r = [i for i in r if t == i[0] and year == i[1]][0]
q = (title.translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
q = urlparse.urljoin(self.base_link, self.search_link % q)
r = client.request(q)
r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'top-item'}), client.parseDOM(r, 'a', attrs = {'class': 'top-item'}))
r = [(i[0], re.sub('^Watch\s*|<.+?>|</.+?>', '', i[1])) for i in r]
r = [(i[0], re.findall('(.+?) (?:\(|)(\d{4})(?:\)|)$', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def mfree_mvcache(self):
try:
u = urlparse.urljoin(self.base_link, self.include_link)
r = client.request(u).splitlines()
r = [re.findall('(.+?) (?:\(|)(\d{4})(?:\)|)$', i.strip()) for i in r]
r = [(cleantitle.get(i[0][0]), i[0][1]) for i in r if len(i) > 0]
return r
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
quality = client.parseDOM(r, 'h3', attrs = {'title': 'Quality.+?'})[0]
quality = client.parseDOM(quality, 'span')[0]
if quality.lower() in ['ts', 'tc', 'cam']: raise Exception()
url = client.parseDOM(r, 'a', ret='href')
url = [i for i in url if '-full-movie-' in i][0]
r = client.request(url)
headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
servers = client.parseDOM(r, 'span', ret='link', attrs = {'class': '[^"]*btn-eps(?:\s+|)'})
for server in servers:
try:
url = '/demo.php?v=%s' % server
url = urlparse.urljoin(self.base_link, url)
r += str(client.request(url, headers=headers))
except:
pass
links = client.parseDOM(r, 'source', ret='src', attrs = {'type': 'video/mp4'})
links += client.parseDOM(r, 'iframe', ret='src')
for link in links:
try:
if not link.startswith('http'): link = urlparse.urljoin(self.base_link, link)
url = client.request(link, output='geturl')
quality = directstream.googletag(url)[0]['quality']
sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'MFree', 'url': url, 'direct': True, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
return
| # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
class source:
def __init__(self):
self.domains = ['m4ufree.info']
self.base_link = 'http://m4ufree.info'
self.include_link = '/include/autocomplete.php?q='
self.search_link = '/tag/%s'
def movie(self, imdb, title, year):
try:
t = cleantitle.get(title)
#r = cache.get(self.mfree_mvcache, 170)
#r = [i for i in r if t == i[0] and year == i[1]][0]
q = (title.translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
q = urlparse.urljoin(self.base_link, self.search_link % q)
r = client.request(q)
r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'top-item'}), client.parseDOM(r, 'a', attrs = {'class': 'top-item'}))
r = [(i[0], re.sub('^Watch\s*|<.+?>|</.+?>', '', i[1])) for i in r]
r = [(i[0], re.findall('(.+?) (?:\(|)(\d{4})(?:\)|)$', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def mfree_mvcache(self):
try:
u = urlparse.urljoin(self.base_link, self.include_link)
r = client.request(u).splitlines()
r = [re.findall('(.+?) (?:\(|)(\d{4})(?:\)|)$', i.strip()) for i in r]
r = [(cleantitle.get(i[0][0]), i[0][1]) for i in r if len(i) > 0]
return r
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
quality = client.parseDOM(r, 'h3', attrs = {'title': 'Quality.+?'})[0]
quality = client.parseDOM(quality, 'span')[0]
if quality.lower() in ['ts', 'tc', 'cam']: raise Exception()
url = client.parseDOM(r, 'a', ret='href')
url = [i for i in url if '-full-movie-' in i][0]
r = client.request(url)
headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
servers = client.parseDOM(r, 'span', ret='link', attrs = {'class': '[^"]*btn-eps(?:\s+|)'})
for server in servers:
try:
url = '/demo.php?v=%s' % server
url = urlparse.urljoin(self.base_link, url)
r += str(client.request(url, headers=headers))
except:
pass
links = client.parseDOM(r, 'source', ret='src', attrs = {'type': 'video/mp4'})
links += client.parseDOM(r, 'iframe', ret='src')
for link in links:
try:
if not link.startswith('http'): link = urlparse.urljoin(self.base_link, link)
url = client.request(link, output='geturl')
quality = directstream.googletag(url)[0]['quality']
sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'MFree', 'url': url, 'direct': True, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
return
|
from datetime import datetime
from matplotlib import pylab as plt
from requests_cache import CachedSession
CACHE_EXPIRATION_SECS = 3600*24*356
YEAR_RANGE = range(2018, 2022)
MARKERS = ["o", "s", "d", "+", "*"]
RIRS = {
'AFRINIC': {
'url': 'https://ftp.ripe.net/ripe/rpki/afrinic.tal/',
'marker': 'o',
},
'APNIC': {
'url': 'https://ftp.ripe.net/ripe/rpki/apnic.tal/',
'marker': 's',
},
'ARIN': {
'url': 'https://ftp.ripe.net/ripe/rpki/arin.tal/',
'marker': 'd'
},
'LACNIC': {
'url': 'https://ftp.ripe.net/ripe/rpki/lacnic.tal/',
'marker': '+',
},
'RIPE': {
'url': 'https://ftp.ripe.net/ripe/rpki/ripencc.tal/',
'marker': '*',
}
}
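# One line per RIR is plotted below using the per-entry 'marker' field; the generic MARKERS list above is left unused.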
session = CachedSession(expire_after=CACHE_EXPIRATION_SECS)
plt.figure(figsize=(7,4))
for rir, rir_info in RIRS.items():
x = []
y = []
for year in YEAR_RANGE:
for month in range(1,13):
roa_count = -1 # skip the header
parsed_url = f'{rir_info['url']}/{year}/{month:02d}/15/roas.csv'
csv = session.get( parsed_url )
if csv.status_code != 200:
print(parsed_url)
print(csv.status_code)
continue
for line in csv.iter_lines(decode_unicode=True):
roa_count += 1
if roa_count > 0:
x.append( datetime(year, month, 15) )
y.append( roa_count )
plt.plot(x, y, label=rir, marker=rir_info['marker'])
plt.grid( True )
plt.legend()
plt.ylabel('Number of ROAs')
plt.xticks(rotation=45)
plt.tight_layout()
plt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.png')
plt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.pdf')
| from datetime import datetime
from matplotlib import pylab as plt
from requests_cache import CachedSession
CACHE_EXPIRATION_SECS = 3600*24*356
YEAR_RANGE = range(2018, 2022)
MARKERS = ["o", "s", "d", "+", "*"]
RIRS = {
'AFRINIC': {
'url': 'https://ftp.ripe.net/ripe/rpki/afrinic.tal/',
'marker': 'o',
},
'APNIC': {
'url': 'https://ftp.ripe.net/ripe/rpki/apnic.tal/',
'marker': 's',
},
'ARIN': {
'url': 'https://ftp.ripe.net/ripe/rpki/arin.tal/',
'marker': 'd'
},
'LACNIC': {
'url': 'https://ftp.ripe.net/ripe/rpki/lacnic.tal/',
'marker': '+',
},
'RIPE': {
'url': 'https://ftp.ripe.net/ripe/rpki/ripencc.tal/',
'marker': '*',
}
}
session = CachedSession(expire_after=CACHE_EXPIRATION_SECS)
plt.figure(figsize=(7,4))
for rir, rir_info in RIRS.items():
x = []
y = []
for year in YEAR_RANGE:
for month in range(1,13):
roa_count = -1 # skip the header
parsed_url = f'{rir_info["url"]}/{year}/{month:02d}/15/roas.csv'
csv = session.get( parsed_url )
if csv.status_code != 200:
print(parsed_url)
print(csv.status_code)
continue
for line in csv.iter_lines(decode_unicode=True):
roa_count += 1
if roa_count > 0:
x.append( datetime(year, month, 15) )
y.append( roa_count )
plt.plot(x, y, label=rir, marker=rir_info['marker'])
plt.grid( True )
plt.legend()
plt.ylabel('Number of ROAs')
plt.xticks(rotation=45)
plt.tight_layout()
plt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.png')
plt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.pdf')
|
import inspect
import logging
import os
import re
import subprocess
from typing import Dict, Any
from pyhttpd.certs import CertificateSpec
from pyhttpd.conf import HttpdConf
from pyhttpd.env import HttpdTestEnv, HttpdTestSetup
log = logging.getLogger(__name__)
class H2TestSetup(HttpdTestSetup):
def __init__(self, env: 'HttpdTestEnv'):
super().__init__(env=env)
self.add_source_dir(os.path.dirname(inspect.getfile(H2TestSetup)))
self.add_modules(["http2", "proxy_http2", "cgid", "autoindex", "ssl"])
def make(self):
super().make()
self._add_h2test()
self._setup_data_1k_1m()
def _add_h2test(self):
local_dir = os.path.dirname(inspect.getfile(H2TestSetup))
p = subprocess.run([self.env.apxs, '-c', 'mod_h2test.c'],
capture_output=True,
cwd=os.path.join(local_dir, 'mod_h2test'))
rv = p.returncode
if rv != 0:
log.error(f"compiling md_h2test failed: {p.stderr}")
raise Exception(f"compiling md_h2test failed: {p.stderr}")
modules_conf = os.path.join(self.env.server_dir, 'conf/modules.conf')
with open(modules_conf, 'a') as fd:
# load our test module which is not installed
fd.write(f"LoadModule h2test_module \"{local_dir}/mod_h2test/.libs/mod_h2test.so\"\n")
def _setup_data_1k_1m(self):
s90 = "01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678\n"
with open(os.path.join(self.env.gen_dir, "data-1k"), 'w') as f:
for i in range(10):
f.write(f"{i:09d}-{s90}")
with open(os.path.join(self.env.gen_dir, "data-10k"), 'w') as f:
for i in range(100):
f.write(f"{i:09d}-{s90}")
with open(os.path.join(self.env.gen_dir, "data-100k"), 'w') as f:
for i in range(1000):
f.write(f"{i:09d}-{s90}")
with open(os.path.join(self.env.gen_dir, "data-1m"), 'w') as f:
for i in range(10000):
f.write(f"{i:09d}-{s90}")
class H2TestEnv(HttpdTestEnv):
def __init__(self, pytestconfig=None):
super().__init__(pytestconfig=pytestconfig)
self.add_httpd_conf([
"H2MinWorkers 1",
"H2MaxWorkers 64",
"Protocols h2 http/1.1 h2c",
])
self.add_httpd_log_modules(["http2", "proxy_http2", "h2test"])
self.add_cert_specs([
CertificateSpec(domains=[
f"push.{self._http_tld}",
f"hints.{self._http_tld}",
f"ssl.{self._http_tld}",
f"pad0.{self._http_tld}",
f"pad1.{self._http_tld}",
f"pad2.{self._http_tld}",
f"pad3.{self._http_tld}",
f"pad8.{self._http_tld}",
]),
CertificateSpec(domains=[f"noh2.{self.http_tld}"], key_type='rsa2048'),
])
self.httpd_error_log.set_ignored_lognos([
'AH02032',
'AH01276',
'AH01630',
'AH00135',
'AH02261', # Re-negotiation handshake failed (our test_101)
'AH03490', # scoreboard full, happens on limit tests
])
self.httpd_error_log.add_ignored_patterns([
re.compile(r'.*malformed header from script \'hecho.py\': Bad header: x.*'),
re.compile(r'.*:tls_post_process_client_hello:.*'),
re.compile(r'.*:tls_process_client_certificate:.*'),
re.compile(r'.*have incompatible TLS configurations.'),
])
def setup_httpd(self, setup: HttpdTestSetup = None):
super().setup_httpd(setup=H2TestSetup(env=self))
class H2Conf(HttpdConf):
def __init__(self, env: HttpdTestEnv, extras: Dict[str, Any] = None):
super().__init__(env=env, extras=HttpdConf.merge_extras(extras, {
f"cgi.{env.http_tld}": [
"SSLOptions +StdEnvVars",
"AddHandler cgi-script .py",
]
}))
def start_vhost(self, domains, port=None, doc_root="htdocs", with_ssl=None,
ssl_module=None, with_certificates=None):
super().start_vhost(domains=domains, port=port, doc_root=doc_root,
with_ssl=with_ssl, ssl_module=ssl_module,
with_certificates=with_certificates)
if f"noh2.{self.env.http_tld}" in domains:
protos = ["http/1.1"]
elif port == self.env.https_port or with_ssl is True:
protos = ["h2", "http/1.1"]
else:
protos = ["h2c", "http/1.1"]
if f"test2.{self.env.http_tld}" in domains:
protos = reversed(protos)
self.add(f"Protocols {" ".join(protos)}")
return self
def add_vhost_noh2(self):
domains = [f"noh2.{self.env.http_tld}", f"noh2-alias.{self.env.http_tld}"]
self.start_vhost(domains=domains, port=self.env.https_port, doc_root="htdocs/noh2")
self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"])
self.end_vhost()
self.start_vhost(domains=domains, port=self.env.http_port, doc_root="htdocs/noh2")
self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"])
self.end_vhost()
return self
def add_vhost_test1(self, proxy_self=False, h2proxy_self=False):
return super().add_vhost_test1(proxy_self=proxy_self, h2proxy_self=h2proxy_self)
def add_vhost_test2(self):
return super().add_vhost_test2()
| import inspect
import logging
import os
import re
import subprocess
from typing import Dict, Any
from pyhttpd.certs import CertificateSpec
from pyhttpd.conf import HttpdConf
from pyhttpd.env import HttpdTestEnv, HttpdTestSetup
log = logging.getLogger(__name__)
class H2TestSetup(HttpdTestSetup):
def __init__(self, env: 'HttpdTestEnv'):
super().__init__(env=env)
self.add_source_dir(os.path.dirname(inspect.getfile(H2TestSetup)))
self.add_modules(["http2", "proxy_http2", "cgid", "autoindex", "ssl"])
def make(self):
super().make()
self._add_h2test()
self._setup_data_1k_1m()
def _add_h2test(self):
local_dir = os.path.dirname(inspect.getfile(H2TestSetup))
p = subprocess.run([self.env.apxs, '-c', 'mod_h2test.c'],
capture_output=True,
cwd=os.path.join(local_dir, 'mod_h2test'))
rv = p.returncode
if rv != 0:
log.error(f"compiling md_h2test failed: {p.stderr}")
raise Exception(f"compiling md_h2test failed: {p.stderr}")
modules_conf = os.path.join(self.env.server_dir, 'conf/modules.conf')
with open(modules_conf, 'a') as fd:
# load our test module which is not installed
fd.write(f"LoadModule h2test_module \"{local_dir}/mod_h2test/.libs/mod_h2test.so\"\n")
def _setup_data_1k_1m(self):
s90 = "01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678\n"
with open(os.path.join(self.env.gen_dir, "data-1k"), 'w') as f:
for i in range(10):
f.write(f"{i:09d}-{s90}")
with open(os.path.join(self.env.gen_dir, "data-10k"), 'w') as f:
for i in range(100):
f.write(f"{i:09d}-{s90}")
with open(os.path.join(self.env.gen_dir, "data-100k"), 'w') as f:
for i in range(1000):
f.write(f"{i:09d}-{s90}")
with open(os.path.join(self.env.gen_dir, "data-1m"), 'w') as f:
for i in range(10000):
f.write(f"{i:09d}-{s90}")
class H2TestEnv(HttpdTestEnv):
def __init__(self, pytestconfig=None):
super().__init__(pytestconfig=pytestconfig)
self.add_httpd_conf([
"H2MinWorkers 1",
"H2MaxWorkers 64",
"Protocols h2 http/1.1 h2c",
])
self.add_httpd_log_modules(["http2", "proxy_http2", "h2test"])
self.add_cert_specs([
CertificateSpec(domains=[
f"push.{self._http_tld}",
f"hints.{self._http_tld}",
f"ssl.{self._http_tld}",
f"pad0.{self._http_tld}",
f"pad1.{self._http_tld}",
f"pad2.{self._http_tld}",
f"pad3.{self._http_tld}",
f"pad8.{self._http_tld}",
]),
CertificateSpec(domains=[f"noh2.{self.http_tld}"], key_type='rsa2048'),
])
self.httpd_error_log.set_ignored_lognos([
'AH02032',
'AH01276',
'AH01630',
'AH00135',
'AH02261', # Re-negotiation handshake failed (our test_101)
'AH03490', # scoreboard full, happens on limit tests
])
self.httpd_error_log.add_ignored_patterns([
re.compile(r'.*malformed header from script \'hecho.py\': Bad header: x.*'),
re.compile(r'.*:tls_post_process_client_hello:.*'),
re.compile(r'.*:tls_process_client_certificate:.*'),
re.compile(r'.*have incompatible TLS configurations.'),
])
def setup_httpd(self, setup: HttpdTestSetup = None):
super().setup_httpd(setup=H2TestSetup(env=self))
class H2Conf(HttpdConf):
def __init__(self, env: HttpdTestEnv, extras: Dict[str, Any] = None):
super().__init__(env=env, extras=HttpdConf.merge_extras(extras, {
f"cgi.{env.http_tld}": [
"SSLOptions +StdEnvVars",
"AddHandler cgi-script .py",
]
}))
def start_vhost(self, domains, port=None, doc_root="htdocs", with_ssl=None,
ssl_module=None, with_certificates=None):
super().start_vhost(domains=domains, port=port, doc_root=doc_root,
with_ssl=with_ssl, ssl_module=ssl_module,
with_certificates=with_certificates)
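        # Pick the advertised protocols: "noh2" vhosts stay HTTP/1.1-only, TLS vhosts
        # prefer h2, cleartext vhosts prefer h2c, and "test2" reverses the preference order.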
if f"noh2.{self.env.http_tld}" in domains:
protos = ["http/1.1"]
elif port == self.env.https_port or with_ssl is True:
protos = ["h2", "http/1.1"]
else:
protos = ["h2c", "http/1.1"]
if f"test2.{self.env.http_tld}" in domains:
protos = reversed(protos)
self.add(f"Protocols {' '.join(protos)}")
return self
def add_vhost_noh2(self):
domains = [f"noh2.{self.env.http_tld}", f"noh2-alias.{self.env.http_tld}"]
self.start_vhost(domains=domains, port=self.env.https_port, doc_root="htdocs/noh2")
self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"])
self.end_vhost()
self.start_vhost(domains=domains, port=self.env.http_port, doc_root="htdocs/noh2")
self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"])
self.end_vhost()
return self
def add_vhost_test1(self, proxy_self=False, h2proxy_self=False):
return super().add_vhost_test1(proxy_self=proxy_self, h2proxy_self=h2proxy_self)
def add_vhost_test2(self):
return super().add_vhost_test2()
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import random
import uuid
from google.api_core import client_options
import google.api_core.exceptions
import google.auth
from google.cloud import bigquery
from google.cloud import bigquery_datatransfer
from google.cloud import pubsub_v1
import pytest
RESOURCE_PREFIX = "python_bigquery_datatransfer_samples_snippets"
RESOURCE_DATE_FORMAT = "%Y%m%d%H%M%S"
RESOURCE_DATE_LENGTH = 4 + 2 + 2 + 2 + 2 + 2
def resource_prefix() -> str:
timestamp = datetime.datetime.utcnow().strftime(RESOURCE_DATE_FORMAT)
random_string = hex(random.randrange(1000000))[2:]
return f"{RESOURCE_PREFIX}_{timestamp}_{random_string}"
def resource_name_to_date(resource_name: str):
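    # Illustrative example (hypothetical name as produced by resource_prefix()):
    #   "python_bigquery_datatransfer_samples_snippets_20210810120000_1a2b3"
    #   parses to datetime.datetime(2021, 8, 10, 12, 0, 0).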
start_date = len(RESOURCE_PREFIX) + 1
date_string = resource_name[start_date : start_date + RESOURCE_DATE_LENGTH]
parsed_date = datetime.datetime.strptime(date_string, RESOURCE_DATE_FORMAT)
return parsed_date
@pytest.fixture(scope="session", autouse=True)
def cleanup_pubsub_topics(pubsub_client: pubsub_v1.PublisherClient, project_id):
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
for topic in pubsub_client.list_topics(project=f"projects/{project_id}"):
topic_id = topic.name.split("/")[-1]
if (
topic_id.startswith(RESOURCE_PREFIX)
and resource_name_to_date(topic_id) < yesterday
):
pubsub_client.delete_topic(topic=topic.name)
def temp_suffix():
now = datetime.datetime.now()
return f"{now.strftime("%Y%m%d%H%M%S")}_{uuid.uuid4().hex[:8]}"
@pytest.fixture(scope="session")
def bigquery_client(default_credentials):
credentials, project_id = default_credentials
return bigquery.Client(credentials=credentials, project=project_id)
@pytest.fixture(scope="session")
def pubsub_client(default_credentials):
credentials, _ = default_credentials
return pubsub_v1.PublisherClient(credentials=credentials)
@pytest.fixture(scope="session")
def pubsub_topic(pubsub_client: pubsub_v1.PublisherClient, project_id):
topic_id = resource_prefix()
topic_path = pubsub_v1.PublisherClient.topic_path(project_id, topic_id)
pubsub_client.create_topic(name=topic_path)
yield topic_path
pubsub_client.delete_topic(topic=topic_path)
@pytest.fixture(scope="session")
def dataset_id(bigquery_client, project_id):
dataset_id = f"bqdts_{temp_suffix()}"
bigquery_client.create_dataset(f"{project_id}.{dataset_id}")
yield dataset_id
bigquery_client.delete_dataset(dataset_id, delete_contents=True)
@pytest.fixture(scope="session")
def default_credentials():
return google.auth.default(["https://www.googleapis.com/auth/cloud-platform"])
@pytest.fixture(scope="session")
def project_id():
return os.environ["GOOGLE_CLOUD_PROJECT"]
@pytest.fixture(scope="session")
def service_account_name(default_credentials):
credentials, _ = default_credentials
# The service_account_email attribute is not available when running with
# user account credentials, but should be available when running from our
# continuous integration tests.
return getattr(credentials, "service_account_email", None)
@pytest.fixture(scope="session")
def transfer_client(default_credentials, project_id):
credentials, _ = default_credentials
options = client_options.ClientOptions(quota_project_id=project_id)
transfer_client = bigquery_datatransfer.DataTransferServiceClient(
credentials=credentials, client_options=options
)
# Ensure quota is always attributed to the correct project.
bigquery_datatransfer.DataTransferServiceClient = lambda: transfer_client
return transfer_client
@pytest.fixture(scope="session")
def transfer_config_name(transfer_client, project_id, dataset_id, service_account_name):
from . import manage_transfer_configs, scheduled_query
# Use the transfer_client fixture so we know quota is attributed to the
# correct project.
assert transfer_client is not None
# To conserve limited BQ-DTS quota, this fixture creates only one transfer
# config for a whole session and is used to test the scheduled_query.py and
# the delete operation in manage_transfer_configs.py.
transfer_config = scheduled_query.create_scheduled_query(
{
"project_id": project_id,
"dataset_id": dataset_id,
"service_account_name": service_account_name,
}
)
yield transfer_config.name
manage_transfer_configs.delete_config(
{"transfer_config_name": transfer_config.name}
)
@pytest.fixture
def to_delete_configs(transfer_client):
to_delete = []
yield to_delete
for config_name in to_delete:
try:
transfer_client.delete_transfer_config(name=config_name)
except google.api_core.exceptions.GoogleAPICallError:
pass
| # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import random
import uuid
from google.api_core import client_options
import google.api_core.exceptions
import google.auth
from google.cloud import bigquery
from google.cloud import bigquery_datatransfer
from google.cloud import pubsub_v1
import pytest
RESOURCE_PREFIX = "python_bigquery_datatransfer_samples_snippets"
RESOURCE_DATE_FORMAT = "%Y%m%d%H%M%S"
RESOURCE_DATE_LENGTH = 4 + 2 + 2 + 2 + 2 + 2
def resource_prefix() -> str:
timestamp = datetime.datetime.utcnow().strftime(RESOURCE_DATE_FORMAT)
random_string = hex(random.randrange(1000000))[2:]
return f"{RESOURCE_PREFIX}_{timestamp}_{random_string}"
def resource_name_to_date(resource_name: str):
start_date = len(RESOURCE_PREFIX) + 1
date_string = resource_name[start_date : start_date + RESOURCE_DATE_LENGTH]
parsed_date = datetime.datetime.strptime(date_string, RESOURCE_DATE_FORMAT)
return parsed_date
@pytest.fixture(scope="session", autouse=True)
def cleanup_pubsub_topics(pubsub_client: pubsub_v1.PublisherClient, project_id):
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
for topic in pubsub_client.list_topics(project=f"projects/{project_id}"):
topic_id = topic.name.split("/")[-1]
if (
topic_id.startswith(RESOURCE_PREFIX)
and resource_name_to_date(topic_id) < yesterday
):
pubsub_client.delete_topic(topic=topic.name)
def temp_suffix():
now = datetime.datetime.now()
return f"{now.strftime('%Y%m%d%H%M%S')}_{uuid.uuid4().hex[:8]}"
@pytest.fixture(scope="session")
def bigquery_client(default_credentials):
credentials, project_id = default_credentials
return bigquery.Client(credentials=credentials, project=project_id)
@pytest.fixture(scope="session")
def pubsub_client(default_credentials):
credentials, _ = default_credentials
return pubsub_v1.PublisherClient(credentials=credentials)
@pytest.fixture(scope="session")
def pubsub_topic(pubsub_client: pubsub_v1.PublisherClient, project_id):
topic_id = resource_prefix()
topic_path = pubsub_v1.PublisherClient.topic_path(project_id, topic_id)
pubsub_client.create_topic(name=topic_path)
yield topic_path
pubsub_client.delete_topic(topic=topic_path)
@pytest.fixture(scope="session")
def dataset_id(bigquery_client, project_id):
dataset_id = f"bqdts_{temp_suffix()}"
bigquery_client.create_dataset(f"{project_id}.{dataset_id}")
yield dataset_id
bigquery_client.delete_dataset(dataset_id, delete_contents=True)
@pytest.fixture(scope="session")
def default_credentials():
return google.auth.default(["https://www.googleapis.com/auth/cloud-platform"])
@pytest.fixture(scope="session")
def project_id():
return os.environ["GOOGLE_CLOUD_PROJECT"]
@pytest.fixture(scope="session")
def service_account_name(default_credentials):
credentials, _ = default_credentials
# The service_account_email attribute is not available when running with
# user account credentials, but should be available when running from our
# continuous integration tests.
return getattr(credentials, "service_account_email", None)
@pytest.fixture(scope="session")
def transfer_client(default_credentials, project_id):
credentials, _ = default_credentials
options = client_options.ClientOptions(quota_project_id=project_id)
transfer_client = bigquery_datatransfer.DataTransferServiceClient(
credentials=credentials, client_options=options
)
# Ensure quota is always attributed to the correct project.
bigquery_datatransfer.DataTransferServiceClient = lambda: transfer_client
return transfer_client
@pytest.fixture(scope="session")
def transfer_config_name(transfer_client, project_id, dataset_id, service_account_name):
from . import manage_transfer_configs, scheduled_query
# Use the transfer_client fixture so we know quota is attributed to the
# correct project.
assert transfer_client is not None
# To conserve limited BQ-DTS quota, this fixture creates only one transfer
# config for a whole session and is used to test the scheduled_query.py and
# the delete operation in manage_transfer_configs.py.
transfer_config = scheduled_query.create_scheduled_query(
{
"project_id": project_id,
"dataset_id": dataset_id,
"service_account_name": service_account_name,
}
)
yield transfer_config.name
manage_transfer_configs.delete_config(
{"transfer_config_name": transfer_config.name}
)
@pytest.fixture
def to_delete_configs(transfer_client):
to_delete = []
yield to_delete
for config_name in to_delete:
try:
transfer_client.delete_transfer_config(name=config_name)
except google.api_core.exceptions.GoogleAPICallError:
pass
|
#!/usr/bin/env python3
""" Health authority back end REST and static content server """
__copyright__ = """
Copyright 2020 Diomidis Spinellis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
import argparse
from dp3t.protocols.server_database import ServerDatabase
from epidose.common.daemon import Daemon
from flask import Flask, abort, jsonify, request, send_from_directory
import logging
from os.path import basename, dirname
API_VERSION = "1"
app = Flask("ha-server")
db = None
FILTER_LOCATION = "/var/lib/epidose/filter.bin"
DATABASE_LOCATION = "/var/lib/epidose/server-database.db"
UPDATE_LOCATION = "/var/lib/epidose/update.sh"
def shutdown_server():
func = request.environ.get("werkzeug.server.shutdown")
if func is None:
raise RuntimeError("Not running with the Werkzeug Server")
func()
@app.before_request
def before_request():
global db
if not db:
db = ServerDatabase(DATABASE_LOCATION)
db.connect(reuse_if_open=True)
@app.after_request
def after_request(response):
global db
if not app.config["TESTING"]:
db.close()
return response
@app.route("/filter", methods=["GET"])
def filter():
"""Send the Cuckoo filter as a static file.
In a production deployment this should be handled by the front-end server,
such as nginx.
"""
return send_from_directory(dirname(FILTER_LOCATION), basename(FILTER_LOCATION))
@app.route("/update", methods=["GET"])
def update():
"""Send the update shell script as a static file."""
return send_from_directory(dirname(UPDATE_LOCATION), basename(UPDATE_LOCATION))
@app.route("/shutdown")
def shutdown():
if app.debug:
shutdown_server()
return "Server shutting down..."
else:
abort(405)
@app.route("/version", methods=["GET"])
def version():
return jsonify({"version": API_VERSION})
@app.route("/add_contagious", methods=["POST"])
def add_contagious():
content = request.json
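    # Expected request body, inferred from the handling below (values are placeholders):
    #   {"authorization": "<token>",
    #    "data": [{"epoch": 123456, "seed": "00112233445566778899aabbccddeeff"}]}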
with db.atomic():
logger.debug(f"Add new data with authorization {content["authorization"]}")
# TODO: Check authorization
for rec in content["data"]:
epoch = rec["epoch"]
seed = bytes.fromhex(rec["seed"])
db.add_epoch_seed(epoch, seed)
logger.debug(f"Add {epoch} {seed.hex()}")
# TODO: Delete authorization
return "OK"
def initialize(args):
"""Initialize the server's database and logger. """
global daemon
daemon = Daemon("ha_server", args)
# Setup logging
global logger
logger = daemon.get_logger()
# Connect to the database
global db
db = ServerDatabase(args.database)
def main():
parser = argparse.ArgumentParser(
description="Health authority back end REST and static content server "
)
parser.add_argument(
"-d", "--debug", help="Run in debug mode logging to stderr", action="store_true"
)
global DATABASE_LOCATION
parser.add_argument(
"-D",
"--database",
help="Specify the database location",
default=DATABASE_LOCATION,
)
global FILTER_LOCATION
parser.add_argument(
"-f",
"--filter",
help="Specify the location of the Cuckoo filter",
default=FILTER_LOCATION,
)
parser.add_argument(
"-s",
"--server-name",
help="Specify the server name (0.0.0.0 for externally visible)",
default="127.0.0.1",
)
parser.add_argument("-p", "--port", help="Set TCP port to listen", type=int)
parser.add_argument(
"-v", "--verbose", help="Set verbose logging", action="store_true"
)
args = parser.parse_args()
initialize(args)
FILTER_LOCATION = args.filter
DATABASE_LOCATION = args.database
# Daemonize with gunicorn or other means, because the daemonize
# module has trouble dealing with the lock files when the app
# reloads itself.
app.run(debug=args.debug, host=args.server_name, port=args.port)
if __name__ == "__main__":
main()
else:
global logger
logger = logging.getLogger("gunicorn.error")
| #!/usr/bin/env python3
""" Health authority back end REST and static content server """
__copyright__ = """
Copyright 2020 Diomidis Spinellis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
import argparse
from dp3t.protocols.server_database import ServerDatabase
from epidose.common.daemon import Daemon
from flask import Flask, abort, jsonify, request, send_from_directory
import logging
from os.path import basename, dirname
API_VERSION = "1"
app = Flask("ha-server")
db = None
FILTER_LOCATION = "/var/lib/epidose/filter.bin"
DATABASE_LOCATION = "/var/lib/epidose/server-database.db"
UPDATE_LOCATION = "/var/lib/epidose/update.sh"
def shutdown_server():
func = request.environ.get("werkzeug.server.shutdown")
if func is None:
raise RuntimeError("Not running with the Werkzeug Server")
func()
@app.before_request
def before_request():
global db
if not db:
db = ServerDatabase(DATABASE_LOCATION)
db.connect(reuse_if_open=True)
@app.after_request
def after_request(response):
global db
if not app.config["TESTING"]:
db.close()
return response
@app.route("/filter", methods=["GET"])
def filter():
"""Send the Cuckoo filter as a static file.
In a production deployment this should be handled by the front-end server,
such as nginx.
"""
return send_from_directory(dirname(FILTER_LOCATION), basename(FILTER_LOCATION))
@app.route("/update", methods=["GET"])
def update():
"""Send the update shell script as a static file."""
return send_from_directory(dirname(UPDATE_LOCATION), basename(UPDATE_LOCATION))
@app.route("/shutdown")
def shutdown():
if app.debug:
shutdown_server()
return "Server shutting down..."
else:
abort(405)
@app.route("/version", methods=["GET"])
def version():
return jsonify({"version": API_VERSION})
@app.route("/add_contagious", methods=["POST"])
def add_contagious():
content = request.json
with db.atomic():
logger.debug(f"Add new data with authorization {content['authorization']}")
# TODO: Check authorization
for rec in content["data"]:
epoch = rec["epoch"]
seed = bytes.fromhex(rec["seed"])
db.add_epoch_seed(epoch, seed)
logger.debug(f"Add {epoch} {seed.hex()}")
# TODO: Delete authorization
return "OK"
def initialize(args):
"""Initialize the server's database and logger. """
global daemon
daemon = Daemon("ha_server", args)
# Setup logging
global logger
logger = daemon.get_logger()
# Connect to the database
global db
db = ServerDatabase(args.database)
def main():
parser = argparse.ArgumentParser(
description="Health authority back end REST and static content server "
)
parser.add_argument(
"-d", "--debug", help="Run in debug mode logging to stderr", action="store_true"
)
global DATABASE_LOCATION
parser.add_argument(
"-D",
"--database",
help="Specify the database location",
default=DATABASE_LOCATION,
)
global FILTER_LOCATION
parser.add_argument(
"-f",
"--filter",
help="Specify the location of the Cuckoo filter",
default=FILTER_LOCATION,
)
parser.add_argument(
"-s",
"--server-name",
help="Specify the server name (0.0.0.0 for externally visible)",
default="127.0.0.1",
)
parser.add_argument("-p", "--port", help="Set TCP port to listen", type=int)
parser.add_argument(
"-v", "--verbose", help="Set verbose logging", action="store_true"
)
args = parser.parse_args()
initialize(args)
FILTER_LOCATION = args.filter
DATABASE_LOCATION = args.database
# Daemonize with gunicorn or other means, because the daemonize
# module has trouble dealing with the lock files when the app
# reloads itself.
app.run(debug=args.debug, host=args.server_name, port=args.port)
if __name__ == "__main__":
main()
else:
global logger
logger = logging.getLogger("gunicorn.error")
|
async def greet(ctx):
greetings = [
"Ahn nyong ha se yo",
"Ahn-nyong-ha-se-yo",
"Ahoj",
"An-nyŏng-ha-se-yo",
"As-salamu alaykum",
"Assalamo aleikum",
"Assalamualaikum",
"Avuxeni",
"Bonġu",
"Bonjour",
"Bună ziua",
"Ciao",
"Cześć",
"Dia dhuit",
"Dobar dan",
"Dobra većer",
"Dobro jutro",
"God dag",
"Góðan dag",
"Grüß gott",
"Guten tag",
"Hafa adai",
"Hallå",
"Hallo",
"Hello",
"Hoi",
"Hola",
"How ya doing",
"How you doing",
"Howdy",
"Hujambo",
"Hyvää päivää",
"Ia orna",
"Jo napot",
"Konnichiwa",
"Marhaba",
"Merhaba",
"Moïen",
"Namaskar",
"Namaste",
"Namastē",
"Nde-ewo",
"Nǐ hǎo",
"Niltze",
"Now then",
"Olá",
"Salam",
"Salve",
"Sawasdee",
"Sawubona",
"Selamat siang",
"Shalom",
"Shwmae",
"Sveiki",
"Wassup",
"What's up",
"Xin chào",
"Yasou",
"Zdraveite",
"Zdravo",
"Zdravstvuyte",
"안녕하세요",
"こんにちは",
"你好",
]
message = ctx.content.lower()
# if no one is tagged in the message
if "@" not in message:
message_greetings = []
# check if any of the greetings are in the message
for greeting in greetings:
if greeting.lower() in message:
message_greetings.append(greeting)
# if any are, format them into a greeting back to the user
if len(message_greetings) > 0:
greetings_string = message_greetings[0]
if len(message_greetings) > 1:
first_greeting = message_greetings[0]
other_greetings = []
for greeting in message_greetings[1 : len(message_greetings)]:
other_greetings.append(greeting.lower())
all_greetings = [first_greeting] + other_greetings
if len(message_greetings) > 2:
greetings_string = (
f"{", ".join(all_greetings[0:-1])} and {all_greetings[-1]}"
)
else:
greetings_string = " and ".join(all_greetings)
# respond to user
await ctx.channel.send(f"{greetings_string}, @{ctx.author.name}!")
| async def greet(ctx):
greetings = [
"Ahn nyong ha se yo",
"Ahn-nyong-ha-se-yo",
"Ahoj",
"An-nyŏng-ha-se-yo",
"As-salamu alaykum",
"Assalamo aleikum",
"Assalamualaikum",
"Avuxeni",
"Bonġu",
"Bonjour",
"Bună ziua",
"Ciao",
"Cześć",
"Dia dhuit",
"Dobar dan",
"Dobra većer",
"Dobro jutro",
"God dag",
"Góðan dag",
"Grüß gott",
"Guten tag",
"Hafa adai",
"Hallå",
"Hallo",
"Hello",
"Hoi",
"Hola",
"How ya doing",
"How you doing",
"Howdy",
"Hujambo",
"Hyvää päivää",
"Ia orna",
"Jo napot",
"Konnichiwa",
"Marhaba",
"Merhaba",
"Moïen",
"Namaskar",
"Namaste",
"Namastē",
"Nde-ewo",
"Nǐ hǎo",
"Niltze",
"Now then",
"Olá",
"Salam",
"Salve",
"Sawasdee",
"Sawubona",
"Selamat siang",
"Shalom",
"Shwmae",
"Sveiki",
"Wassup",
"What's up",
"Xin chào",
"Yasou",
"Zdraveite",
"Zdravo",
"Zdravstvuyte",
"안녕하세요",
"こんにちは",
"你好",
]
message = ctx.content.lower()
# if no one is tagged in the message
if "@" not in message:
message_greetings = []
# check if any of the greetings are in the message
for greeting in greetings:
if greeting.lower() in message:
message_greetings.append(greeting)
# if any are, format them into a greeting back to the user
if len(message_greetings) > 0:
greetings_string = message_greetings[0]
if len(message_greetings) > 1:
first_greeting = message_greetings[0]
other_greetings = []
for greeting in message_greetings[1 : len(message_greetings)]:
other_greetings.append(greeting.lower())
all_greetings = [first_greeting] + other_greetings
if len(message_greetings) > 2:
greetings_string = (
f"{', '.join(all_greetings[0:-1])} and {all_greetings[-1]}"
)
else:
greetings_string = " and ".join(all_greetings)
# respond to user
await ctx.channel.send(f"{greetings_string}, @{ctx.author.name}!")
|
import discord
from discord.ext import commands
from decorators import *
from io import BytesIO
from urllib.parse import quote
from base64 import b64encode
from json import loads
class encoding(commands.Cog):
def __init__(self):
self.ciphers = loads(open("./assets/json/encode.json", "r").read())
pass
@command(["jumble"])
@cooldown(2)
@require_args()
async def shuffle(self, ctx, *args):
return await ctx.reply(ctx.bot.util.shuffle(" ".join(args)))
@command(["morse-code"])
@cooldown(5)
@require_args()
async def morse(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += " " + self.ciphers.get(char, { "morse": char })["morse"]
return await ctx.reply(total[1:])
@command(["blind"])
@cooldown(5)
@require_args()
async def braille(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += self.ciphers.get(char, { "braille": char })["braille"]
return await ctx.reply(total)
@command(["curve", "curve-text"])
@cooldown(5)
@require_args()
async def cursive(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += self.ciphers.get(char, { "cursive": char })["cursive"]
return await ctx.reply(total)
@command(["fancy-text"])
@cooldown(5)
@require_args()
async def fancy(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += self.ciphers.get(char, { "fancy": char })["fancy"]
return await ctx.reply(total)
@command(["upside-down", "upsidedown", "flip-text", "textflip"])
@cooldown(5)
@require_args()
async def fliptext(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += self.ciphers.get(char, { "upside-down": char })["upside-down"]
return await ctx.reply(total)
@command()
@cooldown(4)
@require_args()
@permissions(bot=['attach_files'])
async def ascii(self, ctx, *args):
await ctx.trigger_typing()
parser = ctx.bot.Parser(args)
parser.parse(('hastebin',))
if (not parser) or (not parser.has("image")):
if not parser.other:
return await ctx.bot.cmds.invalid_args(ctx)
ascii = await ctx.bot.util.request(
"http://artii.herokuapp.com/make",
text=' '.join(parser.other)
)
if parser.has("hastebin"):
try:
response = await ctx.bot.http._HTTPClient__session.post("https://paste.mod.gg/documents", data=ascii)
assert response.status < 400
json = await response.json()
await ctx.success_embed(description=f"[**Click here to see the asciified text.**](https://paste.mod.gg/{json["key"]})")
                    del ascii, parser, json
return
except AssertionError:
pass
await ctx.reply(f'```{ascii[:2000]}```')
del ascii, parser
return
parser.shift("image")
image = await ctx.bot.Parser.parse_image(ctx, parser.other)
string = await ctx.bot.Image.asciify(image)
        if parser.has("hastebin"):
try:
response = await ctx.bot.http._HTTPClient__session.post("https://paste.mod.gg/documents", data=string)
assert response.status < 400
json = await response.json()
await ctx.success_embed(description=f"[**Click here to see the asciified image.**](https://paste.mod.gg/{json["key"]})")
                del string, image, parser, json
return
except AssertionError:
pass
await ctx.bot.http.send_files(ctx.channel.id, content="", files=[discord.File(BytesIO(bytes(string, 'utf-8')), "asciified.txt")])
        del string, image, parser
@command()
@cooldown(2)
@permissions(bot=['attach_files'])
@require_args()
async def barcode(self, ctx, *args):
await ctx.trigger_typing()
return await ctx.send_image('http://www.barcode-generator.org/zint/api.php?bc_number=20&bc_data=' + quote(' '.join(args))[:75])
@command(['qrcode', 'qr-code'])
@cooldown(2)
@permissions(bot=['attach_files'])
@require_args()
async def qr(self, ctx, *args):
await ctx.trigger_typing()
return await ctx.send_image('https://api.qrserver.com/v1/create-qr-code/?size=150x150&data=' + quote(' '.join(args))[:75])
@command()
@cooldown(2)
@require_args()
async def binary(self, ctx, *args):
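        # e.g. "Hi" becomes "0100100001101001": each character is replaced by the
        # 8-bit binary form of its code point.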
return await ctx.reply('```'+''.join(map(lambda x: f"{ord(x):08b}", ' '.join(args)))[:2000]+'```')
@command()
@cooldown(2)
@require_args(2)
async def caesar(self, ctx, *args):
offset = ctx.bot.Parser.get_numbers(args)
if not offset:
return await ctx.bot.cmds.invalid_args(ctx)
return await ctx.reply(ctx.bot.util.caesar(str(' '.join(args).replace(str(offset[0]), '')), offset[0]))
@command()
@cooldown(2)
@require_args()
async def atbash(self, ctx, *args):
return await ctx.reply(ctx.bot.util.atbash(' '.join(args)))
@command()
@cooldown(2)
@require_args()
async def reverse(self, ctx, *args):
return await ctx.reply(' '.join(args)[::-1])
@command(['b64'])
@cooldown(2)
@require_args()
async def base64(self, ctx, *args):
return await ctx.reply(b64encode(' '.join(args).encode('ascii')).decode('ascii'))
def setup(client):
client.add_cog(encoding()) | import discord
from discord.ext import commands
from decorators import *
from io import BytesIO
from urllib.parse import quote
from base64 import b64encode
from json import loads
class encoding(commands.Cog):
def __init__(self):
self.ciphers = loads(open("./assets/json/encode.json", "r").read())
pass
@command(["jumble"])
@cooldown(2)
@require_args()
async def shuffle(self, ctx, *args):
return await ctx.reply(ctx.bot.util.shuffle(" ".join(args)))
@command(["morse-code"])
@cooldown(5)
@require_args()
async def morse(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += " " + self.ciphers.get(char, { "morse": char })["morse"]
return await ctx.reply(total[1:])
@command(["blind"])
@cooldown(5)
@require_args()
async def braille(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += self.ciphers.get(char, { "braille": char })["braille"]
return await ctx.reply(total)
@command(["curve", "curve-text"])
@cooldown(5)
@require_args()
async def cursive(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += self.ciphers.get(char, { "cursive": char })["cursive"]
return await ctx.reply(total)
@command(["fancy-text"])
@cooldown(5)
@require_args()
async def fancy(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += self.ciphers.get(char, { "fancy": char })["fancy"]
return await ctx.reply(total)
@command(["upside-down", "upsidedown", "flip-text", "textflip"])
@cooldown(5)
@require_args()
async def fliptext(self, ctx, *args):
total = ""
for char in " ".join(args).lower():
total += self.ciphers.get(char, { "upside-down": char })["upside-down"]
return await ctx.reply(total)
@command()
@cooldown(4)
@require_args()
@permissions(bot=['attach_files'])
async def ascii(self, ctx, *args):
await ctx.trigger_typing()
parser = ctx.bot.Parser(args)
parser.parse(('hastebin',))
if (not parser) or (not parser.has("image")):
if not parser.other:
return await ctx.bot.cmds.invalid_args(ctx)
ascii = await ctx.bot.util.request(
"http://artii.herokuapp.com/make",
text=' '.join(parser.other)
)
if parser.has("hastebin"):
try:
response = await ctx.bot.http._HTTPClient__session.post("https://paste.mod.gg/documents", data=ascii)
assert response.status < 400
json = await response.json()
await ctx.success_embed(description=f"[**Click here to see the asciified text.**](https://paste.mod.gg/{json['key']})")
                    del ascii, parser, json
return
except AssertionError:
pass
await ctx.reply(f'```{ascii[:2000]}```')
del ascii, parser
return
parser.shift("image")
image = await ctx.bot.Parser.parse_image(ctx, parser.other)
string = await ctx.bot.Image.asciify(image)
        if parser.has("hastebin"):
try:
response = await ctx.bot.http._HTTPClient__session.post("https://paste.mod.gg/documents", data=string)
assert response.status < 400
json = await response.json()
await ctx.success_embed(description=f"[**Click here to see the asciified image.**](https://paste.mod.gg/{json['key']})")
                del string, image, parser, json
return
except AssertionError:
pass
await ctx.bot.http.send_files(ctx.channel.id, content="", files=[discord.File(BytesIO(bytes(string, 'utf-8')), "asciified.txt")])
        del string, image, parser
@command()
@cooldown(2)
@permissions(bot=['attach_files'])
@require_args()
async def barcode(self, ctx, *args):
await ctx.trigger_typing()
return await ctx.send_image('http://www.barcode-generator.org/zint/api.php?bc_number=20&bc_data=' + quote(' '.join(args))[:75])
@command(['qrcode', 'qr-code'])
@cooldown(2)
@permissions(bot=['attach_files'])
@require_args()
async def qr(self, ctx, *args):
await ctx.trigger_typing()
return await ctx.send_image('https://api.qrserver.com/v1/create-qr-code/?size=150x150&data=' + quote(' '.join(args))[:75])
@command()
@cooldown(2)
@require_args()
async def binary(self, ctx, *args):
return await ctx.reply('```'+''.join(map(lambda x: f"{ord(x):08b}", ' '.join(args)))[:2000]+'```')
@command()
@cooldown(2)
@require_args(2)
async def caesar(self, ctx, *args):
offset = ctx.bot.Parser.get_numbers(args)
if not offset:
return await ctx.bot.cmds.invalid_args(ctx)
return await ctx.reply(ctx.bot.util.caesar(str(' '.join(args).replace(str(offset[0]), '')), offset[0]))
@command()
@cooldown(2)
@require_args()
async def atbash(self, ctx, *args):
return await ctx.reply(ctx.bot.util.atbash(' '.join(args)))
@command()
@cooldown(2)
@require_args()
async def reverse(self, ctx, *args):
return await ctx.reply(' '.join(args)[::-1])
@command(['b64'])
@cooldown(2)
@require_args()
async def base64(self, ctx, *args):
return await ctx.reply(b64encode(' '.join(args).encode('ascii')).decode('ascii'))
def setup(client):
client.add_cog(encoding()) |
#main game section
# %%
plansza_do_gry = {'7':' ','8':' ','9':' ',
'4':' ','5':' ','6':' ',
'1':' ','2':' ','3':' '}
klawisze_gry=[]
for key in plansza_do_gry:
klawisze_gry.append(key)
# print(klawisze_gry)
def drukuj_plansze(pole):
print(f"{pole["7"]} | {pole["8"]} | {pole["9"]}")
print('- + - + -')
print(f"{pole["4"]} | {pole["5"]} | {pole["6"]}")
print('- + - + -')
print(f"{pole["1"]} | {pole["2"]} | {pole["3"]}")
# drukuj_plansze(plansza_do_gry)
def gra():
gracz = 'X'
licznik=0
for i in range(10):
drukuj_plansze(plansza_do_gry)
move=input(f"To jest ruch, {gracz}. Wybierz gdzie chcesz postawić znak")
if plansza_do_gry[move] == ' ':
plansza_do_gry[move] = gracz
licznik += 1
else:
print('miejsce zajęte\nwstaw znak w inne pole')
continue
if licznik >=5: #i
if plansza_do_gry['7'] == plansza_do_gry['8'] == plansza_do_gry['9'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['4'] == plansza_do_gry['5'] == plansza_do_gry['6'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['1'] == plansza_do_gry['2'] == plansza_do_gry['3'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['1'] == plansza_do_gry['4'] == plansza_do_gry['7'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['2'] == plansza_do_gry['5'] == plansza_do_gry['8'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['3'] == plansza_do_gry['6'] == plansza_do_gry['9'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['1'] == plansza_do_gry['5'] == plansza_do_gry['9'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['3'] == plansza_do_gry['5'] == plansza_do_gry['7'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
if licznik == 9:
print("\nKoniec Gry")
print("remis")
if gracz == 'X':
gracz = 'O'
else:
gracz = 'X'
    restart = input('grasz ponownie?\n(t/n)')
if restart == 't' or restart == 'T':
for key in klawisze_gry:
plansza_do_gry[key] = ' '
        gra()  # recursive call
# super-function
if __name__ == '__main__':  # concerns packages and packaging into a package
gra()
# %%
| #main game section
# %%
plansza_do_gry = {'7':' ','8':' ','9':' ',
'4':' ','5':' ','6':' ',
'1':' ','2':' ','3':' '}
klawisze_gry=[]
for key in plansza_do_gry:
klawisze_gry.append(key)
# print(klawisze_gry)
def drukuj_plansze(pole):
print(f"{pole['7']} | {pole['8']} | {pole['9']}")
print('- + - + -')
print(f"{pole['4']} | {pole['5']} | {pole['6']}")
print('- + - + -')
print(f"{pole['1']} | {pole['2']} | {pole['3']}")
# drukuj_plansze(plansza_do_gry)
def gra():
gracz = 'X'
licznik=0
for i in range(10):
drukuj_plansze(plansza_do_gry)
move=input(f"To jest ruch, {gracz}. Wybierz gdzie chcesz postawić znak")
if plansza_do_gry[move] == ' ':
plansza_do_gry[move] = gracz
licznik += 1
else:
print('miejsce zajęte\nwstaw znak w inne pole')
continue
if licznik >=5: #i
if plansza_do_gry['7'] == plansza_do_gry['8'] == plansza_do_gry['9'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['4'] == plansza_do_gry['5'] == plansza_do_gry['6'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['1'] == plansza_do_gry['2'] == plansza_do_gry['3'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['1'] == plansza_do_gry['4'] == plansza_do_gry['7'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['2'] == plansza_do_gry['5'] == plansza_do_gry['8'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['3'] == plansza_do_gry['6'] == plansza_do_gry['9'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['1'] == plansza_do_gry['5'] == plansza_do_gry['9'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
elif plansza_do_gry['3'] == plansza_do_gry['5'] == plansza_do_gry['7'] != ' ':
drukuj_plansze(plansza_do_gry)
print("\nKoniec Gry")
print(f"Wygrał gracz: {gracz}")
break
if licznik == 9:
print("\nKoniec Gry")
print("remis")
if gracz == 'X':
gracz = 'O'
else:
gracz = 'X'
    restart = input('grasz ponownie?\n(t/n)')
if restart == 't' or restart == 'T':
for key in klawisze_gry:
plansza_do_gry[key] = ' '
        gra()  # recursive call
# super-function
if __name__ == '__main__':  # concerns packages and packaging into a package
gra()
# %%
|
import argparse
import discretisedfield as df
def convert_files(input_files, output_files):
for input_file, output_file in zip(input_files, output_files):
field = df.Field.fromfile(input_file)
field.write(output_file)
def main():
parser = argparse.ArgumentParser(
prog='ovf2vtk',
description='ovf2vtk - ovf to VTK format conversion'
)
parser.add_argument('--infile', type=argparse.FileType('r'),
help='One or more input files', nargs='+',
required=True)
parser.add_argument('--outfile', type=argparse.FileType('w'), nargs='+',
help='One or more output files, optional')
args = parser.parse_args()
if args.outfile:
if len(args.infile) == len(args.outfile):
input_files = [f.name for f in args.infile]
output_files = [f.name for f in args.outfile]
else:
print('\nError: The number of input and output '
'files does not match.')
return 0
else:
input_files = [f.name for f in args.infile]
output_files = [f'{f.split('.')[0]}.vtk' for f in input_files]
convert_files(input_files, output_files)
if __name__ == "__main__":
main()
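# Illustrative invocations (script and file names are placeholders):
#   python ovf2vtk.py --infile m1.ovf m2.ovf --outfile m1.vtk m2.vtk
#   python ovf2vtk.py --infile m1.ovf              # output defaults to m1.vtk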
| import argparse
import discretisedfield as df
def convert_files(input_files, output_files):
for input_file, output_file in zip(input_files, output_files):
field = df.Field.fromfile(input_file)
field.write(output_file)
def main():
parser = argparse.ArgumentParser(
prog='ovf2vtk',
description='ovf2vtk - ovf to VTK format conversion'
)
parser.add_argument('--infile', type=argparse.FileType('r'),
help='One or more input files', nargs='+',
required=True)
parser.add_argument('--outfile', type=argparse.FileType('w'), nargs='+',
help='One or more output files, optional')
args = parser.parse_args()
if args.outfile:
if len(args.infile) == len(args.outfile):
input_files = [f.name for f in args.infile]
output_files = [f.name for f in args.outfile]
else:
print('\nError: The number of input and output '
'files does not match.')
return 0
else:
input_files = [f.name for f in args.infile]
output_files = [f'{f.split(".")[0]}.vtk' for f in input_files]
convert_files(input_files, output_files)
if __name__ == "__main__":
main()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
import re
from abc import ABC, abstractmethod
from textwrap import dedent
from typing import Callable, ClassVar, Iterator, Optional, cast
from typing_extensions import final
from pants.backend.docker.registries import ALL_DEFAULT_REGISTRIES
from pants.base.build_environment import get_buildroot
from pants.core.goals.run import RestartableField
from pants.engine.addresses import Address
from pants.engine.fs import GlobMatchErrorBehavior
from pants.engine.target import (
COMMON_TARGET_FIELDS,
AsyncFieldMixin,
BoolField,
Dependencies,
DictStringToStringField,
InvalidFieldException,
OptionalSingleSourceField,
StringField,
StringSequenceField,
Target,
)
from pants.util.docutil import doc_url
# Common help text to be applied to each field that supports value interpolation.
_interpolation_help = (
"{kind} may use placeholders in curly braces to be interpolated. The placeholders are derived "
"from various sources, such as the Dockerfile instructions and build args.\n\n"
)
class DockerImageBuildArgsField(StringSequenceField):
alias = "extra_build_args"
default = ()
help = (
"Build arguments (`--build-arg`) to use when building this image. "
"Entries are either strings in the form `ARG_NAME=value` to set an explicit value; "
"or just `ARG_NAME` to copy the value from Pants's own environment.\n\n"
"Use `[docker].build_args` to set default build args for all images."
)
class DockerImageContextRootField(StringField):
alias = "context_root"
help = (
"Specify which directory to use as the Docker build context root. This affects the file "
"paths to use for the `COPY` and `ADD` instructions. For example, whether "
"`COPY files/f.txt` should look for the file relative to the build root: "
"`<build root>/files/f.txt` vs relative to the BUILD file: "
"`<build root>/path_to_build_file/files/f.txt`.\n\n"
"Specify the `context_root` path as `files` for relative to build root, or as `./files` "
"for relative to the BUILD file.\n\n"
"If `context_root` is not specified, it defaults to `[docker].default_context_root`."
)
@classmethod
def compute_value(cls, raw_value: Optional[str], address: Address) -> Optional[str]:
value_or_default = super().compute_value(raw_value, address=address)
if isinstance(value_or_default, str) and value_or_default.startswith("/"):
val = value_or_default.strip("/")
raise InvalidFieldException(
f"The `{cls.alias}` field in target {address} must be a relative path, but was "
f"{value_or_default!r}. Use {val!r} for a path relative to the build root, or "
f"{"./" + val!r} for a path relative to the BUILD file (i.e. {os.path.join(address.spec_path, val)!r})."
)
return value_or_default
class DockerImageSourceField(OptionalSingleSourceField):
default = "Dockerfile"
# When the default glob value is in effect, we don't want the normal glob match error behavior
# to kick in for a missing Dockerfile, in case there are `instructions` provided, in which case
# we generate the Dockerfile instead. If there are no `instructions`, or there are both
# `instructions` and a Dockerfile hydrated from the `source` glob, we error out with a message
# to the user.
default_glob_match_error_behavior = GlobMatchErrorBehavior.ignore
help = (
"The Dockerfile to use when building the Docker image.\n\n"
"Use the `instructions` field instead if you prefer not having the Dockerfile in your "
"source tree."
)
class DockerImageInstructionsField(StringSequenceField):
alias = "instructions"
required = False
help = (
"The `Dockerfile` content, typically one instruction per list item.\n\n"
"Use the `source` field instead if you prefer having the Dockerfile in your source tree."
"\n\n"
+ dedent(
"""\
Example:
# example/BUILD
docker_image(
instructions=[
"FROM base/image:1.0",
"RUN echo example",
],
)
"""
)
)
class DockerImageTagsField(StringSequenceField):
alias = "image_tags"
default = ("latest",)
help = (
"Any tags to apply to the Docker image name (the version is usually applied as a tag).\n\n"
+ _interpolation_help.format(kind="tag")
+ f"See {doc_url("tagging-docker-images")}."
)
class DockerImageTargetStageField(StringField):
alias = "target_stage"
help = (
"Specify target build stage, rather than building the entire `Dockerfile`.\n\n"
"When using multi-stage build, you may name your stages, and can target them when building "
"to only selectively build a certain stage. See also the `--docker-build-target-stage` "
"option.\n\n"
"Read more about [multi-stage Docker builds]"
"(https://docs.docker.com/develop/develop-images/multistage-build/#stop-at-a-specific-build-stage)"
)
class DockerImageDependenciesField(Dependencies):
supports_transitive_excludes = True
class DockerImageRegistriesField(StringSequenceField):
alias = "registries"
default = (ALL_DEFAULT_REGISTRIES,)
help = (
"List of addresses or configured aliases to any Docker registries to use for the "
"built image.\n\n"
"The address is a domain name with optional port for your registry, and any registry "
"aliases are prefixed with `@` for addresses in the [docker].registries configuration "
"section.\n\n"
"By default, all configured registries with `default = true` are used.\n\n"
+ dedent(
"""\
Example:
# pants.toml
[docker.registries.my-registry-alias]
address = "myregistrydomain:port"
default = false # optional
# example/BUILD
docker_image(
registries = [
"@my-registry-alias",
"myregistrydomain:port",
],
)
"""
)
+ (
"The above example shows two valid `registry` options: using an alias to a configured "
"registry and the address to a registry verbatim in the BUILD file."
)
)
class DockerImageRepositoryField(StringField):
alias = "repository"
help = (
'The repository name for the Docker image. e.g. "<repository>/<name>".\n\n'
"It uses the `[docker].default_repository` by default.\n\n"
+ _interpolation_help.format(kind="repository")
+ "Additional placeholders for the repository field are: `name`, `directory` and "
"`parent_directory`.\n\nSee the documentation for `[docker].default_repository` for more "
"information."
)
class DockerImageSkipPushField(BoolField):
alias = "skip_push"
default = False
help = "If set to true, do not push this image to registries when running `./pants publish`."
OptionValueFormatter = Callable[[str], str]
class DockerBuildOptionFieldMixin(ABC):
"""Inherit this mixin class to provide options to `docker build`."""
docker_build_option: ClassVar[str]
@abstractmethod
def option_values(self, *, value_formatter: OptionValueFormatter) -> Iterator[str]:
"""Subclasses must implement this, to turn their `self.value` into none, one or more option
values."""
@final
def options(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for value in self.option_values(value_formatter=value_formatter):
yield from (self.docker_build_option, value)
class DockerImageBuildImageLabelsOptionField(DockerBuildOptionFieldMixin, DictStringToStringField):
alias = "image_labels"
help = (
"Provide image metadata.\n\n"
+ _interpolation_help.format(kind="label value")
+ "See [Docker labels](https://docs.docker.com/config/labels-custom-metadata/"
"#manage-labels-on-objects) for more information."
)
docker_build_option = "--label"
def option_values(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
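        # e.g. `image_labels={"maintainer": "me@example.com"}` (placeholder value) makes
        # options() emit "--label" followed by "maintainer=me@example.com", after any
        # placeholder interpolation by value_formatter.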
for label, value in (self.value or {}).items():
yield f"{label}={value_formatter(value)}"
class DockerImageBuildSecretsOptionField(
AsyncFieldMixin, DockerBuildOptionFieldMixin, DictStringToStringField
):
alias = "secrets"
help = (
"Secret files to expose to the build (only if BuildKit enabled).\n\n"
"Secrets may use absolute paths, or paths relative to your build root, or the BUILD file "
"if prefixed with `./`. The id should be valid as used by the Docker build `--secret` "
"option. See [Docker secrets](https://docs.docker.com/engine/swarm/secrets/) for more "
"information.\n\n"
+ dedent(
"""\
Example:
docker_image(
secrets={
"mysecret": "/var/secrets/some-secret",
"repo-secret": "src/proj/secrets/some-secret",
"target-secret": "./secrets/some-secret",
}
)
"""
)
)
docker_build_option = "--secret"
def option_values(self, **kwargs) -> Iterator[str]:
# os.path.join() discards preceding parts if encountering an abs path, e.g. if the secret
# `path` is an absolute path, the `buildroot` and `spec_path` will not be considered. Also,
# an empty path part is ignored.
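        # For example (illustrative paths):
        #   os.path.join("/build/root", "", "/var/secrets/s")    -> "/var/secrets/s"
        #   os.path.join("/build/root", "src/proj", "secrets/s") -> "/build/root/src/proj/secrets/s"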
for secret, path in (self.value or {}).items():
full_path = os.path.join(
get_buildroot(),
self.address.spec_path if re.match(r"\.{1,2}/", path) else "",
path,
)
yield f"id={secret},src={os.path.normpath(full_path)}"
class DockerImageBuildSSHOptionField(DockerBuildOptionFieldMixin, StringSequenceField):
alias = "ssh"
default = ()
help = (
"SSH agent socket or keys to expose to the build (only if BuildKit enabled) "
"(format: default|<id>[=<socket>|<key>[,<key>]])\n\n"
"The exposed agent and/or keys can then be used in your `Dockerfile` by mounting them in "
"your `RUN` instructions:\n\n"
" RUN --mount=type=ssh ...\n\n"
"See [Docker documentation](https://docs.docker.com/develop/develop-images"
"/build_enhancements/#using-ssh-to-access-private-data-in-builds) for more information."
)
docker_build_option = "--ssh"
def option_values(self, **kwargs) -> Iterator[str]:
yield from cast("tuple[str]", self.value)
class DockerImageTarget(Target):
alias = "docker_image"
core_fields = (
*COMMON_TARGET_FIELDS,
DockerImageBuildArgsField,
DockerImageDependenciesField,
DockerImageSourceField,
DockerImageInstructionsField,
DockerImageContextRootField,
DockerImageTagsField,
DockerImageRegistriesField,
DockerImageRepositoryField,
DockerImageBuildImageLabelsOptionField,
DockerImageBuildSecretsOptionField,
DockerImageBuildSSHOptionField,
DockerImageSkipPushField,
DockerImageTargetStageField,
RestartableField,
)
help = (
"The `docker_image` target describes how to build and tag a Docker image.\n\n"
"Any dependencies, as inferred or explicitly specified, will be included in the Docker "
"build context, after being packaged if applicable.\n\n"
"By default, will use a Dockerfile from the same directory as the BUILD file this target "
"is defined in. Point at another file with the `source` field, or use the `instructions` "
"field to have the Dockerfile contents verbatim directly in the BUILD file.\n\n"
"Dependencies on upstream/base images defined by another `docker_image` are inferred if "
"referenced by a build argument with a default value of the target address.\n\n"
+ dedent(
"""\
Example:
# src/docker/downstream/Dockerfile
ARG BASE=src/docker/upstream:image
FROM $BASE
...
"""
)
)
| # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
import re
from abc import ABC, abstractmethod
from textwrap import dedent
from typing import Callable, ClassVar, Iterator, Optional, cast
from typing_extensions import final
from pants.backend.docker.registries import ALL_DEFAULT_REGISTRIES
from pants.base.build_environment import get_buildroot
from pants.core.goals.run import RestartableField
from pants.engine.addresses import Address
from pants.engine.fs import GlobMatchErrorBehavior
from pants.engine.target import (
COMMON_TARGET_FIELDS,
AsyncFieldMixin,
BoolField,
Dependencies,
DictStringToStringField,
InvalidFieldException,
OptionalSingleSourceField,
StringField,
StringSequenceField,
Target,
)
from pants.util.docutil import doc_url
# Common help text to be applied to each field that supports value interpolation.
_interpolation_help = (
"{kind} may use placeholders in curly braces to be interpolated. The placeholders are derived "
"from various sources, such as the Dockerfile instructions and build args.\n\n"
)
class DockerImageBuildArgsField(StringSequenceField):
alias = "extra_build_args"
default = ()
help = (
"Build arguments (`--build-arg`) to use when building this image. "
"Entries are either strings in the form `ARG_NAME=value` to set an explicit value; "
"or just `ARG_NAME` to copy the value from Pants's own environment.\n\n"
"Use `[docker].build_args` to set default build args for all images."
)
class DockerImageContextRootField(StringField):
alias = "context_root"
help = (
"Specify which directory to use as the Docker build context root. This affects the file "
"paths to use for the `COPY` and `ADD` instructions. For example, whether "
"`COPY files/f.txt` should look for the file relative to the build root: "
"`<build root>/files/f.txt` vs relative to the BUILD file: "
"`<build root>/path_to_build_file/files/f.txt`.\n\n"
"Specify the `context_root` path as `files` for relative to build root, or as `./files` "
"for relative to the BUILD file.\n\n"
"If `context_root` is not specified, it defaults to `[docker].default_context_root`."
)
@classmethod
def compute_value(cls, raw_value: Optional[str], address: Address) -> Optional[str]:
value_or_default = super().compute_value(raw_value, address=address)
if isinstance(value_or_default, str) and value_or_default.startswith("/"):
val = value_or_default.strip("/")
raise InvalidFieldException(
f"The `{cls.alias}` field in target {address} must be a relative path, but was "
f"{value_or_default!r}. Use {val!r} for a path relative to the build root, or "
f"{'./' + val!r} for a path relative to the BUILD file (i.e. {os.path.join(address.spec_path, val)!r})."
)
return value_or_default
class DockerImageSourceField(OptionalSingleSourceField):
default = "Dockerfile"
# When the default glob value is in effect, we don't want the normal glob match error behavior
# to kick in for a missing Dockerfile, in case there are `instructions` provided, in which case
# we generate the Dockerfile instead. If there are no `instructions`, or there are both
# `instructions` and a Dockerfile hydrated from the `source` glob, we error out with a message
# to the user.
default_glob_match_error_behavior = GlobMatchErrorBehavior.ignore
help = (
"The Dockerfile to use when building the Docker image.\n\n"
"Use the `instructions` field instead if you prefer not having the Dockerfile in your "
"source tree."
)
class DockerImageInstructionsField(StringSequenceField):
alias = "instructions"
required = False
help = (
"The `Dockerfile` content, typically one instruction per list item.\n\n"
"Use the `source` field instead if you prefer having the Dockerfile in your source tree."
"\n\n"
+ dedent(
"""\
Example:
# example/BUILD
docker_image(
instructions=[
"FROM base/image:1.0",
"RUN echo example",
],
)
"""
)
)
class DockerImageTagsField(StringSequenceField):
alias = "image_tags"
default = ("latest",)
help = (
"Any tags to apply to the Docker image name (the version is usually applied as a tag).\n\n"
+ _interpolation_help.format(kind="tag")
+ f"See {doc_url('tagging-docker-images')}."
)
class DockerImageTargetStageField(StringField):
alias = "target_stage"
help = (
"Specify target build stage, rather than building the entire `Dockerfile`.\n\n"
"When using multi-stage build, you may name your stages, and can target them when building "
"to only selectively build a certain stage. See also the `--docker-build-target-stage` "
"option.\n\n"
"Read more about [multi-stage Docker builds]"
"(https://docs.docker.com/develop/develop-images/multistage-build/#stop-at-a-specific-build-stage)"
)
class DockerImageDependenciesField(Dependencies):
supports_transitive_excludes = True
class DockerImageRegistriesField(StringSequenceField):
alias = "registries"
default = (ALL_DEFAULT_REGISTRIES,)
help = (
"List of addresses or configured aliases to any Docker registries to use for the "
"built image.\n\n"
"The address is a domain name with optional port for your registry, and any registry "
"aliases are prefixed with `@` for addresses in the [docker].registries configuration "
"section.\n\n"
"By default, all configured registries with `default = true` are used.\n\n"
+ dedent(
"""\
Example:
# pants.toml
[docker.registries.my-registry-alias]
address = "myregistrydomain:port"
default = false # optional
# example/BUILD
docker_image(
registries = [
"@my-registry-alias",
"myregistrydomain:port",
],
)
"""
)
+ (
"The above example shows two valid `registry` options: using an alias to a configured "
"registry and the address to a registry verbatim in the BUILD file."
)
)
class DockerImageRepositoryField(StringField):
alias = "repository"
help = (
'The repository name for the Docker image. e.g. "<repository>/<name>".\n\n'
"It uses the `[docker].default_repository` by default.\n\n"
+ _interpolation_help.format(kind="repository")
+ "Additional placeholders for the repository field are: `name`, `directory` and "
"`parent_directory`.\n\nSee the documentation for `[docker].default_repository` for more "
"information."
)
class DockerImageSkipPushField(BoolField):
alias = "skip_push"
default = False
help = "If set to true, do not push this image to registries when running `./pants publish`."
OptionValueFormatter = Callable[[str], str]
class DockerBuildOptionFieldMixin(ABC):
"""Inherit this mixin class to provide options to `docker build`."""
docker_build_option: ClassVar[str]
@abstractmethod
def option_values(self, *, value_formatter: OptionValueFormatter) -> Iterator[str]:
"""Subclasses must implement this, to turn their `self.value` into none, one or more option
values."""
@final
def options(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for value in self.option_values(value_formatter=value_formatter):
yield from (self.docker_build_option, value)
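# Illustrative sketch (not part of the code above): any new field can contribute flags to
# `docker build` by subclassing DockerBuildOptionFieldMixin, setting `docker_build_option`,
# and yielding one string per flag occurrence. The field name and the `--add-host` flag
# below are assumptions chosen for demonstration only.
class DockerImageExtraHostsOptionField(DockerBuildOptionFieldMixin, StringSequenceField):
    alias = "extra_hosts"
    default = ()
    help = "Hypothetical example field: extra host-to-IP mappings to pass to `docker build`."
    docker_build_option = "--add-host"
    def option_values(self, **kwargs) -> Iterator[str]:
        # Each entry becomes `--add-host <value>` via DockerBuildOptionFieldMixin.options().
        yield from (self.value or ())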
class DockerImageBuildImageLabelsOptionField(DockerBuildOptionFieldMixin, DictStringToStringField):
alias = "image_labels"
help = (
"Provide image metadata.\n\n"
+ _interpolation_help.format(kind="label value")
+ "See [Docker labels](https://docs.docker.com/config/labels-custom-metadata/"
"#manage-labels-on-objects) for more information."
)
docker_build_option = "--label"
def option_values(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for label, value in (self.value or {}).items():
yield f"{label}={value_formatter(value)}"
class DockerImageBuildSecretsOptionField(
AsyncFieldMixin, DockerBuildOptionFieldMixin, DictStringToStringField
):
alias = "secrets"
help = (
"Secret files to expose to the build (only if BuildKit enabled).\n\n"
"Secrets may use absolute paths, or paths relative to your build root, or the BUILD file "
"if prefixed with `./`. The id should be valid as used by the Docker build `--secret` "
"option. See [Docker secrets](https://docs.docker.com/engine/swarm/secrets/) for more "
"information.\n\n"
+ dedent(
"""\
Example:
docker_image(
secrets={
"mysecret": "/var/secrets/some-secret",
"repo-secret": "src/proj/secrets/some-secret",
"target-secret": "./secrets/some-secret",
}
)
"""
)
)
docker_build_option = "--secret"
def option_values(self, **kwargs) -> Iterator[str]:
# os.path.join() discards preceding parts if encountering an abs path, e.g. if the secret
# `path` is an absolute path, the `buildroot` and `spec_path` will not be considered. Also,
# an empty path part is ignored.
for secret, path in (self.value or {}).items():
full_path = os.path.join(
get_buildroot(),
self.address.spec_path if re.match(r"\.{1,2}/", path) else "",
path,
)
yield f"id={secret},src={os.path.normpath(full_path)}"
class DockerImageBuildSSHOptionField(DockerBuildOptionFieldMixin, StringSequenceField):
alias = "ssh"
default = ()
help = (
"SSH agent socket or keys to expose to the build (only if BuildKit enabled) "
"(format: default|<id>[=<socket>|<key>[,<key>]])\n\n"
"The exposed agent and/or keys can then be used in your `Dockerfile` by mounting them in "
"your `RUN` instructions:\n\n"
" RUN --mount=type=ssh ...\n\n"
"See [Docker documentation](https://docs.docker.com/develop/develop-images"
"/build_enhancements/#using-ssh-to-access-private-data-in-builds) for more information."
)
docker_build_option = "--ssh"
def option_values(self, **kwargs) -> Iterator[str]:
yield from cast("tuple[str]", self.value)
class DockerImageTarget(Target):
alias = "docker_image"
core_fields = (
*COMMON_TARGET_FIELDS,
DockerImageBuildArgsField,
DockerImageDependenciesField,
DockerImageSourceField,
DockerImageInstructionsField,
DockerImageContextRootField,
DockerImageTagsField,
DockerImageRegistriesField,
DockerImageRepositoryField,
DockerImageBuildImageLabelsOptionField,
DockerImageBuildSecretsOptionField,
DockerImageBuildSSHOptionField,
DockerImageSkipPushField,
DockerImageTargetStageField,
RestartableField,
)
help = (
"The `docker_image` target describes how to build and tag a Docker image.\n\n"
"Any dependencies, as inferred or explicitly specified, will be included in the Docker "
"build context, after being packaged if applicable.\n\n"
"By default, will use a Dockerfile from the same directory as the BUILD file this target "
"is defined in. Point at another file with the `source` field, or use the `instructions` "
"field to have the Dockerfile contents verbatim directly in the BUILD file.\n\n"
"Dependencies on upstream/base images defined by another `docker_image` are inferred if "
"referenced by a build argument with a default value of the target address.\n\n"
+ dedent(
"""\
Example:
# src/docker/downstream/Dockerfile
ARG BASE=src/docker/upstream:image
FROM $BASE
...
"""
)
)
|
# -*- coding: utf-8 -*-
import hashlib
from unittest.mock import MagicMock
from asyncy.AppConfig import Expose
from asyncy.Containers import Containers
from asyncy.Exceptions import ActionNotFound, ContainerSpecNotRegisteredError,\
EnvironmentVariableNotFound, K8sError
from asyncy.Kubernetes import Kubernetes
from asyncy.constants.LineConstants import LineConstants
from asyncy.constants.ServiceConstants import ServiceConstants
from asyncy.entities.Volume import Volume
from asyncy.processing import Story
import pytest
from pytest import fixture, mark
@fixture
def line():
return MagicMock()
def test_is_service_reusable(story):
story.app.services = {
'alpine': {
'configuration': {
'actions': {
'echo': {
'run': 'foo'
}
}
}
}
}
line = {
LineConstants.service: 'alpine',
LineConstants.command: 'echo'
}
assert Containers.is_service_reusable(story.app, line) is False
story.app.services['alpine']['configuration']['actions']['echo'][
'run'] = None
assert Containers.is_service_reusable(story.app, line) is True
@mark.parametrize('reusable', [False, True])
@mark.parametrize('name', ['alpine', 'a!lpine', 'ALPINE', '__aLpInE'])
def test_get_container_name(patch, story, line, reusable, name):
patch.object(Containers, 'is_service_reusable', return_value=reusable)
story.app.app_id = 'my_app'
story.app.version = 'v2'
ret = Containers.get_container_name(story.app, story.name, line, name)
if reusable:
assert ret == f'alpine-{Containers.hash_service_name(story.app, name)}'
else:
h = Containers.hash_service_name_and_story_line(story.app, story.name,
line, name)
assert ret == f'alpine-{h}'
@mark.asyncio
async def test_exec():
with pytest.raises(K8sError):
await Containers.exec(None, None, None, None, None)
@mark.asyncio
async def test_container_get_hostname(patch, story, line):
story.app.app_id = 'my_app'
patch.object(Containers, 'get_container_name', return_value='foo')
ret = await Containers.get_hostname(story, line, 'foo')
assert ret == 'foo.my_app.svc.cluster.local'
@mark.asyncio
async def test_clean_app(patch, async_mock):
patch.object(Kubernetes, 'clean_namespace', new=async_mock())
app = MagicMock()
await Containers.clean_app(app)
Kubernetes.clean_namespace.mock.assert_called_with(app)
@mark.asyncio
async def test_remove_volume(patch, story, line, async_mock):
patch.object(Kubernetes, 'remove_volume', new=async_mock())
await Containers.remove_volume(story.app, 'foo')
Kubernetes.remove_volume.mock.assert_called_with(story.app, 'foo')
@mark.asyncio
async def test_prepare_for_deployment(patch, async_mock):
patch.object(Kubernetes, 'clean_namespace', new=async_mock())
story = MagicMock()
await Containers.prepare_for_deployment(story)
Kubernetes.clean_namespace.mock.assert_called_with(story.app)
def test_format_command(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', '{"msg":"foo"}'] == cmd
@mark.parametrize('reusable', [True, False])
def test_hash_volume_name(patch, story, line, reusable):
line['ln'] = '1'
patch.object(Containers, 'is_service_reusable', return_value=reusable)
name = 'my_volume'
service = 'foo'
key = name + '-' + service
if not reusable:
        key = f'{key}-{line["ln"]}'
expected = f'myvolume-' + hashlib.sha1(key.encode('utf-8')).hexdigest()
assert Containers.hash_volume_name(story.app, line, service, name) == \
expected
def test_hash_ingress_name():
e = Expose(service='service',
service_expose_name='expose_name',
http_path='expose_path')
ret = Containers.hash_ingress_name(e)
assert ret == 'exposename-0cf994f170f9d213bb814f74baca87ea149f7536'
@mark.asyncio
async def test_expose_service(app, patch, async_mock):
container_name = 'container_name'
patch.object(Containers, 'get_container_name',
return_value=container_name)
patch.object(Containers, 'create_and_start', new=async_mock())
patch.object(Kubernetes, 'create_ingress', new=async_mock())
e = Expose(service='service',
service_expose_name='expose_name',
http_path='expose_path')
ingress_name = Containers.hash_ingress_name(e)
hostname = f'{app.app_dns}--{Containers.get_simple_name(e.service)}'
await Containers.expose_service(app, e)
Containers.create_and_start.mock.assert_called_with(app, None, e.service,
container_name)
Kubernetes.create_ingress.mock.assert_called_with(ingress_name, app, e,
container_name,
hostname=hostname)
def test_service_name_and_story_line(patch, story):
patch.object(hashlib, 'sha1')
story.name = 'story_name'
story.app.version = 'v29'
ret = Containers.hash_service_name_and_story_line(
story.app, story.name, {'ln': '1'}, 'alpine')
hashlib.sha1.assert_called_with(f'alpine-v29-{story.name}-1'
.encode('utf-8'))
assert ret == hashlib.sha1().hexdigest()
def test_service_name(patch, story):
story.app.version = 'v2'
patch.object(hashlib, 'sha1')
ret = Containers.hash_service_name(story.app, 'alpine')
hashlib.sha1.assert_called_with(f'alpine-v2'.encode('utf-8'))
assert ret == hashlib.sha1().hexdigest()
@mark.asyncio
async def test_create_and_start_no_action(story):
story.app.services = {'alpine': {'configuration': {}}}
with pytest.raises(ActionNotFound):
await Containers.create_and_start(story.app, {'command': 'foo'},
'alpine', 'alpine')
@mark.parametrize('run_command', [None, ['/bin/bash', 'sleep', '10000']])
@mark.parametrize('with_volumes', [True, False])
@mark.parametrize('missing_required_var', [False, True])
@mark.asyncio
async def test_start(patch, story, async_mock,
missing_required_var,
run_command, with_volumes):
line = {
LineConstants.service: 'alpine',
LineConstants.command: 'echo',
'ln': '1'
}
patch.object(Kubernetes, 'create_pod', new=async_mock())
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {
}
},
'volumes': {
'db': {
'persist': True,
'target': '/db'
},
'tmp': {
'persist': False,
'target': '/tmp'
}
},
'environment': {
'param_1': {
'required': True
},
'alpine_only': {}
}
}
}
}
if not with_volumes:
del story.app.services['alpine'][ServiceConstants.config]['volumes']
if run_command is not None:
story.app.services['alpine'][ServiceConstants.config]['actions'][
'echo'] = {'run': {'command': run_command}}
story.app.environment = {
'alpine': {
'alpine_only': True,
'param_1': 'hello_world'
},
'global': 'yes'
}
if missing_required_var:
story.app.environment['alpine']['param_1'] = None
patch.object(Containers, 'get_container_name',
return_value='asyncy-alpine')
expected_volumes = []
if with_volumes:
hash_db = Containers.hash_volume_name(story.app, line, 'alpine', 'db')
hash_tmp = Containers.hash_volume_name(story.app, line, 'alpine',
'tmp')
expected_volumes = [
Volume(persist=True, name=hash_db, mount_path='/db'),
Volume(persist=False, name=hash_tmp, mount_path='/tmp'),
]
if missing_required_var:
with pytest.raises(EnvironmentVariableNotFound):
await Containers.start(story, line)
return
else:
await Containers.start(story, line)
Kubernetes.create_pod.mock.assert_called_with(
app=story.app, service='alpine',
image='alpine', container_name='asyncy-alpine',
start_command=run_command or ['tail', '-f', '/dev/null'],
shutdown_command=None,
env={'alpine_only': True, 'param_1': 'hello_world'},
volumes=expected_volumes)
@mark.asyncio
async def test_init(story, patch, async_mock):
patch.object(Kubernetes, 'create_namespace', new=async_mock())
await Containers.init(story.app)
Kubernetes.create_namespace.mock.assert_called_with(story.app)
def test_format_command_no_format(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
config = app.services['alpine'][ServiceConstants.config]
config['actions']['echo']['format'] = None
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', '{"msg":"foo"}'] == cmd
def test_format_command_no_spec(logger, app, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = {}
with pytest.raises(ContainerSpecNotRegisteredError):
Containers.format_command(story, echo_line, 'alpine', 'echo')
def test_format_command_no_args(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
echo_service['alpine'][ServiceConstants.config]['actions']['echo'][
'arguments'] = None
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo'] == cmd
def test_format_command_with_format(patch, logger, app,
echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
patch.object(story, 'argument_by_name', return_value='asyncy')
app.services = echo_service
config = app.services['alpine'][ServiceConstants.config]
config['actions']['echo']['format'] = 'echo {msg}'
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', 'asyncy'] == cmd
| # -*- coding: utf-8 -*-
import hashlib
from unittest.mock import MagicMock
from asyncy.AppConfig import Expose
from asyncy.Containers import Containers
from asyncy.Exceptions import ActionNotFound, ContainerSpecNotRegisteredError,\
EnvironmentVariableNotFound, K8sError
from asyncy.Kubernetes import Kubernetes
from asyncy.constants.LineConstants import LineConstants
from asyncy.constants.ServiceConstants import ServiceConstants
from asyncy.entities.Volume import Volume
from asyncy.processing import Story
import pytest
from pytest import fixture, mark
@fixture
def line():
return MagicMock()
def test_is_service_reusable(story):
story.app.services = {
'alpine': {
'configuration': {
'actions': {
'echo': {
'run': 'foo'
}
}
}
}
}
line = {
LineConstants.service: 'alpine',
LineConstants.command: 'echo'
}
assert Containers.is_service_reusable(story.app, line) is False
story.app.services['alpine']['configuration']['actions']['echo'][
'run'] = None
assert Containers.is_service_reusable(story.app, line) is True
@mark.parametrize('reusable', [False, True])
@mark.parametrize('name', ['alpine', 'a!lpine', 'ALPINE', '__aLpInE'])
def test_get_container_name(patch, story, line, reusable, name):
patch.object(Containers, 'is_service_reusable', return_value=reusable)
story.app.app_id = 'my_app'
story.app.version = 'v2'
ret = Containers.get_container_name(story.app, story.name, line, name)
if reusable:
assert ret == f'alpine-{Containers.hash_service_name(story.app, name)}'
else:
h = Containers.hash_service_name_and_story_line(story.app, story.name,
line, name)
assert ret == f'alpine-{h}'
@mark.asyncio
async def test_exec():
with pytest.raises(K8sError):
await Containers.exec(None, None, None, None, None)
@mark.asyncio
async def test_container_get_hostname(patch, story, line):
story.app.app_id = 'my_app'
patch.object(Containers, 'get_container_name', return_value='foo')
ret = await Containers.get_hostname(story, line, 'foo')
assert ret == 'foo.my_app.svc.cluster.local'
@mark.asyncio
async def test_clean_app(patch, async_mock):
patch.object(Kubernetes, 'clean_namespace', new=async_mock())
app = MagicMock()
await Containers.clean_app(app)
Kubernetes.clean_namespace.mock.assert_called_with(app)
@mark.asyncio
async def test_remove_volume(patch, story, line, async_mock):
patch.object(Kubernetes, 'remove_volume', new=async_mock())
await Containers.remove_volume(story.app, 'foo')
Kubernetes.remove_volume.mock.assert_called_with(story.app, 'foo')
@mark.asyncio
async def test_prepare_for_deployment(patch, async_mock):
patch.object(Kubernetes, 'clean_namespace', new=async_mock())
story = MagicMock()
await Containers.prepare_for_deployment(story)
Kubernetes.clean_namespace.mock.assert_called_with(story.app)
def test_format_command(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', '{"msg":"foo"}'] == cmd
@mark.parametrize('reusable', [True, False])
def test_hash_volume_name(patch, story, line, reusable):
line['ln'] = '1'
patch.object(Containers, 'is_service_reusable', return_value=reusable)
name = 'my_volume'
service = 'foo'
key = name + '-' + service
if not reusable:
key = f'{key}-{line["ln"]}'
expected = f'myvolume-' + hashlib.sha1(key.encode('utf-8')).hexdigest()
assert Containers.hash_volume_name(story.app, line, service, name) == \
expected
def test_hash_ingress_name():
e = Expose(service='service',
service_expose_name='expose_name',
http_path='expose_path')
ret = Containers.hash_ingress_name(e)
assert ret == 'exposename-0cf994f170f9d213bb814f74baca87ea149f7536'
@mark.asyncio
async def test_expose_service(app, patch, async_mock):
container_name = 'container_name'
patch.object(Containers, 'get_container_name',
return_value=container_name)
patch.object(Containers, 'create_and_start', new=async_mock())
patch.object(Kubernetes, 'create_ingress', new=async_mock())
e = Expose(service='service',
service_expose_name='expose_name',
http_path='expose_path')
ingress_name = Containers.hash_ingress_name(e)
hostname = f'{app.app_dns}--{Containers.get_simple_name(e.service)}'
await Containers.expose_service(app, e)
Containers.create_and_start.mock.assert_called_with(app, None, e.service,
container_name)
Kubernetes.create_ingress.mock.assert_called_with(ingress_name, app, e,
container_name,
hostname=hostname)
def test_service_name_and_story_line(patch, story):
patch.object(hashlib, 'sha1')
story.name = 'story_name'
story.app.version = 'v29'
ret = Containers.hash_service_name_and_story_line(
story.app, story.name, {'ln': '1'}, 'alpine')
hashlib.sha1.assert_called_with(f'alpine-v29-{story.name}-1'
.encode('utf-8'))
assert ret == hashlib.sha1().hexdigest()
def test_service_name(patch, story):
story.app.version = 'v2'
patch.object(hashlib, 'sha1')
ret = Containers.hash_service_name(story.app, 'alpine')
hashlib.sha1.assert_called_with(f'alpine-v2'.encode('utf-8'))
assert ret == hashlib.sha1().hexdigest()
@mark.asyncio
async def test_create_and_start_no_action(story):
story.app.services = {'alpine': {'configuration': {}}}
with pytest.raises(ActionNotFound):
await Containers.create_and_start(story.app, {'command': 'foo'},
'alpine', 'alpine')
@mark.parametrize('run_command', [None, ['/bin/bash', 'sleep', '10000']])
@mark.parametrize('with_volumes', [True, False])
@mark.parametrize('missing_required_var', [False, True])
@mark.asyncio
async def test_start(patch, story, async_mock,
missing_required_var,
run_command, with_volumes):
line = {
LineConstants.service: 'alpine',
LineConstants.command: 'echo',
'ln': '1'
}
patch.object(Kubernetes, 'create_pod', new=async_mock())
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {
}
},
'volumes': {
'db': {
'persist': True,
'target': '/db'
},
'tmp': {
'persist': False,
'target': '/tmp'
}
},
'environment': {
'param_1': {
'required': True
},
'alpine_only': {}
}
}
}
}
if not with_volumes:
del story.app.services['alpine'][ServiceConstants.config]['volumes']
if run_command is not None:
story.app.services['alpine'][ServiceConstants.config]['actions'][
'echo'] = {'run': {'command': run_command}}
story.app.environment = {
'alpine': {
'alpine_only': True,
'param_1': 'hello_world'
},
'global': 'yes'
}
if missing_required_var:
story.app.environment['alpine']['param_1'] = None
patch.object(Containers, 'get_container_name',
return_value='asyncy-alpine')
expected_volumes = []
if with_volumes:
hash_db = Containers.hash_volume_name(story.app, line, 'alpine', 'db')
hash_tmp = Containers.hash_volume_name(story.app, line, 'alpine',
'tmp')
expected_volumes = [
Volume(persist=True, name=hash_db, mount_path='/db'),
Volume(persist=False, name=hash_tmp, mount_path='/tmp'),
]
if missing_required_var:
with pytest.raises(EnvironmentVariableNotFound):
await Containers.start(story, line)
return
else:
await Containers.start(story, line)
Kubernetes.create_pod.mock.assert_called_with(
app=story.app, service='alpine',
image='alpine', container_name='asyncy-alpine',
start_command=run_command or ['tail', '-f', '/dev/null'],
shutdown_command=None,
env={'alpine_only': True, 'param_1': 'hello_world'},
volumes=expected_volumes)
@mark.asyncio
async def test_init(story, patch, async_mock):
patch.object(Kubernetes, 'create_namespace', new=async_mock())
await Containers.init(story.app)
Kubernetes.create_namespace.mock.assert_called_with(story.app)
def test_format_command_no_format(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
config = app.services['alpine'][ServiceConstants.config]
config['actions']['echo']['format'] = None
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', '{"msg":"foo"}'] == cmd
def test_format_command_no_spec(logger, app, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = {}
with pytest.raises(ContainerSpecNotRegisteredError):
Containers.format_command(story, echo_line, 'alpine', 'echo')
def test_format_command_no_args(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
echo_service['alpine'][ServiceConstants.config]['actions']['echo'][
'arguments'] = None
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo'] == cmd
def test_format_command_with_format(patch, logger, app,
echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
patch.object(story, 'argument_by_name', return_value='asyncy')
app.services = echo_service
config = app.services['alpine'][ServiceConstants.config]
config['actions']['echo']['format'] = 'echo {msg}'
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', 'asyncy'] == cmd
|
import json
from re import split
import shutil
import os
import sys
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from skimage import io
from shapely.geometry import Polygon
Image.MAX_IMAGE_PIXELS = None
def make_dir(path):
if not os.path.exists(path):
os.makedirs(path)
else:
shutil.rmtree(path)
os.makedirs(path)
def dice(a, b):
return 2 * a.intersection(b).area / (a.area + b.area)
def recall(a, b):
return a.intersection(b).area / b.area
def precision(a, b):
return a.intersection(b).area / a.area
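# Worked example (sketch, not executed): two unit squares overlapping by half share an
# intersection area of 0.5, so dice = 2 * 0.5 / (1 + 1) = 0.5 and recall = precision = 0.5.
#   a = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
#   b = Polygon([(0.5, 0), (1.5, 0), (1.5, 1), (0.5, 1)])
#   dice(a, b), recall(a, b), precision(a, b)  # -> (0.5, 0.5, 0.5)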
def find_diff(dice_thred=0.5, draw_preview=True, log_score=True):
# A - new json
with open(file_A_path) as data_file:
data = json.load(data_file)
average_area = sum(
[Polygon(item["geometry"]["coordinates"][0]).area for item in data]
) / len(data)
area_threshold = average_area / 50
print("average area size: ", average_area)
print("size threshold: ", area_threshold)
coor_list_a = []
for item in data:
coor = item["geometry"]["coordinates"]
poly = Polygon(coor[0])
if poly.area > area_threshold:
coor_list_a.extend(item["geometry"]["coordinates"])
else:
print("A ignore", poly.area)
A_x_list = [[xy[0] for xy in coor] for coor in coor_list_a]
A_y_list = [[xy[1] for xy in coor] for coor in coor_list_a]
A_id_list = [i for i in range(len(coor_list_a))]
# B - old json
with open(file_B_path) as data_file:
data = json.load(data_file)
coor_list_b = []
for item in data:
coor = item["geometry"]["coordinates"]
coor = [
[[xy[1], xy[0]] for xy in coor[0]]
        ]  # swap to [y, x] ordering for some JSON files; comment this out if not needed
poly = Polygon(coor[0])
if poly.area > area_threshold:
coor_list_b.extend(coor)
else:
print("B ignore", poly.area)
B_x_list = [[xy[0] for xy in coor] for coor in coor_list_b]
B_y_list = [[xy[1] for xy in coor] for coor in coor_list_b]
# find difference
center_list_new = []
for i in range(len(A_x_list)):
mean_x = (sum(A_x_list[i]) - A_x_list[i][-1]) / (len(A_x_list[i]) - 1)
mean_y = (sum(A_y_list[i]) - A_y_list[i][-1]) / (len(A_y_list[i]) - 1)
center_list_new.append((mean_x, mean_y))
center_list_old = []
for i in range(len(B_x_list)):
mean_x = (sum(B_x_list[i]) - B_x_list[i][-1]) / (len(B_x_list[i]) - 1)
mean_y = (sum(B_y_list[i]) - B_y_list[i][-1]) / (len(B_y_list[i]) - 1)
center_list_old.append((mean_x, mean_y))
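    # Note (sketch): these rings repeat the first vertex as the last one (GeoJSON style), so
    # the sums above drop the closing point before averaging; e.g. the ring
    # [(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)] gives the vertex centroid (1.0, 1.0).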
new_added_list = []
new_added_f1_list = []
new_same_list = []
new_revised_list = []
f1_list = []
positon_threshold = 500
dice_threshold = dice_thred
ignore_count = 0
for i in A_id_list:
x, y = center_list_new[i]
new_p = Polygon(coor_list_a[i])
min_f1 = 0
min_j = -1
_recall, _precision = -1, -1
for j in range(len(center_list_old)):
_x, _y = center_list_old[j]
old_p = Polygon(coor_list_b[j])
if (x - _x) ** 2 + (y - _y) ** 2 <= positon_threshold ** 2:
f1 = dice(new_p, old_p)
if f1 > min_f1:
min_f1 = f1
min_j = j
_recall = recall(new_p, old_p)
_precision = precision(new_p, old_p)
if min_f1 >= 0.999:
_flag = f"Same\t{min_f1}"
new_same_list.append(i)
elif min_f1 >= dice_threshold:
_flag = f"Revised\t{min_f1}"
new_revised_list.append(i)
f1_list.append((min_f1, _recall, _precision))
else:
_flag = f"Added\t{min_f1}"
new_added_list.append(i)
new_added_f1_list.append(min_f1)
# print(min_f1)
if _flag.startswith("Same") or _flag.startswith("Revised"):
if min_j != -1:
coor_list_b.pop(min_j)
center_list_old.pop(min_j)
# print(i, _flag)
removed_count = len(center_list_old)
print(f"A\tB\tsame\tmatch\tadded\tdeleted")
print(
f"{len(A_x_list)}\t{len(B_x_list)}\t{len(new_same_list)}\t{len(new_revised_list)}"
f"\t{len(new_added_list)}\t{removed_count}"
)
print(f"[FP: {len(new_added_list)}/{len(A_x_list)}]")
print(f"[FN: {removed_count}/{len(B_x_list)}]")
# print(f"{len(new_same_list)} same")
# print(f"{len(new_revised_list)} revised")
# print(f"{len(new_added_list)} added")
# print(f"{removed_count} deleted")
# draw visualization
if draw_preview:
ref_image = io.imread(image_ref_path)
background = np.zeros(shape=ref_image.shape, dtype=np.uint8)
img = Image.fromarray(background, "L")
img = img.convert("RGB")
font_path = r"c:\windows\fonts\bahnschrift.ttf"
font = ImageFont.truetype(font_path, size=48)
title_font = ImageFont.truetype(font_path, size=72)
ImageDraw.Draw(img).text(
(100, 400),
text=f"DICE Threshold = {dice_thred}",
font=title_font,
fill="white",
)
ImageDraw.Draw(img).text(
(100, 480),
text=f"PREDICTION [FP: {len(new_added_list)}/{len(A_x_list)}]",
font=title_font,
fill="yellow",
)
ImageDraw.Draw(img).text(
(100, 560),
text=f"GROUND TRUTH [FN: {removed_count}/{len(B_x_list)}]",
font=title_font,
fill="red",
)
for i in new_added_list:
coor_tuple = [(xy[1], xy[0]) for xy in coor_list_a[i]]
# print(coor_tuple)
ImageDraw.Draw(img).line(coor_tuple, fill="yellow", width=6)
# text
f1 = new_added_f1_list[new_added_list.index(i)]
if f1 > 0:
text = "{:.3f}".format(f1) # + f",{Polygon(coor_list_a[i]).area}"
ImageDraw.Draw(img).text(
(center_list_new[i][1] - 40, center_list_new[i][0] + 60),
text,
font=font,
)
for coor_b in coor_list_b:
coor_tuple = [(xy[1], xy[0]) for xy in coor_b]
# print(coor_tuple)
ImageDraw.Draw(img).line(coor_tuple, fill="red", width=6)
# text = f",{Polygon(coor_b).area}"
# ImageDraw.Draw(img).text(
# (coor_tuple[0][0], coor_tuple[0][1]),
# text,
# font=font,
# )
img = np.array(img).astype("uint8")
output_path = image_ref_path.replace(
".png", f'_{str(dice_thred).replace('.','_')}.png'
)
io.imsave(output_path, img)
print(f"Image saved to {output_path}")
# write score
if log_score:
txt_path = file_A_path.replace("json", "txt")
with open(txt_path, "w") as f:
for item in f1_list:
f.write(f"{item[0]},{item[1]},{item[2]}\n")
if __name__ == "__main__":
file_A_path = (
r"C:\Users\yiju\Desktop\Copy\Scripts\masks\1-tom-new-kidney\pred_00a67c839.json"
)
file_B_path = r"C:\Users\yiju\Desktop\Copy\Data\hubmap-kidney-segmentation\test\00a67c839.json"
if len(sys.argv) >= 3:
file_A_path = sys.argv[1]
file_B_path = sys.argv[2]
image_ref_path = file_A_path.replace("json", "png")
A_name = file_A_path.split("\\")[-1].split(".")[0]
B_name = file_B_path.split("\\")[-1].split(".")[0]
print("A: ", A_name)
print("B: ", B_name)
for d in [0.5]: # [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
find_diff(dice_thred=d, draw_preview=True, log_score=True)
| import json
from re import split
import shutil
import os
import sys
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from skimage import io
from shapely.geometry import Polygon
Image.MAX_IMAGE_PIXELS = None
def make_dir(path):
if not os.path.exists(path):
os.makedirs(path)
else:
shutil.rmtree(path)
os.makedirs(path)
def dice(a, b):
return 2 * a.intersection(b).area / (a.area + b.area)
def recall(a, b):
return a.intersection(b).area / b.area
def precision(a, b):
return a.intersection(b).area / a.area
def find_diff(dice_thred=0.5, draw_preview=True, log_score=True):
# A - new json
with open(file_A_path) as data_file:
data = json.load(data_file)
average_area = sum(
[Polygon(item["geometry"]["coordinates"][0]).area for item in data]
) / len(data)
area_threshold = average_area / 50
print("average area size: ", average_area)
print("size threshold: ", area_threshold)
coor_list_a = []
for item in data:
coor = item["geometry"]["coordinates"]
poly = Polygon(coor[0])
if poly.area > area_threshold:
coor_list_a.extend(item["geometry"]["coordinates"])
else:
print("A ignore", poly.area)
A_x_list = [[xy[0] for xy in coor] for coor in coor_list_a]
A_y_list = [[xy[1] for xy in coor] for coor in coor_list_a]
A_id_list = [i for i in range(len(coor_list_a))]
# B - old json
with open(file_B_path) as data_file:
data = json.load(data_file)
coor_list_b = []
for item in data:
coor = item["geometry"]["coordinates"]
coor = [
[[xy[1], xy[0]] for xy in coor[0]]
        ]  # swap to [y, x] ordering for some JSON files; comment this out if not needed
poly = Polygon(coor[0])
if poly.area > area_threshold:
coor_list_b.extend(coor)
else:
print("B ignore", poly.area)
B_x_list = [[xy[0] for xy in coor] for coor in coor_list_b]
B_y_list = [[xy[1] for xy in coor] for coor in coor_list_b]
# find difference
center_list_new = []
for i in range(len(A_x_list)):
mean_x = (sum(A_x_list[i]) - A_x_list[i][-1]) / (len(A_x_list[i]) - 1)
mean_y = (sum(A_y_list[i]) - A_y_list[i][-1]) / (len(A_y_list[i]) - 1)
center_list_new.append((mean_x, mean_y))
center_list_old = []
for i in range(len(B_x_list)):
mean_x = (sum(B_x_list[i]) - B_x_list[i][-1]) / (len(B_x_list[i]) - 1)
mean_y = (sum(B_y_list[i]) - B_y_list[i][-1]) / (len(B_y_list[i]) - 1)
center_list_old.append((mean_x, mean_y))
new_added_list = []
new_added_f1_list = []
new_same_list = []
new_revised_list = []
f1_list = []
positon_threshold = 500
dice_threshold = dice_thred
ignore_count = 0
for i in A_id_list:
x, y = center_list_new[i]
new_p = Polygon(coor_list_a[i])
min_f1 = 0
min_j = -1
_recall, _precision = -1, -1
for j in range(len(center_list_old)):
_x, _y = center_list_old[j]
old_p = Polygon(coor_list_b[j])
if (x - _x) ** 2 + (y - _y) ** 2 <= positon_threshold ** 2:
f1 = dice(new_p, old_p)
if f1 > min_f1:
min_f1 = f1
min_j = j
_recall = recall(new_p, old_p)
_precision = precision(new_p, old_p)
if min_f1 >= 0.999:
_flag = f"Same\t{min_f1}"
new_same_list.append(i)
elif min_f1 >= dice_threshold:
_flag = f"Revised\t{min_f1}"
new_revised_list.append(i)
f1_list.append((min_f1, _recall, _precision))
else:
_flag = f"Added\t{min_f1}"
new_added_list.append(i)
new_added_f1_list.append(min_f1)
# print(min_f1)
if _flag.startswith("Same") or _flag.startswith("Revised"):
if min_j != -1:
coor_list_b.pop(min_j)
center_list_old.pop(min_j)
# print(i, _flag)
removed_count = len(center_list_old)
print(f"A\tB\tsame\tmatch\tadded\tdeleted")
print(
f"{len(A_x_list)}\t{len(B_x_list)}\t{len(new_same_list)}\t{len(new_revised_list)}"
f"\t{len(new_added_list)}\t{removed_count}"
)
print(f"[FP: {len(new_added_list)}/{len(A_x_list)}]")
print(f"[FN: {removed_count}/{len(B_x_list)}]")
# print(f"{len(new_same_list)} same")
# print(f"{len(new_revised_list)} revised")
# print(f"{len(new_added_list)} added")
# print(f"{removed_count} deleted")
# draw visualization
if draw_preview:
ref_image = io.imread(image_ref_path)
background = np.zeros(shape=ref_image.shape, dtype=np.uint8)
img = Image.fromarray(background, "L")
img = img.convert("RGB")
font_path = r"c:\windows\fonts\bahnschrift.ttf"
font = ImageFont.truetype(font_path, size=48)
title_font = ImageFont.truetype(font_path, size=72)
ImageDraw.Draw(img).text(
(100, 400),
text=f"DICE Threshold = {dice_thred}",
font=title_font,
fill="white",
)
ImageDraw.Draw(img).text(
(100, 480),
text=f"PREDICTION [FP: {len(new_added_list)}/{len(A_x_list)}]",
font=title_font,
fill="yellow",
)
ImageDraw.Draw(img).text(
(100, 560),
text=f"GROUND TRUTH [FN: {removed_count}/{len(B_x_list)}]",
font=title_font,
fill="red",
)
for i in new_added_list:
coor_tuple = [(xy[1], xy[0]) for xy in coor_list_a[i]]
# print(coor_tuple)
ImageDraw.Draw(img).line(coor_tuple, fill="yellow", width=6)
# text
f1 = new_added_f1_list[new_added_list.index(i)]
if f1 > 0:
text = "{:.3f}".format(f1) # + f",{Polygon(coor_list_a[i]).area}"
ImageDraw.Draw(img).text(
(center_list_new[i][1] - 40, center_list_new[i][0] + 60),
text,
font=font,
)
for coor_b in coor_list_b:
coor_tuple = [(xy[1], xy[0]) for xy in coor_b]
# print(coor_tuple)
ImageDraw.Draw(img).line(coor_tuple, fill="red", width=6)
# text = f",{Polygon(coor_b).area}"
# ImageDraw.Draw(img).text(
# (coor_tuple[0][0], coor_tuple[0][1]),
# text,
# font=font,
# )
img = np.array(img).astype("uint8")
output_path = image_ref_path.replace(
".png", f'_{str(dice_thred).replace(".","_")}.png'
)
io.imsave(output_path, img)
print(f"Image saved to {output_path}")
# write score
if log_score:
txt_path = file_A_path.replace("json", "txt")
with open(txt_path, "w") as f:
for item in f1_list:
f.write(f"{item[0]},{item[1]},{item[2]}\n")
if __name__ == "__main__":
file_A_path = (
r"C:\Users\yiju\Desktop\Copy\Scripts\masks\1-tom-new-kidney\pred_00a67c839.json"
)
file_B_path = r"C:\Users\yiju\Desktop\Copy\Data\hubmap-kidney-segmentation\test\00a67c839.json"
if len(sys.argv) >= 3:
file_A_path = sys.argv[1]
file_B_path = sys.argv[2]
image_ref_path = file_A_path.replace("json", "png")
A_name = file_A_path.split("\\")[-1].split(".")[0]
B_name = file_B_path.split("\\")[-1].split(".")[0]
print("A: ", A_name)
print("B: ", B_name)
for d in [0.5]: # [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
find_diff(dice_thred=d, draw_preview=True, log_score=True)
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
# Configuration file for JupyterHub
import os
# pre-spawn settings
NB_UID = 65534
NB_GID = 65534
CUDA = 'cuda' in os.environ['HOSTNODE']
c = get_config()
# read users/teams & images
import os, yaml
with open('/srv/jupyterhub/config.yaml', 'r') as cfgfile:
cfg = yaml.load(cfgfile, Loader=yaml.FullLoader)
team_map = cfg['users']
# Whitelist users and admins # google: remove @gmail.com
c.Authenticator.allowed_users = list(team_map.keys())
c.Authenticator.admin_users = admin = set()
for u, team in team_map.items():
if 'admin' in team:
admin.add(u)
# Spawn single-user servers as Docker containers
# CustomDockerSpawner
# form to select image
def get_options_form(spawner):
username = spawner.user.name # .split('@')[0]
teams = cfg['users'][username]
images = cfg['images']
# list of image letters for user
img = {k:v for k,v in images.items() if k in teams }
images = [] # unique list
for t,i in img.items():
for k in i:
if k not in images:
images.append(k)
if not CUDA:
images = [i for i in images if i != 'G']
# dict of image label:build
available_images = cfg['available_images']
allowed_images = [v for k,v in available_images.items() if k in images]
images=[]
for i in allowed_images:
images = images | i.items()
allowed_images = dict(images)
allowed_images = dict(sorted(allowed_images.items(), key=lambda x: x[0]))
# prepare form
if len(allowed_images) > 1:
option_t = '<option value="{image}" {selected}>{label}</option>'
options = [
option_t.format(
image=image, label=label, selected='selected' if image == spawner.image else ''
)
for label, image in allowed_images.items()
]
return """
<br><br>
<h3>Select an image</h3><br><br>{havecuda}<br><br><b>User: {username}</b><br><br>
<select class="form-control" name="image" required autofocus>
{options}
</select>
""".format(options=options, username=username, havecuda='All can run CUDA' if CUDA else '')
else:
spawner.image = [v for k,v in allowed_images.items()][0]
c.DockerSpawner.options_form = get_options_form
def set_sudo(spawner):
username = spawner.user.name
teams = cfg['users'][username]
if 'sudo' in teams:
return 'yes'
else:
return 'no'
def set_USER(spawner):
username = spawner.user.name
if username[0:4].isnumeric():
return username.upper()
else:
return username
def set_HOME(spawner):
return '/home/' + spawner.user.name
def set_UID(spawner):
UID = cfg['users'][spawner.user.name][0]['uid']
if UID >= 1 and UID < 65536:
return UID
else:
return 1000
def set_GID(spawner):
GID = cfg['users'][spawner.user.name][1]['gid']
if GID >= 1 and GID < 65536:
return GID
else:
return 100
c.DockerSpawner.environment = {
'NB_USER': set_USER,
'NB_UID': set_UID,
'NB_GID': set_GID,
'NB_UMASK':'002',
'CHOWN_HOME':'yes',
'GRANT_SUDO': set_sudo,
}
home_dir = os.environ.get('HOME_DIR')
# notebook_dir = '/home/' + spawner.user.name
# c.DockerSpawner.notebook_dir = notebook_dir
from dockerspawner import DockerSpawner
class CustomDockerSpawner(DockerSpawner):
# mount volumes by team
def start(self):
username = set_USER(self)
# username = self.user.name
# home dir
self.volumes[f"{home_dir}/{username.split("@")[0]}"] = {
'bind': '/home/' + username ,
'mode': 'rw',
}
# copy system /etc/group file
self.volumes['/etc/group'] = {
'bind': '/tmp/group',
'mode': 'ro',
}
# mount /srv from files in /singleuser/srv/setup
self.volumes[os.environ['JHUB_DIR']+'/singleuser/srv/setup'] = {
'bind': '/srv',
'mode': 'ro',
}
# user specific mounts as in config.yaml
teams = cfg['users'][self.user.name] # lowercase
mounts = cfg['mounts']
mounts = {k:v for k,v in mounts.items() if k in teams }
for k,v in mounts.items():
for h,d in v.items():
self.volumes[h] = { 'bind': d[0].replace('USER',username), 'mode': d[1] }
return super().start()
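# Illustrative config.yaml layout implied by the lookups above (key names reconstructed
# from this file; all values are placeholders, not a documented schema):
#
#   users:
#     alice:                      # list: [{uid}, {gid}, team names..., optional 'admin'/'sudo']
#       - uid: 1001
#       - gid: 100
#       - teamA
#       - admin
#   images:
#     teamA: [D, G]               # image letters per team; 'G' is only offered when CUDA is present
#   available_images:
#     D: {"Datascience notebook": "jupyter/datascience-notebook"}
#     G: {"GPU notebook": "gpu-jupyter:latest"}
#   mounts:
#     teamA:
#       /data/teamA: ["/home/USER/teamA", "rw"]   # host path -> [bind path, mode]; USER is substituted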
# c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.JupyterHub.spawner_class = CustomDockerSpawner
# hub runs as 'root',
c.DockerSpawner.extra_create_kwargs = {
'user': 'root',
'hostname': 'hub',
}
# nvidia
# /dev/shm 64M > 16G
if CUDA:
c.DockerSpawner.extra_host_config = {
'runtime': 'nvidia',
'shm_size': '16gb'
}
# JupyterHub requires a single-user instance of the Notebook server, so we
# default to using the `start-singleuser.sh` script included in the
# jupyter/docker-stacks *-notebook images as the Docker run command when
# spawning containers. Optionally, you can override the Docker run command
# using the DOCKER_SPAWN_CMD environment variable.
spawn_cmd = "start-singleuser.sh"
c.DockerSpawner.extra_create_kwargs.update({ 'command': spawn_cmd })
# Connect containers to this Docker network
network_name = os.environ['DOCKER_NETWORK_NAME']
c.DockerSpawner.use_internal_ip = True
c.DockerSpawner.network_name = network_name
# Pass the network name as argument to spawned containers
c.DockerSpawner.extra_host_config.update({ 'network_mode': network_name })
# Mount the real user's Docker volume on the host to the notebook user's
# notebook directory in the container
#c.DockerSpawner.volumes = { 'jupyterhub-user-{username}': notebook_dir }
# external proxy
c.JupyterHub.cleanup_servers = False
# tells the hub to not stop servers when the hub restarts (proxy runs separately).
c.ConfigurableHTTPProxy.should_start = False
# tells the hub that the proxy should not be started (because you start it yourself).
c.ConfigurableHTTPProxy.auth_token = os.environ.get('CONFIGPROXY_AUTH_TOKEN')
# token for authenticating communication with the proxy.
c.ConfigurableHTTPProxy.api_url = 'http://jupyterproxy:8001'
# the URL which the hub uses to connect to the proxy’s API.
# Remove containers once they are stopped
c.DockerSpawner.remove_containers = True
# User containers will access hub by container name on the Docker network
c.JupyterHub.base_url = '/jhub/'
c.JupyterHub.hub_ip = 'jupyterhub'
c.JupyterHub.hub_port = 8080
# don't need because we are behind an https reverse proxy
# # TLS config: requires generating certificates
# c.JupyterHub.port = 443
# c.JupyterHub.ssl_key = os.environ['SSL_KEY']
# c.JupyterHub.ssl_cert = os.environ['SSL_CERT']
# Persist hub data on volume mounted inside container
data_dir = '/data'
c.JupyterHub.cookie_secret_file = os.path.join(data_dir,
'jupyterhub_cookie_secret')
c.JupyterHub.db_url = f'sqlite:///{data_dir}/jupyterhub.sqlite'
# c.JupyterHub.db_url = 'postgresql://postgres:{password}@{host}/{db}'.format(
# host=os.environ['POSTGRES_HOST'],
# password=os.environ['POSTGRES_PASSWORD'],
# db=os.environ['POSTGRES_DB'],
# )
# reset database
# c.JupyterHub.reset_db = False
# Authenticate users
'''
# GitHub
c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
c.GitHubOAuthenticator.oauth_callback_url = os.environ['OAUTH_CALLBACK_URL']
# Native
# admin users in c.Authenticator.admin_users are automatically authorized when signup
c.JupyterHub.authenticator_class = 'nativeauthenticator.NativeAuthenticator'
'''
##### multioauth
# https://github.com/jupyterhub/oauthenticator/issues/136
from traitlets import List
from jupyterhub.auth import Authenticator
def url_path_join(*parts):
return '/'.join([p.strip().strip('/') for p in parts])
class MultiOAuthenticator(Authenticator):
authenticators = List(help="The subauthenticators to use", config=True)
def __init__(self, *arg, **kwargs):
super().__init__(*arg, **kwargs)
self._authenticators = []
for authenticator_klass, url_scope, configs in self.authenticators:
c = self.trait_values()
c.update(configs)
self._authenticators.append({"instance": authenticator_klass(**c), "url_scope": url_scope})
def get_custom_html(self, base_url):
html = []
for authenticator in self._authenticators:
login_service = authenticator["instance"].login_service
if login_service == 'User/Pass':
url = url_path_join(authenticator["url_scope"], "login")
else:
url = url_path_join(authenticator["url_scope"], "oauth_login")
# html.append(
# f"""
# <div class="service-login">
# <a role="button" class='btn btn-jupyter btn-lg' href='{url}'>
# Sign in with {login_service}
# </a>
# </div>
# """
# )
return "\n".join(html)
def get_handlers(self, app):
routes = []
for _authenticator in self._authenticators:
for path, handler in _authenticator["instance"].get_handlers(app):
class SubHandler(handler):
authenticator = _authenticator["instance"]
                routes.append((f'{_authenticator["url_scope"]}{path}', SubHandler))
return routes
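# With the authenticator list configured below, each sub-authenticator's handlers are
# re-registered under its URL scope, so e.g. GitHub's OAuth login ends up under
# /github/oauth_login while the native username/password login stays at /login
# (sketch; the exact paths come from each authenticator's own get_handlers()).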
c.JupyterHub.authenticator_class = MultiOAuthenticator
from oauthenticator.github import GitHubOAuthenticator
from oauthenticator.google import GoogleOAuthenticator
from nativeauthenticator import NativeAuthenticator
#from oauthenticator.azuread import AzureAdOAuthenticator
c.MultiOAuthenticator.authenticators = [
(GitHubOAuthenticator, '/github', {
'client_id': os.environ['GITHUB_CLIENT_ID'],
'client_secret': os.environ['GITHUB_CLIENT_SECRET'],
'oauth_callback_url': os.environ['GITHUB_CALLBACK_URL']
}),
(GoogleOAuthenticator, '/google', {
'client_id': os.environ['GOOGLE_CLIENT_ID'],
'client_secret': os.environ['GOOGLE_CLIENT_SECRET'],
'oauth_callback_url': os.environ['GOOGLE_CALLBACK_URL'],
'login_service': 'Google'
}),
(NativeAuthenticator, '/', {
'login_service': 'User/Pass'
}),
]
import nativeauthenticator
c.JupyterHub.template_paths = [f"{os.path.dirname(nativeauthenticator.__file__)}/templates/"]
# template modified to allow github/google oauth
# ["/usr/local/lib/python3.8/dist-packages/nativeauthenticator/templates/"]
# google
# https://oauthenticator.readthedocs.io/en/latest/api/gen/oauthenticator.google.html
c.GoogleOAuthenticator.hosted_domain = ['gmail.com']
c.GoogleOAuthenticator.login_service = 'Google'
c.GoogleOAuthenticator.delete_invalid_users = True
c.NativeAuthenticator.check_common_password = True
c.NativeAuthenticator.minimum_password_length = 8
c.NativeAuthenticator.allowed_failed_logins = 3
c.NativeAuthenticator.enable_signup = True
# recaptcha config
# https://www.google.com/recaptcha/admin/site/500725121/settings
c.NativeAuthenticator.recaptcha_key = os.environ['RECAPCHA_KEY']
c.NativeAuthenticator.recaptcha_secret = os.environ['RECAPCHA_SECRET']
c.NativeAuthenticator.tos = 'Acepto las <a href="https://remote.genrisk.org/CDU.html" target="_blank">condiciones de uso</a>'
## enable authentication state
c.MultiOAuthenticator.enable_auth_state = True
import warnings
if 'JUPYTERHUB_CRYPT_KEY' not in os.environ:
warnings.warn(
"Need JUPYTERHUB_CRYPT_KEY env for persistent auth_state.\n"
" export JUPYTERHUB_CRYPT_KEY=$(openssl rand -hex 32)"
)
c.CryptKeeper.keys = [ os.urandom(32) ]
pass
'''
# remove idle notebooks after inactive time
# https://github.com/jupyterhub/jupyterhub-idle-culler
import sys
c.JupyterHub.services = [
{
'name': 'idle-culler',
'admin': True,
'command': [sys.executable, '-m', 'jupyterhub_idle_culler', '--timeout=3600'],
}
]
'''
# max simultaneous users
c.JupyterHub.concurrent_spawn_limit = 10
# user limits
# c.Spawner.cpu_limit = 2 # cores
# c.Spawner.mem_limit = 8G
| # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
# Configuration file for JupyterHub
import os
# pre-spawn settings
NB_UID = 65534
NB_GID = 65534
CUDA = 'cuda' in os.environ['HOSTNODE']
c = get_config()
# read users/teams & images
import os, yaml
with open('/srv/jupyterhub/config.yaml', 'r') as cfgfile:
cfg = yaml.load(cfgfile, Loader=yaml.FullLoader)
team_map = cfg['users']
# Whitelist users and admins # google: remove @gmail.com
c.Authenticator.allowed_users = list(team_map.keys())
c.Authenticator.admin_users = admin = set()
for u, team in team_map.items():
if 'admin' in team:
admin.add(u)
# Spawn single-user servers as Docker containers
# CustomDockerSpawner
# form to select image
def get_options_form(spawner):
username = spawner.user.name # .split('@')[0]
teams = cfg['users'][username]
images = cfg['images']
# list of image letters for user
img = {k:v for k,v in images.items() if k in teams }
images = [] # unique list
for t,i in img.items():
for k in i:
if k not in images:
images.append(k)
if not CUDA:
images = [i for i in images if i != 'G']
# dict of image label:build
available_images = cfg['available_images']
allowed_images = [v for k,v in available_images.items() if k in images]
images=[]
for i in allowed_images:
images = images | i.items()
allowed_images = dict(images)
allowed_images = dict(sorted(allowed_images.items(), key=lambda x: x[0]))
# prepare form
if len(allowed_images) > 1:
option_t = '<option value="{image}" {selected}>{label}</option>'
options = [
option_t.format(
image=image, label=label, selected='selected' if image == spawner.image else ''
)
for label, image in allowed_images.items()
]
return """
<br><br>
<h3>Select an image</h3><br><br>{havecuda}<br><br><b>User: {username}</b><br><br>
<select class="form-control" name="image" required autofocus>
{options}
</select>
""".format(options=options, username=username, havecuda='All can run CUDA' if CUDA else '')
else:
spawner.image = [v for k,v in allowed_images.items()][0]
c.DockerSpawner.options_form = get_options_form
def set_sudo(spawner):
username = spawner.user.name
teams = cfg['users'][username]
if 'sudo' in teams:
return 'yes'
else:
return 'no'
def set_USER(spawner):
username = spawner.user.name
if username[0:4].isnumeric():
return username.upper()
else:
return username
def set_HOME(spawner):
return '/home/' + spawner.user.name
def set_UID(spawner):
UID = cfg['users'][spawner.user.name][0]['uid']
if UID >= 1 and UID < 65536:
return UID
else:
return 1000
def set_GID(spawner):
GID = cfg['users'][spawner.user.name][1]['gid']
if GID >= 1 and GID < 65536:
return GID
else:
return 100
c.DockerSpawner.environment = {
'NB_USER': set_USER,
'NB_UID': set_UID,
'NB_GID': set_GID,
'NB_UMASK':'002',
'CHOWN_HOME':'yes',
'GRANT_SUDO': set_sudo,
}
home_dir = os.environ.get('HOME_DIR')
# notebook_dir = '/home/' + spawner.user.name
# c.DockerSpawner.notebook_dir = notebook_dir
from dockerspawner import DockerSpawner
class CustomDockerSpawner(DockerSpawner):
# mount volumes by team
def start(self):
username = set_USER(self)
# username = self.user.name
# home dir
self.volumes[f"{home_dir}/{username.split('@')[0]}"] = {
'bind': '/home/' + username ,
'mode': 'rw',
}
# copy system /etc/group file
self.volumes['/etc/group'] = {
'bind': '/tmp/group',
'mode': 'ro',
}
# mount /srv from files in /singleuser/srv/setup
self.volumes[os.environ['JHUB_DIR']+'/singleuser/srv/setup'] = {
'bind': '/srv',
'mode': 'ro',
}
# user specific mounts as in config.yaml
teams = cfg['users'][self.user.name] # lowercase
mounts = cfg['mounts']
mounts = {k:v for k,v in mounts.items() if k in teams }
for k,v in mounts.items():
for h,d in v.items():
self.volumes[h] = { 'bind': d[0].replace('USER',username), 'mode': d[1] }
return super().start()
# c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.JupyterHub.spawner_class = CustomDockerSpawner
# hub runs as 'root',
c.DockerSpawner.extra_create_kwargs = {
'user': 'root',
'hostname': 'hub',
}
# nvidia
# /dev/shm 64M > 16G
if CUDA:
c.DockerSpawner.extra_host_config = {
'runtime': 'nvidia',
'shm_size': '16gb'
}
# JupyterHub requires a single-user instance of the Notebook server, so we
# default to using the `start-singleuser.sh` script included in the
# jupyter/docker-stacks *-notebook images as the Docker run command when
# spawning containers. Optionally, you can override the Docker run command
# using the DOCKER_SPAWN_CMD environment variable.
spawn_cmd = "start-singleuser.sh"
c.DockerSpawner.extra_create_kwargs.update({ 'command': spawn_cmd })
# Connect containers to this Docker network
network_name = os.environ['DOCKER_NETWORK_NAME']
c.DockerSpawner.use_internal_ip = True
c.DockerSpawner.network_name = network_name
# Pass the network name as argument to spawned containers
c.DockerSpawner.extra_host_config.update({ 'network_mode': network_name })
# Mount the real user's Docker volume on the host to the notebook user's
# notebook directory in the container
#c.DockerSpawner.volumes = { 'jupyterhub-user-{username}': notebook_dir }
# external proxy
c.JupyterHub.cleanup_servers = False
# tells the hub to not stop servers when the hub restarts (proxy runs separately).
c.ConfigurableHTTPProxy.should_start = False
# tells the hub that the proxy should not be started (because you start it yourself).
c.ConfigurableHTTPProxy.auth_token = os.environ.get('CONFIGPROXY_AUTH_TOKEN')
# token for authenticating communication with the proxy.
c.ConfigurableHTTPProxy.api_url = 'http://jupyterproxy:8001'
# the URL which the hub uses to connect to the proxy’s API.
# Remove containers once they are stopped
c.DockerSpawner.remove_containers = True
# User containers will access hub by container name on the Docker network
c.JupyterHub.base_url = '/jhub/'
c.JupyterHub.hub_ip = 'jupyterhub'
c.JupyterHub.hub_port = 8080
# don't need because we are behind an https reverse proxy
# # TLS config: requires generating certificates
# c.JupyterHub.port = 443
# c.JupyterHub.ssl_key = os.environ['SSL_KEY']
# c.JupyterHub.ssl_cert = os.environ['SSL_CERT']
# Persist hub data on volume mounted inside container
data_dir = '/data'
c.JupyterHub.cookie_secret_file = os.path.join(data_dir,
'jupyterhub_cookie_secret')
c.JupyterHub.db_url = f'sqlite:///{data_dir}/jupyterhub.sqlite'
# c.JupyterHub.db_url = 'postgresql://postgres:{password}@{host}/{db}'.format(
# host=os.environ['POSTGRES_HOST'],
# password=os.environ['POSTGRES_PASSWORD'],
# db=os.environ['POSTGRES_DB'],
# )
# reset database
# c.JupyterHub.reset_db = False
# Authenticate users
'''
# GitHub
c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
c.GitHubOAuthenticator.oauth_callback_url = os.environ['OAUTH_CALLBACK_URL']
# Native
# admin users in c.Authenticator.admin_users are automatically authorized when signup
c.JupyterHub.authenticator_class = 'nativeauthenticator.NativeAuthenticator'
'''
##### multioauth
# https://github.com/jupyterhub/oauthenticator/issues/136
from traitlets import List
from jupyterhub.auth import Authenticator
def url_path_join(*parts):
return '/'.join([p.strip().strip('/') for p in parts])
class MultiOAuthenticator(Authenticator):
authenticators = List(help="The subauthenticators to use", config=True)
def __init__(self, *arg, **kwargs):
super().__init__(*arg, **kwargs)
self._authenticators = []
for authenticator_klass, url_scope, configs in self.authenticators:
c = self.trait_values()
c.update(configs)
self._authenticators.append({"instance": authenticator_klass(**c), "url_scope": url_scope})
def get_custom_html(self, base_url):
html = []
for authenticator in self._authenticators:
login_service = authenticator["instance"].login_service
if login_service == 'User/Pass':
url = url_path_join(authenticator["url_scope"], "login")
else:
url = url_path_join(authenticator["url_scope"], "oauth_login")
# html.append(
# f"""
# <div class="service-login">
# <a role="button" class='btn btn-jupyter btn-lg' href='{url}'>
# Sign in with {login_service}
# </a>
# </div>
# """
# )
return "\n".join(html)
def get_handlers(self, app):
routes = []
for _authenticator in self._authenticators:
for path, handler in _authenticator["instance"].get_handlers(app):
class SubHandler(handler):
authenticator = _authenticator["instance"]
routes.append((f'{_authenticator["url_scope"]}{path}', SubHandler))
return routes
c.JupyterHub.authenticator_class = MultiOAuthenticator
from oauthenticator.github import GitHubOAuthenticator
from oauthenticator.google import GoogleOAuthenticator
from nativeauthenticator import NativeAuthenticator
#from oauthenticator.azuread import AzureAdOAuthenticator
c.MultiOAuthenticator.authenticators = [
(GitHubOAuthenticator, '/github', {
'client_id': os.environ['GITHUB_CLIENT_ID'],
'client_secret': os.environ['GITHUB_CLIENT_SECRET'],
'oauth_callback_url': os.environ['GITHUB_CALLBACK_URL']
}),
(GoogleOAuthenticator, '/google', {
'client_id': os.environ['GOOGLE_CLIENT_ID'],
'client_secret': os.environ['GOOGLE_CLIENT_SECRET'],
'oauth_callback_url': os.environ['GOOGLE_CALLBACK_URL'],
'login_service': 'Google'
}),
(NativeAuthenticator, '/', {
'login_service': 'User/Pass'
}),
]
import nativeauthenticator
c.JupyterHub.template_paths = [f"{os.path.dirname(nativeauthenticator.__file__)}/templates/"]
# template modified to allow github/google oauth
# ["/usr/local/lib/python3.8/dist-packages/nativeauthenticator/templates/"]
# google
# https://oauthenticator.readthedocs.io/en/latest/api/gen/oauthenticator.google.html
c.GoogleOAuthenticator.hosted_domain = ['gmail.com']
c.GoogleOAuthenticator.login_service = 'Google'
c.GoogleOAuthenticator.delete_invalid_users = True
c.NativeAuthenticator.check_common_password = True
c.NativeAuthenticator.minimum_password_length = 8
c.NativeAuthenticator.allowed_failed_logins = 3
c.NativeAuthenticator.enable_signup = True
# recaptcha config
# https://www.google.com/recaptcha/admin/site/500725121/settings
c.NativeAuthenticator.recaptcha_key = os.environ['RECAPCHA_KEY']
c.NativeAuthenticator.recaptcha_secret = os.environ['RECAPCHA_SECRET']
c.NativeAuthenticator.tos = 'Acepto las <a href="https://remote.genrisk.org/CDU.html" target="_blank">condiciones de uso</a>'
## enable authentication state
c.MultiOAuthenticator.enable_auth_state = True
import warnings
if 'JUPYTERHUB_CRYPT_KEY' not in os.environ:
warnings.warn(
"Need JUPYTERHUB_CRYPT_KEY env for persistent auth_state.\n"
" export JUPYTERHUB_CRYPT_KEY=$(openssl rand -hex 32)"
)
c.CryptKeeper.keys = [ os.urandom(32) ]
pass
'''
# remove idle notebooks after inactive time
# https://github.com/jupyterhub/jupyterhub-idle-culler
import sys
c.JupyterHub.services = [
{
'name': 'idle-culler',
'admin': True,
'command': [sys.executable, '-m', 'jupyterhub_idle_culler', '--timeout=3600'],
}
]
'''
# max simultaneous users
c.JupyterHub.concurrent_spawn_limit = 10
# user limits
# c.Spawner.cpu_limit = 2 # cores
# c.Spawner.mem_limit = 8G
|
"""
Contains various data structures used by Bionic's infrastructure.
"""
import attr
from .utils.misc import ImmutableSequence, ImmutableMapping
@attr.s(frozen=True)
class EntityDefinition:
"""
Describes the immutable properties of an entity. These properties generally have
to do with the entity's "contract": the assumptions other parts of the system can
make about its value. However, this does *not* include the way the entity's value
is determined; this is configured separately and can be changed more easily.
Attributes
----------
name: string
The name of the entity.
protocol: Protocol
The protocol to use when serializing and deserializing entity values on disk.
doc: string
A human-readable description of the entity.
optional_should_memoize: boolean or None
Whether the entity should be memoized, or None if the global default should be
used.
optional_should_persist: boolean or None
Whether the entity should be persisted, or None if the global default should be
        used.
needs_caching: boolean
Indicates that some kind of caching needs to be enabled for this entity (either
persistence or memoization).
"""
name = attr.ib()
protocol = attr.ib()
doc = attr.ib()
optional_should_memoize = attr.ib()
optional_should_persist = attr.ib()
needs_caching = attr.ib(default=False)
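# Minimal illustrative construction (sketch; the protocol object and flags below are
# placeholders, not values taken from Bionic itself):
#   EntityDefinition(
#       name="raw_frame",
#       protocol=some_protocol,            # hypothetical Protocol instance
#       doc="Example entity used for illustration.",
#       optional_should_memoize=None,      # defer to the global default
#       optional_should_persist=True,
#       needs_caching=True,
#   )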
@attr.s(frozen=True)
class DescriptorMetadata:
"""
Holds extra data we might need when working with a descriptor.
Similar to an EntityDefinition, but can apply to non-entity descriptors, and also
incorporates information from the global configuration. (For example,
EntityDefinition has an `optional_should_memoize` field which describes the
user's memoization preferences, if any; this class has a `should_memoize` field
which describes what we'll actually do, based on both user preferences and the
global configuration.)
Attributes
----------
protocol: Protocol
The protocol to use when serializing and deserializing descriptor values on
disk.
doc: string
A human-readable description of the descriptor.
should_memoize: boolean
Whether the value should be memoized for the lifetime of its Flow instance.
should_memoize_for_query: boolean
Whether the value should be memoized for the lifetime of a Flow.get() call.
(Only relevant if ``should_memoize`` is False.)
should_persist: boolean
Whether the value should be persisted.
is_composite: boolean
Whether the value contains other descriptor values. (If so, it's desirable to
get it out of memory quickly.)
"""
protocol = attr.ib()
doc = attr.ib()
should_memoize = attr.ib(default=False)
should_memoize_for_query = attr.ib(default=False)
should_persist = attr.ib(default=False)
is_composite = attr.ib(default=True)
@attr.s(frozen=True)
class TaskKey:
"""
A unique identifier for a Task.
"""
dnode = attr.ib()
case_key = attr.ib()
def evolve(self, **kwargs):
return attr.evolve(self, **kwargs)
def __str__(self):
args_str = ", ".join(f"{name}={value}" for name, value in self.case_key.items())
return f"{self.dnode.to_descriptor(near_commas=True)}({args_str})"
@attr.s(frozen=True)
class Task:
"""
A unit of work. Can have dependencies, which are referred to via their
TaskKeys.
Attributes
----------
key: TaskKey
Key corresponding to the output value computed by this task.
dep_keys: list of TaskKeys
Keys corresponding to the input values required by this task.
compute_func: function taking a single ``dep_values`` argument
Generates output values based on the passed input values.
is_simple_lookup: boolean
Whether this task consists of simply looking up the fixed value of an entity;
used to determine what message to log when this task is computed.
"""
key = attr.ib()
dep_keys = attr.ib(converter=tuple)
compute_func = attr.ib()
is_simple_lookup = attr.ib(default=False)
def compute(self, dep_values):
return self.compute_func(dep_values)
@property
def can_be_serialized(self):
return not self.is_simple_lookup
def evolve(self, **kwargs):
return attr.evolve(self, **kwargs)
def __repr__(self):
return f"Task({self.key!r}, {self.dep_keys!r})"
@attr.s(frozen=True)
class Result:
"""
Represents one value for one entity.
"""
task_key = attr.ib()
value = attr.ib()
local_artifact = attr.ib()
value_is_missing = attr.ib(default=False)
def __repr__(self):
return f"Result({self.task_key!r}, {self.value!r})"
@attr.s(frozen=True)
class Artifact:
"""
Represents a serialized, file-like artifact, either on a local filesystem or in a
cloud object store.
"""
url: str = attr.ib()
content_hash: str = attr.ib()
def evolve(self, **kwargs):
return attr.evolve(self, **kwargs)
class CaseKeySpace(ImmutableSequence):
"""
A set of CaseKey names (without values) -- represents a space of possible
CaseKeys.
"""
def __init__(self, names=None):
if names is None:
names = []
super(CaseKeySpace, self).__init__(sorted(names))
def union(self, other):
return CaseKeySpace(set(self).union(other))
def intersection(self, other):
return CaseKeySpace(name for name in self if name in other)
def difference(self, other):
return CaseKeySpace(name for name in self if name not in other)
def select(self, case_key):
return case_key.project(self)
@classmethod
def union_all(cls, spaces):
if not spaces:
return CaseKeySpace([])
names = set()
for space in spaces:
names = names.union(space)
return CaseKeySpace(names)
@classmethod
def intersection_all(cls, spaces):
if not spaces:
raise ValueError("Can't take the intersection of zero spaces")
names = None
for space in spaces:
if names is None:
names = set(space)
else:
names = names.intersection(space)
return CaseKeySpace(names)
def __repr__(self):
return f'CaseKeySpace({", ".join(repr(name) for name in self)})'
class CaseKey(ImmutableMapping):
"""
A collection of name-token pairs that uniquely identifies a case.
"""
def __init__(self, name_token_pairs):
tokens_by_name = {name: token for name, token in name_token_pairs}
super(CaseKey, self).__init__(tokens_by_name)
self._name_token_pairs = name_token_pairs
self.tokens = tokens_by_name
self.space = CaseKeySpace(list(tokens_by_name.keys()))
self.missing_names = [
# None is a sentinel value used to indicate that no value is available.
# Normally I would prefer to represent missing-ness out-of-band by making the
# `missing_names` field the source of truth here, but the relational methods like
# `project` are cleaner when we use a sentinel value.
name
for name, token in name_token_pairs
if token is None
]
self.has_missing_values = len(self.missing_names) > 0
def project(self, key_space):
return CaseKey(
[
(name, token)
for name, token in self._name_token_pairs
if name in key_space
]
)
def drop(self, key_space):
return CaseKey(
[
(name, token)
for name, token in self._name_token_pairs
if name not in key_space
]
)
def merge(self, other):
tokens_by_name = {name: token for name, token in self._name_token_pairs}
for name, token in other._name_token_pairs:
if name in tokens_by_name:
assert token == tokens_by_name[name]
else:
tokens_by_name[name] = token
return CaseKey([(name, token) for name, token in tokens_by_name.items()])
def __repr__(self):
args_str = ", ".join(f"{name}={token}" for name, token in self.items())
return f"CaseKey({args_str})"
class ResultGroup(ImmutableSequence):
"""
Represents a collection of Results, distinguished by their CaseKeys. Each
CaseKey should have the same set of names.
"""
def __init__(self, results, key_space):
super(ResultGroup, self).__init__(results)
self.key_space = key_space
def __repr__(self):
return f"ResultGroup({list(self)!r})"
def str_from_version_value(value):
if value is None:
return "0"
elif isinstance(value, int):
return str(value)
elif isinstance(value, str):
return value
else:
raise ValueError(f"Version values must be str, int, or None: got {value!r}")
# The CodeVersion and CodeFingerprint classes are used (indirectly) by
# persistence.ArtifactMetadataRecord and can be serialized to YAML and stored in the
# persistent cache. That means if we add new fields to them, we also need to update
# persistence.CACHE_SCHEMA_VERSION.
# TODO Should we just move these classes to persistence.py as well?
@attr.s(frozen=True)
class CodeVersion:
"""
Contains the user-designated version of a piece of code, consisting of a
major and a minor version string, and a boolean that indicates whether it
includes the bytecode. The convention is that changing the major version
indicates a functional change, while changing the minor version indicates a
nonfunctional change. If ``includes_bytecode`` is True, then the major version
is understood to implicitly include the bytecode of the code as well.
"""
major: str = attr.ib(converter=str_from_version_value)
minor: str = attr.ib(converter=str_from_version_value)
includes_bytecode: bool = attr.ib(converter=attr.converters.default_if_none(True))
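# Illustrative example (not in the original module): because of the converters above,
# CodeVersion(major=1, minor=None, includes_bytecode=None) normalizes to
# CodeVersion(major="1", minor="0", includes_bytecode=True).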
@attr.s(frozen=True)
class CodeVersioningPolicy:
"""
Contains the version of the user entity function with any additional settings
related to the version. For now, we only have one setting that affects the
analysis-time behavior of the version.
"""
version: CodeVersion = attr.ib()
suppress_bytecode_warnings: bool = attr.ib(
converter=attr.converters.default_if_none(False)
)
@attr.s(frozen=True)
class CodeFingerprint:
"""
A collection of characteristics attempting to uniquely identify a function.
Attributes
----------
version: CodeVersion
A version identifier provided by the user.
bytecode_hash: str
A hash of the function's Python bytecode.
orig_flow_name: str
The name of the flow in which this function was originally defined.
is_identity: bool
If True, indicates that this function is equivalent to the identity function:
it takes one argument and returns it unchanged.
"""
version: CodeVersion = attr.ib()
bytecode_hash: str = attr.ib()
orig_flow_name: str = attr.ib()
is_identity: bool = attr.ib(default=False)
@attr.s(frozen=True)
class VersioningPolicy:
"""
Encodes the versioning rules to use when computing entity values.
"""
check_for_bytecode_errors = attr.ib()
treat_bytecode_as_functional = attr.ib()
ignore_bytecode_exceptions = attr.ib()
@attr.s(frozen=True)
class FunctionAttributes:
"""
Describes properties of a Python function.
"""
code_fingerprint = attr.ib()
code_versioning_policy = attr.ib()
changes_per_run = attr.ib()
aip_task_config = attr.ib()
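# --- Illustrative usage sketch, not part of the original module ---------------
# Shows how the CaseKey / CaseKeySpace helpers compose; names and tokens are
# invented for the example, and the guard keeps it from running on import.
if __name__ == "__main__":  # pragma: no cover
    _ck = CaseKey([("x", "1"), ("y", "2")])
    _xs = CaseKeySpace(["x"])
    assert _ck.project(_xs).tokens == {"x": "1"}
    assert _ck.drop(_xs).tokens == {"y": "2"}
    assert list(CaseKeySpace.union_all([_xs, CaseKeySpace(["y"])])) == ["x", "y"]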
| """
Contains various data structures used by Bionic's infrastructure.
"""
import attr
from .utils.misc import ImmutableSequence, ImmutableMapping
@attr.s(frozen=True)
class EntityDefinition:
"""
Describes the immutable properties of an entity. These properties generally have
to do with the entity's "contract": the assumptions other parts of the system can
make about its value. However, this does *not* include the way the entity's value
is determined; this is configured separately and can be changed more easily.
Attributes
----------
name: string
The name of the entity.
protocol: Protocol
The protocol to use when serializing and deserializing entity values on disk.
doc: string
A human-readable description of the entity.
optional_should_memoize: boolean or None
Whether the entity should be memoized, or None if the global default should be
used.
optional_should_persist: boolean or None
Whether the entity should be persisted, or None if the global default should be
used.
needs_caching: boolean
Indicates that some kind of caching needs to be enabled for this entity (either
persistence or memoization).
"""
name = attr.ib()
protocol = attr.ib()
doc = attr.ib()
optional_should_memoize = attr.ib()
optional_should_persist = attr.ib()
needs_caching = attr.ib(default=False)
@attr.s(frozen=True)
class DescriptorMetadata:
"""
Holds extra data we might need when working with a descriptor.
Similar to an EntityDefinition, but can apply to non-entity descriptors, and also
incorporates information from the global configuration. (For example,
EntityDefinition has an `optional_should_memoize` field which describes the
user's memoization preferences, if any; this class has a `should_memoize` field
which describes what we'll actually do, based on both user preferences and the
global configuration.)
Attributes
----------
protocol: Protocol
The protocol to use when serializing and deserializing descriptor values on
disk.
doc: string
A human-readable description of the descriptor.
should_memoize: boolean
Whether the value should be memoized for the lifetime of its Flow instance.
should_memoize_for_query: boolean
Whether the value should be memoized for the lifetime of a Flow.get() call.
(Only relevant if ``should_memoize`` is False.)
should_persist: boolean
Whether the value should be persisted.
is_composite: boolean
Whether the value contains other descriptor values. (If so, it's desirable to
get it out of memory quickly.)
"""
protocol = attr.ib()
doc = attr.ib()
should_memoize = attr.ib(default=False)
should_memoize_for_query = attr.ib(default=False)
should_persist = attr.ib(default=False)
is_composite = attr.ib(default=True)
@attr.s(frozen=True)
class TaskKey:
"""
A unique identifier for a Task.
"""
dnode = attr.ib()
case_key = attr.ib()
def evolve(self, **kwargs):
return attr.evolve(self, **kwargs)
def __str__(self):
args_str = ", ".join(f"{name}={value}" for name, value in self.case_key.items())
return f"{self.dnode.to_descriptor(near_commas=True)}({args_str})"
@attr.s(frozen=True)
class Task:
"""
A unit of work. Can have dependencies, which are referred to via their
TaskKeys.
Attributes
----------
key: TaskKey
Key corresponding to the output value computed by this task.
dep_keys: list of TaskKeys
Keys corresponding to the input values required by this task.
compute_func: function taking a single ``dep_values`` argument
Generates output values based on the passed input values.
is_simple_lookup: boolean
Whether this task consists of simply looking up the fixed value of an entity;
used to determine what message to log when this task is computed.
"""
key = attr.ib()
dep_keys = attr.ib(converter=tuple)
compute_func = attr.ib()
is_simple_lookup = attr.ib(default=False)
def compute(self, dep_values):
return self.compute_func(dep_values)
@property
def can_be_serialized(self):
return not self.is_simple_lookup
def evolve(self, **kwargs):
return attr.evolve(self, **kwargs)
def __repr__(self):
return f"Task({self.key!r}, {self.dep_keys!r})"
@attr.s(frozen=True)
class Result:
"""
Represents one value for one entity.
"""
task_key = attr.ib()
value = attr.ib()
local_artifact = attr.ib()
value_is_missing = attr.ib(default=False)
def __repr__(self):
return f"Result({self.task_key!r}, {self.value!r})"
@attr.s(frozen=True)
class Artifact:
"""
Represents a serialized, file-like artifact, either on a local filesystem or in a
cloud object store.
"""
url: str = attr.ib()
content_hash: str = attr.ib()
def evolve(self, **kwargs):
return attr.evolve(self, **kwargs)
class CaseKeySpace(ImmutableSequence):
"""
A set of CaseKey names (without values) -- represents a space of possible
CaseKeys.
"""
def __init__(self, names=None):
if names is None:
names = []
super(CaseKeySpace, self).__init__(sorted(names))
def union(self, other):
return CaseKeySpace(set(self).union(other))
def intersection(self, other):
return CaseKeySpace(name for name in self if name in other)
def difference(self, other):
return CaseKeySpace(name for name in self if name not in other)
def select(self, case_key):
return case_key.project(self)
@classmethod
def union_all(cls, spaces):
if not spaces:
return CaseKeySpace([])
names = set()
for space in spaces:
names = names.union(space)
return CaseKeySpace(names)
@classmethod
def intersection_all(cls, spaces):
if not spaces:
raise ValueError("Can't take the intersection of zero spaces")
names = None
for space in spaces:
if names is None:
names = set(space)
else:
names = names.intersection(space)
return CaseKeySpace(names)
def __repr__(self):
return f'CaseKeySpace({", ".join(repr(name) for name in self)})'
class CaseKey(ImmutableMapping):
"""
A collection of name-token pairs that uniquely identifies a case.
"""
def __init__(self, name_token_pairs):
tokens_by_name = {name: token for name, token in name_token_pairs}
super(CaseKey, self).__init__(tokens_by_name)
self._name_token_pairs = name_token_pairs
self.tokens = tokens_by_name
self.space = CaseKeySpace(list(tokens_by_name.keys()))
self.missing_names = [
# None is a sentinel value used to indicate that no value is available.
# Normally I would prefer to represent missing-ness out-of-band by making the
# `missing_names` field the source of truth here, but the relational methods like
# `project` are cleaner when we use a sentinel value.
name
for name, token in name_token_pairs
if token is None
]
self.has_missing_values = len(self.missing_names) > 0
def project(self, key_space):
return CaseKey(
[
(name, token)
for name, token in self._name_token_pairs
if name in key_space
]
)
def drop(self, key_space):
return CaseKey(
[
(name, token)
for name, token in self._name_token_pairs
if name not in key_space
]
)
def merge(self, other):
tokens_by_name = {name: token for name, token in self._name_token_pairs}
for name, token in other._name_token_pairs:
if name in tokens_by_name:
assert token == tokens_by_name[name]
else:
tokens_by_name[name] = token
return CaseKey([(name, token) for name, token in tokens_by_name.items()])
def __repr__(self):
args_str = ", ".join(f"{name}={token}" for name, token in self.items())
return f"CaseKey({args_str})"
class ResultGroup(ImmutableSequence):
"""
Represents a collection of Results, distinguished by their CaseKeys. Each
CaseKey should have the same set of names.
"""
def __init__(self, results, key_space):
super(ResultGroup, self).__init__(results)
self.key_space = key_space
def __repr__(self):
return f"ResultGroup({list(self)!r})"
def str_from_version_value(value):
if value is None:
return "0"
elif isinstance(value, int):
return str(value)
elif isinstance(value, str):
return value
else:
raise ValueError(f"Version values must be str, int, or None: got {value!r}")
# The CodeVersion and CodeFingerprint classes are used (indirectly) by
# persistence.ArtifactMetadataRecord and can be serialized to YAML and stored in the
# persistent cache. That means if we add new fields to them, we also need to update
# persistence.CACHE_SCHEMA_VERSION.
# TODO Should we just move these classes to persistence.py as well?
@attr.s(frozen=True)
class CodeVersion:
"""
Contains the user-designated version of a piece of code, consisting of a
major and a minor version string, and a boolean that indicates whether it
includes the bytecode. The convention is that changing the major version
indicates a functional change, while changing the minor version indicates a
nonfunctional change. If ``includes_bytecode`` is True, then the major version
is understood to implicitly include the bytecode of the code as well.
"""
major: str = attr.ib(converter=str_from_version_value)
minor: str = attr.ib(converter=str_from_version_value)
includes_bytecode: bool = attr.ib(converter=attr.converters.default_if_none(True))
@attr.s(frozen=True)
class CodeVersioningPolicy:
"""
Contains the version of the user entity function with any additional settings
related to the version. For now, we only have one setting that affects the
analysis-time behavior of the version.
"""
version: CodeVersion = attr.ib()
suppress_bytecode_warnings: bool = attr.ib(
converter=attr.converters.default_if_none(False)
)
@attr.s(frozen=True)
class CodeFingerprint:
"""
A collection of characteristics attempting to uniquely identify a function.
Attributes
----------
version: CodeVersion
A version identifier provided by the user.
bytecode_hash: str
A hash of the function's Python bytecode.
orig_flow_name: str
The name of the flow in which this function was originally defined.
is_identity: bool
If True, indicates that this function is equivalent to the identity function:
it takes one argument and returns it unchanged.
"""
version: CodeVersion = attr.ib()
bytecode_hash: str = attr.ib()
orig_flow_name: str = attr.ib()
is_identity: bool = attr.ib(default=False)
@attr.s(frozen=True)
class VersioningPolicy:
"""
Encodes the versioning rules to use when computing entity values.
"""
check_for_bytecode_errors = attr.ib()
treat_bytecode_as_functional = attr.ib()
ignore_bytecode_exceptions = attr.ib()
@attr.s(frozen=True)
class FunctionAttributes:
"""
Describes properties of a Python function.
"""
code_fingerprint = attr.ib()
code_versioning_policy = attr.ib()
changes_per_run = attr.ib()
aip_task_config = attr.ib()
|
from pytezos import PyTezosClient
class Token(object):
def __init__(self, client: PyTezosClient):
self.client = client
def set_admin(self, contract_id, new_admin):
print(f"Setting fa2 admin on {contract_id} to {new_admin}")
call = self.set_admin_call(contract_id, new_admin)
res = call.autofill().sign().inject(_async=False)
print(f"Done {res[0]["hash"]}")
def set_admin_call(self, contract_id, new_admin):
contract = self.client.contract(contract_id)
op = contract \
.set_admin(new_admin)
return op
def set_minter_call(self, contract_id, new_admin):
contract = self.client.contract(contract_id)
op = contract \
.set_minter(new_admin)
return op
|
from pytezos import PyTezosClient
class Token(object):
def __init__(self, client: PyTezosClient):
self.client = client
def set_admin(self, contract_id, new_admin):
print(f"Setting fa2 admin on {contract_id} to {new_admin}")
call = self.set_admin_call(contract_id, new_admin)
res = call.autofill().sign().inject(_async=False)
print(f"Done {res[0]['hash']}")
def set_admin_call(self, contract_id, new_admin):
contract = self.client.contract(contract_id)
op = contract \
.set_admin(new_admin)
return op
def set_minter_call(self, contract_id, new_admin):
contract = self.client.contract(contract_id)
op = contract \
.set_minter(new_admin)
return op |
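# Hedged usage sketch (not in the original file; endpoint, key, and addresses are
# placeholders): with a configured PyTezosClient, rotating the FA2 admin via the
# class above is a one-liner.
# from pytezos import pytezos
# client = pytezos.using(shell="https://mainnet.api.tez.ie", key="edsk...")
# Token(client).set_admin("KT1...", "tz1...")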
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 7 12:02:50 2021
@author: ministudio
"""
from datetime import datetime, timezone
import pandas as pd
import numpy as np
from alive_progress import alive_bar
def get_all_futures(ftx_client):
tickers = ftx_client.fetchMarkets()
list_perp =[]
#with alive_bar(len(tickers),length=20) as bar:
for ticker in tickers:
if 'PERP' in ticker['id']:
list_perp.append(ticker['id'])
#bar()
return list_perp
def scanner(day,month,year,ticker,ftx):
results = pd.DataFrame(columns=['P/L %'])
start_trade = datetime(year, month, day, 0, 0, 0)
timestamp = start_trade.replace(tzinfo=timezone.utc).timestamp()
candles = ftx.fetchOHLCV(ticker, timeframe='1h', since=timestamp*1000, limit=5000)
candles_df = pd.DataFrame(candles, columns=['MTS','OPEN','HIGH','LOW','CLOSE','VOLUME'])
volume = candles_df.VOLUME.sum()
for j in range(0,24):
# algorithm to step from one candle to the next
ledger = pd.DataFrame(columns=['POSITION','ENTRY PRICE','P_L SINGLE','P_L TOTAL'])
long = True
time_scanner = ''
# compute the offset between consecutive candles of interest
offset = 12
if j != 0:
candles = candles[1:]
try:
for i in range(0,len(candles),offset):
entry_price = candles[i][1]
if i == 0:
start = datetime.utcfromtimestamp(candles[i][0]/1000)
end = datetime.utcfromtimestamp(candles[i+offset][0]/1000) #datetime.utcfromtimestamp(candles[i+offset+10][0]/1000)
#print('FROM',start.strftime("%H:%M"),'TO',end.strftime("%H:%M"))
var_pct = p_l_total = 0
position = 'LONG'
time_scanner = f'{start.strftime("%H:%M")} to {end.strftime("%H:%M")}'
else:
#r_exit_entry = candles[i][4]/candles[i-offset][4] #if not long else candles[i][4]/candles[i-offset][4]
# compute the profit
if long:
var_pct = round((candles[i-offset][1] - candles[i][1])/candles[i-offset][1]*100, 3)
p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct
if not long:
var_pct = round((candles[i][1]-candles[i-offset][1])/candles[i][1]*100, 3)
p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct
if long:
date = datetime.utcfromtimestamp(candles[i][0]/1000)
position = 'LONG'
long = False
else:
# so we go short
date = datetime.utcfromtimestamp(candles[i][0]/1000) #candles[i+10][0]/1000
position = 'SHORT'
long = True
ledger.loc[date] = [position, entry_price, var_pct, p_l_total]
results.loc[time_scanner] = round(ledger['P_L TOTAL'][-1],2)
#print('P/L TOTAL :\t',round(ledger['P_L TOTAL'][-1],2), '%\n')
except Exception as e:
results.loc[time_scanner] = np.NAN
return results, volume
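# Hedged usage sketch (not in the original file): `scanner` expects a ccxt-style
# FTX client; the symbol and start date below are illustrative.
# import ccxt
# ftx = ccxt.ftx()
# results, volume = scanner(7, 12, 2021, 'BTC-PERP', ftx)
# print(results.sort_values('P/L %', ascending=False).head())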
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 7 12:02:50 2021
@author: ministudio
"""
from datetime import datetime, timezone
import pandas as pd
import numpy as np
from alive_progress import alive_bar
def get_all_futures(ftx_client):
tickers = ftx_client.fetchMarkets()
list_perp =[]
#with alive_bar(len(tickers),length=20) as bar:
for ticker in tickers:
if 'PERP' in ticker['id']:
list_perp.append(ticker['id'])
#bar()
return list_perp
def scanner(day,month,year,ticker,ftx):
results = pd.DataFrame(columns=['P/L %'])
start_trade = datetime(year, month, day, 0, 0, 0)
timestamp = start_trade.replace(tzinfo=timezone.utc).timestamp()
candles = ftx.fetchOHLCV(ticker, timeframe='1h', since=timestamp*1000, limit=5000)
candles_df = pd.DataFrame(candles, columns=['MTS','OPEN','HIGH','LOW','CLOSE','VOLUME'])
volume = candles_df.VOLUME.sum()
for j in range(0,24):
# algorithm to step from one candle to the next
ledger = pd.DataFrame(columns=['POSITION','ENTRY PRICE','P_L SINGLE','P_L TOTAL'])
long = True
time_scanner = ''
# compute the offset between consecutive candles of interest
offset = 12
if j != 0:
candles = candles[1:]
try:
for i in range(0,len(candles),offset):
entry_price = candles[i][1]
if i == 0:
start = datetime.utcfromtimestamp(candles[i][0]/1000)
end = datetime.utcfromtimestamp(candles[i+offset][0]/1000) #datetime.utcfromtimestamp(candles[i+offset+10][0]/1000)
#print('FROM',start.strftime("%H:%M"),'TO',end.strftime("%H:%M"))
var_pct = p_l_total = 0
position = 'LONG'
time_scanner = f'{start.strftime("%H:%M")} to {end.strftime("%H:%M")}'
else:
#r_exit_entry = candles[i][4]/candles[i-offset][4] #if not long else candles[i][4]/candles[i-offset][4]
# compute the profit
if long:
var_pct = round((candles[i-offset][1] - candles[i][1])/candles[i-offset][1]*100, 3)
p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct
if not long:
var_pct = round((candles[i][1]-candles[i-offset][1])/candles[i][1]*100, 3)
p_l_total = ledger['P_L TOTAL'].iloc[-1] + var_pct
if long:
date = datetime.utcfromtimestamp(candles[i][0]/1000)
position = 'LONG'
long = False
else:
# so we go short
date = datetime.utcfromtimestamp(candles[i][0]/1000) #candles[i+10][0]/1000
position = 'SHORT'
long = True
ledger.loc[date] = [position, entry_price, var_pct, p_l_total]
results.loc[time_scanner] = round(ledger['P_L TOTAL'][-1],2)
#print('P/L TOTAL :\t',round(ledger['P_L TOTAL'][-1],2), '%\n')
except Exception as e:
results.loc[time_scanner] = np.NAN
return results, volume
|
"""Output formatters."""
import os
from pathlib import Path
from typing import TYPE_CHECKING, Generic, TypeVar, Union
import rich
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
T = TypeVar('T', bound='BaseFormatter')
class BaseFormatter(Generic[T]):
"""Formatter of ansible-lint output.
Base class for output formatters.
Args:
base_dir (str|Path): reference directory against which display relative path.
display_relative_path (bool): whether to show path as relative or absolute
"""
def __init__(self, base_dir: Union[str, Path], display_relative_path: bool) -> None:
"""Initialize a BaseFormatter instance."""
if isinstance(base_dir, str):
base_dir = Path(base_dir)
if base_dir: # can be None
base_dir = base_dir.absolute()
# Required 'cause os.path.relpath() does not accept Path before 3.6
if isinstance(base_dir, Path):
base_dir = str(base_dir) # Drop when Python 3.5 is no longer supported
self._base_dir = base_dir if display_relative_path else None
def _format_path(self, path: Union[str, Path]) -> str:
# Required 'cause os.path.relpath() does not accept Path before 3.6
if isinstance(path, Path):
path = str(path) # Drop when Python 3.5 is no longer supported
if not self._base_dir:
return path
# Use os.path.relpath 'cause Path.relative_to() misbehaves
return os.path.relpath(path, start=self._base_dir)
def format(self, match: "MatchError") -> str:
return str(match)
def escape(self, text: str) -> str:
"""Escapes a string to avoid processing it as markup."""
return rich.markup.escape(text)
class Formatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
_id = getattr(match.rule, 'id', '000')
result = (
f"[error_code]{_id}[/][dim]:[/] [error_title]{self.escape(match.message)}[/]")
if match.tag:
result += f" [dim][error_code]({match.tag})[/][/]"
result += (
"\n"
f"[filename]{self._format_path(match.filename or "")}[/]:{match.position}")
if match.details:
result += f" [dim]{match.details}[/]"
result += "\n"
return result
class QuietFormatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
return (
f"[error_code]{match.rule.id}[/] "
f"[filename]{self._format_path(match.filename or "")}[/]:{match.position}")
class ParseableFormatter(BaseFormatter):
"""Parseable uses PEP8 compatible format."""
def format(self, match: "MatchError") -> str:
result = (
f"[filename]{self._format_path(match.filename or "")}[/]:{match.position}: "
f"[error_code]E{match.rule.id}[/] [dim]{self.escape(match.message)}[/]")
if match.tag:
result += f" [dim][error_code]({match.tag})[/][/]"
return result
class AnnotationsFormatter(BaseFormatter):
# https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-a-warning-message
"""Formatter for emitting violations as GitHub Workflow Commands.
These commands trigger the GHA Workflow runners platform to post violations
in a form of GitHub Checks API annotations that appear rendered in pull-
request files view.
::debug file={name},line={line},col={col},severity={severity}::{message}
::warning file={name},line={line},col={col},severity={severity}::{message}
::error file={name},line={line},col={col},severity={severity}::{message}
Supported levels: debug, warning, error
"""
def format(self, match: "MatchError") -> str:
"""Prepare a match instance for reporting as a GitHub Actions annotation."""
level = self._severity_to_level(match.rule.severity)
file_path = self._format_path(match.filename or "")
line_num = match.linenumber
rule_id = match.rule.id
severity = match.rule.severity
violation_details = self.escape(match.message)
if match.column:
col = f",col={match.column}"
else:
col = ""
return (
f"::{level} file={file_path},line={line_num}{col},severity={severity}"
f"::E{rule_id} {violation_details}"
)
@staticmethod
def _severity_to_level(severity: str) -> str:
if severity in ['VERY_LOW', 'LOW']:
return 'warning'
if severity in ['INFO']:
return 'debug'
# ['MEDIUM', 'HIGH', 'VERY_HIGH'] or anything else
return 'error'
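# Illustrative annotation emitted by AnnotationsFormatter.format() (values invented):
#   ::error file=playbook.yml,line=14,col=3,severity=HIGH::E301 Commands should not change things if nothing needs doing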
class ParseableSeverityFormatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
filename = self._format_path(match.filename or "")
position = match.position
rule_id = u"E{0}".format(match.rule.id)
severity = match.rule.severity
message = self.escape(str(match.message))
return (
f"[filename]{filename}[/]:{position}: [[error_code]{rule_id}[/]] "
f"[[error_code]{severity}[/]] [dim]{message}[/]")
| """Output formatters."""
import os
from pathlib import Path
from typing import TYPE_CHECKING, Generic, TypeVar, Union
import rich
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
T = TypeVar('T', bound='BaseFormatter')
class BaseFormatter(Generic[T]):
"""Formatter of ansible-lint output.
Base class for output formatters.
Args:
base_dir (str|Path): reference directory against which display relative path.
display_relative_path (bool): whether to show path as relative or absolute
"""
def __init__(self, base_dir: Union[str, Path], display_relative_path: bool) -> None:
"""Initialize a BaseFormatter instance."""
if isinstance(base_dir, str):
base_dir = Path(base_dir)
if base_dir: # can be None
base_dir = base_dir.absolute()
# Required 'cause os.path.relpath() does not accept Path before 3.6
if isinstance(base_dir, Path):
base_dir = str(base_dir) # Drop when Python 3.5 is no longer supported
self._base_dir = base_dir if display_relative_path else None
def _format_path(self, path: Union[str, Path]) -> str:
# Required 'cause os.path.relpath() does not accept Path before 3.6
if isinstance(path, Path):
path = str(path) # Drop when Python 3.5 is no longer supported
if not self._base_dir:
return path
# Use os.path.relpath 'cause Path.relative_to() misbehaves
return os.path.relpath(path, start=self._base_dir)
def format(self, match: "MatchError") -> str:
return str(match)
def escape(self, text: str) -> str:
"""Escapes a string to avoid processing it as markup."""
return rich.markup.escape(text)
class Formatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
_id = getattr(match.rule, 'id', '000')
result = (
f"[error_code]{_id}[/][dim]:[/] [error_title]{self.escape(match.message)}[/]")
if match.tag:
result += f" [dim][error_code]({match.tag})[/][/]"
result += (
"\n"
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}")
if match.details:
result += f" [dim]{match.details}[/]"
result += "\n"
return result
class QuietFormatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
return (
f"[error_code]{match.rule.id}[/] "
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}")
class ParseableFormatter(BaseFormatter):
"""Parseable uses PEP8 compatible format."""
def format(self, match: "MatchError") -> str:
result = (
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}: "
f"[error_code]E{match.rule.id}[/] [dim]{self.escape(match.message)}[/]")
if match.tag:
result += f" [dim][error_code]({match.tag})[/][/]"
return result
class AnnotationsFormatter(BaseFormatter):
# https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-a-warning-message
"""Formatter for emitting violations as GitHub Workflow Commands.
These commands trigger the GHA Workflow runners platform to post violations
in a form of GitHub Checks API annotations that appear rendered in pull-
request files view.
::debug file={name},line={line},col={col},severity={severity}::{message}
::warning file={name},line={line},col={col},severity={severity}::{message}
::error file={name},line={line},col={col},severity={severity}::{message}
Supported levels: debug, warning, error
"""
def format(self, match: "MatchError") -> str:
"""Prepare a match instance for reporting as a GitHub Actions annotation."""
level = self._severity_to_level(match.rule.severity)
file_path = self._format_path(match.filename or "")
line_num = match.linenumber
rule_id = match.rule.id
severity = match.rule.severity
violation_details = self.escape(match.message)
if match.column:
col = f",col={match.column}"
else:
col = ""
return (
f"::{level} file={file_path},line={line_num}{col},severity={severity}"
f"::E{rule_id} {violation_details}"
)
@staticmethod
def _severity_to_level(severity: str) -> str:
if severity in ['VERY_LOW', 'LOW']:
return 'warning'
if severity in ['INFO']:
return 'debug'
# ['MEDIUM', 'HIGH', 'VERY_HIGH'] or anything else
return 'error'
class ParseableSeverityFormatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
filename = self._format_path(match.filename or "")
position = match.position
rule_id = u"E{0}".format(match.rule.id)
severity = match.rule.severity
message = self.escape(str(match.message))
return (
f"[filename]{filename}[/]:{position}: [[error_code]{rule_id}[/]] "
f"[[error_code]{severity}[/]] [dim]{message}[/]")
|
# PYTHON_ARGCOMPLETE_OK
"""The command line interface to pipx"""
import argparse
import logging
import logging.config
import os
import re
import shlex
import sys
import textwrap
import time
import urllib.parse
from pathlib import Path
from typing import Any, Callable, Dict, List
import argcomplete # type: ignore
from packaging.requirements import InvalidRequirement, Requirement
from packaging.utils import canonicalize_name
import pipx.constants
from pipx import commands, constants
from pipx.animate import hide_cursor, show_cursor
from pipx.colors import bold, green
from pipx.constants import ExitCode
from pipx.emojis import hazard
from pipx.interpreter import DEFAULT_PYTHON
from pipx.util import PipxError, mkdir, pipx_wrap, rmdir
from pipx.venv import VenvContainer
from pipx.version import __version__
logger = logging.getLogger(__name__)
VenvCompleter = Callable[[str], List[str]]
def print_version() -> None:
print(__version__)
SPEC_HELP = textwrap.dedent(
"""\
The package name or specific installation source passed to pip.
Runs `pip install -U SPEC`.
For example `--spec mypackage==2.0.0` or `--spec git+https://github.com/user/repo.git@branch`
"""
)
PIPX_DESCRIPTION = textwrap.dedent(
f"""
Install and execute apps from Python packages.
Binaries can either be installed globally into isolated Virtual Environments
or run directly in a temporary Virtual Environment.
Virtual Environment location is {str(constants.PIPX_LOCAL_VENVS)}.
Symlinks to apps are placed in {str(constants.LOCAL_BIN_DIR)}.
"""
)
PIPX_DESCRIPTION += pipx_wrap(
"""
optional environment variables:
PIPX_HOME Overrides default pipx location. Virtual Environments will be installed to $PIPX_HOME/venvs.
PIPX_BIN_DIR Overrides location of app installations. Apps are symlinked or copied here.
USE_EMOJI Overrides emoji behavior. Default value varies based on platform.
PIPX_DEFAULT_PYTHON Overrides default python used for commands.
""",
subsequent_indent=" " * 24, # match the indent of argparse options
keep_newlines=True,
)
DOC_DEFAULT_PYTHON = os.getenv("PIPX__DOC_DEFAULT_PYTHON", DEFAULT_PYTHON)
INSTALL_DESCRIPTION = textwrap.dedent(
f"""
The install command is the preferred way to globally install apps
from python packages on your system. It creates an isolated virtual
environment for the package, then ensures the package's apps are
accessible on your $PATH.
The result: apps you can run from anywhere, located in packages
you can cleanly upgrade or uninstall. Guaranteed to not have
dependency version conflicts or interfere with your OS's python
packages. 'sudo' is not required to do this.
pipx install PACKAGE_NAME
pipx install --python PYTHON PACKAGE_NAME
pipx install VCS_URL
pipx install ./LOCAL_PATH
pipx install ZIP_FILE
pipx install TAR_GZ_FILE
The PACKAGE_SPEC argument is passed directly to `pip install`.
The default virtual environment location is {constants.DEFAULT_PIPX_HOME}
and can be overridden by setting the environment variable `PIPX_HOME`
(Virtual Environments will be installed to `$PIPX_HOME/venvs`).
The default app location is {constants.DEFAULT_PIPX_BIN_DIR} and can be
overridden by setting the environment variable `PIPX_BIN_DIR`.
The default python executable used to install a package is
{DOC_DEFAULT_PYTHON} and can be overridden
by setting the environment variable `PIPX_DEFAULT_PYTHON`.
"""
)
class LineWrapRawTextHelpFormatter(argparse.RawDescriptionHelpFormatter):
def _split_lines(self, text: str, width: int) -> List[str]:
text = self._whitespace_matcher.sub(" ", text).strip()
return textwrap.wrap(text, width)
class InstalledVenvsCompleter:
def __init__(self, venv_container: VenvContainer) -> None:
self.packages = [str(p.name) for p in sorted(venv_container.iter_venv_dirs())]
def use(self, prefix: str, **kwargs: Any) -> List[str]:
return [
f"{prefix}{x[len(prefix):]}"
for x in self.packages
if x.startswith(canonicalize_name(prefix))
]
def get_pip_args(parsed_args: Dict[str, str]) -> List[str]:
pip_args: List[str] = []
if parsed_args.get("index_url"):
pip_args += ["--index-url", parsed_args["index_url"]]
if parsed_args.get("pip_args"):
pip_args += shlex.split(parsed_args.get("pip_args", ""))
# make sure --editable is last because it needs to be right before
# package specification
if parsed_args.get("editable"):
pip_args += ["--editable"]
return pip_args
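# Hedged example (not in the original source): for parsed args such as
#   {"index_url": "https://example.org/simple", "pip_args": "--no-cache-dir", "editable": True}
# get_pip_args returns
#   ["--index-url", "https://example.org/simple", "--no-cache-dir", "--editable"]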
def get_venv_args(parsed_args: Dict[str, str]) -> List[str]:
venv_args: List[str] = []
if parsed_args.get("system_site_packages"):
venv_args += ["--system-site-packages"]
return venv_args
def run_pipx_command(args: argparse.Namespace) -> ExitCode: # noqa: C901
verbose = args.verbose if "verbose" in args else False
pip_args = get_pip_args(vars(args))
venv_args = get_venv_args(vars(args))
venv_container = VenvContainer(constants.PIPX_LOCAL_VENVS)
if "package" in args:
package = args.package
if urllib.parse.urlparse(package).scheme:
raise PipxError("Package cannot be a url")
if "spec" in args and args.spec is not None:
if urllib.parse.urlparse(args.spec).scheme:
if "#egg=" not in args.spec:
args.spec = args.spec + f"#egg={package}"
venv_dir = venv_container.get_venv_dir(package)
logger.info(f"Virtual Environment location is {venv_dir}")
if "skip" in args:
skip_list = [canonicalize_name(x) for x in args.skip]
if args.command == "run":
package_or_url = (
args.spec
if ("spec" in args and args.spec is not None)
else args.app_with_args[0]
)
# For any package, we need to just use the name
try:
package_name = Requirement(args.app_with_args[0]).name
except InvalidRequirement:
# Raw URLs to scripts are supported, too, so continue if
# we can't parse this as a package
package_name = args.app_with_args[0]
use_cache = not args.no_cache
commands.run(
package_name,
package_or_url,
args.app_with_args[1:],
args.python,
pip_args,
venv_args,
args.pypackages,
verbose,
use_cache,
)
# We should never reach here because run() is NoReturn.
return ExitCode(1)
elif args.command == "install":
return commands.install(
None,
None,
args.package_spec,
constants.LOCAL_BIN_DIR,
args.python,
pip_args,
venv_args,
verbose,
force=args.force,
include_dependencies=args.include_deps,
suffix=args.suffix,
)
elif args.command == "inject":
return commands.inject(
venv_dir,
None,
args.dependencies,
pip_args,
verbose=verbose,
include_apps=args.include_apps,
include_dependencies=args.include_deps,
force=args.force,
)
elif args.command == "upgrade":
return commands.upgrade(
venv_dir,
pip_args,
verbose,
include_injected=args.include_injected,
force=args.force,
)
elif args.command == "upgrade-all":
return commands.upgrade_all(
venv_container,
verbose,
include_injected=args.include_injected,
skip=skip_list,
force=args.force,
)
elif args.command == "list":
return commands.list_packages(venv_container, args.include_injected, args.json)
elif args.command == "uninstall":
return commands.uninstall(venv_dir, constants.LOCAL_BIN_DIR, verbose)
elif args.command == "uninstall-all":
return commands.uninstall_all(venv_container, constants.LOCAL_BIN_DIR, verbose)
elif args.command == "reinstall":
return commands.reinstall(
venv_dir=venv_dir,
local_bin_dir=constants.LOCAL_BIN_DIR,
python=args.python,
verbose=verbose,
)
elif args.command == "reinstall-all":
return commands.reinstall_all(
venv_container,
constants.LOCAL_BIN_DIR,
args.python,
verbose,
skip=skip_list,
)
elif args.command == "runpip":
if not venv_dir:
raise PipxError("Developer error: venv_dir is not defined.")
return commands.run_pip(package, venv_dir, args.pipargs, args.verbose)
elif args.command == "ensurepath":
try:
return commands.ensure_pipx_paths(force=args.force)
except Exception as e:
logger.debug("Uncaught Exception:", exc_info=True)
raise PipxError(str(e), wrap_message=False)
elif args.command == "completions":
print(constants.completion_instructions)
return ExitCode(0)
else:
raise PipxError(f"Unknown command {args.command}")
def add_pip_venv_args(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--system-site-packages",
action="store_true",
help="Give the virtual environment access to the system site-packages dir.",
)
parser.add_argument("--index-url", "-i", help="Base URL of Python Package Index")
parser.add_argument(
"--editable",
"-e",
help="Install a project in editable mode",
action="store_true",
)
parser.add_argument(
"--pip-args",
help="Arbitrary pip arguments to pass directly to pip install/upgrade commands",
)
def add_include_dependencies(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--include-deps", help="Include apps of dependent packages", action="store_true"
)
def _add_install(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"install",
help="Install a package",
formatter_class=LineWrapRawTextHelpFormatter,
description=INSTALL_DESCRIPTION,
)
p.add_argument("package_spec", help="package name or pip installation spec")
add_include_dependencies(p)
p.add_argument("--verbose", action="store_true")
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument(
"--suffix",
default="",
help=(
"Optional suffix for virtual environment and executable names. "
"NOTE: The suffix feature is experimental and subject to change."
),
)
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to create the Virtual Environment and run the "
"associated app/apps. Must be v3.6+."
),
)
add_pip_venv_args(p)
def _add_inject(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"inject",
help="Install packages into an existing Virtual Environment",
description="Installs packages to an existing pipx-managed virtual environment.",
)
p.add_argument(
"package",
help="Name of the existing pipx-managed Virtual Environment to inject into",
).completer = venv_completer
p.add_argument(
"dependencies",
nargs="+",
help="the packages to inject into the Virtual Environment--either package name or pip package spec",
)
p.add_argument(
"--include-apps",
action="store_true",
help="Add apps from the injected packages onto your PATH",
)
add_include_dependencies(p)
add_pip_venv_args(p)
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument("--verbose", action="store_true")
def _add_upgrade(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"upgrade",
help="Upgrade a package",
description="Upgrade a package in a pipx-managed Virtual Environment by running 'pip install --upgrade PACKAGE'",
)
p.add_argument("package").completer = venv_completer
p.add_argument(
"--include-injected",
action="store_true",
help="Also upgrade packages injected into the main app's environment",
)
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
add_pip_venv_args(p)
p.add_argument("--verbose", action="store_true")
def _add_upgrade_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"upgrade-all",
help="Upgrade all packages. Runs `pip install -U <pkgname>` for each package.",
description="Upgrades all packages within their virtual environments by running 'pip install --upgrade PACKAGE'",
)
p.add_argument(
"--include-injected",
action="store_true",
help="Also upgrade packages injected into the main app's environment",
)
p.add_argument("--skip", nargs="+", default=[], help="skip these packages")
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument("--verbose", action="store_true")
def _add_uninstall(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"uninstall",
help="Uninstall a package",
description="Uninstalls a pipx-managed Virtual Environment by deleting it and any files that point to its apps.",
)
p.add_argument("package").completer = venv_completer
p.add_argument("--verbose", action="store_true")
def _add_uninstall_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"uninstall-all",
help="Uninstall all packages",
description="Uninstall all pipx-managed packages",
)
p.add_argument("--verbose", action="store_true")
def _add_reinstall(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"reinstall",
formatter_class=LineWrapRawTextHelpFormatter,
help="Reinstall a package",
description=textwrap.dedent(
"""
Reinstalls a package.
Package is uninstalled, then installed with pipx install PACKAGE
with the same options used in the original install of PACKAGE.
"""
),
)
p.add_argument("package").completer = venv_completer
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to recreate the Virtual Environment "
"and run the associated app/apps. Must be v3.6+."
),
)
p.add_argument("--verbose", action="store_true")
def _add_reinstall_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"reinstall-all",
formatter_class=LineWrapRawTextHelpFormatter,
help="Reinstall all packages",
description=textwrap.dedent(
"""
Reinstalls all packages.
Packages are uninstalled, then installed with pipx install PACKAGE
with the same options used in the original install of PACKAGE.
This is useful if you upgraded to a new version of Python and want
all your packages to use the latest as well.
"""
),
)
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to recreate the Virtual Environment "
"and run the associated app/apps. Must be v3.6+."
),
)
p.add_argument("--skip", nargs="+", default=[], help="skip these packages")
p.add_argument("--verbose", action="store_true")
def _add_list(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"list",
help="List installed packages",
description="List packages and apps installed with pipx",
)
p.add_argument(
"--include-injected",
action="store_true",
help="Show packages injected into the main app's environment",
)
p.add_argument(
"--json", action="store_true", help="Output rich data in json format."
)
p.add_argument("--verbose", action="store_true")
def _add_run(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"run",
formatter_class=LineWrapRawTextHelpFormatter,
help=(
"Download the latest version of a package to a temporary virtual environment, "
"then run an app from it. Also compatible with local `__pypackages__` "
"directory (experimental)."
),
description=textwrap.dedent(
f"""
Download the latest version of a package to a temporary virtual environment,
then run an app from it. The environment will be cached
and re-used for up to {constants.TEMP_VENV_EXPIRATION_THRESHOLD_DAYS} days. This
means subsequent calls to 'run' for the same package will be faster
since they can re-use the cached Virtual Environment.
In support of PEP 582 'run' will use apps found in a local __pypackages__
directory, if present. Please note that this behavior is experimental,
and acts as a companion tool to pythonloc. It may be modified or
removed in the future. See https://github.com/cs01/pythonloc.
"""
),
)
p.add_argument(
"--no-cache",
action="store_true",
help="Do not re-use cached virtual environment if it exists",
)
p.add_argument(
"app_with_args",
metavar="app ...",
nargs=argparse.REMAINDER,
help="app/package name and any arguments to be passed to it",
default=[],
)
p.add_argument(
"--pypackages",
action="store_true",
help="Require app to be run from local __pypackages__ directory",
)
p.add_argument("--spec", help=SPEC_HELP)
p.add_argument("--verbose", action="store_true")
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help="The Python version to run package's CLI app with. Must be v3.6+.",
)
add_pip_venv_args(p)
p.set_defaults(subparser=p)
# modify usage text to show required app argument
p.usage = re.sub(r"^usage: ", "", p.format_usage())
# add a double-dash to usage text to show requirement before app
p.usage = re.sub(r"\.\.\.", "app ...", p.usage)
def _add_runpip(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"runpip",
help="Run pip in an existing pipx-managed Virtual Environment",
description="Run pip in an existing pipx-managed Virtual Environment",
)
p.add_argument(
"package",
help="Name of the existing pipx-managed Virtual Environment to run pip in",
).completer = venv_completer
p.add_argument(
"pipargs",
nargs=argparse.REMAINDER,
default=[],
help="Arguments to forward to pip command",
)
p.add_argument("--verbose", action="store_true")
def _add_ensurepath(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"ensurepath",
help=(
"Ensure directories necessary for pipx operation are in your "
"PATH environment variable."
),
description=(
"Ensure directory where pipx stores apps is in your "
"PATH environment variable. Also if pipx was installed via "
"`pip install --user`, ensure pipx itself is in your PATH. "
"Note that running this may modify "
"your shell's configuration file(s) such as '~/.bashrc'."
),
)
p.add_argument(
"--force",
"-f",
action="store_true",
help=(
"Add text to your shell's config file even if it looks like your "
"PATH already contains paths to pipx and pipx-install apps."
),
)
def get_command_parser() -> argparse.ArgumentParser:
venv_container = VenvContainer(constants.PIPX_LOCAL_VENVS)
completer_venvs = InstalledVenvsCompleter(venv_container)
parser = argparse.ArgumentParser(
prog="pipx",
formatter_class=LineWrapRawTextHelpFormatter,
description=PIPX_DESCRIPTION,
)
parser.man_short_description = PIPX_DESCRIPTION.splitlines()[1] # type: ignore
subparsers = parser.add_subparsers(
dest="command", description="Get help for commands with pipx COMMAND --help"
)
_add_install(subparsers)
_add_inject(subparsers, completer_venvs.use)
_add_upgrade(subparsers, completer_venvs.use)
_add_upgrade_all(subparsers)
_add_uninstall(subparsers, completer_venvs.use)
_add_uninstall_all(subparsers)
_add_reinstall(subparsers, completer_venvs.use)
_add_reinstall_all(subparsers)
_add_list(subparsers)
_add_run(subparsers)
_add_runpip(subparsers, completer_venvs.use)
_add_ensurepath(subparsers)
parser.add_argument("--version", action="store_true", help="Print version and exit")
subparsers.add_parser(
"completions",
help="Print instructions on enabling shell completions for pipx",
description="Print instructions on enabling shell completions for pipx",
)
return parser
def delete_oldest_logs(file_list: List[Path], keep_number: int) -> None:
file_list = sorted(file_list)
if len(file_list) > keep_number:
for existing_file in file_list[:-keep_number]:
try:
existing_file.unlink()
except FileNotFoundError:
pass
def setup_log_file() -> Path:
max_logs = 10
# don't use utils.mkdir, to prevent emission of log message
constants.PIPX_LOG_DIR.mkdir(parents=True, exist_ok=True)
delete_oldest_logs(list(constants.PIPX_LOG_DIR.glob("cmd_*[0-9].log")), max_logs)
delete_oldest_logs(
list(constants.PIPX_LOG_DIR.glob("cmd_*_pip_errors.log")), max_logs
)
datetime_str = time.strftime("%Y-%m-%d_%H.%M.%S")
log_file = constants.PIPX_LOG_DIR / f"cmd_{datetime_str}.log"
counter = 1
while log_file.exists() and counter < 10:
log_file = constants.PIPX_LOG_DIR / f"cmd_{datetime_str}_{counter}.log"
counter += 1
return log_file
def setup_logging(verbose: bool) -> None:
pipx_str = bold(green("pipx >")) if sys.stdout.isatty() else "pipx >"
pipx.constants.pipx_log_file = setup_log_file()
# "incremental" is False so previous pytest tests don't accumulate handlers
logging_config = {
"version": 1,
"formatters": {
"stream_nonverbose": {
"class": "logging.Formatter",
"format": "{message}",
"style": "{",
},
"stream_verbose": {
"class": "logging.Formatter",
"format": pipx_str + "({funcName}:{lineno}): {message}",
"style": "{",
},
"file": {
"class": "logging.Formatter",
"format": "{relativeCreated: >8.1f}ms ({funcName}:{lineno}): {message}",
"style": "{",
},
},
"handlers": {
"stream": {
"class": "logging.StreamHandler",
"formatter": "stream_verbose" if verbose else "stream_nonverbose",
"level": "INFO" if verbose else "WARNING",
},
"file": {
"class": "logging.FileHandler",
"formatter": "file",
"filename": str(pipx.constants.pipx_log_file),
"encoding": "utf-8",
"level": "DEBUG",
},
},
"loggers": {"pipx": {"handlers": ["stream", "file"], "level": "DEBUG"}},
"incremental": False,
}
logging.config.dictConfig(logging_config)
def setup(args: argparse.Namespace) -> None:
if "version" in args and args.version:
print_version()
sys.exit(0)
setup_logging("verbose" in args and args.verbose)
logger.debug(f"{time.strftime("%Y-%m-%d %H:%M:%S")}")
logger.debug(f"{" ".join(sys.argv)}")
logger.info(f"pipx version is {__version__}")
logger.info(f"Default python interpreter is {repr(DEFAULT_PYTHON)}")
mkdir(constants.PIPX_LOCAL_VENVS)
mkdir(constants.LOCAL_BIN_DIR)
mkdir(constants.PIPX_VENV_CACHEDIR)
rmdir(constants.PIPX_TRASH_DIR, False)
old_pipx_venv_location = constants.PIPX_LOCAL_VENVS / "pipx-app"
if old_pipx_venv_location.exists():
logger.warning(
pipx_wrap(
f"""
{hazard} A virtual environment for pipx was detected at
{str(old_pipx_venv_location)}. The 'pipx-app' package has been
renamed back to 'pipx'
(https://github.com/pypa/pipx/issues/82).
""",
subsequent_indent=" " * 4,
)
)
def check_args(parsed_pipx_args: argparse.Namespace) -> None:
if parsed_pipx_args.command == "run":
# we manually discard a first -- because using nargs=argparse.REMAINDER
# will not do it automatically
if parsed_pipx_args.app_with_args and parsed_pipx_args.app_with_args[0] == "--":
parsed_pipx_args.app_with_args.pop(0)
# since we would like app to be required but not in a separate argparse
# add_argument, we implement our own missing required arg error
if not parsed_pipx_args.app_with_args:
parsed_pipx_args.subparser.error(
"the following arguments are required: app"
)
def cli() -> ExitCode:
"""Entry point from command line"""
try:
hide_cursor()
parser = get_command_parser()
argcomplete.autocomplete(parser)
parsed_pipx_args = parser.parse_args()
setup(parsed_pipx_args)
check_args(parsed_pipx_args)
if not parsed_pipx_args.command:
parser.print_help()
return ExitCode(1)
return run_pipx_command(parsed_pipx_args)
except PipxError as e:
print(str(e), file=sys.stderr)
logger.debug(f"PipxError: {e}", exc_info=True)
return ExitCode(1)
except KeyboardInterrupt:
return ExitCode(1)
except Exception:
logger.debug("Uncaught Exception:", exc_info=True)
raise
finally:
logger.debug("pipx finished.")
show_cursor()
if __name__ == "__main__":
sys.exit(cli())
| # PYTHON_ARGCOMPLETE_OK
"""The command line interface to pipx"""
import argparse
import logging
import logging.config
import os
import re
import shlex
import sys
import textwrap
import time
import urllib.parse
from pathlib import Path
from typing import Any, Callable, Dict, List
import argcomplete # type: ignore
from packaging.requirements import InvalidRequirement, Requirement
from packaging.utils import canonicalize_name
import pipx.constants
from pipx import commands, constants
from pipx.animate import hide_cursor, show_cursor
from pipx.colors import bold, green
from pipx.constants import ExitCode
from pipx.emojis import hazard
from pipx.interpreter import DEFAULT_PYTHON
from pipx.util import PipxError, mkdir, pipx_wrap, rmdir
from pipx.venv import VenvContainer
from pipx.version import __version__
logger = logging.getLogger(__name__)
VenvCompleter = Callable[[str], List[str]]
def print_version() -> None:
print(__version__)
SPEC_HELP = textwrap.dedent(
"""\
The package name or specific installation source passed to pip.
Runs `pip install -U SPEC`.
For example `--spec mypackage==2.0.0` or `--spec git+https://github.com/user/repo.git@branch`
"""
)
PIPX_DESCRIPTION = textwrap.dedent(
f"""
Install and execute apps from Python packages.
Binaries can either be installed globally into isolated Virtual Environments
or run directly in a temporary Virtual Environment.
Virtual Environment location is {str(constants.PIPX_LOCAL_VENVS)}.
Symlinks to apps are placed in {str(constants.LOCAL_BIN_DIR)}.
"""
)
PIPX_DESCRIPTION += pipx_wrap(
"""
optional environment variables:
PIPX_HOME Overrides default pipx location. Virtual Environments will be installed to $PIPX_HOME/venvs.
PIPX_BIN_DIR Overrides location of app installations. Apps are symlinked or copied here.
USE_EMOJI Overrides emoji behavior. Default value varies based on platform.
PIPX_DEFAULT_PYTHON Overrides default python used for commands.
""",
subsequent_indent=" " * 24, # match the indent of argparse options
keep_newlines=True,
)
DOC_DEFAULT_PYTHON = os.getenv("PIPX__DOC_DEFAULT_PYTHON", DEFAULT_PYTHON)
INSTALL_DESCRIPTION = textwrap.dedent(
f"""
The install command is the preferred way to globally install apps
from python packages on your system. It creates an isolated virtual
environment for the package, then ensures the package's apps are
accessible on your $PATH.
The result: apps you can run from anywhere, located in packages
you can cleanly upgrade or uninstall. Guaranteed to not have
dependency version conflicts or interfere with your OS's python
packages. 'sudo' is not required to do this.
pipx install PACKAGE_NAME
pipx install --python PYTHON PACKAGE_NAME
pipx install VCS_URL
pipx install ./LOCAL_PATH
pipx install ZIP_FILE
pipx install TAR_GZ_FILE
The PACKAGE_SPEC argument is passed directly to `pip install`.
The default virtual environment location is {constants.DEFAULT_PIPX_HOME}
and can be overridden by setting the environment variable `PIPX_HOME`
(Virtual Environments will be installed to `$PIPX_HOME/venvs`).
The default app location is {constants.DEFAULT_PIPX_BIN_DIR} and can be
overridden by setting the environment variable `PIPX_BIN_DIR`.
The default python executable used to install a package is
{DOC_DEFAULT_PYTHON} and can be overridden
by setting the environment variable `PIPX_DEFAULT_PYTHON`.
"""
)
class LineWrapRawTextHelpFormatter(argparse.RawDescriptionHelpFormatter):
def _split_lines(self, text: str, width: int) -> List[str]:
text = self._whitespace_matcher.sub(" ", text).strip()
return textwrap.wrap(text, width)
class InstalledVenvsCompleter:
def __init__(self, venv_container: VenvContainer) -> None:
self.packages = [str(p.name) for p in sorted(venv_container.iter_venv_dirs())]
def use(self, prefix: str, **kwargs: Any) -> List[str]:
return [
f"{prefix}{x[len(prefix):]}"
for x in self.packages
if x.startswith(canonicalize_name(prefix))
]
def get_pip_args(parsed_args: Dict[str, str]) -> List[str]:
pip_args: List[str] = []
if parsed_args.get("index_url"):
pip_args += ["--index-url", parsed_args["index_url"]]
if parsed_args.get("pip_args"):
pip_args += shlex.split(parsed_args.get("pip_args", ""))
# make sure --editable is last because it needs to be right before
# package specification
if parsed_args.get("editable"):
pip_args += ["--editable"]
return pip_args
def get_venv_args(parsed_args: Dict[str, str]) -> List[str]:
venv_args: List[str] = []
if parsed_args.get("system_site_packages"):
venv_args += ["--system-site-packages"]
return venv_args
def run_pipx_command(args: argparse.Namespace) -> ExitCode: # noqa: C901
verbose = args.verbose if "verbose" in args else False
pip_args = get_pip_args(vars(args))
venv_args = get_venv_args(vars(args))
venv_container = VenvContainer(constants.PIPX_LOCAL_VENVS)
if "package" in args:
package = args.package
if urllib.parse.urlparse(package).scheme:
raise PipxError("Package cannot be a url")
if "spec" in args and args.spec is not None:
if urllib.parse.urlparse(args.spec).scheme:
if "#egg=" not in args.spec:
args.spec = args.spec + f"#egg={package}"
venv_dir = venv_container.get_venv_dir(package)
logger.info(f"Virtual Environment location is {venv_dir}")
if "skip" in args:
skip_list = [canonicalize_name(x) for x in args.skip]
if args.command == "run":
package_or_url = (
args.spec
if ("spec" in args and args.spec is not None)
else args.app_with_args[0]
)
# For any package, we need to just use the name
try:
package_name = Requirement(args.app_with_args[0]).name
except InvalidRequirement:
# Raw URLs to scripts are supported, too, so continue if
# we can't parse this as a package
package_name = args.app_with_args[0]
use_cache = not args.no_cache
commands.run(
package_name,
package_or_url,
args.app_with_args[1:],
args.python,
pip_args,
venv_args,
args.pypackages,
verbose,
use_cache,
)
# We should never reach here because run() is NoReturn.
return ExitCode(1)
elif args.command == "install":
return commands.install(
None,
None,
args.package_spec,
constants.LOCAL_BIN_DIR,
args.python,
pip_args,
venv_args,
verbose,
force=args.force,
include_dependencies=args.include_deps,
suffix=args.suffix,
)
elif args.command == "inject":
return commands.inject(
venv_dir,
None,
args.dependencies,
pip_args,
verbose=verbose,
include_apps=args.include_apps,
include_dependencies=args.include_deps,
force=args.force,
)
elif args.command == "upgrade":
return commands.upgrade(
venv_dir,
pip_args,
verbose,
include_injected=args.include_injected,
force=args.force,
)
elif args.command == "upgrade-all":
return commands.upgrade_all(
venv_container,
verbose,
include_injected=args.include_injected,
skip=skip_list,
force=args.force,
)
elif args.command == "list":
return commands.list_packages(venv_container, args.include_injected, args.json)
elif args.command == "uninstall":
return commands.uninstall(venv_dir, constants.LOCAL_BIN_DIR, verbose)
elif args.command == "uninstall-all":
return commands.uninstall_all(venv_container, constants.LOCAL_BIN_DIR, verbose)
elif args.command == "reinstall":
return commands.reinstall(
venv_dir=venv_dir,
local_bin_dir=constants.LOCAL_BIN_DIR,
python=args.python,
verbose=verbose,
)
elif args.command == "reinstall-all":
return commands.reinstall_all(
venv_container,
constants.LOCAL_BIN_DIR,
args.python,
verbose,
skip=skip_list,
)
elif args.command == "runpip":
if not venv_dir:
raise PipxError("Developer error: venv_dir is not defined.")
return commands.run_pip(package, venv_dir, args.pipargs, args.verbose)
elif args.command == "ensurepath":
try:
return commands.ensure_pipx_paths(force=args.force)
except Exception as e:
logger.debug("Uncaught Exception:", exc_info=True)
raise PipxError(str(e), wrap_message=False)
elif args.command == "completions":
print(constants.completion_instructions)
return ExitCode(0)
else:
raise PipxError(f"Unknown command {args.command}")
def add_pip_venv_args(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--system-site-packages",
action="store_true",
help="Give the virtual environment access to the system site-packages dir.",
)
parser.add_argument("--index-url", "-i", help="Base URL of Python Package Index")
parser.add_argument(
"--editable",
"-e",
help="Install a project in editable mode",
action="store_true",
)
parser.add_argument(
"--pip-args",
help="Arbitrary pip arguments to pass directly to pip install/upgrade commands",
)
def add_include_dependencies(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--include-deps", help="Include apps of dependent packages", action="store_true"
)
def _add_install(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"install",
help="Install a package",
formatter_class=LineWrapRawTextHelpFormatter,
description=INSTALL_DESCRIPTION,
)
p.add_argument("package_spec", help="package name or pip installation spec")
add_include_dependencies(p)
p.add_argument("--verbose", action="store_true")
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument(
"--suffix",
default="",
help=(
"Optional suffix for virtual environment and executable names. "
"NOTE: The suffix feature is experimental and subject to change."
),
)
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to create the Virtual Environment and run the "
"associated app/apps. Must be v3.6+."
),
)
add_pip_venv_args(p)
def _add_inject(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"inject",
help="Install packages into an existing Virtual Environment",
description="Installs packages to an existing pipx-managed virtual environment.",
)
p.add_argument(
"package",
help="Name of the existing pipx-managed Virtual Environment to inject into",
).completer = venv_completer
p.add_argument(
"dependencies",
nargs="+",
help="the packages to inject into the Virtual Environment--either package name or pip package spec",
)
p.add_argument(
"--include-apps",
action="store_true",
help="Add apps from the injected packages onto your PATH",
)
add_include_dependencies(p)
add_pip_venv_args(p)
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument("--verbose", action="store_true")
def _add_upgrade(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"upgrade",
help="Upgrade a package",
description="Upgrade a package in a pipx-managed Virtual Environment by running 'pip install --upgrade PACKAGE'",
)
p.add_argument("package").completer = venv_completer
p.add_argument(
"--include-injected",
action="store_true",
help="Also upgrade packages injected into the main app's environment",
)
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
add_pip_venv_args(p)
p.add_argument("--verbose", action="store_true")
def _add_upgrade_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"upgrade-all",
help="Upgrade all packages. Runs `pip install -U <pkgname>` for each package.",
description="Upgrades all packages within their virtual environments by running 'pip install --upgrade PACKAGE'",
)
p.add_argument(
"--include-injected",
action="store_true",
help="Also upgrade packages injected into the main app's environment",
)
p.add_argument("--skip", nargs="+", default=[], help="skip these packages")
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument("--verbose", action="store_true")
def _add_uninstall(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"uninstall",
help="Uninstall a package",
description="Uninstalls a pipx-managed Virtual Environment by deleting it and any files that point to its apps.",
)
p.add_argument("package").completer = venv_completer
p.add_argument("--verbose", action="store_true")
def _add_uninstall_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"uninstall-all",
help="Uninstall all packages",
description="Uninstall all pipx-managed packages",
)
p.add_argument("--verbose", action="store_true")
def _add_reinstall(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"reinstall",
formatter_class=LineWrapRawTextHelpFormatter,
help="Reinstall a package",
description=textwrap.dedent(
"""
Reinstalls a package.
Package is uninstalled, then installed with pipx install PACKAGE
with the same options used in the original install of PACKAGE.
"""
),
)
p.add_argument("package").completer = venv_completer
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to recreate the Virtual Environment "
"and run the associated app/apps. Must be v3.6+."
),
)
p.add_argument("--verbose", action="store_true")
def _add_reinstall_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"reinstall-all",
formatter_class=LineWrapRawTextHelpFormatter,
help="Reinstall all packages",
description=textwrap.dedent(
"""
Reinstalls all packages.
Packages are uninstalled, then installed with pipx install PACKAGE
with the same options used in the original install of PACKAGE.
This is useful if you upgraded to a new version of Python and want
all your packages to use the latest as well.
"""
),
)
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to recreate the Virtual Environment "
"and run the associated app/apps. Must be v3.6+."
),
)
p.add_argument("--skip", nargs="+", default=[], help="skip these packages")
p.add_argument("--verbose", action="store_true")
def _add_list(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"list",
help="List installed packages",
description="List packages and apps installed with pipx",
)
p.add_argument(
"--include-injected",
action="store_true",
help="Show packages injected into the main app's environment",
)
p.add_argument(
"--json", action="store_true", help="Output rich data in json format."
)
p.add_argument("--verbose", action="store_true")
def _add_run(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"run",
formatter_class=LineWrapRawTextHelpFormatter,
help=(
"Download the latest version of a package to a temporary virtual environment, "
"then run an app from it. Also compatible with local `__pypackages__` "
"directory (experimental)."
),
description=textwrap.dedent(
f"""
Download the latest version of a package to a temporary virtual environment,
then run an app from it. The environment will be cached
and re-used for up to {constants.TEMP_VENV_EXPIRATION_THRESHOLD_DAYS} days. This
means subsequent calls to 'run' for the same package will be faster
since they can re-use the cached Virtual Environment.
In support of PEP 582 'run' will use apps found in a local __pypackages__
directory, if present. Please note that this behavior is experimental,
and acts as a companion tool to pythonloc. It may be modified or
removed in the future. See https://github.com/cs01/pythonloc.
"""
),
)
p.add_argument(
"--no-cache",
action="store_true",
help="Do not re-use cached virtual environment if it exists",
)
p.add_argument(
"app_with_args",
metavar="app ...",
nargs=argparse.REMAINDER,
help="app/package name and any arguments to be passed to it",
default=[],
)
p.add_argument(
"--pypackages",
action="store_true",
help="Require app to be run from local __pypackages__ directory",
)
p.add_argument("--spec", help=SPEC_HELP)
p.add_argument("--verbose", action="store_true")
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help="The Python version to run package's CLI app with. Must be v3.6+.",
)
add_pip_venv_args(p)
p.set_defaults(subparser=p)
# modify usage text to show required app argument
p.usage = re.sub(r"^usage: ", "", p.format_usage())
# add a double-dash to usage text to show requirement before app
p.usage = re.sub(r"\.\.\.", "app ...", p.usage)
def _add_runpip(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"runpip",
help="Run pip in an existing pipx-managed Virtual Environment",
description="Run pip in an existing pipx-managed Virtual Environment",
)
p.add_argument(
"package",
help="Name of the existing pipx-managed Virtual Environment to run pip in",
).completer = venv_completer
p.add_argument(
"pipargs",
nargs=argparse.REMAINDER,
default=[],
help="Arguments to forward to pip command",
)
p.add_argument("--verbose", action="store_true")
def _add_ensurepath(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"ensurepath",
help=(
"Ensure directories necessary for pipx operation are in your "
"PATH environment variable."
),
description=(
"Ensure directory where pipx stores apps is in your "
"PATH environment variable. Also if pipx was installed via "
"`pip install --user`, ensure pipx itself is in your PATH. "
"Note that running this may modify "
"your shell's configuration file(s) such as '~/.bashrc'."
),
)
p.add_argument(
"--force",
"-f",
action="store_true",
help=(
"Add text to your shell's config file even if it looks like your "
"PATH already contains paths to pipx and pipx-install apps."
),
)
def get_command_parser() -> argparse.ArgumentParser:
venv_container = VenvContainer(constants.PIPX_LOCAL_VENVS)
completer_venvs = InstalledVenvsCompleter(venv_container)
parser = argparse.ArgumentParser(
prog="pipx",
formatter_class=LineWrapRawTextHelpFormatter,
description=PIPX_DESCRIPTION,
)
parser.man_short_description = PIPX_DESCRIPTION.splitlines()[1] # type: ignore
subparsers = parser.add_subparsers(
dest="command", description="Get help for commands with pipx COMMAND --help"
)
_add_install(subparsers)
_add_inject(subparsers, completer_venvs.use)
_add_upgrade(subparsers, completer_venvs.use)
_add_upgrade_all(subparsers)
_add_uninstall(subparsers, completer_venvs.use)
_add_uninstall_all(subparsers)
_add_reinstall(subparsers, completer_venvs.use)
_add_reinstall_all(subparsers)
_add_list(subparsers)
_add_run(subparsers)
_add_runpip(subparsers, completer_venvs.use)
_add_ensurepath(subparsers)
parser.add_argument("--version", action="store_true", help="Print version and exit")
subparsers.add_parser(
"completions",
help="Print instructions on enabling shell completions for pipx",
description="Print instructions on enabling shell completions for pipx",
)
return parser
def delete_oldest_logs(file_list: List[Path], keep_number: int) -> None:
file_list = sorted(file_list)
if len(file_list) > keep_number:
for existing_file in file_list[:-keep_number]:
try:
existing_file.unlink()
except FileNotFoundError:
pass
def setup_log_file() -> Path:
max_logs = 10
# don't use utils.mkdir, to prevent emission of log message
constants.PIPX_LOG_DIR.mkdir(parents=True, exist_ok=True)
delete_oldest_logs(list(constants.PIPX_LOG_DIR.glob("cmd_*[0-9].log")), max_logs)
delete_oldest_logs(
list(constants.PIPX_LOG_DIR.glob("cmd_*_pip_errors.log")), max_logs
)
datetime_str = time.strftime("%Y-%m-%d_%H.%M.%S")
log_file = constants.PIPX_LOG_DIR / f"cmd_{datetime_str}.log"
counter = 1
while log_file.exists() and counter < 10:
log_file = constants.PIPX_LOG_DIR / f"cmd_{datetime_str}_{counter}.log"
counter += 1
return log_file
def setup_logging(verbose: bool) -> None:
pipx_str = bold(green("pipx >")) if sys.stdout.isatty() else "pipx >"
pipx.constants.pipx_log_file = setup_log_file()
# "incremental" is False so previous pytest tests don't accumulate handlers
logging_config = {
"version": 1,
"formatters": {
"stream_nonverbose": {
"class": "logging.Formatter",
"format": "{message}",
"style": "{",
},
"stream_verbose": {
"class": "logging.Formatter",
"format": pipx_str + "({funcName}:{lineno}): {message}",
"style": "{",
},
"file": {
"class": "logging.Formatter",
"format": "{relativeCreated: >8.1f}ms ({funcName}:{lineno}): {message}",
"style": "{",
},
},
"handlers": {
"stream": {
"class": "logging.StreamHandler",
"formatter": "stream_verbose" if verbose else "stream_nonverbose",
"level": "INFO" if verbose else "WARNING",
},
"file": {
"class": "logging.FileHandler",
"formatter": "file",
"filename": str(pipx.constants.pipx_log_file),
"encoding": "utf-8",
"level": "DEBUG",
},
},
"loggers": {"pipx": {"handlers": ["stream", "file"], "level": "DEBUG"}},
"incremental": False,
}
logging.config.dictConfig(logging_config)
def setup(args: argparse.Namespace) -> None:
if "version" in args and args.version:
print_version()
sys.exit(0)
setup_logging("verbose" in args and args.verbose)
logger.debug(f"{time.strftime('%Y-%m-%d %H:%M:%S')}")
logger.debug(f"{' '.join(sys.argv)}")
logger.info(f"pipx version is {__version__}")
logger.info(f"Default python interpreter is {repr(DEFAULT_PYTHON)}")
mkdir(constants.PIPX_LOCAL_VENVS)
mkdir(constants.LOCAL_BIN_DIR)
mkdir(constants.PIPX_VENV_CACHEDIR)
rmdir(constants.PIPX_TRASH_DIR, False)
old_pipx_venv_location = constants.PIPX_LOCAL_VENVS / "pipx-app"
if old_pipx_venv_location.exists():
logger.warning(
pipx_wrap(
f"""
{hazard} A virtual environment for pipx was detected at
{str(old_pipx_venv_location)}. The 'pipx-app' package has been
renamed back to 'pipx'
(https://github.com/pypa/pipx/issues/82).
""",
subsequent_indent=" " * 4,
)
)
def check_args(parsed_pipx_args: argparse.Namespace) -> None:
if parsed_pipx_args.command == "run":
# we manually discard a first -- because using nargs=argparse.REMAINDER
# will not do it automatically
if parsed_pipx_args.app_with_args and parsed_pipx_args.app_with_args[0] == "--":
parsed_pipx_args.app_with_args.pop(0)
# since we would like app to be required but not in a separate argparse
# add_argument, we implement our own missing required arg error
if not parsed_pipx_args.app_with_args:
parsed_pipx_args.subparser.error(
"the following arguments are required: app"
)
def cli() -> ExitCode:
"""Entry point from command line"""
try:
hide_cursor()
parser = get_command_parser()
argcomplete.autocomplete(parser)
parsed_pipx_args = parser.parse_args()
setup(parsed_pipx_args)
check_args(parsed_pipx_args)
if not parsed_pipx_args.command:
parser.print_help()
return ExitCode(1)
return run_pipx_command(parsed_pipx_args)
except PipxError as e:
print(str(e), file=sys.stderr)
logger.debug(f"PipxError: {e}", exc_info=True)
return ExitCode(1)
except KeyboardInterrupt:
return ExitCode(1)
except Exception:
logger.debug("Uncaught Exception:", exc_info=True)
raise
finally:
logger.debug("pipx finished.")
show_cursor()
if __name__ == "__main__":
sys.exit(cli())
|
#!/usr/bin/env python
# coding: utf-8
"""Download and parse Tanakh from <http://mechon-mamre.org/>.
The text is based on the [Aleppo Codex][1].
[1]: https://en.wikipedia.org/wiki/Aleppo_Codex
Each book is in a separate HTML file (e.g., `c01.htm`) and contains navigation
and textual data.
The relevant structure is:
```html
<BODY>
<H1>...</H1>
<P>
<B>...,...</B> ...
</P>
</BODY>
```
Notes:
- verses are newline-delimited
- `<H1>` Hebrew book name
- `<B>` comma-separated Hebrew numbering of chapter and verse
- for multipart volumes (e.g., Samuel, Kings) also contains the part number
- `<BIG>`, `<SMALL>`, `<SUP>` around specific letter (we keep)
- `<A...>...</A>` links to notes (we ignore)
- `<BR>` within the text indicates a line break (we replace with a space)
- `{...}<BR>` indicates `pe` break (we ignore)
- `{...}` indicates `samekh` break (we ignore)
- `(...)` indicates the qere (we keep)
- the unvowelized previous word is the ketiv (we ignore)
"""
# native
from functools import partial
from multiprocessing import Queue
from pathlib import Path
from typing import List
import os
import re
# lib
from tqdm import tqdm
# pkg
from . import parse_args, download_unzip, Msg, queuer, spawn_processes, save_database
from .. import tokens as T, grammar
BOOK_NAMES = {
"בראשית": "Genesis",
"שמות": "Exodus",
"ויקרא": "Leviticus",
"במדבר": "Numbers",
"דברים": "Deuteronomy",
#
"יהושוע": "Joshua",
"שופטים": "Judges",
"שמואל א": "I Samuel",
"שמואל ב": "II Samuel",
"מלכים א": "I Kings",
"מלכים ב": "II Kings",
"ישעיהו": "Isaiah",
"ירמיהו": "Jeremiah",
"יחזקאל": "Ezekiel",
"הושע": "Hosea",
"יואל": "Joel",
"עמוס": "Amos",
"עובדיה": "Obadiah",
"יונה": "Jonah",
"מיכה": "Micah",
"נחום": "Nahum",
"חבקוק": "Habakkuk",
"צפניה": "Zephaniah",
"חגיי": "Haggai",
"זכריה": "Zechariah",
"מלאכי": "Malachi",
#
"תהילים": "Psalms",
"משלי": "Proverbs",
"איוב": "Job",
"שיר השירים": "Song of Songs",
"רות": "Ruth",
"איכה": "Lamentations",
"קוהלת": "Ecclesiastes",
"אסתר": "Esther",
"דנייאל": "Daniel",
"עזרא / נחמיה ע": "Ezra",
"עזרא / נחמיה נ": "Nehemiah",
"דברי הימים א": "I Chronicles",
"דברי הימים ב": "II Chronicles",
}
def count_words(lock, pos: int, read_q: Queue, write_q: Queue):
"""Count words in a book."""
# pylint: disable=too-many-locals
tqdm.set_lock(lock)
re_remove = re.compile(
r"</?P>|</?BIG>|</?SMALL>|</?SUP>|<A[^>]+>(.*)</A>|\{.\}|\(|\)"
)
re_name = re.compile(r"<H1>(.*)</H1>")
re_ref = re.compile(r"<B>(.*)</B>")
for msg in queuer(read_q):
result = {"books": [], "words": {}}
book = Path(msg.data)
text = book.read_text()
# book_num = int(book.stem[1:], 10)
book_name = re_name.search(text)[1]
book_num = 0
en_name = ""
# result["books"].append(
# dict(id=book_num, name=book_name, corpus="mechon-mamre.org")
# )
save_ref = ""
desc = f"{os.getpid()} COUNT {book_name:<15}"
for line in tqdm(text.split("\n"), desc=desc, position=pos):
line = re_remove.sub("", line).replace("<BR>", " ").strip()
if save_ref:
ref, save_ref = save_ref, ""
else:
if not line or not line.startswith("<B>"):
continue
ref = re_ref.search(line)[1].replace(" ׆", "")
if "-" in ref:
ref, save_ref = ref.split("-")
save_ref = f'{ref.split(',')[0]},{save_ref}'
ref = f"{book_name} {ref}"
he_name, ref = ref.rsplit(" ", 1)
tmp_name = BOOK_NAMES[he_name]
if tmp_name != en_name:
en_name = tmp_name
book_num = list(BOOK_NAMES).index(he_name) + 1
result["books"].append(
dict(id=book_num, name=en_name, corpus="mechon-mamre.org")
)
chapter, verse = ref.split(",")
chapter, verse = grammar.gematria(chapter), grammar.gematria(verse)
line = re_ref.sub("", line) # reference removed
line = line.replace(T.PUNCTUATION_MAQAF, T.PUNCTUATION_MAQAF + " ")
for raw in line.split():
clean = T.strip(raw)
if not clean:
continue
if clean in result["words"]:
result["words"][clean]["freq"] += 1
else:
ref = f"{en_name} {chapter}:{verse}"
result["words"][clean] = dict(
book_id=book_num, freq=1, ref=ref, raw=raw
)
write_q.put(Msg("SAVE", result))
def list_books(read_q: Queue, folder: Path):
"""Enqueue paths of books to parse."""
for path in sorted(folder.iterdir()):
read_q.put(Msg("COUNT", path))
def main(argv: List[str] = None):
"""Parse texts from <http://mechon-mamre.org>.
Usage: mechon_mamre_org.py [download <folder> | -i <PATH>] [-n COUNT]
Options:
download <folder> download HTML files to <folder>
--index, -i PATH HTML folder [default: text/mechon-mamre.org]
--cpus, -n NUM number of CPUs to use; at least 2 [default: all]
"""
args = parse_args(main.__doc__ or "", argv)
num_readers = args["num_readers"]
num_writers = args["num_writers"]
if args["download"]:
url = "http://mechon-mamre.org/htmlzips/ct005.zip"
folder = Path(args["<folder>"]).resolve()
pattern = re.compile(r"c/ct/c[0-9]{2}.htm")
folder = download_unzip(url, folder, pattern)
else:
folder = Path(args["--index"]).resolve()
init_fn = partial(list_books, folder=folder)
spawn_processes(init_fn, count_words, save_database, num_readers, num_writers)
if __name__ == "__main__": # pragma: no cover
main()
| #!/usr/bin/env python
# coding: utf-8
"""Download and parse Tanakh from <http://mechon-mamre.org/>.
The text is based on the [Aleppo Codex][1].
[1]: https://en.wikipedia.org/wiki/Aleppo_Codex
Each book is in a separate HTML file (e.g., `c01.htm`) and contains navigation
and textual data.
The relevant structure is:
```html
<BODY>
<H1>...</H1>
<P>
<B>...,...</B> ...
</P>
</BODY>
```
Notes:
- verses are newline-delimited
- `<H1>` Hebrew book name
- `<B>` comma-separated Hebrew numbering of chapter and verse
- for multipart volumes (e.g., Samuel, Kings) also contains the part number
- `<BIG>`, `<SMALL>`, `<SUP>` around specific letter (we keep)
- `<A...>...</A>` links to notes (we ignore)
- `<BR>` within the text indicates a line break (we replace with a space)
- `{...}<BR>` indicates `pe` break (we ignore)
- `{...}` indicates `samekh` break (we ignore)
- `(...)` indicates the qere (we keep)
- the unvowelized previous word is the ketiv (we ignore)
"""
# native
from functools import partial
from multiprocessing import Queue
from pathlib import Path
from typing import List
import os
import re
# lib
from tqdm import tqdm
# pkg
from . import parse_args, download_unzip, Msg, queuer, spawn_processes, save_database
from .. import tokens as T, grammar
BOOK_NAMES = {
"בראשית": "Genesis",
"שמות": "Exodus",
"ויקרא": "Leviticus",
"במדבר": "Numbers",
"דברים": "Deuteronomy",
#
"יהושוע": "Joshua",
"שופטים": "Judges",
"שמואל א": "I Samuel",
"שמואל ב": "II Samuel",
"מלכים א": "I Kings",
"מלכים ב": "II Kings",
"ישעיהו": "Isaiah",
"ירמיהו": "Jeremiah",
"יחזקאל": "Ezekiel",
"הושע": "Hosea",
"יואל": "Joel",
"עמוס": "Amos",
"עובדיה": "Obadiah",
"יונה": "Jonah",
"מיכה": "Micah",
"נחום": "Nahum",
"חבקוק": "Habakkuk",
"צפניה": "Zephaniah",
"חגיי": "Haggai",
"זכריה": "Zechariah",
"מלאכי": "Malachi",
#
"תהילים": "Psalms",
"משלי": "Proverbs",
"איוב": "Job",
"שיר השירים": "Song of Songs",
"רות": "Ruth",
"איכה": "Lamentations",
"קוהלת": "Ecclesiastes",
"אסתר": "Esther",
"דנייאל": "Daniel",
"עזרא / נחמיה ע": "Ezra",
"עזרא / נחמיה נ": "Nehemiah",
"דברי הימים א": "I Chronicles",
"דברי הימים ב": "II Chronicles",
}
def count_words(lock, pos: int, read_q: Queue, write_q: Queue):
"""Count words in a book."""
# pylint: disable=too-many-locals
tqdm.set_lock(lock)
re_remove = re.compile(
r"</?P>|</?BIG>|</?SMALL>|</?SUP>|<A[^>]+>(.*)</A>|\{.\}|\(|\)"
)
re_name = re.compile(r"<H1>(.*)</H1>")
re_ref = re.compile(r"<B>(.*)</B>")
for msg in queuer(read_q):
result = {"books": [], "words": {}}
book = Path(msg.data)
text = book.read_text()
# book_num = int(book.stem[1:], 10)
book_name = re_name.search(text)[1]
book_num = 0
en_name = ""
# result["books"].append(
# dict(id=book_num, name=book_name, corpus="mechon-mamre.org")
# )
save_ref = ""
desc = f"{os.getpid()} COUNT {book_name:<15}"
for line in tqdm(text.split("\n"), desc=desc, position=pos):
line = re_remove.sub("", line).replace("<BR>", " ").strip()
if save_ref:
ref, save_ref = save_ref, ""
else:
if not line or not line.startswith("<B>"):
continue
ref = re_ref.search(line)[1].replace(" ׆", "")
if "-" in ref:
ref, save_ref = ref.split("-")
save_ref = f'{ref.split(",")[0]},{save_ref}'
ref = f"{book_name} {ref}"
he_name, ref = ref.rsplit(" ", 1)
tmp_name = BOOK_NAMES[he_name]
if tmp_name != en_name:
en_name = tmp_name
book_num = list(BOOK_NAMES).index(he_name) + 1
result["books"].append(
dict(id=book_num, name=en_name, corpus="mechon-mamre.org")
)
chapter, verse = ref.split(",")
chapter, verse = grammar.gematria(chapter), grammar.gematria(verse)
line = re_ref.sub("", line) # reference removed
line = line.replace(T.PUNCTUATION_MAQAF, T.PUNCTUATION_MAQAF + " ")
for raw in line.split():
clean = T.strip(raw)
if not clean:
continue
if clean in result["words"]:
result["words"][clean]["freq"] += 1
else:
ref = f"{en_name} {chapter}:{verse}"
result["words"][clean] = dict(
book_id=book_num, freq=1, ref=ref, raw=raw
)
write_q.put(Msg("SAVE", result))
def list_books(read_q: Queue, folder: Path):
"""Enqueue paths of books to parse."""
for path in sorted(folder.iterdir()):
read_q.put(Msg("COUNT", path))
def main(argv: List[str] = None):
"""Parse texts from <http://mechon-mamre.org>.
Usage: mechon_mamre_org.py [download <folder> | -i <PATH>] [-n COUNT]
Options:
download <folder> download HTML files to <folder>
--index, -i PATH HTML folder [default: text/mechon-mamre.org]
--cpus, -n NUM number of CPUs to use; at least 2 [default: all]
"""
args = parse_args(main.__doc__ or "", argv)
num_readers = args["num_readers"]
num_writers = args["num_writers"]
if args["download"]:
url = "http://mechon-mamre.org/htmlzips/ct005.zip"
folder = Path(args["<folder>"]).resolve()
pattern = re.compile(r"c/ct/c[0-9]{2}.htm")
folder = download_unzip(url, folder, pattern)
else:
folder = Path(args["--index"]).resolve()
init_fn = partial(list_books, folder=folder)
spawn_processes(init_fn, count_words, save_database, num_readers, num_writers)
if __name__ == "__main__": # pragma: no cover
main()
|
import json
import time
import os
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
def process_9gag(args):
fetched_memes = []
errors = 0
# for i in tqdm(range(args.))
pass
def process_me_dot_me(args):
pass
def templates_imgflip(args):
args.source_url = "https://imgflip.com/memetemplates"
fetched_templates = []
errors = 0
for i in tqdm(range(args.from_page, args.pages + 1)):
print(f"Requesting: {args.source_url}?page={i}")
response = requests.get(f"{args.source_url}?page={i}")
print(response)
if response.status_code != 200:
print("Bad response")
break
body = BeautifulSoup(response.text, 'html.parser')
templates = body.findAll("div", {"class": "mt-box"})
print(len(templates))
for template in templates:
try:
template_url = "https://"+template.find('img', {"class": "shadow"})['src'][2:]
template_id, template_format = os.path.splitext(template_url.split("/")[-1])
template_title = template.find("h3", {"class": "mt-title"}).find("a")
template_title = "" if template_title is None else template_title.text
template_data = {
"id": template_id,
"format": template_format,
"website": "imgflip",
"url": template_url,
"title": template_title
}
fetched_templates.append(template_data)
except:
errors += 1
# time.sleep(args.delay)
print(f"Fetched: {len(fetched_templates)} templates. Found {errors} error(s).")
return fetched_templates
def process_imgflip(args):
'''
https://gist.github.com/WalterSimoncini/defca6de456bb168ada303085358bf0a
'''
fetched_memes = []
errors = 0
for i in tqdm(range(args.from_page, args.pages + 1)):
# print(f"Processing page {i}")
response = requests.get(f"{args.source_url}?page={i}")
body = BeautifulSoup(response.text, 'html.parser')
if response.status_code != 200:
# print("Something went wrong!")
break # Something went wrong (e.g. page limit)
memes = body.findAll("div", {"class": "base-unit clearfix"})
for meme in memes:
if "not-safe-for-work images" in str(meme):
continue # NSFW memes are available only to logged in users
try:
meme_url = 'https://'+meme.find("img", {"class": "base-img"})["src"][2:]
meme_id, meme_format = os.path.splitext(meme_url.split("/")[-1])
# Handle anonymous authors
meme_author = meme.find("a", {"class": "u-username"})
meme_author = "anonymous" if meme_author is None else meme_author.text
# Handle empty titles
meme_title = meme.find("h2", {"class": "base-unit-title"}).find("a")
meme_title = "" if meme_title is None else meme_title.text
meme_text = meme.find("img", {"class": "base-img"})["alt"]
meme_text = meme_text.split("|")[1].strip()
meme_data = {
"id": meme_id,
"format": meme_format,
"website": "imgflip",
"url": meme_url,
"author": meme_author,
"title": meme_title,
"text": meme_text.lower()
}
fetched_memes.append(meme_data)
except:
errors += 1
time.sleep(args.delay)
print(f"Fetched: {len(fetched_memes)} memes. Found {errors} error(s).")
return fetched_memes
if __name__ == '__main__':
import argparse
ap = argparse.ArgumentParser()
# ap.add_argument("--source_url", default="https://imgflip.com/tag/programming", help="Memes list url (e.g. https://imgflip.com/meme/Bird-Box)", type=str)
ap.add_argument("--tag", required=True, type=str)#default=['programming', 'artificial intelligence', 'computer'], type=list)
ap.add_argument("--from_page", default=1, help="Initial page", type=int)
ap.add_argument("--pages", default=44, help="Maximum page number to be scraped", type=int)
ap.add_argument("--delay", default=2, help="Delay between page loads (seconds)", type=int)
ap.add_argument("-o", "--output", default="templates.tsv")
args = ap.parse_args()
# category = args.source_url.split("/")[-1].replace("-", " ")
# Get the data
data = {}
# for tag in args.tags:
print(f"Processing tag: {args.tag}")
# Get the data
# args.source_url = f"https://imgflip.com/tag/{args.tag.replace(" ", "+")}"
# data = process_imgflip(args)
# args.source_url = f"https://ww.9gag.com/search/?query={args.tag.replace(" ", "+")}"
# data = process_9gag(args)
data = templates_imgflip(args)
# Create a pd.DataFrame and save (append to existing .tsv)
df = pd.DataFrame(data)
print(df.head(20))
df.to_csv(args.output, sep='\t', index=False, mode='a') |
import json
import time
import os
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
def process_9gag(args):
fetched_memes = []
errors = 0
# for i in tqdm(range(args.))
pass
def process_me_dot_me(args):
pass
def templates_imgflip(args):
args.source_url = "https://imgflip.com/memetemplates"
fetched_templates = []
errors = 0
for i in tqdm(range(args.from_page, args.pages + 1)):
print(f"Requesting: {args.source_url}?page={i}")
response = requests.get(f"{args.source_url}?page={i}")
print(response)
if response.status_code != 200:
print("Bad response")
break
body = BeautifulSoup(response.text, 'html.parser')
templates = body.findAll("div", {"class": "mt-box"})
print(len(templates))
for template in templates:
try:
template_url = "https://"+template.find('img', {"class": "shadow"})['src'][2:]
template_id, template_format = os.path.splitext(template_url.split("/")[-1])
template_title = template.find("h3", {"class": "mt-title"}).find("a")
template_title = "" if template_title is None else template_title.text
template_data = {
"id": template_id,
"format": template_format,
"website": "imgflip",
"url": template_url,
"title": template_title
}
fetched_templates.append(template_data)
except:
errors += 1
# time.sleep(args.delay)
print(f"Fetched: {len(fetched_templates)} templates. Found {errors} error(s).")
return fetched_templates
def process_imgflip(args):
'''
https://gist.github.com/WalterSimoncini/defca6de456bb168ada303085358bf0a
'''
fetched_memes = []
errors = 0
for i in tqdm(range(args.from_page, args.pages + 1)):
# print(f"Processing page {i}")
response = requests.get(f"{args.source_url}?page={i}")
body = BeautifulSoup(response.text, 'html.parser')
if response.status_code != 200:
# print("Something went wrong!")
break # Something went wrong (e.g. page limit)
memes = body.findAll("div", {"class": "base-unit clearfix"})
for meme in memes:
if "not-safe-for-work images" in str(meme):
continue # NSFW memes are available only to logged in users
try:
meme_url = 'https://'+meme.find("img", {"class": "base-img"})["src"][2:]
meme_id, meme_format = os.path.splitext(meme_url.split("/")[-1])
# Handle anonymous authors
meme_author = meme.find("a", {"class": "u-username"})
meme_author = "anonymous" if meme_author is None else meme_author.text
# Handle empty titles
meme_title = meme.find("h2", {"class": "base-unit-title"}).find("a")
meme_title = "" if meme_title is None else meme_title.text
meme_text = meme.find("img", {"class": "base-img"})["alt"]
meme_text = meme_text.split("|")[1].strip()
meme_data = {
"id": meme_id,
"format": meme_format,
"website": "imgflip",
"url": meme_url,
"author": meme_author,
"title": meme_title,
"text": meme_text.lower()
}
fetched_memes.append(meme_data)
except:
errors += 1
time.sleep(args.delay)
print(f"Fetched: {len(fetched_memes)} memes. Found {errors} error(s).")
return fetched_memes
if __name__ == '__main__':
import argparse
ap = argparse.ArgumentParser()
# ap.add_argument("--source_url", default="https://imgflip.com/tag/programming", help="Memes list url (e.g. https://imgflip.com/meme/Bird-Box)", type=str)
ap.add_argument("--tag", required=True, type=str)#default=['programming', 'artificial intelligence', 'computer'], type=list)
ap.add_argument("--from_page", default=1, help="Initial page", type=int)
ap.add_argument("--pages", default=44, help="Maximum page number to be scraped", type=int)
ap.add_argument("--delay", default=2, help="Delay between page loads (seconds)", type=int)
ap.add_argument("-o", "--output", default="templates.tsv")
args = ap.parse_args()
# category = args.source_url.split("/")[-1].replace("-", " ")
# Get the data
data = {}
# for tag in args.tags:
print(f"Processing tag: {args.tag}")
# Get the data
# args.source_url = f"https://imgflip.com/tag/{args.tag.replace(' ', '+')}"
# data = process_imgflip(args)
# args.source_url = f"https://ww.9gag.com/search/?query={args.tag.replace(' ', '+')}"
# data = process_9gag(args)
data = templates_imgflip(args)
# Create a pd.DataFrame and save (append to existing .tsv)
df = pd.DataFrame(data)
print(df.head(20))
df.to_csv(args.output, sep='\t', index=False, mode='a') |
import datetime
import zlib
from collections import OrderedDict
from copy import deepcopy
from decimal import Decimal
from django.db.models import Q
from clients.models import Document, DispensaryReg, Card
from directions.models import Napravleniya, Issledovaniya, ParaclinicResult, IstochnikiFinansirovaniya, PersonContract
from directory.models import Researches
from laboratory import utils
from laboratory.utils import strdate
from api.stationar.stationar_func import hosp_get_data_direction, check_transfer_epicrisis
from api.stationar.sql_func import get_result_value_iss
from utils.dates import normalize_date
def get_all_doc(docs: [Document]):
"""
    Returns the nested ``documents`` dict. Document data: passport: number, serial; polis: number; SNILS: number.
"""
documents = {
'passport': {'num': "", 'serial': "", 'date_start': "", 'issued': ""},
'polis': {'serial': "", 'num': "", 'issued': ""},
'snils': {'num': ""},
'bc': {'num': "", 'serial': "", 'date_start': "", 'issued': ""},
}
for d in docs:
if d.document_type.title == "СНИЛС":
documents["snils"]["num"] = d.number
if d.document_type.title == 'Паспорт гражданина РФ':
documents["passport"]["num"] = d.number
documents["passport"]["serial"] = d.serial
documents["passport"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["polis"]["issued"] = d.who_give
if d.document_type.title == 'Полис ОМС':
documents["polis"]["num"] = d.number
documents["polis"]["serial"] = d.serial
documents["polis"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["polis"]["issued"] = d.who_give
if d.document_type.title == 'Свидетельство о рождении':
documents["bc"]["num"] = d.number
documents["bc"]["serial"] = d.serial
documents["bc"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["bc"]["issued"] = d.who_give
return documents
def get_coast_from_issledovanie(dir_research_loc):
"""
    When printing the payment sheet, returns the prices recorded in Issledovaniya.
    Based on the price list and services, returns, for the payment sheet:
    {
        direction: {service: [price, discount, quantity], service: [price, discount, quantity]},
        direction: {service: [price, discount, quantity], service: [price, discount, quantity]},
        direction: {service: [price, discount, quantity], service: [price, discount, quantity]},
    }
    An illustrative example of this shape is sketched right after this function.
"""
d = tuple()
if type(dir_research_loc) == dict:
dict_coast = {}
for k, v in dir_research_loc.items():
d = {
r: [
s,
d,
h,
]
for r, s, d, h in Issledovaniya.objects.filter(napravleniye=k, research__in=v, coast__isnull=False).values_list('research_id', 'coast', 'discount', 'how_many')
}
dict_coast[k] = d
return dict_coast
else:
return 0
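# Editor's illustrative sketch (not part of the original code): the nested shape returned
# by get_coast_from_issledovanie(), as described in its docstring. The ids and prices are
# invented placeholders; in real data the outer keys are Napravleniya objects, not ints.
_EXAMPLE_COAST_BY_DIRECTION = {
    1001: {501: [Decimal("350.00"), 0, 1], 502: [Decimal("1200.00"), -10, 2]},
    1002: {503: [Decimal("80.00"), 0, 1]},
}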
def get_research_by_dir(dir_temp_l):
"""
    Build a dict: {direction1: [service1, service2, service3], direction2: [service1], ...}
:param dir_temp_l:
:return:
"""
dict_research_dir = {}
for i in dir_temp_l:
        # If at least one service in this direction has already been saved, the direction is skipped
if any([x.doc_save is not None for x in Issledovaniya.objects.filter(napravleniye=i)]):
continue
else:
research_l = [x.research_id for x in Issledovaniya.objects.filter(napravleniye=i)]
dict_research_dir[i] = research_l
return dict_research_dir
def get_final_data(research_price_loc):
"""
    Build the final data structure: service code, direction, service, price, discount/markup, discounted price, quantity, amount.
    The direction is listed only once for a group of rows (see the illustrative example after this function).
"""
total_sum = 0
tmp_data = []
# is_discount = False
z = ""
x = ""
tmp_napr = []
for k, v in research_price_loc.items():
# research_attr = ([s for s in Researches.objects.filter(id__in=v.keys()).values_list('id', 'title')])
research_attr = [s for s in Researches.objects.filter(id__in=v.keys()).values_list('id', 'title', 'internal_code')]
research_attr_list = [list(z) for z in research_attr]
for research_id, research_coast in v.items():
h = []
for j in research_attr_list:
if research_id == j[0]:
if k != 0:
h.append(k)
k = 0
else:
h.append("")
h.extend([j[2], j[1]])
h.append("{:,.2f}".format(research_coast[0]).replace(",", " "))
coast_with_discount = research_coast[0] + (research_coast[0] * research_coast[1] / 100)
if research_coast[1] != 0:
z = "+"
if research_coast[1] > 0:
x = "+"
else:
x = ""
h.append(x + str(research_coast[1]))
h.append("{:,.2f}".format(coast_with_discount).replace(",", " "))
h.append(research_coast[2])
research_sum = coast_with_discount * research_coast[2]
h.append("{:,.2f}".format(research_sum).replace(",", " "))
h[0], h[1] = h[1], h[0]
total_sum += research_sum
research_attr_list.remove(j)
tmp_data.append(h)
if h[1]:
tmp_napr.append(h[1])
if h:
break
res_lis = []
for t in tmp_data:
tmp_d = list(map(str, t))
res_lis.append(tmp_d)
total_data = []
total_data.append(res_lis)
total_data.append("{:,.2f}".format(total_sum).replace(",", " "))
if z == "+":
total_data.append("is_discount")
else:
total_data.append("no_discount")
total_data.append(tmp_napr)
    # total_data: [data structure, total sum, discount flag, direction numbers]
return total_data
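# Editor's illustrative sketch (not part of the original code): a made-up example of the
# total_data value returned by get_final_data(), matching the comment above —
# [rows, total sum, discount flag, direction numbers]. Each row is
# [service code, direction, service, price, discount, discounted price, quantity, amount],
# with the direction filled in only on the first row of its group.
_EXAMPLE_FINAL_DATA = [
    [
        ["A01.01", "1001", "Blood test", "350.00", "0", "350.00", "1", "350.00"],
        ["A02.07", "", "ECG", "1 200.00", "-10", "1 080.00", "1", "1 080.00"],
    ],
    "1 430.00",
    "is_discount",
    ["1001"],
]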
def get_data_individual(card_object):
"""
    Takes a Card object as input and
    returns a dict of attributes for the card and the individual (person).
:param card_object:
:return:
"""
ind_data = {'ind': card_object.individual}
ind_data['age'] = ind_data['ind'].age()
ind_data['doc'] = Document.objects.filter(individual=ind_data['ind'], is_active=True)
ind_data['fio'] = ind_data['ind'].fio()
ind_data['born'] = ind_data['ind'].bd()
ind_data['main_address'] = "____________________________________________________" if not card_object.main_address else card_object.main_address
ind_data['fact_address'] = "____________________________________________________" if not card_object.fact_address else card_object.fact_address
# document_passport = "Паспорт РФ"
ind_documents = get_all_doc(ind_data['doc'])
ind_data['passport_num'] = ind_documents['passport']['num']
ind_data['passport_serial'] = ind_documents['passport']['serial']
ind_data['passport_date_start'] = ind_documents['passport']['date_start']
ind_data['passport_issued'] = ind_documents['passport']['issued']
ind_data['bc_num'] = ind_documents['bc']['num']
ind_data['bc_serial'] = ind_documents['bc']['serial']
ind_data['bc_date_start'] = ind_documents['bc']['date_start']
ind_data['bc_issued'] = ind_documents['bc']['issued']
ind_data['snils'] = ind_documents["snils"]["num"]
ind_data['oms'] = {}
ind_data['oms']['polis_num'] = ind_documents["polis"]["num"]
ind_data['oms']['polis_serial'] = ind_documents["polis"]["serial"]
# ind_data['oms']['polis_date_start'] = ind_documents["polis"]["date_start"]
ind_data['oms']['polis_issued'] = ind_documents["polis"]["issued"]
return ind_data
def form_notfound():
"""
    Fallback form used when form types/functions are misconfigured or wrong arguments are passed.
:return:
"""
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import mm
from copy import deepcopy
from reportlab.lib.enums import TA_CENTER
import os.path
from io import BytesIO
from laboratory.settings import FONTS_FOLDER
buffer = BytesIO()
pdfmetrics.registerFont(TTFont('PTAstraSerifBold', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Bold.ttf')))
pdfmetrics.registerFont(TTFont('PTAstraSerifReg', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Regular.ttf')))
doc = SimpleDocTemplate(
buffer, pagesize=A4, leftMargin=10 * mm, rightMargin=10 * mm, topMargin=10 * mm, bottomMargin=10 * mm, allowSplitting=1, title="Форма {}".format("Паспорт здоровья")
)
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
style.fontName = "PTAstraSerifBold"
style.fontSize = 16
style.leading = 15
styleBold = deepcopy(style)
styleBold.fontName = "PTAstraSerifBold"
styleCenter = deepcopy(style)
styleCenter.alignment = TA_CENTER
styleCenterBold = deepcopy(styleBold)
styleCenterBold.alignment = TA_CENTER
objs = [
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">Ая-я-я-я-я-я-я-яй!</font>', styleCenter),
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">Что-то Администраторы не верно настроили с типами форм! </font>', styleCenter),
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">А-та-та-та им!</font>', styleCenter),
]
doc.build(objs)
pdf = buffer.getvalue()
buffer.close()
return pdf
def get_doc_results(doc_obj, date_result):
"""
    Returns the doctor's results for a specific date. ***** Never change this to a date range.
"""
doc_results = Issledovaniya.objects.filter(doc_confirmation=doc_obj, time_confirmation__date=date_result, napravleniye__isnull=False)
return doc_results
def get_finaldata_talon(doc_result_obj):
"""
    Input: the doctor's results for a specific date.
    Output: a data structure like {'No.': 'number', 'Patient name': 'Ivanov Ivan Ivanovich', 'Card no. (type)': '1212 (L2)',
    'Policy data': 'number;Company', 'visit purpose': '(code)', 'primary visit': 'No',
    'ICD diagnosis': '(code)', 'First time': 'Yes', 'Visit result': 'code',
    'Outcome': 'code', 'D-registered': 'codes', 'D-taken': 'codes', 'D-removed': 'codes',
    'removal reason': '', 'Oncology suspicion': 'Yes'}
    (An illustrative sketch of the returned structure follows this function.)
"""
fin_oms = 'омс'
fin_dms = 'дмс'
fin_pay = 'платно'
fin_medexam = 'медосмотр'
fin_disp = 'диспансеризация'
fin_budget = 'бюджет'
fin_source = OrderedDict()
fin_source[fin_oms] = OrderedDict()
fin_source[fin_pay] = OrderedDict()
fin_source[fin_dms] = OrderedDict()
fin_source[fin_medexam] = OrderedDict()
fin_source[fin_disp] = OrderedDict()
fin_source[fin_budget] = OrderedDict()
fin_source_iss = OrderedDict()
fin_source_iss[fin_oms] = OrderedDict()
fin_source_iss[fin_pay] = OrderedDict()
fin_source_iss[fin_dms] = OrderedDict()
fin_source_iss[fin_medexam] = OrderedDict()
fin_source_iss[fin_disp] = OrderedDict()
fin_source_iss[fin_budget] = OrderedDict()
oms_count = 0
dms_count = 0
pay_count = 0
disp_count = 0
medexam_count = 0
budget_count = 0
empty = '-'
today = utils.timezone.now().date()
for i in doc_result_obj:
napr_attr = Napravleniya.get_attr(i.napravleniye)
temp_dict = OrderedDict()
temp_dict_iss = OrderedDict()
dict_fsourcce = ''
order = ''
if napr_attr['istochnik_f'] in ['омс', '']:
oms_count += 1
dict_fsourcce = fin_oms
order = oms_count
elif napr_attr['istochnik_f'] == 'платно':
pay_count += 1
dict_fsourcce = fin_pay
order = pay_count
elif napr_attr['istochnik_f'] == 'дмс':
dms_count += 1
dict_fsourcce = fin_dms
order = dms_count
elif napr_attr['istochnik_f'] == 'медосмотр':
medexam_count += 1
dict_fsourcce = fin_medexam
order = medexam_count
elif napr_attr['istochnik_f'] == 'диспансеризация':
disp_count += 1
dict_fsourcce = fin_disp
order = disp_count
elif napr_attr['istochnik_f'] == 'бюджет':
budget_count += 1
dict_fsourcce = fin_budget
order = budget_count
else:
continue
polis_who_giv = empty if not napr_attr['polis_who_give'] else napr_attr['polis_who_give']
polis_num = empty if not napr_attr['polis_n'] else napr_attr['polis_n']
temp_dict['client_fio'] = napr_attr['client_fio'] + ', ' + napr_attr['client_bd']
temp_dict['med_exam'] = strdate(i.medical_examination) + ', ' + str(i.napravleniye_id)
num_poliklinika = f'\n({napr_attr['number_poliklinika']})' if napr_attr['number_poliklinika'] else ''
temp_dict['card_num'] = napr_attr['card_num'] + num_poliklinika
temp_dict['polis_data'] = '<u>' + polis_num + '</u>' + '<br/>' + polis_who_giv
temp_dict_iss = temp_dict.copy()
temp_dict_iss['research_code'] = i.research.code
temp_dict_iss['research_title'] = i.research.title
temp_dict['purpose'] = empty if not i.purpose else i.purpose
temp_dict['is_first_reception'] = 'Да' if i.research.is_first_reception else 'Нет'
temp_dict['diagnos'] = empty if not i.diagnos else i.diagnos
temp_dict['first_time'] = 'Да' if i.first_time else 'Нет'
temp_dict['result_reception'] = empty if not i.result_reception else i.result_reception
temp_dict['outcome_illness'] = empty if not i.outcome_illness else i.outcome_illness
        # Dispensary registration ("D-account") data
disp = DispensaryReg.objects.filter(Q(card=i.napravleniye.client), (Q(date_end=None) | Q(date_end=today)))
d_stand = []
d_take = []
d_stop = []
d_whystop = []
if disp:
for d in disp:
if d.date_end is None and d.date_start != i.time_confirmation.date():
date_start = strdate(d.date_start, short_year=True)
date_start = normalize_date(date_start)
d_stand.append(f'{d.diagnos}<br/>{date_start}<br/>')
elif d.date_end is None and d.date_start == i.time_confirmation.date():
d_take.append(d.diagnos)
elif d.date_end == i.time_confirmation.date():
d_stop.append(d.diagnos)
d_whystop.append(d.why_stop)
temp_dict['d_stand'] = '' if not d_stand else ''.join(d_stand)
temp_dict['d_take'] = '' if not d_take else ', '.join(d_take)
temp_dict['d_stop'] = '' if not d_stand else ', '.join(d_stop)
temp_dict['d_whystop'] = '' if not d_whystop else ', '.join(d_whystop)
temp_dict['maybe_onco'] = 'Да' if i.maybe_onco else ''
fin_source[dict_fsourcce].update({order: temp_dict})
fin_source_iss[dict_fsourcce].update({order: temp_dict_iss})
if Issledovaniya.objects.filter(parent=i).exists():
temp_dict_iss_copy = deepcopy(temp_dict_iss)
add_iss_dict = OrderedDict()
for iss in Issledovaniya.objects.filter(parent=i):
temp_dict_iss_copy['research_code'] = iss.research.code
temp_dict_iss_copy['research_title'] = iss.research.title
order = Decimal(str(order)) + Decimal('0.1')
add_iss_dict[order] = deepcopy(temp_dict_iss_copy)
fin_source_iss[dict_fsourcce].update(add_iss_dict)
return [fin_source, fin_source_iss]
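# Editor's illustrative sketch (not part of the original code): get_finaldata_talon()
# returns a pair [fin_source, fin_source_iss]; each maps a funding source to an ordered
# mapping of visit order -> row dict, as sketched in the docstring above. The values
# below are invented placeholders.
_EXAMPLE_TALON_STRUCTURE = OrderedDict(
    {
        'омс': OrderedDict(
            {1: {'client_fio': 'Иванов Иван Иванович, 01.01.1980', 'card_num': '1212', 'purpose': '-'}}
        )
    }
)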
def primary_reception_get_data(hosp_first_num):
    # Get data from the primary admission record
hosp_primary_receptions = hosp_get_data_direction(hosp_first_num, site_type=0, type_service='None', level=2)
hosp_primary_iss, primary_research_id = None, None
if hosp_primary_receptions:
hosp_primary_iss = hosp_primary_receptions[0].get('iss')
primary_research_id = hosp_primary_receptions[0].get('research_id')
titles_field = [
'Дата поступления',
'Время поступления',
'Виды транспортировки',
'Побочное действие лекарств (непереносимость)',
'Кем направлен больной',
'Вид госпитализации',
'Время через, которое доставлен после начала заболевания, получения травмы',
'Диагноз направившего учреждения',
'Диагноз при поступлении',
'Госпитализирован по поводу данного заболевания',
'Общее состояние',
'Социальный статус',
'Категория льготности',
'Всего госпитализаций',
'Вид травмы',
'Группа крови',
'Резус принадлежность',
'Вес',
]
list_values = None
if titles_field and hosp_primary_receptions:
list_values = get_result_value_iss(hosp_primary_iss, primary_research_id, titles_field)
date_entered_value, time_entered_value, type_transport, medicament_allergy = '', '', '', ''
who_directed, plan_hospital, extra_hospital, type_hospital = '', '', '', ''
time_start_ill, diagnos_who_directed, diagnos_entered = '', '', ''
what_time_hospitalized, state, social_status, category_privilege = '', '', '', ''
all_hospitalized, type_trauma, blood_group, resus_factor = '', '', '', ''
weight = ''
if list_values:
for i in list_values:
if i[3] == 'Дата поступления':
date_entered_value = normalize_date(i[2])
continue
if i[3] == 'Время поступления':
time_entered_value = i[2]
continue
if i[3] == 'Виды транспортировки':
type_transport = i[2]
continue
if i[3] == 'Побочное действие лекарств (непереносимость)':
medicament_allergy = i[2]
continue
if i[3] == 'Кем направлен больной':
who_directed = i[2]
continue
if i[3] == 'Вид госпитализации':
type_hospital = i[2]
if type_hospital.lower() == 'экстренная':
time_start_ill_obj = get_result_value_iss(hosp_primary_iss, primary_research_id, ['Время через, которое доставлен после начала заболевания, получения травмы'])
if time_start_ill_obj:
time_start_ill = time_start_ill_obj[0][2]
extra_hospital = "Да"
plan_hospital = "Нет"
else:
plan_hospital = "Да"
extra_hospital = "Нет"
time_start_ill = ''
if i[3] == 'Диагноз направившего учреждения':
diagnos_who_directed = i[2]
continue
if i[3] == 'Диагноз при поступлении':
diagnos_entered = i[2]
continue
if i[3] == 'Госпитализирован по поводу данного заболевания':
what_time_hospitalized = i[2]
continue
if i[3] == 'Общее состояние':
state = i[2]
continue
if i[3] == 'Социальный статус':
social_status = i[2]
continue
if i[3] == 'Категория льготности':
category_privilege = i[2]
continue
if i[3] == 'Всего госпитализаций':
all_hospitalized = i[2]
continue
if i[3] == 'Вид травмы':
type_trauma = i[2]
continue
if i[3] == 'Группа крови':
blood_group = i[2]
continue
if i[3] == 'Резус принадлежность':
resus_factor = i[2]
continue
if i[3] == 'Вес':
weight = i[2]
continue
return {
'date_entered_value': date_entered_value,
'time_entered_value': time_entered_value,
'type_transport': type_transport,
'medicament_allergy': medicament_allergy,
'who_directed': who_directed,
'plan_hospital': plan_hospital,
'extra_hospital': extra_hospital,
'type_hospital': type_hospital,
'time_start_ill': time_start_ill,
'diagnos_who_directed': diagnos_who_directed,
'diagnos_entered': diagnos_entered,
'what_time_hospitalized': what_time_hospitalized,
'state': state,
'social_status': social_status,
'category_privilege': category_privilege,
'all_hospitalized': all_hospitalized,
'type_trauma': type_trauma,
'blood_group': blood_group,
'resus_factor': resus_factor,
'weight': weight,
}
def hosp_extract_get_data(hosp_last_num):
# Fetch data from the discharge summary
hosp_extract = hosp_get_data_direction(hosp_last_num, site_type=7, type_service='None', level=2)
if not hosp_extract:
return {}
hosp_extract_iss, extract_research_id, doc_confirm = None, None, None
if hosp_extract:
hosp_extract_iss = hosp_extract[0].get('iss')
doc_confirm = Issledovaniya.objects.get(pk=hosp_extract_iss).doc_confirmation
if not doc_confirm:
return {}
extract_research_id = hosp_extract[0].get('research_id')
titles_field = [
'Время выписки',
'Дата выписки',
'Основной диагноз (описание)',
'Основной диагноз по МКБ',
'Осложнение основного диагноза (описание)',
'Осложнение основного диагноза по МКБ',
'Сопутствующий диагноз (описание)',
'Сопутствующий диагноз по МКБ',
'Исход госпитализации',
'Результат госпитализации',
'Проведено койко-дней',
'Заведующий отделением',
'Палата №',
]
list_values = None
if titles_field and hosp_extract:
list_values = get_result_value_iss(hosp_extract_iss, extract_research_id, titles_field)
date_value, time_value = '', ''
final_diagnos, other_diagnos, near_diagnos, outcome, final_diagnos_mkb, other_diagnos_mkb, near_diagnos_mkb = '', '', '', '', '', '', ''
days_count, result_hospital, manager_depart, room_num = '', '', '', ''
if list_values:
for i in list_values:
if i[3] == 'Дата выписки':
date_value = normalize_date(i[2])
if i[3] == 'Время выписки':
time_value = i[2]
if i[3] == 'Основной диагноз (описание)':
final_diagnos = i[2]
if i[3] == 'Осложнение основного диагноза (описание)':
other_diagnos = i[2]
if i[3] == 'Сопутствующий диагноз (описание)':
near_diagnos = i[2]
if i[3] == 'Исход госпитализации':
outcome = i[2]
if i[3] == 'Результат госпитализации':
result_hospital = i[2]
if i[3] == 'Основной диагноз по МКБ':
final_diagnos_mkb = str(i[2])
if i[3] == 'Осложнение основного диагноза по МКБ':
other_diagnos_mkb = str(i[2]).split(' ')[0]
if i[3] == 'Сопутствующий диагноз по МКБ':
near_diagnos_mkb = str(i[2]).split(' ')[0]
if i[3] == 'Проведено койко-дней':
days_count = str(i[2])
if i[3] == 'Заведующий отделением':
manager_depart = str(i[2])
if i[3] == 'Палата №':
room_num = str(i[2])
doc_fio = doc_confirm.get_fio()
return {
'date_value': date_value,
'time_value': time_value,
'final_diagnos': final_diagnos,
'other_diagnos': other_diagnos,
'near_diagnos': near_diagnos,
'outcome': outcome,
'final_diagnos_mkb': final_diagnos_mkb,
'other_diagnos_mkb': other_diagnos_mkb,
'near_diagnos_mkb': near_diagnos_mkb,
'extract_iss': hosp_extract_iss,
'days_count': days_count,
'result_hospital': result_hospital,
'doc_fio': doc_fio,
'manager_depart': manager_depart,
'room_num': room_num,
}
def hosp_get_clinical_diagnos(hosp_obj):
clinic_diagnos = ''
tmp_clinic_diagnos = []
for i in hosp_obj:
hosp_diagnostic_epicris = hosp_get_data_direction(i['direction'], site_type=6, type_service='None', level=2)
day_entries_iss = []
day_entries_research_id = None
if hosp_diagnostic_epicris:
for i in hosp_diagnostic_epicris:
# find the diagnostic epicrises
if i.get('research_title').lower().find('диагностич') != -1:
day_entries_iss.append(i.get('iss'))
if not day_entries_research_id:
day_entries_research_id = i.get('research_id')
titles_field = ['Диагноз клинический', 'Дата установления диагноза', 'Основной', 'Осложнение', 'Сопутствующий']
list_values = []
if titles_field and day_entries_iss:
for i in day_entries_iss:
list_values.append(get_result_value_iss(i, day_entries_research_id, titles_field))
if list_values:
for fields in list_values:
clinical_data = {'clinic_diagnos': '', 'main_diagnos': '', 'other_diagnos': '', 'near_diagnos': '', 'date': ''}
for i in fields:
if i[3] == 'Дата установления диагноза':
clinical_data['date'] = normalize_date(i[2])
continue
if i[3] == 'Диагноз клинический':
clinical_data['clinic_diagnos'] = i[2]
continue
if i[3] == 'Основной':
clinical_data['main_diagnos'] = f"Основной: {i[2]}"
continue
if i[3] == 'Осложнение':
clinical_data['other_diagnos'] = f"; Осложнение: {i[2]}"
continue
if i[3] == 'Сопутствующий':
clinical_data['near_diagnos'] = f"; Сопутствующий: {i[2]}"
continue
if clinical_data['date'] and (clinical_data['clinic_diagnos'] or clinical_data['main_diagnos']):
tmp_clinic_diagnos.append(clinical_data.copy())
for i in tmp_clinic_diagnos:
clinic_diagnos = f"{clinic_diagnos}{i["clinic_diagnos"]} <u>{i["main_diagnos"]}</u>{i["other_diagnos"]}{i["near_diagnos"]}; дата: {i["date"]}<br/>"
return clinic_diagnos
def hosp_get_transfers_data(hosp_nums_obj):
titles_field = ['Дата перевода', 'Время перевода']
date_transfer_value, time_transfer_value = '', ''
transfers = []
list_values = None
for i in range(len(hosp_nums_obj)):
if i == 0:
continue
transfer_research_title = hosp_nums_obj[i].get('research_title')
# get the epicrisis titled "перевод..." (transfer) for the current hosp_dir
from_hosp_dir_transfer = hosp_nums_obj[i - 1].get('direction')
epicrisis_data = hosp_get_data_direction(from_hosp_dir_transfer, site_type=6, type_service='None', level=2)
if epicrisis_data:
result_check = check_transfer_epicrisis(epicrisis_data)
if result_check['iss']:
iss_transfer, research_id_transfer = result_check['iss'], result_check['research_id']
if titles_field and iss_transfer:
list_values = get_result_value_iss(iss_transfer, research_id_transfer, titles_field)
else:
continue
if list_values:
for i in list_values:
if i[3] == 'Дата перевода':
date_transfer_value = normalize_date(i[2])
continue
if i[3] == 'Время перевода':
time_transfer_value = i[2]
continue
transfers.append({'transfer_research_title': transfer_research_title, 'date_transfer_value': date_transfer_value, 'time_transfer_value': time_transfer_value})
return transfers
def hosp_patient_movement(hosp_nums_obj):
titles_field = ['Дата перевода']
patient_movement = []
list_values = None
for i in range(len(hosp_nums_obj)):
date_out, diagnos_mkb, doc_confirm_code = '', '', ''
bed_profile_research_title = hosp_nums_obj[i].get('research_title')
hosp_dir = hosp_nums_obj[i].get('direction')
primary_reception_data = primary_reception_get_data(hosp_dir)
hosp_extract_data = hosp_get_data_direction(hosp_dir, site_type=7, type_service='None', level=2)
if hosp_extract_data:
extract_data = hosp_extract_get_data(hosp_dir)
if extract_data:
date_out = extract_data['date_value']
diagnos_mkb = extract_data['final_diagnos_mkb']
doc_confirm_code = (
None if not Issledovaniya.objects.get(pk=extract_data['extract_iss']) else Issledovaniya.objects.get(pk=extract_data['extract_iss']).doc_confirmation.personal_code
)
list_values = None
epicrisis_data = hosp_get_data_direction(hosp_dir, site_type=6, type_service='None', level=2)
if epicrisis_data:
result_check = check_transfer_epicrisis(epicrisis_data)
if result_check['iss']:
iss_transfer, research_id_transfer = result_check['iss'], result_check['research_id']
if titles_field and iss_transfer:
list_values = get_result_value_iss(iss_transfer, research_id_transfer, titles_field)
if list_values:
for i in list_values:
if i[3] == 'Дата перевода':
date_out = normalize_date(i[2])
if i[3] == 'Клинический диагноз по МКБ':
diagnos_mkb = i[2]
patient_movement.append(
{
'bed_profile_research_title': bed_profile_research_title,
'date_entered_value': primary_reception_data['date_entered_value'],
'date_oute': date_out,
'diagnos_mkb': diagnos_mkb,
'doc_confirm_code': doc_confirm_code,
}
)
return patient_movement
def hosp_get_operation_data(num_dir):
hosp_operation = hosp_get_data_direction(num_dir, site_type=3, type_service='None', level=-1)
operation_iss_research = []
if hosp_operation:
for i in hosp_operation:
# find protocols of the operation/manipulation type
if (i.get('research_title').lower().find('операци') != -1 or i.get('research_title').lower().find('манипул') != -1) and i['date_confirm']:
operation_iss_research.append({'iss': i['iss'], 'research': i['research_id']})
titles_field = [
'Название операции',
'Дата проведения',
'Время начала',
'Время окончания',
'Метод обезболивания',
'Осложнения',
'Код операции',
'Код манипуляции',
'Оперативное вмешательство',
'Код анестезиолога',
'Категория сложности',
'Диагноз после оперативного лечения',
'МКБ 10',
'Оперировал',
'Код хирурга',
]
list_values = []
operation_result = []
if titles_field and operation_iss_research and hosp_operation:
for i in operation_iss_research:
list_values.append(get_result_value_iss(i['iss'], i['research'], titles_field))
operation_result = []
for fields_operation in list_values:
pk_iss_operation = fields_operation[0][1]
operation_data = {
'name_operation': '',
'date': '',
'time_start': '',
'time_end': '',
'anesthesia method': '',
'complications': '',
'doc_fio': '',
'code_operation': '',
'code_doc_anesthesia': '',
'plan_operation': '',
'diagnos_after_operation': '',
'mkb10': '',
'category_difficult': '',
'doc_code': '',
}
iss_obj = Issledovaniya.objects.filter(pk=pk_iss_operation).first()
if not iss_obj.time_confirmation:
continue
operation_data['doc_fio'] = iss_obj.doc_confirmation_fio
operation_data['doc_code'] = None if not Issledovaniya.objects.get(pk=pk_iss_operation) else Issledovaniya.objects.get(pk=pk_iss_operation).doc_confirmation.personal_code
if operation_data['doc_code'] == 0:
operation_data['doc_code'] = ''
category_difficult = ''
for field in fields_operation:
if field[3] == 'Название операции':
operation_data['name_operation'] = field[2]
continue
if field[3] == 'Дата проведения':
operation_data['date'] = normalize_date(field[2])
continue
if field[3] == 'Время начала':
operation_data['time_start'] = field[2]
continue
if field[3] == 'Время окончания':
operation_data['time_end'] = field[2]
continue
if field[3] == 'Метод обезболивания':
operation_data['anesthesia method'] = field[2]
continue
if field[3] == 'Осложнения':
operation_data['complications'] = field[2]
continue
if field[3] == 'Код операции':
operation_data['code_operation'] = field[2]
continue
if field[3] == 'Код манипуляции':
operation_data['code_operation'] = field[2]
continue
if field[3] == 'Код анестезиолога':
operation_data['code_doc_anesthesia'] = field[2]
continue
if field[3] == 'Оперативное вмешательство':
operation_data['plan_operation'] = field[2]
continue
if field[3] == 'Категория сложности':
operation_data['category_difficult'] = f"Сложность - {field[2]}"
continue
if field[3] == 'Диагноз после оперативного лечения':
operation_data['diagnos_after_operation'] = field[2]
continue
if field[3] == 'МКБ 10':
operation_data['mkb10'] = field[2]
continue
if field[3] == 'Оперировал':
if field[2]:
operation_data['doc_fio'] = field[2]
continue
if field[3] == 'Код хирурга':
if field[2]:
operation_data['doc_code'] = field[2]
continue
operation_data['name_operation'] = f"{operation_data["name_operation"]} {category_difficult}"
operation_result.append(operation_data.copy())
return operation_result
def closed_bl(hosp_num_dir):
"""
Are there confirmed sick-leave protocols containing the word "закрытие" (closure) among the sick-leave (Б/Л) records?
"""
result_bl = hosp_get_data_direction(hosp_num_dir, site_type=8, type_service='None', level=-1)
num, who_get, who_care, start_date, end_date, start_work = '', '', '', '', '', ''
for i in result_bl:
if i['date_confirm'] is None:
continue
if i["research_title"].lower().find('закрыт') != -1:
data_closed_bl = ParaclinicResult.objects.filter(issledovaniye=i['iss'])
for b in data_closed_bl:
if b.field.title == "Лист нетрудоспособности №":
num = b.value
continue
if b.field.title == "Выдан кому":
who_get = b.value
continue
if b.field.title == "по уходу за":
who_care = b.value
continue
if b.field.title == "выдан с":
start_date = b.value
if start_date.find('-') != -1:
start_date = normalize_date(start_date)
continue
if b.field.title == "по":
end_date = b.value
if end_date.find('-') != -1:
end_date = normalize_date(end_date)
continue
if b.field.title == "к труду":
start_work = b.value
if start_work.find('-') != -1:
start_work = normalize_date(start_work)
continue
return {'is_closed': True, 'num': num, 'who_get': who_get, 'who_care': who_care, 'start_date': start_date, 'end_date': end_date, 'start_work': start_work}
return {'is_closed': False, 'num': num, 'who_get': who_get, 'who_care': who_care, 'start_date': start_date, 'end_date': end_date, 'start_work': start_work}
def create_contract(ind_dir, card_pk):
ind_card = Card.objects.get(pk=card_pk)
# exec_person = request_data['user'].doctorprofile.get_full_fio()
patient_data = ind_card.get_data_individual()
p_agent = None
if ind_card.who_is_agent:
p_agent = getattr(ind_card, ind_card.who_is_agent)
p_payer = None
if ind_card.payer:
p_payer = ind_card.payer
# Get all funding sources whose title is "Платно" (paid)
ist_f = list(IstochnikiFinansirovaniya.objects.values_list('id').filter(title__exact='Платно'))
ist_f_list = [int(x[0]) for x in ist_f]
napr = Napravleniya.objects.filter(pk__in=ind_dir)
dir_temp = []
# Check that all referrals belong to the same card and have the "Платно" (paid) funding source
num_contract_set = set()
for n in napr:
if n.istochnik_f_id in ist_f_list and n.client == ind_card:
num_contract_set.add(n.num_contract)
dir_temp.append(n.pk)
if not dir_temp:
return False
# get the SERVICES for the referrals (filtered by "paid", with no saved examinations) from Issledovaniya
research_direction = get_research_by_dir(dir_temp)
if not research_direction:
return False
# get the price for each referral/service from Issledovaniya
research_price = get_coast_from_issledovanie(research_direction)
# Build the final data structure
result_data = get_final_data(research_price)
sum_research = result_data[1]
# Checksum calculation: the sequence of referral numbers + the total sum (monetary cost)
qr_napr = ','.join([str(elem) for elem in result_data[3]])
protect_val = sum_research.replace(' ', '')
bstr = (qr_napr + protect_val).encode()
protect_code = str(zlib.crc32(bstr))
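# A minimal sketch of the checksum above, with hypothetical values:
#   result_data[3] = [101, 102]  ->  qr_napr = "101,102"
#   sum_research   = "1 500.00"  ->  protect_val = "1500.00"
#   protect_code   = str(zlib.crc32("101,1021500.00".encode()))
# zlib.crc32 returns an unsigned 32-bit integer, so the same referrals and total
# always reproduce the same protect_code string for later comparison.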
today = utils.current_time()
date_now1 = datetime.datetime.strftime(today, '%y%m%d%H%M%S%f')[:-3]
date_now_str = str(ind_card.pk) + str(date_now1)
# Check whether the contract number and the checksum are already recorded on the referrals.
# Rewrite the contract number if the set of referrals contains a None value, if the referrals
# carry different contracts, or if the checksums differ: in any of those cases overwrite everything.
num_contract_set = set()
protect_code_set = set()
napr_end = Napravleniya.objects.filter(id__in=result_data[3])
for n in napr_end:
num_contract_set.add(n.num_contract)
protect_code_set.add(n.protect_code)
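# Note on the condition below (logic unchanged): because "and" binds tighter than "or", it reads as
#   (len(num_contract_set) == 1 and None in num_contract_set) or (None in protect_code_set)
# i.e. rewrite when the only recorded contract number is None, or when any referral has no protect_code.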
if len(num_contract_set) == 1 and None in num_contract_set or None in protect_code_set:
PersonContract.person_contract_save(date_now_str, protect_code, qr_napr, sum_research, patient_data['fio'], ind_card, p_payer, p_agent)
Napravleniya.objects.filter(id__in=result_data[3]).update(num_contract=date_now_str, protect_code=protect_code)
return PersonContract.pk
| import datetime
import zlib
from collections import OrderedDict
from copy import deepcopy
from decimal import Decimal
from django.db.models import Q
from clients.models import Document, DispensaryReg, Card
from directions.models import Napravleniya, Issledovaniya, ParaclinicResult, IstochnikiFinansirovaniya, PersonContract
from directory.models import Researches
from laboratory import utils
from laboratory.utils import strdate
from api.stationar.stationar_func import hosp_get_data_direction, check_transfer_epicrisis
from api.stationar.sql_func import get_result_value_iss
from utils.dates import normalize_date
def get_all_doc(docs: [Document]):
"""
Returns a dict of dicts "documents". Document data: passport: number, serial; polis (insurance policy): number; SNILS: number.
"""
documents = {
'passport': {'num': "", 'serial': "", 'date_start': "", 'issued': ""},
'polis': {'serial': "", 'num': "", 'issued': ""},
'snils': {'num': ""},
'bc': {'num': "", 'serial': "", 'date_start': "", 'issued': ""},
}
for d in docs:
if d.document_type.title == "СНИЛС":
documents["snils"]["num"] = d.number
if d.document_type.title == 'Паспорт гражданина РФ':
documents["passport"]["num"] = d.number
documents["passport"]["serial"] = d.serial
documents["passport"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["polis"]["issued"] = d.who_give
if d.document_type.title == 'Полис ОМС':
documents["polis"]["num"] = d.number
documents["polis"]["serial"] = d.serial
documents["polis"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["polis"]["issued"] = d.who_give
if d.document_type.title == 'Свидетельство о рождении':
documents["bc"]["num"] = d.number
documents["bc"]["serial"] = d.serial
documents["bc"]["date_start"] = "" if not d.date_start else d.date_start.strftime("%d.%m.%Y")
documents["bc"]["issued"] = d.who_give
return documents
def get_coast_from_issledovanie(dir_research_loc):
"""
When printing the payment sheet, returns the prices recorded in Issledovaniya.
Based on the price list and the services, returns for the payment sheet {
referral: {service: [price, discount, quantity], service: [price, discount, quantity]},
referral: {service: [price, discount, quantity], service: [price, discount, quantity]},
}
"""
d = tuple()
if type(dir_research_loc) == dict:
dict_coast = {}
for k, v in dir_research_loc.items():
d = {
r: [
s,
d,
h,
]
for r, s, d, h in Issledovaniya.objects.filter(napravleniye=k, research__in=v, coast__isnull=False).values_list('research_id', 'coast', 'discount', 'how_many')
}
dict_coast[k] = d
return dict_coast
else:
return 0
def get_research_by_dir(dir_temp_l):
"""
Get a dict: {referral1: [service1, service2, service3], referral2: [service1], ...}
:param dir_temp_l:
:return:
"""
dict_research_dir = {}
for i in dir_temp_l:
# If at least one service on the referral has already been saved, the referral is skipped
if any([x.doc_save is not None for x in Issledovaniya.objects.filter(napravleniye=i)]):
continue
else:
research_l = [x.research_id for x in Issledovaniya.objects.filter(napravleniye=i)]
dict_research_dir[i] = research_l
return dict_research_dir
def get_final_data(research_price_loc):
"""
Build the final data structure: service code, referral, service, price, discount/markup, price with discount, quantity, total.
The referral number is written only once for a group of rows.
"""
total_sum = 0
tmp_data = []
# is_discount = False
z = ""
x = ""
tmp_napr = []
for k, v in research_price_loc.items():
# research_attr = ([s for s in Researches.objects.filter(id__in=v.keys()).values_list('id', 'title')])
research_attr = [s for s in Researches.objects.filter(id__in=v.keys()).values_list('id', 'title', 'internal_code')]
research_attr_list = [list(z) for z in research_attr]
for research_id, research_coast in v.items():
h = []
for j in research_attr_list:
if research_id == j[0]:
if k != 0:
h.append(k)
k = 0
else:
h.append("")
h.extend([j[2], j[1]])
h.append("{:,.2f}".format(research_coast[0]).replace(",", " "))
coast_with_discount = research_coast[0] + (research_coast[0] * research_coast[1] / 100)
if research_coast[1] != 0:
z = "+"
if research_coast[1] > 0:
x = "+"
else:
x = ""
h.append(x + str(research_coast[1]))
h.append("{:,.2f}".format(coast_with_discount).replace(",", " "))
h.append(research_coast[2])
research_sum = coast_with_discount * research_coast[2]
h.append("{:,.2f}".format(research_sum).replace(",", " "))
h[0], h[1] = h[1], h[0]
total_sum += research_sum
research_attr_list.remove(j)
tmp_data.append(h)
if h[1]:
tmp_napr.append(h[1])
if h:
break
res_lis = []
for t in tmp_data:
tmp_d = list(map(str, t))
res_lis.append(tmp_d)
total_data = []
total_data.append(res_lis)
total_data.append("{:,.2f}".format(total_sum).replace(",", " "))
if z == "+":
total_data.append("is_discount")
else:
total_data.append("no_discount")
total_data.append(tmp_napr)
# total_data: [rows data structure, total sum, discount flag, referral numbers]
return total_data
def get_data_individual(card_object):
"""
Takes a Card object as input.
Returns a dict of attributes of the card and the individual (person).
:param card_object:
:return:
"""
ind_data = {'ind': card_object.individual}
ind_data['age'] = ind_data['ind'].age()
ind_data['doc'] = Document.objects.filter(individual=ind_data['ind'], is_active=True)
ind_data['fio'] = ind_data['ind'].fio()
ind_data['born'] = ind_data['ind'].bd()
ind_data['main_address'] = "____________________________________________________" if not card_object.main_address else card_object.main_address
ind_data['fact_address'] = "____________________________________________________" if not card_object.fact_address else card_object.fact_address
# document_passport = "Паспорт РФ"
ind_documents = get_all_doc(ind_data['doc'])
ind_data['passport_num'] = ind_documents['passport']['num']
ind_data['passport_serial'] = ind_documents['passport']['serial']
ind_data['passport_date_start'] = ind_documents['passport']['date_start']
ind_data['passport_issued'] = ind_documents['passport']['issued']
ind_data['bc_num'] = ind_documents['bc']['num']
ind_data['bc_serial'] = ind_documents['bc']['serial']
ind_data['bc_date_start'] = ind_documents['bc']['date_start']
ind_data['bc_issued'] = ind_documents['bc']['issued']
ind_data['snils'] = ind_documents["snils"]["num"]
ind_data['oms'] = {}
ind_data['oms']['polis_num'] = ind_documents["polis"]["num"]
ind_data['oms']['polis_serial'] = ind_documents["polis"]["serial"]
# ind_data['oms']['polis_date_start'] = ind_documents["polis"]["date_start"]
ind_data['oms']['polis_issued'] = ind_documents["polis"]["issued"]
return ind_data
def form_notfound():
"""
Fallback form shown when form types/functions are misconfigured or invalid arguments are passed.
:return:
"""
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import mm
from copy import deepcopy
from reportlab.lib.enums import TA_CENTER
import os.path
from io import BytesIO
from laboratory.settings import FONTS_FOLDER
buffer = BytesIO()
pdfmetrics.registerFont(TTFont('PTAstraSerifBold', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Bold.ttf')))
pdfmetrics.registerFont(TTFont('PTAstraSerifReg', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Regular.ttf')))
doc = SimpleDocTemplate(
buffer, pagesize=A4, leftMargin=10 * mm, rightMargin=10 * mm, topMargin=10 * mm, bottomMargin=10 * mm, allowSplitting=1, title="Форма {}".format("Паспорт здоровья")
)
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
style.fontName = "PTAstraSerifBold"
style.fontSize = 16
style.leading = 15
styleBold = deepcopy(style)
styleBold.fontName = "PTAstraSerifBold"
styleCenter = deepcopy(style)
styleCenter.alignment = TA_CENTER
styleCenterBold = deepcopy(styleBold)
styleCenterBold.alignment = TA_CENTER
objs = [
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">Ая-я-я-я-я-я-я-яй!</font>', styleCenter),
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">Что-то Администраторы не верно настроили с типами форм! </font>', styleCenter),
Spacer(1, 3 * mm),
Paragraph('<font face="PTAstraSerifBold">А-та-та-та им!</font>', styleCenter),
]
doc.build(objs)
pdf = buffer.getvalue()
buffer.close()
return pdf
def get_doc_results(doc_obj, date_result):
"""
Returns the doctor's results for a specific date. ***** Never, under any circumstances, change this to a date range.
"""
doc_results = Issledovaniya.objects.filter(doc_confirmation=doc_obj, time_confirmation__date=date_result, napravleniye__isnull=False)
return doc_results
def get_finaldata_talon(doc_result_obj):
"""
Input: the doctor's results for a specific date.
Output: a data structure {'No.': 'number', 'Patient full name': 'Ivanov Ivan Ivanovich', 'Card No. (type)': '1212 (L2)',
                          'Policy data': 'number;Company', 'visit purpose': '(code)', 'primary visit': 'No',
                          'ICD diagnosis': '(code)', 'First time': 'Yes', 'Visit result': 'code',
                          'Outcome': 'code', 'D-registered': 'codes', 'D-taken': 'codes', 'D-removed': 'codes',
                          'removal reason': '', 'Oncology suspicion': 'Yes'}
"""
fin_oms = 'омс'
fin_dms = 'дмс'
fin_pay = 'платно'
fin_medexam = 'медосмотр'
fin_disp = 'диспансеризация'
fin_budget = 'бюджет'
fin_source = OrderedDict()
fin_source[fin_oms] = OrderedDict()
fin_source[fin_pay] = OrderedDict()
fin_source[fin_dms] = OrderedDict()
fin_source[fin_medexam] = OrderedDict()
fin_source[fin_disp] = OrderedDict()
fin_source[fin_budget] = OrderedDict()
fin_source_iss = OrderedDict()
fin_source_iss[fin_oms] = OrderedDict()
fin_source_iss[fin_pay] = OrderedDict()
fin_source_iss[fin_dms] = OrderedDict()
fin_source_iss[fin_medexam] = OrderedDict()
fin_source_iss[fin_disp] = OrderedDict()
fin_source_iss[fin_budget] = OrderedDict()
oms_count = 0
dms_count = 0
pay_count = 0
disp_count = 0
medexam_count = 0
budget_count = 0
empty = '-'
today = utils.timezone.now().date()
for i in doc_result_obj:
napr_attr = Napravleniya.get_attr(i.napravleniye)
temp_dict = OrderedDict()
temp_dict_iss = OrderedDict()
dict_fsourcce = ''
order = ''
if napr_attr['istochnik_f'] in ['омс', '']:
oms_count += 1
dict_fsourcce = fin_oms
order = oms_count
elif napr_attr['istochnik_f'] == 'платно':
pay_count += 1
dict_fsourcce = fin_pay
order = pay_count
elif napr_attr['istochnik_f'] == 'дмс':
dms_count += 1
dict_fsourcce = fin_dms
order = dms_count
elif napr_attr['istochnik_f'] == 'медосмотр':
medexam_count += 1
dict_fsourcce = fin_medexam
order = medexam_count
elif napr_attr['istochnik_f'] == 'диспансеризация':
disp_count += 1
dict_fsourcce = fin_disp
order = disp_count
elif napr_attr['istochnik_f'] == 'бюджет':
budget_count += 1
dict_fsourcce = fin_budget
order = budget_count
else:
continue
polis_who_giv = empty if not napr_attr['polis_who_give'] else napr_attr['polis_who_give']
polis_num = empty if not napr_attr['polis_n'] else napr_attr['polis_n']
temp_dict['client_fio'] = napr_attr['client_fio'] + ', ' + napr_attr['client_bd']
temp_dict['med_exam'] = strdate(i.medical_examination) + ', ' + str(i.napravleniye_id)
num_poliklinika = f'\n({napr_attr["number_poliklinika"]})' if napr_attr['number_poliklinika'] else ''
temp_dict['card_num'] = napr_attr['card_num'] + num_poliklinika
temp_dict['polis_data'] = '<u>' + polis_num + '</u>' + '<br/>' + polis_who_giv
temp_dict_iss = temp_dict.copy()
temp_dict_iss['research_code'] = i.research.code
temp_dict_iss['research_title'] = i.research.title
temp_dict['purpose'] = empty if not i.purpose else i.purpose
temp_dict['is_first_reception'] = 'Да' if i.research.is_first_reception else 'Нет'
temp_dict['diagnos'] = empty if not i.diagnos else i.diagnos
temp_dict['first_time'] = 'Да' if i.first_time else 'Нет'
temp_dict['result_reception'] = empty if not i.result_reception else i.result_reception
temp_dict['outcome_illness'] = empty if not i.outcome_illness else i.outcome_illness
# Dispensary registration ("Д-учет") data
disp = DispensaryReg.objects.filter(Q(card=i.napravleniye.client), (Q(date_end=None) | Q(date_end=today)))
d_stand = []
d_take = []
d_stop = []
d_whystop = []
if disp:
for d in disp:
if d.date_end is None and d.date_start != i.time_confirmation.date():
date_start = strdate(d.date_start, short_year=True)
date_start = normalize_date(date_start)
d_stand.append(f'{d.diagnos}<br/>{date_start}<br/>')
elif d.date_end is None and d.date_start == i.time_confirmation.date():
d_take.append(d.diagnos)
elif d.date_end == i.time_confirmation.date():
d_stop.append(d.diagnos)
d_whystop.append(d.why_stop)
temp_dict['d_stand'] = '' if not d_stand else ''.join(d_stand)
temp_dict['d_take'] = '' if not d_take else ', '.join(d_take)
temp_dict['d_stop'] = '' if not d_stand else ', '.join(d_stop)
temp_dict['d_whystop'] = '' if not d_whystop else ', '.join(d_whystop)
temp_dict['maybe_onco'] = 'Да' if i.maybe_onco else ''
fin_source[dict_fsourcce].update({order: temp_dict})
fin_source_iss[dict_fsourcce].update({order: temp_dict_iss})
if Issledovaniya.objects.filter(parent=i).exists():
temp_dict_iss_copy = deepcopy(temp_dict_iss)
add_iss_dict = OrderedDict()
for iss in Issledovaniya.objects.filter(parent=i):
temp_dict_iss_copy['research_code'] = iss.research.code
temp_dict_iss_copy['research_title'] = iss.research.title
order = Decimal(str(order)) + Decimal('0.1')
add_iss_dict[order] = deepcopy(temp_dict_iss_copy)
fin_source_iss[dict_fsourcce].update(add_iss_dict)
return [fin_source, fin_source_iss]
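# Hypothetical shape of the returned pair (illustrative):
#   fin_source     = {'омс': {1: {...talon row...}, 2: {...}}, 'платно': {...}, 'дмс': {...}, ...}
#   fin_source_iss = the same keys, where each row additionally carries research_code/research_title
#                    and child studies are appended under fractional order keys (Decimal 1.1, 1.2, ...).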
def primary_reception_get_data(hosp_first_num):
# Fetch data from the primary admission record
hosp_primary_receptions = hosp_get_data_direction(hosp_first_num, site_type=0, type_service='None', level=2)
hosp_primary_iss, primary_research_id = None, None
if hosp_primary_receptions:
hosp_primary_iss = hosp_primary_receptions[0].get('iss')
primary_research_id = hosp_primary_receptions[0].get('research_id')
titles_field = [
'Дата поступления',
'Время поступления',
'Виды транспортировки',
'Побочное действие лекарств (непереносимость)',
'Кем направлен больной',
'Вид госпитализации',
'Время через, которое доставлен после начала заболевания, получения травмы',
'Диагноз направившего учреждения',
'Диагноз при поступлении',
'Госпитализирован по поводу данного заболевания',
'Общее состояние',
'Социальный статус',
'Категория льготности',
'Всего госпитализаций',
'Вид травмы',
'Группа крови',
'Резус принадлежность',
'Вес',
]
list_values = None
if titles_field and hosp_primary_receptions:
list_values = get_result_value_iss(hosp_primary_iss, primary_research_id, titles_field)
date_entered_value, time_entered_value, type_transport, medicament_allergy = '', '', '', ''
who_directed, plan_hospital, extra_hospital, type_hospital = '', '', '', ''
time_start_ill, diagnos_who_directed, diagnos_entered = '', '', ''
what_time_hospitalized, state, social_status, category_privilege = '', '', '', ''
all_hospitalized, type_trauma, blood_group, resus_factor = '', '', '', ''
weight = ''
if list_values:
for i in list_values:
if i[3] == 'Дата поступления':
date_entered_value = normalize_date(i[2])
continue
if i[3] == 'Время поступления':
time_entered_value = i[2]
continue
if i[3] == 'Виды транспортировки':
type_transport = i[2]
continue
if i[3] == 'Побочное действие лекарств (непереносимость)':
medicament_allergy = i[2]
continue
if i[3] == 'Кем направлен больной':
who_directed = i[2]
continue
if i[3] == 'Вид госпитализации':
type_hospital = i[2]
if type_hospital.lower() == 'экстренная':
time_start_ill_obj = get_result_value_iss(hosp_primary_iss, primary_research_id, ['Время через, которое доставлен после начала заболевания, получения травмы'])
if time_start_ill_obj:
time_start_ill = time_start_ill_obj[0][2]
extra_hospital = "Да"
plan_hospital = "Нет"
else:
plan_hospital = "Да"
extra_hospital = "Нет"
time_start_ill = ''
if i[3] == 'Диагноз направившего учреждения':
diagnos_who_directed = i[2]
continue
if i[3] == 'Диагноз при поступлении':
diagnos_entered = i[2]
continue
if i[3] == 'Госпитализирован по поводу данного заболевания':
what_time_hospitalized = i[2]
continue
if i[3] == 'Общее состояние':
state = i[2]
continue
if i[3] == 'Социальный статус':
social_status = i[2]
continue
if i[3] == 'Категория льготности':
category_privilege = i[2]
continue
if i[3] == 'Всего госпитализаций':
all_hospitalized = i[2]
continue
if i[3] == 'Вид травмы':
type_trauma = i[2]
continue
if i[3] == 'Группа крови':
blood_group = i[2]
continue
if i[3] == 'Резус принадлежность':
resus_factor = i[2]
continue
if i[3] == 'Вес':
weight = i[2]
continue
return {
'date_entered_value': date_entered_value,
'time_entered_value': time_entered_value,
'type_transport': type_transport,
'medicament_allergy': medicament_allergy,
'who_directed': who_directed,
'plan_hospital': plan_hospital,
'extra_hospital': extra_hospital,
'type_hospital': type_hospital,
'time_start_ill': time_start_ill,
'diagnos_who_directed': diagnos_who_directed,
'diagnos_entered': diagnos_entered,
'what_time_hospitalized': what_time_hospitalized,
'state': state,
'social_status': social_status,
'category_privilege': category_privilege,
'all_hospitalized': all_hospitalized,
'type_trauma': type_trauma,
'blood_group': blood_group,
'resus_factor': resus_factor,
'weight': weight,
}
def hosp_extract_get_data(hosp_last_num):
# Fetch data from the discharge summary
hosp_extract = hosp_get_data_direction(hosp_last_num, site_type=7, type_service='None', level=2)
if not hosp_extract:
return {}
hosp_extract_iss, extract_research_id, doc_confirm = None, None, None
if hosp_extract:
hosp_extract_iss = hosp_extract[0].get('iss')
doc_confirm = Issledovaniya.objects.get(pk=hosp_extract_iss).doc_confirmation
if not doc_confirm:
return {}
extract_research_id = hosp_extract[0].get('research_id')
titles_field = [
'Время выписки',
'Дата выписки',
'Основной диагноз (описание)',
'Основной диагноз по МКБ',
'Осложнение основного диагноза (описание)',
'Осложнение основного диагноза по МКБ',
'Сопутствующий диагноз (описание)',
'Сопутствующий диагноз по МКБ',
'Исход госпитализации',
'Результат госпитализации',
'Проведено койко-дней',
'Заведующий отделением',
'Палата №',
]
list_values = None
if titles_field and hosp_extract:
list_values = get_result_value_iss(hosp_extract_iss, extract_research_id, titles_field)
date_value, time_value = '', ''
final_diagnos, other_diagnos, near_diagnos, outcome, final_diagnos_mkb, other_diagnos_mkb, near_diagnos_mkb = '', '', '', '', '', '', ''
days_count, result_hospital, manager_depart, room_num = '', '', '', ''
if list_values:
for i in list_values:
if i[3] == 'Дата выписки':
date_value = normalize_date(i[2])
if i[3] == 'Время выписки':
time_value = i[2]
if i[3] == 'Основной диагноз (описание)':
final_diagnos = i[2]
if i[3] == 'Осложнение основного диагноза (описание)':
other_diagnos = i[2]
if i[3] == 'Сопутствующий диагноз (описание)':
near_diagnos = i[2]
if i[3] == 'Исход госпитализации':
outcome = i[2]
if i[3] == 'Результат госпитализации':
result_hospital = i[2]
if i[3] == 'Основной диагноз по МКБ':
final_diagnos_mkb = str(i[2])
if i[3] == 'Осложнение основного диагноза по МКБ':
other_diagnos_mkb = str(i[2]).split(' ')[0]
if i[3] == 'Сопутствующий диагноз по МКБ':
near_diagnos_mkb = str(i[2]).split(' ')[0]
if i[3] == 'Проведено койко-дней':
days_count = str(i[2])
if i[3] == 'Заведующий отделением':
manager_depart = str(i[2])
if i[3] == 'Палата №':
room_num = str(i[2])
doc_fio = doc_confirm.get_fio()
return {
'date_value': date_value,
'time_value': time_value,
'final_diagnos': final_diagnos,
'other_diagnos': other_diagnos,
'near_diagnos': near_diagnos,
'outcome': outcome,
'final_diagnos_mkb': final_diagnos_mkb,
'other_diagnos_mkb': other_diagnos_mkb,
'near_diagnos_mkb': near_diagnos_mkb,
'extract_iss': hosp_extract_iss,
'days_count': days_count,
'result_hospital': result_hospital,
'doc_fio': doc_fio,
'manager_depart': manager_depart,
'room_num': room_num,
}
def hosp_get_clinical_diagnos(hosp_obj):
clinic_diagnos = ''
tmp_clinic_diagnos = []
for i in hosp_obj:
hosp_diagnostic_epicris = hosp_get_data_direction(i['direction'], site_type=6, type_service='None', level=2)
day_entries_iss = []
day_entries_research_id = None
if hosp_diagnostic_epicris:
for i in hosp_diagnostic_epicris:
# find the diagnostic epicrises
if i.get('research_title').lower().find('диагностич') != -1:
day_entries_iss.append(i.get('iss'))
if not day_entries_research_id:
day_entries_research_id = i.get('research_id')
titles_field = ['Диагноз клинический', 'Дата установления диагноза', 'Основной', 'Осложнение', 'Сопутствующий']
list_values = []
if titles_field and day_entries_iss:
for i in day_entries_iss:
list_values.append(get_result_value_iss(i, day_entries_research_id, titles_field))
if list_values:
for fields in list_values:
clinical_data = {'clinic_diagnos': '', 'main_diagnos': '', 'other_diagnos': '', 'near_diagnos': '', 'date': ''}
for i in fields:
if i[3] == 'Дата установления диагноза':
clinical_data['date'] = normalize_date(i[2])
continue
if i[3] == 'Диагноз клинический':
clinical_data['clinic_diagnos'] = i[2]
continue
if i[3] == 'Основной':
clinical_data['main_diagnos'] = f"Основной: {i[2]}"
continue
if i[3] == 'Осложнение':
clinical_data['other_diagnos'] = f"; Осложнение: {i[2]}"
continue
if i[3] == 'Сопутствующий':
clinical_data['near_diagnos'] = f"; Сопутствующий: {i[2]}"
continue
if clinical_data['date'] and (clinical_data['clinic_diagnos'] or clinical_data['main_diagnos']):
tmp_clinic_diagnos.append(clinical_data.copy())
for i in tmp_clinic_diagnos:
clinic_diagnos = f"{clinic_diagnos}{i['clinic_diagnos']} <u>{i['main_diagnos']}</u>{i['other_diagnos']}{i['near_diagnos']}; дата: {i['date']}<br/>"
return clinic_diagnos
def hosp_get_transfers_data(hosp_nums_obj):
titles_field = ['Дата перевода', 'Время перевода']
date_transfer_value, time_transfer_value = '', ''
transfers = []
list_values = None
for i in range(len(hosp_nums_obj)):
if i == 0:
continue
transfer_research_title = hosp_nums_obj[i].get('research_title')
# get the epicrisis titled "перевод..." (transfer) for the current hosp_dir
from_hosp_dir_transfer = hosp_nums_obj[i - 1].get('direction')
epicrisis_data = hosp_get_data_direction(from_hosp_dir_transfer, site_type=6, type_service='None', level=2)
if epicrisis_data:
result_check = check_transfer_epicrisis(epicrisis_data)
if result_check['iss']:
iss_transfer, research_id_transfer = result_check['iss'], result_check['research_id']
if titles_field and iss_transfer:
list_values = get_result_value_iss(iss_transfer, research_id_transfer, titles_field)
else:
continue
if list_values:
for i in list_values:
if i[3] == 'Дата перевода':
date_transfer_value = normalize_date(i[2])
continue
if i[3] == 'Время перевода':
time_transfer_value = i[2]
continue
transfers.append({'transfer_research_title': transfer_research_title, 'date_transfer_value': date_transfer_value, 'time_transfer_value': time_transfer_value})
return transfers
def hosp_patient_movement(hosp_nums_obj):
titles_field = ['Дата перевода']
patient_movement = []
list_values = None
for i in range(len(hosp_nums_obj)):
date_out, diagnos_mkb, doc_confirm_code = '', '', ''
bed_profile_research_title = hosp_nums_obj[i].get('research_title')
hosp_dir = hosp_nums_obj[i].get('direction')
primary_reception_data = primary_reception_get_data(hosp_dir)
hosp_extract_data = hosp_get_data_direction(hosp_dir, site_type=7, type_service='None', level=2)
if hosp_extract_data:
extract_data = hosp_extract_get_data(hosp_dir)
if extract_data:
date_out = extract_data['date_value']
diagnos_mkb = extract_data['final_diagnos_mkb']
doc_confirm_code = (
None if not Issledovaniya.objects.get(pk=extract_data['extract_iss']) else Issledovaniya.objects.get(pk=extract_data['extract_iss']).doc_confirmation.personal_code
)
list_values = None
epicrisis_data = hosp_get_data_direction(hosp_dir, site_type=6, type_service='None', level=2)
if epicrisis_data:
result_check = check_transfer_epicrisis(epicrisis_data)
if result_check['iss']:
iss_transfer, research_id_transfer = result_check['iss'], result_check['research_id']
if titles_field and iss_transfer:
list_values = get_result_value_iss(iss_transfer, research_id_transfer, titles_field)
if list_values:
for i in list_values:
if i[3] == 'Дата перевода':
date_out = normalize_date(i[2])
if i[3] == 'Клинический диагноз по МКБ':
diagnos_mkb = i[2]
patient_movement.append(
{
'bed_profile_research_title': bed_profile_research_title,
'date_entered_value': primary_reception_data['date_entered_value'],
'date_oute': date_out,
'diagnos_mkb': diagnos_mkb,
'doc_confirm_code': doc_confirm_code,
}
)
return patient_movement
def hosp_get_operation_data(num_dir):
hosp_operation = hosp_get_data_direction(num_dir, site_type=3, type_service='None', level=-1)
operation_iss_research = []
if hosp_operation:
for i in hosp_operation:
# find protocols of the operation/manipulation type
if (i.get('research_title').lower().find('операци') != -1 or i.get('research_title').lower().find('манипул') != -1) and i['date_confirm']:
operation_iss_research.append({'iss': i['iss'], 'research': i['research_id']})
titles_field = [
'Название операции',
'Дата проведения',
'Время начала',
'Время окончания',
'Метод обезболивания',
'Осложнения',
'Код операции',
'Код манипуляции',
'Оперативное вмешательство',
'Код анестезиолога',
'Категория сложности',
'Диагноз после оперативного лечения',
'МКБ 10',
'Оперировал',
'Код хирурга',
]
list_values = []
operation_result = []
if titles_field and operation_iss_research and hosp_operation:
for i in operation_iss_research:
list_values.append(get_result_value_iss(i['iss'], i['research'], titles_field))
operation_result = []
for fields_operation in list_values:
pk_iss_operation = fields_operation[0][1]
operation_data = {
'name_operation': '',
'date': '',
'time_start': '',
'time_end': '',
'anesthesia method': '',
'complications': '',
'doc_fio': '',
'code_operation': '',
'code_doc_anesthesia': '',
'plan_operation': '',
'diagnos_after_operation': '',
'mkb10': '',
'category_difficult': '',
'doc_code': '',
}
iss_obj = Issledovaniya.objects.filter(pk=pk_iss_operation).first()
if not iss_obj.time_confirmation:
continue
operation_data['doc_fio'] = iss_obj.doc_confirmation_fio
operation_data['doc_code'] = None if not Issledovaniya.objects.get(pk=pk_iss_operation) else Issledovaniya.objects.get(pk=pk_iss_operation).doc_confirmation.personal_code
if operation_data['doc_code'] == 0:
operation_data['doc_code'] = ''
category_difficult = ''
for field in fields_operation:
if field[3] == 'Название операции':
operation_data['name_operation'] = field[2]
continue
if field[3] == 'Дата проведения':
operation_data['date'] = normalize_date(field[2])
continue
if field[3] == 'Время начала':
operation_data['time_start'] = field[2]
continue
if field[3] == 'Время окончания':
operation_data['time_end'] = field[2]
continue
if field[3] == 'Метод обезболивания':
operation_data['anesthesia method'] = field[2]
continue
if field[3] == 'Осложнения':
operation_data['complications'] = field[2]
continue
if field[3] == 'Код операции':
operation_data['code_operation'] = field[2]
continue
if field[3] == 'Код манипуляции':
operation_data['code_operation'] = field[2]
continue
if field[3] == 'Код анестезиолога':
operation_data['code_doc_anesthesia'] = field[2]
continue
if field[3] == 'Оперативное вмешательство':
operation_data['plan_operation'] = field[2]
continue
if field[3] == 'Категория сложности':
operation_data['category_difficult'] = f"Сложность - {field[2]}"
continue
if field[3] == 'Диагноз после оперативного лечения':
operation_data['diagnos_after_operation'] = field[2]
continue
if field[3] == 'МКБ 10':
operation_data['mkb10'] = field[2]
continue
if field[3] == 'Оперировал':
if field[2]:
operation_data['doc_fio'] = field[2]
continue
if field[3] == 'Код хирурга':
if field[2]:
operation_data['doc_code'] = field[2]
continue
operation_data['name_operation'] = f"{operation_data['name_operation']} {category_difficult}"
operation_result.append(operation_data.copy())
return operation_result
def closed_bl(hosp_num_dir):
"""
Are there confirmed sick-leave protocols containing the word "закрытие" (closure) among the sick-leave (Б/Л) records?
"""
result_bl = hosp_get_data_direction(hosp_num_dir, site_type=8, type_service='None', level=-1)
num, who_get, who_care, start_date, end_date, start_work = '', '', '', '', '', ''
for i in result_bl:
if i['date_confirm'] is None:
continue
if i["research_title"].lower().find('закрыт') != -1:
data_closed_bl = ParaclinicResult.objects.filter(issledovaniye=i['iss'])
for b in data_closed_bl:
if b.field.title == "Лист нетрудоспособности №":
num = b.value
continue
if b.field.title == "Выдан кому":
who_get = b.value
continue
if b.field.title == "по уходу за":
who_care = b.value
continue
if b.field.title == "выдан с":
start_date = b.value
if start_date.find('-') != -1:
start_date = normalize_date(start_date)
continue
if b.field.title == "по":
end_date = b.value
if end_date.find('-') != -1:
end_date = normalize_date(end_date)
continue
if b.field.title == "к труду":
start_work = b.value
if start_work.find('-') != -1:
start_work = normalize_date(start_work)
continue
return {'is_closed': True, 'num': num, 'who_get': who_get, 'who_care': who_care, 'start_date': start_date, 'end_date': end_date, 'start_work': start_work}
return {'is_closed': False, 'num': num, 'who_get': who_get, 'who_care': who_care, 'start_date': start_date, 'end_date': end_date, 'start_work': start_work}
def create_contract(ind_dir, card_pk):
ind_card = Card.objects.get(pk=card_pk)
# exec_person = request_data['user'].doctorprofile.get_full_fio()
patient_data = ind_card.get_data_individual()
p_agent = None
if ind_card.who_is_agent:
p_agent = getattr(ind_card, ind_card.who_is_agent)
p_payer = None
if ind_card.payer:
p_payer = ind_card.payer
# Get all funding sources whose title is "Платно" (paid)
ist_f = list(IstochnikiFinansirovaniya.objects.values_list('id').filter(title__exact='Платно'))
ist_f_list = [int(x[0]) for x in ist_f]
napr = Napravleniya.objects.filter(pk__in=ind_dir)
dir_temp = []
# Check that all referrals belong to the same card and have the "Платно" (paid) funding source
num_contract_set = set()
for n in napr:
if n.istochnik_f_id in ist_f_list and n.client == ind_card:
num_contract_set.add(n.num_contract)
dir_temp.append(n.pk)
if not dir_temp:
return False
# get the SERVICES for the referrals (filtered by "paid", with no saved examinations) from Issledovaniya
research_direction = get_research_by_dir(dir_temp)
if not research_direction:
return False
# get the price for each referral/service from Issledovaniya
research_price = get_coast_from_issledovanie(research_direction)
# Build the final data structure
result_data = get_final_data(research_price)
sum_research = result_data[1]
# Checksum calculation: the sequence of referral numbers + the total sum (monetary cost)
qr_napr = ','.join([str(elem) for elem in result_data[3]])
protect_val = sum_research.replace(' ', '')
bstr = (qr_napr + protect_val).encode()
protect_code = str(zlib.crc32(bstr))
today = utils.current_time()
date_now1 = datetime.datetime.strftime(today, '%y%m%d%H%M%S%f')[:-3]
date_now_str = str(ind_card.pk) + str(date_now1)
# Check whether the contract number and the checksum are already recorded on the referrals.
# Rewrite the contract number if the set of referrals contains a None value, if the referrals
# carry different contracts, or if the checksums differ: in any of those cases overwrite everything.
num_contract_set = set()
protect_code_set = set()
napr_end = Napravleniya.objects.filter(id__in=result_data[3])
for n in napr_end:
num_contract_set.add(n.num_contract)
protect_code_set.add(n.protect_code)
if len(num_contract_set) == 1 and None in num_contract_set or None in protect_code_set:
PersonContract.person_contract_save(date_now_str, protect_code, qr_napr, sum_research, patient_data['fio'], ind_card, p_payer, p_agent)
Napravleniya.objects.filter(id__in=result_data[3]).update(num_contract=date_now_str, protect_code=protect_code)
return PersonContract.pk
|
"""
Empatica E4 is a wearable device that offers real-time physiological data
acquisition such as blood volume pulse, electrodermal activity (EDA), heart
rate, interbeat intervals, 3-axis acceleration and skin temperature.
"""
import os
import random
import numpy as np
import pandas as pd
class EmpaticaReader:
"""
Read, timeshift and write data generated by Empatica E4.
Attributes
----------
start_times : dict
Contains the timestamp of the first measurement for each
measured signal (BVP, ACC, etc.).
sample_freqs : dict
Contains the sampling frequency of each measured signal
in Hz.
IBI : pandas.DataFrame
Contains inter-beat interval data. The column
"seconds_since_start" is the time in seconds since the start of
the measurement and the column "IBI" is the duration in seconds
between consecutive beats.
ACC : pandas.DataFrame
Contains the data measured with the onboard MEMS type
3-axis accelerometer, indexed by time of measurement.
BVP : pandas.DataFrame
Contains blood volume pulse data, indexed by time of
measurement.
EDA : pandas.DataFrame
Contains data captured from the electrodermal activity
sensor, indexed by time of measurement.
HR : pandas.DataFrame
Contains heart rate data, indexed by time of
measurement.
TEMP : pandas.DataFrame
Contains temperature data, indexed by time of
measurement.
data : pandas.DataFrame
Joined dataframe of the ACC, BVP, EDA, HR and TEMP
dataframes (see above). May contain NaN values because sampling
frequencies differ across signals.
"""
def __init__(self, path):
"""
Parse the csv files located in the specified directory into dataframes.
Parameters
----------
path : str
Path of the directory that contains the individual signal csv
files. The files must be named ACC.csv, BVP.csv, EDA.csv, HR.csv,
IBI.csv and TEMP.csv. If present, the file tags.csv is also read.
"""
self.start_times = {}
self.sample_freqs = {}
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
if not files:
print('Empty directory. Nothing to read.')
return None
self.ACC = self._read_signal(os.path.join(path, 'ACC.csv'), 'ACC', col_names=['X', 'Y', 'Z'])
self.BVP = self._read_signal(os.path.join(path, 'BVP.csv'), 'BVP')
self.EDA = self._read_signal(os.path.join(path, 'EDA.csv'), 'EDA')
self.HR = self._read_signal(os.path.join(path, 'HR.csv'), 'HR')
self.TEMP = self._read_signal(os.path.join(path, 'TEMP.csv'), 'TEMP')
self.IBI = self._read_ibi(os.path.join(path, 'IBI.csv'))
self.tags = self._read_tags(os.path.join(path, 'tags.csv'))
self.data = self._get_joined_dataframe()
def write(self, dir_path):
"""
Write the signal dataframes back to individual csv files formatted the
same way as they were read.
Parameters
----------
dir_path : str
Path of the directory in which the csv files are created.
If the directory exists, the csv files are written using writing mode 'w',
ignoring other files in the directory.
If the directory does not exist, it will be created.
"""
if not os.path.exists(dir_path):
os.mkdir(dir_path)
if self.ACC is not None:
self._write_signal(os.path.join(dir_path, 'ACC.csv'), self.ACC, 'ACC')
if self.BVP is not None:
self._write_signal(os.path.join(dir_path, 'BVP.csv'), self.BVP, 'BVP')
if self.EDA is not None:
self._write_signal(os.path.join(dir_path, 'EDA.csv'), self.EDA, 'EDA')
if self.HR is not None:
self._write_signal(os.path.join(dir_path, 'HR.csv'), self.HR, 'HR')
if self.TEMP is not None:
self._write_signal(os.path.join(dir_path, 'TEMP.csv'), self.TEMP, 'TEMP')
if self.IBI is not None:
self._write_ibi(os.path.join(dir_path, 'IBI.csv'))
if self.tags is not None:
self._write_tags(os.path.join(dir_path, 'tags.csv'))
def _read_signal(self, path, signal_name, col_names=None):
try:
if os.stat(path).st_size > 0:
with open(path, 'r') as file:
start_time_str = file.readline().split(', ')[0]
self.start_times[signal_name] = pd.Timestamp(float(start_time_str), unit='s')
sample_freq_str = file.readline().split(', ')[0]
self.sample_freqs[signal_name] = float(sample_freq_str)
col_names = [signal_name] if col_names is None else col_names
dataframe = pd.read_csv(file, header=None, names=col_names)
dataframe.index = pd.date_range(
start=self.start_times[signal_name],
freq=f"{1 / self.sample_freqs[signal_name]}S",
periods=len(dataframe))
if col_names is not None:
dataframe.rename(dict(enumerate(col_names)), inplace=True)
else:
dataframe.rename({0: signal_name}, inplace=True)
return dataframe.squeeze()
else:
print(f"Not reading signal because the file {path} is empty.")
except OSError:
print(f"Not reading signal because the file {path} does not exist.")
return None
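# File layout assumed by _read_signal/_write_signal (the standard Empatica E4 export format):
#   line 1: initial unix timestamp in seconds, repeated once per column
#   line 2: sampling frequency in Hz, repeated once per column
#   remaining lines: one sample per row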
def _write_signal(self, path, dataframe, signal_name):
n_cols = len(dataframe.columns) if isinstance(dataframe, pd.DataFrame) else 1
meta = np.array([[self.start_times[signal_name].value / 1e9] * n_cols,
[self.sample_freqs[signal_name]] * n_cols])
with open(path, 'w') as file:
np.savetxt(file, meta, fmt='%s', delimiter=', ', newline='\n')
dataframe.to_csv(file, index=None, header=None, line_terminator='\n')
def _read_ibi(self, path):
try:
if os.stat(path).st_size > 0:
with open(path, 'r') as file:
start_time = pd.Timestamp(float(file.readline().split(',')[0]), unit='s')
self.start_times['IBI'] = start_time
df = pd.read_csv(file, names=['time', 'IBI'], header=None)
df['time'] = pd.to_timedelta(df['time'], unit='s')
df['time'] = start_time + df['time']
return df.set_index('time')
else:
print(f"Not reading signal because the file {path} is empty.")
except OSError:
print(f"Not reading signal because the file {path} does not exist.")
return None
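# IBI.csv layout assumed above and re-created by _write_ibi below: line 1 is "<unix start time>, IBI";
# each following row is "<seconds since start>,<inter-beat interval in seconds>".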
def _write_ibi(self, path):
with open(path, 'w') as file:
file.write(f"{self.start_times["IBI"].value // 1e9}, IBI\n")
write_df = self.IBI.copy()
write_df.index = (write_df.index - self.start_times['IBI']).values.astype(int) / 1e9
write_df.to_csv(file, header=None, line_terminator='\n')
def _read_tags(self, path):
try:
if os.stat(path).st_size > 0:
return pd.read_csv(path, header=None,
parse_dates=[0],
date_parser=lambda x : pd.to_datetime(x, unit='s'),
names=['tags'],
squeeze=True)
else:
print(f"Not reading tags because the file {path} is empty.")
except OSError:
print(f"Not reading tags because the file {path} does not exist.")
return None
def _write_tags(self, path):
if self.tags is not None:
tags_write_series = self.tags.map(lambda x: x.value / 1e9)
tags_write_series.to_csv(path, header=None, index=None, line_terminator='\n')
def timeshift(self, shift='random'):
"""
Timeshift all time-related columns as well as the start_times dict.
Parameters
----------
shift : 'random', pd.Timestamp or pd.Timedelta
If shift is 'random' (the default), shifts the data by a random time
interval between one month and two years into the past.
If shift is a timedelta, adds that timedelta to all time-related attributes.
If shift is a timestamp, shifts the data such that the earliest entry
has that timestamp. The remaining values will maintain the same
time difference to the first entry.
"""
if shift == 'random':
one_month = pd.Timedelta('- 30 days').value
two_years = pd.Timedelta('- 730 days').value
random_timedelta = pd.Timedelta(random.uniform(one_month, two_years))
return self.timeshift(random_timedelta)
dataframes = []
variables = [self.ACC, self.BVP, self.EDA,
self.HR, self.TEMP, self.data]
for variable in variables:
if variable is not None:
dataframes.append(variable)
if isinstance(shift, pd.Timestamp):
min_start_time = min(self.start_times.values())
new_start_times = dict()
for signal_name, start_time in self.start_times.items():
new_start_times[signal_name] = shift + (start_time - min_start_time)
self.start_times = new_start_times
if self.tags is not None:
timedeltas = self.tags - self.tags.min()
self.tags = shift + timedeltas
for dataframe in dataframes:
timedeltas = dataframe.index - dataframe.index.min()
dataframe.index = shift + timedeltas
if isinstance(shift, pd.Timedelta):
for signal_name in self.start_times:
self.start_times[signal_name] += shift
if self.tags is not None:
self.tags += shift
for dataframe in dataframes:
dataframe.index += shift
def _get_joined_dataframe(self):
dataframes = []
variables = [self.ACC, self.BVP, self.EDA,
self.HR, self.TEMP]
for variable in variables:
if variable is not None:
dataframes.append(variable)
if not dataframes:
print('No joined dataframe possible due to lack of data.')
return None
joined_idx = pd.concat([pd.Series(dataframe.index) for dataframe in dataframes])
joined_idx = pd.Index(joined_idx.drop_duplicates().sort_values())
joined_dataframe = pd.DataFrame(index=joined_idx)
if self.ACC is not None:
joined_dataframe.loc[self.ACC.index, 'ACC_X'] = self.ACC['X']
joined_dataframe.loc[self.ACC.index, 'ACC_Y'] = self.ACC['Y']
joined_dataframe.loc[self.ACC.index, 'ACC_Z'] = self.ACC['Z']
if self.BVP is not None:
joined_dataframe.loc[self.BVP.index, 'BVP'] = self.BVP
if self.EDA is not None:
joined_dataframe.loc[self.EDA.index, 'EDA'] = self.EDA
if self.HR is not None:
joined_dataframe.loc[self.HR.index, 'HR'] = self.HR
if self.TEMP is not None:
joined_dataframe.loc[self.TEMP.index, 'TEMP'] = self.TEMP
return joined_dataframe
| """
Empatica E4 is a wearable device that offers real-time physiological data
acquisition such as blood volume pulse, electrodermal activity (EDA), heart
rate, interbeat intervals, 3-axis acceleration and skin temperature.
"""
import os
import random
import numpy as np
import pandas as pd
class EmpaticaReader:
"""
Read, timeshift and write data generated by Empatica E4.
Attributes
----------
start_times : dict
Contains the timestamp of the first measurement for each
measured signal (BVP, ACC, etc.).
sample_freqs : dict
Contains the sampling frequency of each measured signal
in Hz.
IBI : pandas.DataFrame
Contains inter-beat interval data. The column
"seconds_since_start" is the time in seconds since the start of
the measurement and the column "IBI" is the duration in seconds
between consecutive beats.
ACC : pandas.DataFrame
Contains the data measured with the onboard MEMS type
3-axis accelerometer, indexed by time of measurement.
BVP : pandas.DataFrame
Contains blood volume pulse data, indexed by time of
measurement.
EDA : pandas.DataFrame
Contains data captured from the electrodermal activity
sensor, indexed by time of measurement.
HR : pandas.DataFrame
Contains heart rate data, indexed by time of
measurement.
TEMP : pandas.DataFrame
Contains temperature data, indexed by time of
measurement.
data : pandas.DataFrame
Joined dataframe of the ACC, BVP, EDA, HR and TEMP
dataframes (see above). May contain NaN values because sampling
frequencies differ across signals.
"""
def __init__(self, path):
"""
Parse the csv files located in the specified directory into dataframes.
Parameters
----------
path : str
Path of the directory that contains the individual signal csv
files. The files must be named ACC.csv, BVP.csv, EDA.csv, HR.csv,
IBI.csv and TEMP.csv. If present, the file tags.csv is also read.
"""
self.start_times = {}
self.sample_freqs = {}
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
        if not files:
print('Empty directory. Nothing to read.')
return None
self.ACC = self._read_signal(os.path.join(path, 'ACC.csv'), 'ACC', col_names=['X', 'Y', 'Z'])
self.BVP = self._read_signal(os.path.join(path, 'BVP.csv'), 'BVP')
self.EDA = self._read_signal(os.path.join(path, 'EDA.csv'), 'EDA')
self.HR = self._read_signal(os.path.join(path, 'HR.csv'), 'HR')
self.TEMP = self._read_signal(os.path.join(path, 'TEMP.csv'), 'TEMP')
self.IBI = self._read_ibi(os.path.join(path, 'IBI.csv'))
self.tags = self._read_tags(os.path.join(path, 'tags.csv'))
self.data = self._get_joined_dataframe()
def write(self, dir_path):
"""
Write the signal dataframes back to individual csv files formatted the
same way as they were read.
Parameters
----------
        dir_path : str
            Path of the directory in which the csv files are created.
            If the directory already exists, the csv files are written with
            mode 'w', leaving any other files in the directory untouched.
            If the directory does not exist, it will be created.
"""
if not os.path.exists(dir_path):
os.mkdir(dir_path)
if self.ACC is not None:
self._write_signal(os.path.join(dir_path, 'ACC.csv'), self.ACC, 'ACC')
if self.BVP is not None:
self._write_signal(os.path.join(dir_path, 'BVP.csv'), self.BVP, 'BVP')
if self.EDA is not None:
self._write_signal(os.path.join(dir_path, 'EDA.csv'), self.EDA, 'EDA')
if self.HR is not None:
self._write_signal(os.path.join(dir_path, 'HR.csv'), self.HR, 'HR')
if self.TEMP is not None:
self._write_signal(os.path.join(dir_path, 'TEMP.csv'), self.TEMP, 'TEMP')
if self.IBI is not None:
self._write_ibi(os.path.join(dir_path, 'IBI.csv'))
if self.tags is not None:
self._write_tags(os.path.join(dir_path, 'tags.csv'))
def _read_signal(self, path, signal_name, col_names=None):
try:
if os.stat(path).st_size > 0:
with open(path, 'r') as file:
start_time_str = file.readline().split(', ')[0]
self.start_times[signal_name] = pd.Timestamp(float(start_time_str), unit='s')
sample_freq_str = file.readline().split(', ')[0]
self.sample_freqs[signal_name] = float(sample_freq_str)
col_names = [signal_name] if col_names is None else col_names
dataframe = pd.read_csv(file, header=None, names=col_names)
dataframe.index = pd.date_range(
start=self.start_times[signal_name],
freq=f"{1 / self.sample_freqs[signal_name]}S",
periods=len(dataframe))
if col_names is not None:
dataframe.rename(dict(enumerate(col_names)), inplace=True)
else:
dataframe.rename({0: signal_name}, inplace=True)
return dataframe.squeeze()
else:
print(f"Not reading signal because the file {path} is empty.")
except OSError:
print(f"Not reading signal because the file {path} does not exist.")
return None
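    # Illustrative sketch (values are made up, not from the source) of the
    # per-signal csv layout that _read_signal above expects, inferred from the
    # parsing code: line 1 holds the unix start time, line 2 the sampling
    # frequency in Hz (both repeated once per channel), and the remaining
    # lines hold the samples.
    #
    #   1581171714.000000, 1581171714.000000, 1581171714.000000
    #   32.000000, 32.000000, 32.000000
    #   -31,2,61
    #   -30,3,60
    #   ...
    #
    # Single-channel files such as HR.csv carry one value per line instead.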
def _write_signal(self, path, dataframe, signal_name):
n_cols = len(dataframe.columns) if isinstance(dataframe, pd.DataFrame) else 1
meta = np.array([[self.start_times[signal_name].value / 1e9] * n_cols,
[self.sample_freqs[signal_name]] * n_cols])
with open(path, 'w') as file:
np.savetxt(file, meta, fmt='%s', delimiter=', ', newline='\n')
dataframe.to_csv(file, index=None, header=None, line_terminator='\n')
def _read_ibi(self, path):
try:
if os.stat(path).st_size > 0:
with open(path, 'r') as file:
start_time = pd.Timestamp(float(file.readline().split(',')[0]), unit='s')
self.start_times['IBI'] = start_time
df = pd.read_csv(file, names=['time', 'IBI'], header=None)
df['time'] = pd.to_timedelta(df['time'], unit='s')
df['time'] = start_time + df['time']
return df.set_index('time')
else:
print(f"Not reading signal because the file {path} is empty.")
except OSError:
print(f"Not reading signal because the file {path} does not exist.")
return None
def _write_ibi(self, path):
with open(path, 'w') as file:
file.write(f"{self.start_times['IBI'].value // 1e9}, IBI\n")
write_df = self.IBI.copy()
write_df.index = (write_df.index - self.start_times['IBI']).values.astype(int) / 1e9
write_df.to_csv(file, header=None, line_terminator='\n')
def _read_tags(self, path):
try:
if os.stat(path).st_size > 0:
return pd.read_csv(path, header=None,
parse_dates=[0],
date_parser=lambda x : pd.to_datetime(x, unit='s'),
names=['tags'],
squeeze=True)
else:
print(f"Not reading tags because the file {path} is empty.")
except OSError:
print(f"Not reading tags because the file {path} does not exist.")
return None
def _write_tags(self, path):
if self.tags is not None:
tags_write_series = self.tags.map(lambda x: x.value / 1e9)
tags_write_series.to_csv(path, header=None, index=None, line_terminator='\n')
def timeshift(self, shift='random'):
"""
        Timeshift all time-related columns as well as the start_times dict.
Parameters
----------
shift : None/'random', pd.Timestamp or pd.Timedelta
            If shift is not specified, shifts the data by a random time interval
            between one month and two years into the past.
            If shift is a timedelta, adds that timedelta to all time-related attributes.
            If shift is a timestamp, shifts the data such that the earliest entry
            has that timestamp. The remaining values will maintain the same
            time difference to the first entry.
"""
if shift == 'random':
one_month = pd.Timedelta('- 30 days').value
two_years = pd.Timedelta('- 730 days').value
random_timedelta = pd.Timedelta(random.uniform(one_month, two_years))
self.timeshift(random_timedelta)
dataframes = []
variables = [self.ACC, self.BVP, self.EDA,
self.HR, self.TEMP, self.data]
for variable in variables:
if variable is not None:
dataframes.append(variable)
if isinstance(shift, pd.Timestamp):
min_start_time = min(self.start_times.values())
new_start_times = dict()
for signal_name, start_time in self.start_times.items():
new_start_times[signal_name] = shift + (start_time - min_start_time)
self.start_times = new_start_times
if self.tags is not None:
timedeltas = self.tags - self.tags.min()
self.tags = shift + timedeltas
for dataframe in dataframes:
timedeltas = dataframe.index - dataframe.index.min()
dataframe.index = shift + timedeltas
if isinstance(shift, pd.Timedelta):
for signal_name in self.start_times:
self.start_times[signal_name] += shift
if self.tags is not None:
self.tags += shift
for dataframe in dataframes:
dataframe.index += shift
def _get_joined_dataframe(self):
dataframes = []
variables = [self.ACC, self.BVP, self.EDA,
self.HR, self.TEMP]
for variable in variables:
if variable is not None:
dataframes.append(variable)
if not dataframes:
print('No joined dataframe possible due to lack of data.')
return None
joined_idx = pd.concat([pd.Series(dataframe.index) for dataframe in dataframes])
joined_idx = pd.Index(joined_idx.drop_duplicates().sort_values())
joined_dataframe = pd.DataFrame(index=joined_idx)
if self.ACC is not None:
joined_dataframe.loc[self.ACC.index, 'ACC_X'] = self.ACC['X']
joined_dataframe.loc[self.ACC.index, 'ACC_Y'] = self.ACC['Y']
joined_dataframe.loc[self.ACC.index, 'ACC_Z'] = self.ACC['Z']
if self.BVP is not None:
joined_dataframe.loc[self.BVP.index, 'BVP'] = self.BVP
if self.EDA is not None:
joined_dataframe.loc[self.EDA.index, 'EDA'] = self.EDA
if self.HR is not None:
joined_dataframe.loc[self.HR.index, 'HR'] = self.HR
if self.TEMP is not None:
joined_dataframe.loc[self.TEMP.index, 'TEMP'] = self.TEMP
return joined_dataframe
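# Minimal usage sketch (not part of the original module). The directory names
# are hypothetical placeholders; the input directory must contain the csv
# files described in the EmpaticaReader docstring.
if __name__ == '__main__':
    reader = EmpaticaReader('data/empatica_session')   # hypothetical input path
    # Shift every timestamp two weeks into the past, then write the
    # shifted copies to a separate directory.
    reader.timeshift(pd.Timedelta('-14 days'))
    reader.write('data/empatica_session_shifted')      # hypothetical output path
    print(reader.data.head())                          # joined dataframe of all signals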
|
# Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
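    # For orientation (illustrative, not asserted by any test here): tracing a
    # module whose forward is `return torch.relu(x + 3.0)` yields a GraphModule
    # whose generated `code` attribute looks roughly like
    #
    #     def forward(self, x):
    #         add = x + 3.0;  x = None
    #         relu = torch.relu(add);  add = None
    #         return relu
    #
    # i.e. one Node per recorded operation, which is what the tests below
    # inspect via gm.graph.nodes and gm.code.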
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
# saving the original list because we will insert new nodes as a part of a test
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
# verify that copying the node does not lose the stack trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
            # 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
    def test_tensor_attribute_coalesced(self):
        def count_attrs(fx_module):
            targets = set()
            for node in fx_module.graph.nodes:
                if node.op == 'get_attr':
                    targets.add(node.target)
            return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
        output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
        # Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
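# Build a relu chain deeper than the Python recursion limit to verify that
# Graph deepcopy is not node-recursive and that user/def links survive the copy.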
def test_deepcopy_recursion_depth(self):
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
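# Tracer that refuses to treat ReLU modules as leaves, so they appear as
# call_function nodes that the loop below can rewrite.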
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
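# concrete_args bakes the given values into the trace; fx inserts torch._assert
# guards so the specialized module rejects other values at runtime.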
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
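# With check_mutable_operations enabled, tracing an op called with out= should be rejected.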
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
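# eliminate_dead_code must keep the profiler range enter/exit calls even though
# their results are unused, because they are side-effectful.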
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
try:
    return super(type(self), self).__call__(*args)
finally:
    self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# The recompile() calls below are not strictly required; they are
# included to exercise the chained __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
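# PH marks leaves that stay symbolic; the surrounding container structure is
# captured via concrete_args and flattened with tree_flatten_spec.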
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
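# A custom CodeGen can change the generated forward signature; ListCodeGen below
# makes the module accept a single list of tensors instead of separate arguments.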
def test_custom_codegen(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
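# For each OpInfo sample, try every schema overload returned by
# get_signature_for_torch_op until one binds; fail if none matches.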
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": CONTROL_FLOW,
"max_pool2d_with_indices": CONTROL_FLOW,
"max_pool3d_with_indices": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functionals with Tensor inputs but not with type annotation
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
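# Collect every public torch.nn.functional callable that takes at least one
# Tensor-annotated argument (plus the explicitly listed unannotated ones).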
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# No signature or Object is not supported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
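# Dynamically generate one test per functional: expect a specific tracing failure
# if the functional is listed above, otherwise require symbolic_trace to succeed.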
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
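# generate_test_fn compares eager, traced, and scripted outputs, applying
# output_transform first so models that return dicts or tuples are compared on
# a single tensor-bearing entry.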
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
exc, err = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(exc, err):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
exc, err = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(exc, err):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
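# Each generator below walks a torchvision namespace and registers one test per
# public factory function (lowercase name that does not start with an underscore).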
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
# Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# Used in test_pytree. Defined at module level because pickling a GraphModule
# that uses Point fails if Point is local to a function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
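# Keep references to the originals so tests can verify that symbolic tracing
# un-patches the globals it wrapped.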
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# Test that Proxy objects can be used to generate additional graph code later,
# for code that does not need to work with Modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# Test that Proxy objects can be used to generate additional graph code later,
# for code that does not need to work with Modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
# Save the original node list, since the test inserts new nodes below
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
# verify that copying the node does not lose the stack trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, f"Unsupported call target {target}"
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalesced(self):
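# A tensor constant referenced multiple times during tracing should map to a
# single get_attr target; count_attrs counts the distinct get_attr targets.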
def count_attrs(fx_module):
targets = set()
for node in fx_module.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
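# GraphModule also accepts a plain dict mapping qualified target names to
# Modules/Tensors as its root and builds the intermediate submodule hierarchy
# itself.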
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
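# __fx_create_arg__ lets a custom object control how it is lowered into Node
# arguments when Tracer.create_arg encounters it.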
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
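# Overriding call_function/call_method here swaps sigmoid and neg at execution
# time without modifying the underlying graph.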
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
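# Seeding initial_env with a value for the linear node makes the Interpreter use
# that value instead of executing the node, so the final result depends only on
# the seeded tensor.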
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
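# Unlike the Interpreter-based swap above, a Transformer bakes the swapped ops
# into a new GraphModule rather than changing behavior only at execution time.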
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
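# Build a relu chain longer than the recursion limit to ensure deep-copying a
# Graph does not hit Python's recursion limit.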
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
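    # Smoke test: get_signature_for_torch_op should not raise for any callable on the torch namespace.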
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
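    # graph_copy plus a value map seeded with {placeholder: previous output} chains
    # one traced graph onto another, i.e. a manual inline.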
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
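    # Assigning to node.args must keep the users bookkeeping of both the old and the new inputs in sync.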
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
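    # fx's immutable_list rejects in-place item assignment; args containers are replaced wholesale.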
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
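        # concrete_args can also specialize a function-valued argument, tracing through the lambda.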
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
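    # End-to-end exercise of the submodule-manipulation API: add_submodule, delete_submodule,
    # get_parameter, get_buffer and delete_all_unused_submodules.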
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
                return "b" not in name
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
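    # GraphModule can be constructed from a root nn.Module or from a plain dict of attributes;
    # both paths must carry the original buffers and parameters across.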
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
            del sys.modules["fx.test_future"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
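    # PH marks which leaves of the concrete_args pytree become real placeholders; verify_pytree
    # swaps them for random tensors and checks the flatten/unflatten round trip.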
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
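    # Custom CodeGen: the generated forward takes a single list argument and unpacks it,
    # with process_inputs mapping that list onto the underlying flat placeholders.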
def test_custom_codegen(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
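# Helper run in a subprocess by test_getitem_subproc: temporarily patches Tensor.__getitem__
# for tracing and restores the patch list even if the inner test fails.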
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
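# Backward-compatibility guard: renders stable signature strings and public-member lists for
# everything marked @compatibility(is_backward_compatible=True) and compares them via assertExpected.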
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": CONTROL_FLOW,
"max_pool2d_with_indices": CONTROL_FLOW,
"max_pool3d_with_indices": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functionals with Tensor inputs but not with type annotation
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# No signature or Object is not supported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
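# Auto-generates tracing (and, where possible, scripting) tests over the torchvision
# classification, segmentation, detection and video model zoos; skipped without torchvision.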
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
err, exc = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
err, exc = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
|
import click
from cmsis_svd.parser import SVDParser
MCU_OPTIONS = [
'STM32F0xx',
]
MCU2VENDOR_FILE = {
'STM32F0xx': ('STMicro', 'STM32F0xx.svd'),
}
ALL = 'show_all'
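# Print one register as "NAME 0xOFFSET: FIELD[hi:lo], ..." using each field's bit offset and width.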
def show_register(register):
fields = []
for field in register.fields:
upper_index = field.bit_offset + field.bit_width - 1
lower_index = field.bit_offset
if upper_index == lower_index:
index_s = str(upper_index)
else:
index_s = f'{upper_index}:{lower_index}'
fields.append(f'{field.name}[{index_s}]')
print(f'{register.name: <5} 0x{register.address_offset:04x}: {','.join(fields)}')
def show_peripheral(peripheral):
print(peripheral.name)
for register in peripheral.registers:
show_register(register)
print()
@click.command()
@click.option('--mcu', type=click.Choice(MCU_OPTIONS), required=True,
help='MCU Name')
@click.option('--mcu-peripheral', help='Peripheral to display; pass "show_all" to print every peripheral')
def main(mcu, mcu_peripheral=None):
"""Given a chip and peripheral, prints the registers.
"""
parser = SVDParser.for_packaged_svd(*MCU2VENDOR_FILE[mcu])
address2peripheral = {}
for peripheral in parser.get_device().peripherals:
address2peripheral[peripheral.base_address] = peripheral
for _, peripheral in sorted(address2peripheral.items()):
print(f'{peripheral.name: <16} @ 0x{peripheral.base_address:08x} ({peripheral.address_block.size: >4})')
if mcu_peripheral:
for peripheral in parser.get_device().peripherals:
if peripheral.name == mcu_peripheral or mcu_peripheral == ALL:
show_peripheral(peripheral)
if __name__ == '__main__':
main()
|
import click
from cmsis_svd.parser import SVDParser
MCU_OPTIONS = [
'STM32F0xx',
]
MCU2VENDOR_FILE = {
'STM32F0xx': ('STMicro', 'STM32F0xx.svd'),
}
ALL = 'show_all'
def show_register(register):
fields = []
for field in register.fields:
upper_index = field.bit_offset + field.bit_width - 1
lower_index = field.bit_offset
if upper_index == lower_index:
index_s = str(upper_index)
else:
index_s = f'{upper_index}:{lower_index}'
fields.append(f'{field.name}[{index_s}]')
print(f'{register.name: <5} 0x{register.address_offset:04x}: {",".join(fields)}')
def show_peripheral(peripheral):
print(peripheral.name)
for register in peripheral.registers:
show_register(register)
print()
@click.command()
@click.option('--mcu', type=click.Choice(MCU_OPTIONS), required=True,
help='MCU Name')
@click.option('--mcu-peripheral', help='Peripheral to display; pass "show_all" to print every peripheral')
def main(mcu, mcu_peripheral=None):
"""Given a chip and peripheral, prints the registers.
"""
parser = SVDParser.for_packaged_svd(*MCU2VENDOR_FILE[mcu])
address2peripheral = {}
for peripheral in parser.get_device().peripherals:
address2peripheral[peripheral.base_address] = peripheral
for _, peripheral in sorted(address2peripheral.items()):
print(f'{peripheral.name: <16} @ 0x{peripheral.base_address:08x} ({peripheral.address_block.size: >4})')
if mcu_peripheral:
for peripheral in parser.get_device().peripherals:
if peripheral.name == mcu_peripheral or mcu_peripheral == ALL:
show_peripheral(peripheral)
if __name__ == '__main__':
main()
|
from urllib.parse import urlsplit, urlunsplit
import pytest
import requests
_KUMA_STATUS = None
def pytest_configure(config):
"""Configure pytest for the Kuma deployment under test."""
global _KUMA_STATUS
# The pytest-base-url plugin adds --base-url, and sets the default from
# environment variable PYTEST_BASE_URL. If still unset, force to staging.
if config.option.base_url is None:
config.option.base_url = "https://developer.allizom.org"
base_url = config.getoption("base_url")
# Process the server status from _kuma_status.json
base_parts = urlsplit(base_url)
kuma_status_url = urlunsplit(
(base_parts.scheme, base_parts.netloc, "_kuma_status.json", "", "")
)
response = requests.get(kuma_status_url, headers={"Accept": "application/json"})
response.raise_for_status()
_KUMA_STATUS = response.json()
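    # Keep the response headers around so fixtures can detect CDN fronting (x-amz-cf-id).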
_KUMA_STATUS["response"] = {"headers": response.headers}
@pytest.fixture(scope="session")
def kuma_status(base_url):
return _KUMA_STATUS
@pytest.fixture(scope="session")
def is_behind_cdn(kuma_status):
return "x-amz-cf-id" in kuma_status["response"]["headers"]
@pytest.fixture(scope="session")
def media_url():
return "https://media.prod.mdn.mozit.cloud"
@pytest.fixture(scope="session")
def attachment_url(kuma_status):
return f'https://{kuma_status['settings']['ATTACHMENT_HOST']}'
|
from urllib.parse import urlsplit, urlunsplit
import pytest
import requests
_KUMA_STATUS = None
def pytest_configure(config):
"""Configure pytest for the Kuma deployment under test."""
global _KUMA_STATUS
# The pytest-base-url plugin adds --base-url, and sets the default from
# environment variable PYTEST_BASE_URL. If still unset, force to staging.
if config.option.base_url is None:
config.option.base_url = "https://developer.allizom.org"
base_url = config.getoption("base_url")
# Process the server status from _kuma_status.json
base_parts = urlsplit(base_url)
kuma_status_url = urlunsplit(
(base_parts.scheme, base_parts.netloc, "_kuma_status.json", "", "")
)
response = requests.get(kuma_status_url, headers={"Accept": "application/json"})
response.raise_for_status()
_KUMA_STATUS = response.json()
_KUMA_STATUS["response"] = {"headers": response.headers}
@pytest.fixture(scope="session")
def kuma_status(base_url):
return _KUMA_STATUS
@pytest.fixture(scope="session")
def is_behind_cdn(kuma_status):
return "x-amz-cf-id" in kuma_status["response"]["headers"]
@pytest.fixture(scope="session")
def media_url():
return "https://media.prod.mdn.mozit.cloud"
@pytest.fixture(scope="session")
def attachment_url(kuma_status):
return f'https://{kuma_status["settings"]["ATTACHMENT_HOST"]}'
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Main training loop."""
import os
import pickle
import time
import PIL.Image
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
from training import dataset
#----------------------------------------------------------------------------
# Select size and contents of the image snapshot grids that are exported
# periodically during training.
def setup_snapshot_image_grid(training_set):
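    # Size the grid for roughly an 8K (7680x4320) canvas, clamped to a sane number of tiles.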
gw = np.clip(7680 // training_set.shape[2], 7, 32)
gh = np.clip(4320 // training_set.shape[1], 4, 32)
# Unconditional.
if training_set.label_size == 0:
reals, labels = training_set.get_minibatch_np(gw * gh)
return (gw, gh), reals, labels
# Row per class.
cw, ch = (gw, 1)
nw = (gw - 1) // cw + 1
nh = (gh - 1) // ch + 1
# Collect images.
blocks = [[] for _i in range(nw * nh)]
for _iter in range(1000000):
real, label = training_set.get_minibatch_np(1)
idx = np.argmax(label[0])
while idx < len(blocks) and len(blocks[idx]) >= cw * ch:
idx += training_set.label_size
if idx < len(blocks):
blocks[idx].append((real, label))
if all(len(block) >= cw * ch for block in blocks):
break
# Layout grid.
reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype)
labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype)
for i, block in enumerate(blocks):
for j, (real, label) in enumerate(block):
x = (i % nw) * cw + j % cw
y = (i // nw) * ch + j // cw
if x < gw and y < gh:
reals[x + y * gw] = real[0]
labels[x + y * gw] = label[0]
return (gw, gh), reals, labels
#----------------------------------------------------------------------------
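# Tile a minibatch of CHW images into a single (gh x gw) grid and save it as one PNG.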
def save_image_grid(images, filename, drange, grid_size):
lo, hi = drange
gw, gh = grid_size
images = np.asarray(images, dtype=np.float32)
images = (images - lo) * (255 / (hi - lo))
images = np.rint(images).clip(0, 255).astype(np.uint8)
_N, C, H, W = images.shape
images = images.reshape(gh, gw, C, H, W)
images = images.transpose(0, 3, 1, 4, 2)
images = images.reshape(gh * H, gw * W, C)
PIL.Image.fromarray(images, {3: 'RGB', 1: 'L'}[C]).save(filename)
#----------------------------------------------------------------------------
# Main training script.
def training_loop(
run_dir = '.', # Output directory.
G_args = {}, # Options for generator network.
D_args = {}, # Options for discriminator network.
G_opt_args = {}, # Options for generator optimizer.
D_opt_args = {}, # Options for discriminator optimizer.
loss_args = {}, # Options for loss function.
train_dataset_args = {}, # Options for dataset to train with.
metric_dataset_args = {}, # Options for dataset to evaluate metrics against.
augment_args = {}, # Options for adaptive augmentations.
metric_arg_list = [], # Metrics to evaluate during training.
num_gpus = 1, # Number of GPUs to use.
minibatch_size = 32, # Global minibatch size.
minibatch_gpu = 4, # Number of samples processed at a time by one GPU.
G_smoothing_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights.
G_smoothing_rampup = None, # EMA ramp-up coefficient.
minibatch_repeats = 4, # Number of minibatches to run in the inner loop.
lazy_regularization = True, # Perform regularization as a separate training step?
    G_reg_interval          = 4,        # How often to perform regularization for G? Ignored if lazy_regularization=False.
    D_reg_interval          = 16,       # How often to perform regularization for D? Ignored if lazy_regularization=False.
total_kimg = 25000, # Total length of the training, measured in thousands of real images.
kimg_per_tick = 4, # Progress snapshot interval.
    image_snapshot_ticks    = 50,       # How often to save image snapshots? None = only save 'reals.png' and 'fakes_init.png'.
network_snapshot_ticks = 50, # How often to save network snapshots? None = only save 'networks-final.pkl'.
resume_pkl = None, # Network pickle to resume training from.
abort_fn = None, # Callback function for determining whether to abort training.
progress_fn = None, # Callback function for updating training progress.
):
assert minibatch_size % (num_gpus * minibatch_gpu) == 0
start_time = time.time()
print('Loading training set...')
training_set = dataset.load_dataset(**train_dataset_args)
print('Image shape:', np.int32(training_set.shape).tolist())
print('Label shape:', [training_set.label_size])
print()
print('Constructing networks...')
with tf.device('/gpu:0'):
G = tflib.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **G_args)
D = tflib.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **D_args)
Gs = G.clone('Gs')
if resume_pkl is not None:
print(f'Resuming from "{resume_pkl}"')
with dnnlib.util.open_url(resume_pkl) as f:
rG, rD, rGs = pickle.load(f)
G.copy_vars_from(rG)
D.copy_vars_from(rD)
Gs.copy_vars_from(rGs)
G.print_layers()
D.print_layers()
print('Exporting sample images...')
grid_size, grid_reals, grid_labels = setup_snapshot_image_grid(training_set)
save_image_grid(grid_reals, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size)
grid_latents = np.random.randn(np.prod(grid_size), *G.input_shape[1:])
grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=minibatch_gpu)
save_image_grid(grid_fakes, os.path.join(run_dir, 'fakes_init.png'), drange=[-1,1], grid_size=grid_size)
print(f'Replicating networks across {num_gpus} GPUs...')
G_gpus = [G]
D_gpus = [D]
for gpu in range(1, num_gpus):
with tf.device(f'/gpu:{gpu}'):
G_gpus.append(G.clone(f'{G.name}_gpu{gpu}'))
D_gpus.append(D.clone(f'{D.name}_gpu{gpu}'))
print('Initializing augmentations...')
aug = None
if augment_args.get('class_name', None) is not None:
aug = dnnlib.util.construct_class_by_name(**augment_args)
aug.init_validation_set(D_gpus=D_gpus, training_set=training_set)
print('Setting up optimizers...')
G_opt_args = dict(G_opt_args)
D_opt_args = dict(D_opt_args)
for args, reg_interval in [(G_opt_args, G_reg_interval), (D_opt_args, D_reg_interval)]:
args['minibatch_multiplier'] = minibatch_size // num_gpus // minibatch_gpu
if lazy_regularization:
mb_ratio = reg_interval / (reg_interval + 1)
args['learning_rate'] *= mb_ratio
if 'beta1' in args: args['beta1'] **= mb_ratio
if 'beta2' in args: args['beta2'] **= mb_ratio
G_opt = tflib.Optimizer(name='TrainG', **G_opt_args)
D_opt = tflib.Optimizer(name='TrainD', **D_opt_args)
G_reg_opt = tflib.Optimizer(name='RegG', share=G_opt, **G_opt_args)
D_reg_opt = tflib.Optimizer(name='RegD', share=D_opt, **D_opt_args)
print('Constructing training graph...')
data_fetch_ops = []
training_set.configure(minibatch_gpu)
for gpu, (G_gpu, D_gpu) in enumerate(zip(G_gpus, D_gpus)):
with tf.name_scope(f'Train_gpu{gpu}'), tf.device(f'/gpu:{gpu}'):
# Fetch training data via temporary variables.
with tf.name_scope('DataFetch'):
real_images_var = tf.Variable(name='images', trainable=False, initial_value=tf.zeros([minibatch_gpu] + training_set.shape))
real_labels_var = tf.Variable(name='labels', trainable=False, initial_value=tf.zeros([minibatch_gpu, training_set.label_size]))
real_images_write, real_labels_write = training_set.get_minibatch_tf()
real_images_write = tflib.convert_images_from_uint8(real_images_write)
data_fetch_ops += [tf.assign(real_images_var, real_images_write)]
data_fetch_ops += [tf.assign(real_labels_var, real_labels_write)]
# Evaluate loss function and register gradients.
fake_labels = training_set.get_random_labels_tf(minibatch_gpu)
terms = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, aug=aug, fake_labels=fake_labels, real_images=real_images_var, real_labels=real_labels_var, **loss_args)
if lazy_regularization:
if terms.G_reg is not None: G_reg_opt.register_gradients(tf.reduce_mean(terms.G_reg * G_reg_interval), G_gpu.trainables)
if terms.D_reg is not None: D_reg_opt.register_gradients(tf.reduce_mean(terms.D_reg * D_reg_interval), D_gpu.trainables)
else:
if terms.G_reg is not None: terms.G_loss += terms.G_reg
if terms.D_reg is not None: terms.D_loss += terms.D_reg
G_opt.register_gradients(tf.reduce_mean(terms.G_loss), G_gpu.trainables)
D_opt.register_gradients(tf.reduce_mean(terms.D_loss), D_gpu.trainables)
print('Finalizing training ops...')
data_fetch_op = tf.group(*data_fetch_ops)
G_train_op = G_opt.apply_updates()
D_train_op = D_opt.apply_updates()
G_reg_op = G_reg_opt.apply_updates(allow_no_op=True)
D_reg_op = D_reg_opt.apply_updates(allow_no_op=True)
Gs_beta_in = tf.placeholder(tf.float32, name='Gs_beta_in', shape=[])
Gs_update_op = Gs.setup_as_moving_average_of(G, beta=Gs_beta_in)
tflib.init_uninitialized_vars()
with tf.device('/gpu:0'):
peak_gpu_mem_op = tf.contrib.memory_stats.MaxBytesInUse()
print('Initializing metrics...')
summary_log = tf.summary.FileWriter(run_dir)
metrics = []
for args in metric_arg_list:
metric = dnnlib.util.construct_class_by_name(**args)
metric.configure(dataset_args=metric_dataset_args, run_dir=run_dir)
metrics.append(metric)
print(f'Training for {total_kimg} kimg...')
print()
if progress_fn is not None:
progress_fn(0, total_kimg)
tick_start_time = time.time()
maintenance_time = tick_start_time - start_time
cur_nimg = 0
cur_tick = -1
tick_start_nimg = cur_nimg
running_mb_counter = 0
done = False
while not done:
# Compute EMA decay parameter.
Gs_nimg = G_smoothing_kimg * 1000.0
if G_smoothing_rampup is not None:
Gs_nimg = min(Gs_nimg, cur_nimg * G_smoothing_rampup)
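        # Per-step EMA decay: Gs weights decay by half once training has seen Gs_nimg images.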
Gs_beta = 0.5 ** (minibatch_size / max(Gs_nimg, 1e-8))
# Run training ops.
for _repeat_idx in range(minibatch_repeats):
rounds = range(0, minibatch_size, minibatch_gpu * num_gpus)
run_G_reg = (lazy_regularization and running_mb_counter % G_reg_interval == 0)
run_D_reg = (lazy_regularization and running_mb_counter % D_reg_interval == 0)
cur_nimg += minibatch_size
running_mb_counter += 1
# Fast path without gradient accumulation.
if len(rounds) == 1:
tflib.run([G_train_op, data_fetch_op])
if run_G_reg:
tflib.run(G_reg_op)
tflib.run([D_train_op, Gs_update_op], {Gs_beta_in: Gs_beta})
if run_D_reg:
tflib.run(D_reg_op)
# Slow path with gradient accumulation.
else:
for _round in rounds:
tflib.run(G_train_op)
if run_G_reg:
tflib.run(G_reg_op)
tflib.run(Gs_update_op, {Gs_beta_in: Gs_beta})
for _round in rounds:
tflib.run(data_fetch_op)
tflib.run(D_train_op)
if run_D_reg:
tflib.run(D_reg_op)
# Run validation.
if aug is not None:
aug.run_validation(minibatch_size=minibatch_size)
# Tune augmentation parameters.
if aug is not None:
aug.tune(minibatch_size * minibatch_repeats)
# Perform maintenance tasks once per tick.
done = (cur_nimg >= total_kimg * 1000) or (abort_fn is not None and abort_fn())
if done or cur_tick < 0 or cur_nimg >= tick_start_nimg + kimg_per_tick * 1000:
cur_tick += 1
tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0
tick_start_nimg = cur_nimg
tick_end_time = time.time()
total_time = tick_end_time - start_time
tick_time = tick_end_time - tick_start_time
# Report progress.
print(' '.join([
f"tick {autosummary("Progress/tick", cur_tick):<5d}",
f"kimg {autosummary("Progress/kimg", cur_nimg / 1000.0):<8.1f}",
f"time {dnnlib.util.format_time(autosummary("Timing/total_sec", total_time)):<12s}",
f"sec/tick {autosummary("Timing/sec_per_tick", tick_time):<7.1f}",
f"sec/kimg {autosummary("Timing/sec_per_kimg", tick_time / tick_kimg):<7.2f}",
f"maintenance {autosummary("Timing/maintenance_sec", maintenance_time):<6.1f}",
f"gpumem {autosummary("Resources/peak_gpu_mem_gb", peak_gpu_mem_op.eval() / 2**30):<5.1f}",
f"augment {autosummary("Progress/augment", aug.strength if aug is not None else 0):.3f}",
]))
autosummary('Timing/total_hours', total_time / (60.0 * 60.0))
autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0))
if progress_fn is not None:
progress_fn(cur_nimg // 1000, total_kimg)
# Save snapshots.
if image_snapshot_ticks is not None and (done or cur_tick % image_snapshot_ticks == 0):
grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=minibatch_gpu)
save_image_grid(grid_fakes, os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}.png'), drange=[-1,1], grid_size=grid_size)
if network_snapshot_ticks is not None and (done or cur_tick % network_snapshot_ticks == 0):
pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg // 1000:06d}.pkl')
with open(pkl, 'wb') as f:
pickle.dump((G, D, Gs), f)
if len(metrics):
print('Evaluating metrics...')
for metric in metrics:
metric.run(pkl, num_gpus=num_gpus)
# Update summaries.
for metric in metrics:
metric.update_autosummaries()
tflib.autosummary.save_summaries(summary_log, cur_nimg)
tick_start_time = time.time()
maintenance_time = tick_start_time - tick_end_time
print()
print('Exiting...')
summary_log.close()
training_set.close()
#----------------------------------------------------------------------------
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Main training loop."""
import os
import pickle
import time
import PIL.Image
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
from training import dataset
#----------------------------------------------------------------------------
# Select size and contents of the image snapshot grids that are exported
# periodically during training.
def setup_snapshot_image_grid(training_set):
gw = np.clip(7680 // training_set.shape[2], 7, 32)
gh = np.clip(4320 // training_set.shape[1], 4, 32)
# Unconditional.
if training_set.label_size == 0:
reals, labels = training_set.get_minibatch_np(gw * gh)
return (gw, gh), reals, labels
# Row per class.
cw, ch = (gw, 1)
nw = (gw - 1) // cw + 1
nh = (gh - 1) // ch + 1
# Collect images.
blocks = [[] for _i in range(nw * nh)]
for _iter in range(1000000):
real, label = training_set.get_minibatch_np(1)
idx = np.argmax(label[0])
while idx < len(blocks) and len(blocks[idx]) >= cw * ch:
idx += training_set.label_size
if idx < len(blocks):
blocks[idx].append((real, label))
if all(len(block) >= cw * ch for block in blocks):
break
# Layout grid.
reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype)
labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype)
for i, block in enumerate(blocks):
for j, (real, label) in enumerate(block):
x = (i % nw) * cw + j % cw
y = (i // nw) * ch + j // cw
if x < gw and y < gh:
reals[x + y * gw] = real[0]
labels[x + y * gw] = label[0]
return (gw, gh), reals, labels
#----------------------------------------------------------------------------
def save_image_grid(images, filename, drange, grid_size):
lo, hi = drange
gw, gh = grid_size
images = np.asarray(images, dtype=np.float32)
images = (images - lo) * (255 / (hi - lo))
images = np.rint(images).clip(0, 255).astype(np.uint8)
_N, C, H, W = images.shape
images = images.reshape(gh, gw, C, H, W)
images = images.transpose(0, 3, 1, 4, 2)
images = images.reshape(gh * H, gw * W, C)
PIL.Image.fromarray(images, {3: 'RGB', 1: 'L'}[C]).save(filename)
#----------------------------------------------------------------------------
# Main training script.
def training_loop(
run_dir = '.', # Output directory.
G_args = {}, # Options for generator network.
D_args = {}, # Options for discriminator network.
G_opt_args = {}, # Options for generator optimizer.
D_opt_args = {}, # Options for discriminator optimizer.
loss_args = {}, # Options for loss function.
train_dataset_args = {}, # Options for dataset to train with.
metric_dataset_args = {}, # Options for dataset to evaluate metrics against.
augment_args = {}, # Options for adaptive augmentations.
metric_arg_list = [], # Metrics to evaluate during training.
num_gpus = 1, # Number of GPUs to use.
minibatch_size = 32, # Global minibatch size.
minibatch_gpu = 4, # Number of samples processed at a time by one GPU.
G_smoothing_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights.
G_smoothing_rampup = None, # EMA ramp-up coefficient.
minibatch_repeats = 4, # Number of minibatches to run in the inner loop.
lazy_regularization = True, # Perform regularization as a separate training step?
    G_reg_interval          = 4,        # How often to perform regularization for G? Ignored if lazy_regularization=False.
    D_reg_interval          = 16,       # How often to perform regularization for D? Ignored if lazy_regularization=False.
total_kimg = 25000, # Total length of the training, measured in thousands of real images.
kimg_per_tick = 4, # Progress snapshot interval.
    image_snapshot_ticks    = 50,       # How often to save image snapshots? None = only save 'reals.png' and 'fakes_init.png'.
network_snapshot_ticks = 50, # How often to save network snapshots? None = only save 'networks-final.pkl'.
resume_pkl = None, # Network pickle to resume training from.
abort_fn = None, # Callback function for determining whether to abort training.
progress_fn = None, # Callback function for updating training progress.
):
assert minibatch_size % (num_gpus * minibatch_gpu) == 0
start_time = time.time()
print('Loading training set...')
training_set = dataset.load_dataset(**train_dataset_args)
print('Image shape:', np.int32(training_set.shape).tolist())
print('Label shape:', [training_set.label_size])
print()
print('Constructing networks...')
with tf.device('/gpu:0'):
G = tflib.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **G_args)
D = tflib.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **D_args)
Gs = G.clone('Gs')
if resume_pkl is not None:
print(f'Resuming from "{resume_pkl}"')
with dnnlib.util.open_url(resume_pkl) as f:
rG, rD, rGs = pickle.load(f)
G.copy_vars_from(rG)
D.copy_vars_from(rD)
Gs.copy_vars_from(rGs)
G.print_layers()
D.print_layers()
print('Exporting sample images...')
grid_size, grid_reals, grid_labels = setup_snapshot_image_grid(training_set)
save_image_grid(grid_reals, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size)
grid_latents = np.random.randn(np.prod(grid_size), *G.input_shape[1:])
grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=minibatch_gpu)
save_image_grid(grid_fakes, os.path.join(run_dir, 'fakes_init.png'), drange=[-1,1], grid_size=grid_size)
print(f'Replicating networks across {num_gpus} GPUs...')
G_gpus = [G]
D_gpus = [D]
for gpu in range(1, num_gpus):
with tf.device(f'/gpu:{gpu}'):
G_gpus.append(G.clone(f'{G.name}_gpu{gpu}'))
D_gpus.append(D.clone(f'{D.name}_gpu{gpu}'))
print('Initializing augmentations...')
aug = None
if augment_args.get('class_name', None) is not None:
aug = dnnlib.util.construct_class_by_name(**augment_args)
aug.init_validation_set(D_gpus=D_gpus, training_set=training_set)
print('Setting up optimizers...')
G_opt_args = dict(G_opt_args)
D_opt_args = dict(D_opt_args)
for args, reg_interval in [(G_opt_args, G_reg_interval), (D_opt_args, D_reg_interval)]:
args['minibatch_multiplier'] = minibatch_size // num_gpus // minibatch_gpu
if lazy_regularization:
mb_ratio = reg_interval / (reg_interval + 1)
args['learning_rate'] *= mb_ratio
if 'beta1' in args: args['beta1'] **= mb_ratio
if 'beta2' in args: args['beta2'] **= mb_ratio
G_opt = tflib.Optimizer(name='TrainG', **G_opt_args)
D_opt = tflib.Optimizer(name='TrainD', **D_opt_args)
G_reg_opt = tflib.Optimizer(name='RegG', share=G_opt, **G_opt_args)
D_reg_opt = tflib.Optimizer(name='RegD', share=D_opt, **D_opt_args)
print('Constructing training graph...')
data_fetch_ops = []
training_set.configure(minibatch_gpu)
for gpu, (G_gpu, D_gpu) in enumerate(zip(G_gpus, D_gpus)):
with tf.name_scope(f'Train_gpu{gpu}'), tf.device(f'/gpu:{gpu}'):
# Fetch training data via temporary variables.
with tf.name_scope('DataFetch'):
real_images_var = tf.Variable(name='images', trainable=False, initial_value=tf.zeros([minibatch_gpu] + training_set.shape))
real_labels_var = tf.Variable(name='labels', trainable=False, initial_value=tf.zeros([minibatch_gpu, training_set.label_size]))
real_images_write, real_labels_write = training_set.get_minibatch_tf()
real_images_write = tflib.convert_images_from_uint8(real_images_write)
data_fetch_ops += [tf.assign(real_images_var, real_images_write)]
data_fetch_ops += [tf.assign(real_labels_var, real_labels_write)]
# Evaluate loss function and register gradients.
fake_labels = training_set.get_random_labels_tf(minibatch_gpu)
terms = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, aug=aug, fake_labels=fake_labels, real_images=real_images_var, real_labels=real_labels_var, **loss_args)
if lazy_regularization:
if terms.G_reg is not None: G_reg_opt.register_gradients(tf.reduce_mean(terms.G_reg * G_reg_interval), G_gpu.trainables)
if terms.D_reg is not None: D_reg_opt.register_gradients(tf.reduce_mean(terms.D_reg * D_reg_interval), D_gpu.trainables)
else:
if terms.G_reg is not None: terms.G_loss += terms.G_reg
if terms.D_reg is not None: terms.D_loss += terms.D_reg
G_opt.register_gradients(tf.reduce_mean(terms.G_loss), G_gpu.trainables)
D_opt.register_gradients(tf.reduce_mean(terms.D_loss), D_gpu.trainables)
print('Finalizing training ops...')
data_fetch_op = tf.group(*data_fetch_ops)
G_train_op = G_opt.apply_updates()
D_train_op = D_opt.apply_updates()
G_reg_op = G_reg_opt.apply_updates(allow_no_op=True)
D_reg_op = D_reg_opt.apply_updates(allow_no_op=True)
Gs_beta_in = tf.placeholder(tf.float32, name='Gs_beta_in', shape=[])
Gs_update_op = Gs.setup_as_moving_average_of(G, beta=Gs_beta_in)
tflib.init_uninitialized_vars()
with tf.device('/gpu:0'):
peak_gpu_mem_op = tf.contrib.memory_stats.MaxBytesInUse()
print('Initializing metrics...')
summary_log = tf.summary.FileWriter(run_dir)
metrics = []
for args in metric_arg_list:
metric = dnnlib.util.construct_class_by_name(**args)
metric.configure(dataset_args=metric_dataset_args, run_dir=run_dir)
metrics.append(metric)
print(f'Training for {total_kimg} kimg...')
print()
if progress_fn is not None:
progress_fn(0, total_kimg)
tick_start_time = time.time()
maintenance_time = tick_start_time - start_time
cur_nimg = 0
cur_tick = -1
tick_start_nimg = cur_nimg
running_mb_counter = 0
done = False
while not done:
# Compute EMA decay parameter.
Gs_nimg = G_smoothing_kimg * 1000.0
if G_smoothing_rampup is not None:
Gs_nimg = min(Gs_nimg, cur_nimg * G_smoothing_rampup)
Gs_beta = 0.5 ** (minibatch_size / max(Gs_nimg, 1e-8))
# Run training ops.
for _repeat_idx in range(minibatch_repeats):
rounds = range(0, minibatch_size, minibatch_gpu * num_gpus)
run_G_reg = (lazy_regularization and running_mb_counter % G_reg_interval == 0)
run_D_reg = (lazy_regularization and running_mb_counter % D_reg_interval == 0)
cur_nimg += minibatch_size
running_mb_counter += 1
# Fast path without gradient accumulation.
if len(rounds) == 1:
tflib.run([G_train_op, data_fetch_op])
if run_G_reg:
tflib.run(G_reg_op)
tflib.run([D_train_op, Gs_update_op], {Gs_beta_in: Gs_beta})
if run_D_reg:
tflib.run(D_reg_op)
# Slow path with gradient accumulation.
else:
for _round in rounds:
tflib.run(G_train_op)
if run_G_reg:
tflib.run(G_reg_op)
tflib.run(Gs_update_op, {Gs_beta_in: Gs_beta})
for _round in rounds:
tflib.run(data_fetch_op)
tflib.run(D_train_op)
if run_D_reg:
tflib.run(D_reg_op)
# Run validation.
if aug is not None:
aug.run_validation(minibatch_size=minibatch_size)
# Tune augmentation parameters.
if aug is not None:
aug.tune(minibatch_size * minibatch_repeats)
# Perform maintenance tasks once per tick.
done = (cur_nimg >= total_kimg * 1000) or (abort_fn is not None and abort_fn())
if done or cur_tick < 0 or cur_nimg >= tick_start_nimg + kimg_per_tick * 1000:
cur_tick += 1
tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0
tick_start_nimg = cur_nimg
tick_end_time = time.time()
total_time = tick_end_time - start_time
tick_time = tick_end_time - tick_start_time
# Report progress.
print(' '.join([
f"tick {autosummary('Progress/tick', cur_tick):<5d}",
f"kimg {autosummary('Progress/kimg', cur_nimg / 1000.0):<8.1f}",
f"time {dnnlib.util.format_time(autosummary('Timing/total_sec', total_time)):<12s}",
f"sec/tick {autosummary('Timing/sec_per_tick', tick_time):<7.1f}",
f"sec/kimg {autosummary('Timing/sec_per_kimg', tick_time / tick_kimg):<7.2f}",
f"maintenance {autosummary('Timing/maintenance_sec', maintenance_time):<6.1f}",
f"gpumem {autosummary('Resources/peak_gpu_mem_gb', peak_gpu_mem_op.eval() / 2**30):<5.1f}",
f"augment {autosummary('Progress/augment', aug.strength if aug is not None else 0):.3f}",
]))
autosummary('Timing/total_hours', total_time / (60.0 * 60.0))
autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0))
if progress_fn is not None:
progress_fn(cur_nimg // 1000, total_kimg)
# Save snapshots.
if image_snapshot_ticks is not None and (done or cur_tick % image_snapshot_ticks == 0):
grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=minibatch_gpu)
save_image_grid(grid_fakes, os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}.png'), drange=[-1,1], grid_size=grid_size)
if network_snapshot_ticks is not None and (done or cur_tick % network_snapshot_ticks == 0):
pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg // 1000:06d}.pkl')
with open(pkl, 'wb') as f:
pickle.dump((G, D, Gs), f)
if len(metrics):
print('Evaluating metrics...')
for metric in metrics:
metric.run(pkl, num_gpus=num_gpus)
# Update summaries.
for metric in metrics:
metric.update_autosummaries()
tflib.autosummary.save_summaries(summary_log, cur_nimg)
tick_start_time = time.time()
maintenance_time = tick_start_time - tick_end_time
print()
print('Exiting...')
summary_log.close()
training_set.close()
#----------------------------------------------------------------------------
|
from qtpy.QtCore import QSize
from qtpy.QtGui import QIcon
from qtpy.QtWidgets import QListWidget, QListWidgetItem
from pathlib import Path
ICON_ROOT = Path(__file__).parent / "icons"
STYLES = r"""
QListWidget{
min-width: 294;
background: none;
font-size: 8pt;
color: #eee;
}
QListWidget::item {
width: 68;
height: 85;
border-radius: 0;
margin: 1;
padding: 4;
background: #414851;
}
QListWidget::item::hover {
background: #5A626C;
}
"""
def _get_icon(name):
path = ICON_ROOT / f'{name.lower().replace(' ', '_')}.png'
if not path.exists():
return ""
return str(path)
class ButtonGrid(QListWidget):
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setMovement(self.Static) # The items cannot be moved by the user.
self.setViewMode(self.IconMode) # make items icons
self.setResizeMode(self.Adjust) # relayout when view is resized.
self.setUniformItemSizes(True) # better performance
self.setIconSize(QSize(64, 44))
self.setWordWrap(True)
self.setStyleSheet(STYLES)
def addItem(self, label : str, tool_tip : str = None):
        if isinstance(label, QListWidgetItem):
            super().addItem(label)
            return
item = QListWidgetItem(QIcon(_get_icon(label)), label)
if tool_tip is not None:
item.setToolTip(tool_tip)
super().addItem(item)
def addItems(self, labels) -> None:
for label in labels:
if hasattr(labels[label], "tool_tip"):
self.addItem(label, labels[label].tool_tip)
else:
self.addItem(label)
|
from qtpy.QtCore import QSize
from qtpy.QtGui import QIcon
from qtpy.QtWidgets import QListWidget, QListWidgetItem
from pathlib import Path
ICON_ROOT = Path(__file__).parent / "icons"
STYLES = r"""
QListWidget{
min-width: 294;
background: none;
font-size: 8pt;
color: #eee;
}
QListWidget::item {
width: 68;
height: 85;
border-radius: 0;
margin: 1;
padding: 4;
background: #414851;
}
QListWidget::item::hover {
background: #5A626C;
}
"""
def _get_icon(name):
path = ICON_ROOT / f'{name.lower().replace(" ", "_")}.png'
if not path.exists():
return ""
return str(path)
class ButtonGrid(QListWidget):
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setMovement(self.Static) # The items cannot be moved by the user.
self.setViewMode(self.IconMode) # make items icons
self.setResizeMode(self.Adjust) # relayout when view is resized.
self.setUniformItemSizes(True) # better performance
self.setIconSize(QSize(64, 44))
self.setWordWrap(True)
self.setStyleSheet(STYLES)
def addItem(self, label : str, tool_tip : str = None):
        if isinstance(label, QListWidgetItem):
            super().addItem(label)
            return
item = QListWidgetItem(QIcon(_get_icon(label)), label)
if tool_tip is not None:
item.setToolTip(tool_tip)
super().addItem(item)
def addItems(self, labels) -> None:
for label in labels:
if hasattr(labels[label], "tool_tip"):
self.addItem(label, labels[label].tool_tip)
else:
self.addItem(label)
|
#!/usr/bin/env python3
# Author: Volodymyr Shymanskyy
# Usage:
# ./run-spec-test.py
# ./run-spec-test.py ./core/i32.json
# ./run-spec-test.py ./core/float_exprs.json --line 2070
# ./run-spec-test.py ./proposals/tail-call/*.json
# ./run-spec-test.py --exec ../build-custom/wasm3
# ./run-spec-test.py --engine "wasmer run" --exec ../build-wasi/wasm3.wasm
# ./run-spec-test.py --engine "wasmer run --backend=llvm" --exec ../build-wasi/wasm3.wasm
#
# TODO
# - Get more tests from: https://github.com/microsoft/ChakraCore/tree/master/test/WasmSpec
# - Fix "Empty Stack" check
# - Check Canonical NaN and Arithmetic NaN separately
# - Fix imports.wast
import argparse
import os, sys, glob, time
import subprocess
import json
import re
import struct
import math
import pathlib
scriptDir = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.join(scriptDir, '..', 'extra'))
from testutils import *
from pprint import pprint
#
# Args handling
#
parser = argparse.ArgumentParser()
parser.add_argument("--exec", metavar="<interpreter>", default="../build/wasm3")
parser.add_argument("--engine", metavar="<engine>")
parser.add_argument("--timeout", type=int, default=30)
parser.add_argument("--line", metavar="<source line>", type=int)
parser.add_argument("--all", action="store_true")
parser.add_argument("--show-logs", action="store_true")
parser.add_argument("--format", choices=["raw", "hex", "fp"], default="fp")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-s", "--silent", action="store_true")
parser.add_argument("file", nargs='*')
args = parser.parse_args()
if args.line:
args.show_logs = True
#
# Utilities
#
log = open("spec-test.log","w+")
log.write("======================\n")
def warning(msg):
log.write("Warning: " + msg + "\n")
log.flush()
if args.verbose:
print(f"{ansi.WARNING}Warning:{ansi.ENDC} {msg}")
def fatal(msg):
log.write("Fatal: " + msg + "\n")
log.flush()
print(f"{ansi.FAIL}Fatal:{ansi.ENDC} {msg}")
sys.exit(1)
def binaryToFloat(num, t):
if t == "f32":
return struct.unpack('!f', struct.pack('!L', int(num)))[0]
elif t == "f64":
return struct.unpack('!d', struct.pack('!Q', int(num)))[0]
else:
fatal(f"Unknown type '{t}'")
def escape(s):
c = ord(s)
if c < 128 and s.isprintable() and not s in " \n\r\t\\":
return s
if c <= 0xff:
return r'\x{0:02x}'.format(c)
elif c <= 0xffff:
return r'\u{0:04x}'.format(c)
else:
return r'\U{0:08x}'.format(c)
def escape_str(s):
if s == "":
return r'\x00'
return ''.join(escape(c) for c in s)
#
# Value format options
#
def formatValueRaw(num, t):
return str(num)
def formatValueHex(num, t):
if t == "f32" or t == "i32":
return "{0:#0{1}x}".format(int(num), 8+2)
elif t == "f64" or t == "i64":
return "{0:#0{1}x}".format(int(num), 16+2)
else:
return str(num)
def formatValueFloat(num, t):
if t == "f32":
s = 6
elif t == "f64":
s = 10
else:
return str(num)
result = "{0:.{1}f}".format(binaryToFloat(num, t), s).rstrip('0')
if result.endswith('.'): result = result + '0'
if len(result) > s*2:
result = "{0:.{1}e}".format(binaryToFloat(num, t), s)
return result
formaters = {
'raw': formatValueRaw,
'hex': formatValueHex,
'fp': formatValueFloat,
}
formatValue = formaters[args.format]
if args.format == "fp":
print("When using fp display format, values are compared loosely (some tests may produce false positives)")
#
# Spec tests preparation
#
if not (os.path.isdir("./core") and os.path.isdir("./proposals")):
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
officialSpec = "https://github.com/wasm3/wasm-core-testsuite/archive/master.zip"
print(f"Downloading {officialSpec}")
resp = urlopen(officialSpec)
with ZipFile(BytesIO(resp.read())) as zipFile:
for zipInfo in zipFile.infolist():
if re.match(r".*-master/.*/.*(\.wasm|\.json)", zipInfo.filename):
parts = pathlib.Path(zipInfo.filename).parts
newpath = str(pathlib.Path(*parts[1:-1]))
newfn = str(pathlib.Path(*parts[-1:]))
ensure_path(newpath)
newpath = newpath + "/" + newfn
zipInfo.filename = newpath
zipFile.extract(zipInfo)
#
# Wasm3 REPL
#
from subprocess import Popen, STDOUT, PIPE
from threading import Thread
from queue import Queue, Empty
import shlex
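# Build the command line used to launch the REPL, either natively or under a WASI engine.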
def get_engine_cmd(engine, exe):
if engine:
cmd = shlex.split(engine)
if "wasirun" in engine or "wasm3" in engine:
return cmd + [exe, "--repl"]
elif "wasmer" in engine:
return cmd + ["--dir=.", exe, "--", "--repl"]
elif "wasmtime" in engine:
return cmd + ["--dir=.", exe, "--", "--repl"]
elif "iwasm" in engine:
return cmd + ["--dir=.", exe, "--repl"]
elif "wavm" in engine:
return cmd + ["--mount-root", ".", exe, "--repl"] # TODO, fix path
else:
fatal(f"Don't know how to run engine {engine}")
else:
if exe.endswith(".wasm"):
fatal(f"Need engine to execute wasm")
return shlex.split(exe) + ["--repl"]
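# Thin wrapper around a wasm3 REPL subprocess: writes commands to stdin, reads replies on a
# background thread, and restarts the process on crash or timeout.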
class Wasm3():
def __init__(self, exe, engine=None):
self.exe = exe
self.engine = engine
self.p = None
self.loaded = None
self.timeout = args.timeout
self.autorestart = True
self.run()
def run(self):
if self.p:
self.terminate()
cmd = get_engine_cmd(self.engine, self.exe)
#print(f"wasm3: Starting {" ".join(cmd)}")
self.q = Queue()
self.p = Popen(cmd, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
def _read_output(out, queue):
for data in iter(lambda: out.read(1024), b''):
queue.put(data)
queue.put(None)
self.t = Thread(target=_read_output, args=(self.p.stdout, self.q))
self.t.daemon = True
self.t.start()
try:
self._read_until("wasm3> ")
except Exception as e:
print(f"wasm3: Could not start: {e}")
def restart(self):
print(f"wasm3: Restarting")
for i in range(10):
try:
self.run()
try:
if self.loaded:
self.load(self.loaded)
except Exception as e:
pass
break
except Exception as e:
print(f"wasm3: {e} => retry")
time.sleep(0.1)
def init(self):
return self._run_cmd(f":init\n")
def version(self):
return self._run_cmd(f":version\n")
def load(self, fn):
self.loaded = None
res = self._run_cmd(f":load {fn}\n")
self.loaded = fn
return res
def invoke(self, cmd):
return self._run_cmd(" ".join(map(str, cmd)) + "\n")
def _run_cmd(self, cmd):
if self.autorestart and not self._is_running():
self.restart()
self._flush_input()
#print(f"wasm3: {cmd.strip()}")
self._write(cmd)
return self._read_until("wasm3> ")
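    # Accumulate subprocess output until the REPL prompt appears or the timeout expires.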
def _read_until(self, token):
buff = ""
tout = time.time() + self.timeout
error = None
while time.time() < tout:
try:
data = self.q.get(timeout=0.1)
if data == None:
error = "Crashed"
break
buff = buff + data.decode("utf-8")
idx = buff.rfind(token)
if idx >= 0:
return buff[0:idx].strip()
except Empty:
pass
else:
error = "Timeout"
self.terminate()
raise Exception(error)
def _write(self, data):
self.p.stdin.write(data.encode("utf-8"))
self.p.stdin.flush()
def _is_running(self):
return self.p and (self.p.poll() == None)
def _flush_input(self):
while not self.q.empty():
self.q.get()
def terminate(self):
self.p.stdin.close()
self.p.terminate()
self.p.wait(timeout=1.0)
self.p = None
#
# Actual test
#
wasm3 = Wasm3(args.exec, args.engine)
print("Version: " + wasm3.version())
blacklist = Blacklist([
"float_exprs.wast:* f32.nonarithmetic_nan_bitpattern*",
"imports.wast:*",
"names.wast:630 *", # name that starts with '\0'
])
stats = dotdict(total_run=0, skipped=0, failed=0, crashed=0, timeout=0, success=0, missing=0)
# Convert some trap names from the original spec
trapmap = {
"unreachable": "unreachable executed"
}
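# Run a single invoke test: call the exported function through the REPL and compare the
# parsed result/trap against the expectation from the JSON spec file.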
def runInvoke(test):
test.cmd = [test.action.field]
displayArgs = []
for arg in test.action.args:
test.cmd.append(arg['value'])
displayArgs.append(formatValue(arg['value'], arg['type']))
test_id = f"{test.source} {test.wasm} {test.cmd[0]}({", ".join(test.cmd[1:])})"
if test_id in blacklist and not args.all:
warning(f"Skipped {test_id} (blacklisted)")
stats.skipped += 1
return
if args.verbose:
print(f"Running {test_id}")
stats.total_run += 1
output = ""
actual = None
actual_val = None
force_fail = False
try:
output = wasm3.invoke(test.cmd)
except Exception as e:
actual = f"<{e}>"
force_fail = True
# Parse the actual output
if not actual:
result = re.findall(r'Result: (.*?)$', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "result " + result[-1]
actual_val = result[0]
if not actual:
result = re.findall(r'Error: \[trap\] (.*?) \(', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "trap " + result[-1]
if not actual:
result = re.findall(r'Error: (.*?)$', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "error " + result[-1]
if not actual:
actual = "<No Result>"
force_fail = True
if actual == "error no operation ()":
actual = "<Not Implemented>"
stats.missing += 1
force_fail = True
elif actual == "<Crashed>":
stats.crashed += 1
force_fail = True
elif actual == "<Timeout>":
stats.timeout += 1
force_fail = True
# Prepare the expected result
expect = None
if "expected" in test:
if len(test.expected) == 0:
expect = "result <Empty Stack>"
elif len(test.expected) == 1:
t = test.expected[0]['type']
value = str(test.expected[0]['value'])
expect = "result " + value
if actual_val != None:
if (t == "f32" or t == "f64") and (value == "<Canonical NaN>" or value == "<Arithmetic NaN>"):
val = binaryToFloat(actual_val, t)
#warning(f"{actual_val} => {val}")
if math.isnan(val):
actual = "<Some NaN>"
expect = "<Some NaN>"
else:
expect = "result " + formatValue(value, t)
actual = "result " + formatValue(actual_val, t)
else:
warning(f"Test {test.source} specifies multiple results")
expect = "result <Multiple>"
elif "expected_trap" in test:
if test.expected_trap in trapmap:
test.expected_trap = trapmap[test.expected_trap]
expect = "trap " + str(test.expected_trap)
elif "expected_anything" in test:
expect = "<Anything>"
else:
expect = "<Unknown>"
def showTestResult():
print(" ----------------------")
print(f"Test: {ansi.HEADER}{test_id}{ansi.ENDC}")
print(f"Args: {", ".join(displayArgs)}")
print(f"Expected: {ansi.OKGREEN}{expect}{ansi.ENDC}")
print(f"Actual: {ansi.WARNING}{actual}{ansi.ENDC}")
if args.show_logs and len(output):
print(f"Log:")
print(output)
log.write(f"{test.source}\t|\t{test.wasm} {test.action.field}({", ".join(displayArgs)})\t=>\t\t")
if actual == expect or (expect == "<Anything>" and not force_fail):
stats.success += 1
log.write(f"OK: {actual}\n")
if args.line:
showTestResult()
else:
stats.failed += 1
log.write(f"FAIL: {actual}, should be: {expect}\n")
if args.silent: return
showTestResult()
#sys.exit(1)
if args.file:
jsonFiles = args.file
else:
jsonFiles = glob.glob(os.path.join(".", "core", "*.json"))
jsonFiles = list(map(lambda x: os.path.relpath(x, scriptDir), jsonFiles))
jsonFiles.sort()
for fn in jsonFiles:
with open(fn) as f:
data = json.load(f)
wast_source = filename(data["source_filename"])
wasm_module = ""
print(f"Running {fn}")
wasm3.init()
for cmd in data["commands"]:
test = dotdict()
test.line = int(cmd["line"])
test.source = wast_source + ":" + str(test.line)
test.wasm = wasm_module
test.type = cmd["type"]
if test.type == "module":
wasm_module = cmd["filename"]
if args.verbose:
print(f"Loading {wasm_module}")
try:
wasm_fn = os.path.join(pathname(fn), wasm_module)
wasm3.load(wasm_fn)
except Exception as e:
pass #fatal(str(e))
elif ( test.type == "action" or
test.type == "assert_return" or
test.type == "assert_trap" or
test.type == "assert_exhaustion" or
test.type == "assert_return_canonical_nan" or
test.type == "assert_return_arithmetic_nan"):
if args.line and test.line != args.line:
continue
if test.type == "action":
test.expected_anything = True
elif test.type == "assert_return":
test.expected = cmd["expected"]
elif test.type == "assert_return_canonical_nan":
test.expected = cmd["expected"]
test.expected[0]["value"] = "<Canonical NaN>"
elif test.type == "assert_return_arithmetic_nan":
test.expected = cmd["expected"]
test.expected[0]["value"] = "<Arithmetic NaN>"
elif test.type == "assert_trap":
test.expected_trap = cmd["text"]
elif test.type == "assert_exhaustion":
test.expected_trap = "stack overflow"
else:
stats.skipped += 1
warning(f"Skipped {test.source} ({test.type} not implemented)")
continue
test.action = dotdict(cmd["action"])
if test.action.type == "invoke":
# TODO: invoking in modules not implemented
if test.action.module:
stats.skipped += 1
warning(f"Skipped {test.source} (invoke in module)")
continue
test.action.field = escape_str(test.action.field)
runInvoke(test)
else:
stats.skipped += 1
warning(f"Skipped {test.source} (unknown action type '{test.action.type}')")
# These are irrelevant
elif (test.type == "assert_invalid" or
test.type == "assert_malformed" or
test.type == "assert_uninstantiable"):
pass
# Others - report as skipped
else:
stats.skipped += 1
warning(f"Skipped {test.source} ('{test.type}' not implemented)")
if (stats.failed + stats.success) != stats.total_run:
warning("Statistics summary invalid")
pprint(stats)
if stats.failed > 0:
failed = (stats.failed*100)/stats.total_run
print(f"{ansi.FAIL}=======================")
print(f" FAILED: {failed:.2f}%")
if stats.crashed > 0:
print(f" Crashed: {stats.crashed}")
print(f"======================={ansi.ENDC}")
sys.exit(1)
elif stats.success > 0:
print(f"{ansi.OKGREEN}=======================")
print(f" {stats.success}/{stats.total_run} tests OK")
if stats.skipped > 0:
print(f"{ansi.WARNING} ({stats.skipped} tests skipped){ansi.OKGREEN}")
print(f"======================={ansi.ENDC}")
|
#!/usr/bin/env python3
# Author: Volodymyr Shymanskyy
# Usage:
# ./run-spec-test.py
# ./run-spec-test.py ./core/i32.json
# ./run-spec-test.py ./core/float_exprs.json --line 2070
# ./run-spec-test.py ./proposals/tail-call/*.json
# ./run-spec-test.py --exec ../build-custom/wasm3
# ./run-spec-test.py --engine "wasmer run" --exec ../build-wasi/wasm3.wasm
# ./run-spec-test.py --engine "wasmer run --backend=llvm" --exec ../build-wasi/wasm3.wasm
#
# TODO
# - Get more tests from: https://github.com/microsoft/ChakraCore/tree/master/test/WasmSpec
# - Fix "Empty Stack" check
# - Check Canonical NaN and Arithmetic NaN separately
# - Fix imports.wast
import argparse
import os, sys, glob, time
import subprocess
import json
import re
import struct
import math
import pathlib
scriptDir = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.join(scriptDir, '..', 'extra'))
from testutils import *
from pprint import pprint
#
# Args handling
#
parser = argparse.ArgumentParser()
parser.add_argument("--exec", metavar="<interpreter>", default="../build/wasm3")
parser.add_argument("--engine", metavar="<engine>")
parser.add_argument("--timeout", type=int, default=30)
parser.add_argument("--line", metavar="<source line>", type=int)
parser.add_argument("--all", action="store_true")
parser.add_argument("--show-logs", action="store_true")
parser.add_argument("--format", choices=["raw", "hex", "fp"], default="fp")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-s", "--silent", action="store_true")
parser.add_argument("file", nargs='*')
args = parser.parse_args()
if args.line:
args.show_logs = True
#
# Utilities
#
log = open("spec-test.log","w+")
log.write("======================\n")
def warning(msg):
log.write("Warning: " + msg + "\n")
log.flush()
if args.verbose:
print(f"{ansi.WARNING}Warning:{ansi.ENDC} {msg}")
def fatal(msg):
log.write("Fatal: " + msg + "\n")
log.flush()
print(f"{ansi.FAIL}Fatal:{ansi.ENDC} {msg}")
sys.exit(1)
def binaryToFloat(num, t):
if t == "f32":
return struct.unpack('!f', struct.pack('!L', int(num)))[0]
elif t == "f64":
return struct.unpack('!d', struct.pack('!Q', int(num)))[0]
else:
fatal(f"Unknown type '{t}'")
def escape(s):
c = ord(s)
if c < 128 and s.isprintable() and not s in " \n\r\t\\":
return s
if c <= 0xff:
return r'\x{0:02x}'.format(c)
elif c <= 0xffff:
return r'\u{0:04x}'.format(c)
else:
return r'\U{0:08x}'.format(c)
def escape_str(s):
if s == "":
return r'\x00'
return ''.join(escape(c) for c in s)
#
# Value format options
#
def formatValueRaw(num, t):
return str(num)
def formatValueHex(num, t):
if t == "f32" or t == "i32":
return "{0:#0{1}x}".format(int(num), 8+2)
elif t == "f64" or t == "i64":
return "{0:#0{1}x}".format(int(num), 16+2)
else:
return str(num)
def formatValueFloat(num, t):
if t == "f32":
s = 6
elif t == "f64":
s = 10
else:
return str(num)
result = "{0:.{1}f}".format(binaryToFloat(num, t), s).rstrip('0')
if result.endswith('.'): result = result + '0'
if len(result) > s*2:
result = "{0:.{1}e}".format(binaryToFloat(num, t), s)
return result
formaters = {
'raw': formatValueRaw,
'hex': formatValueHex,
'fp': formatValueFloat,
}
formatValue = formaters[args.format]
if args.format == "fp":
print("When using fp display format, values are compared loosely (some tests may produce false positives)")
#
# Spec tests preparation
#
if not (os.path.isdir("./core") and os.path.isdir("./proposals")):
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
officialSpec = "https://github.com/wasm3/wasm-core-testsuite/archive/master.zip"
print(f"Downloading {officialSpec}")
resp = urlopen(officialSpec)
with ZipFile(BytesIO(resp.read())) as zipFile:
for zipInfo in zipFile.infolist():
if re.match(r".*-master/.*/.*(\.wasm|\.json)", zipInfo.filename):
parts = pathlib.Path(zipInfo.filename).parts
newpath = str(pathlib.Path(*parts[1:-1]))
newfn = str(pathlib.Path(*parts[-1:]))
ensure_path(newpath)
newpath = newpath + "/" + newfn
zipInfo.filename = newpath
zipFile.extract(zipInfo)
#
# Wasm3 REPL
#
from subprocess import Popen, STDOUT, PIPE
from threading import Thread
from queue import Queue, Empty
import shlex
def get_engine_cmd(engine, exe):
if engine:
cmd = shlex.split(engine)
if "wasirun" in engine or "wasm3" in engine:
return cmd + [exe, "--repl"]
elif "wasmer" in engine:
return cmd + ["--dir=.", exe, "--", "--repl"]
elif "wasmtime" in engine:
return cmd + ["--dir=.", exe, "--", "--repl"]
elif "iwasm" in engine:
return cmd + ["--dir=.", exe, "--repl"]
elif "wavm" in engine:
return cmd + ["--mount-root", ".", exe, "--repl"] # TODO, fix path
else:
fatal(f"Don't know how to run engine {engine}")
else:
if exe.endswith(".wasm"):
fatal(f"Need engine to execute wasm")
return shlex.split(exe) + ["--repl"]
class Wasm3():
def __init__(self, exe, engine=None):
self.exe = exe
self.engine = engine
self.p = None
self.loaded = None
self.timeout = args.timeout
self.autorestart = True
self.run()
def run(self):
if self.p:
self.terminate()
cmd = get_engine_cmd(self.engine, self.exe)
#print(f"wasm3: Starting {' '.join(cmd)}")
self.q = Queue()
self.p = Popen(cmd, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
def _read_output(out, queue):
for data in iter(lambda: out.read(1024), b''):
queue.put(data)
queue.put(None)
self.t = Thread(target=_read_output, args=(self.p.stdout, self.q))
self.t.daemon = True
self.t.start()
try:
self._read_until("wasm3> ")
except Exception as e:
print(f"wasm3: Could not start: {e}")
def restart(self):
print(f"wasm3: Restarting")
for i in range(10):
try:
self.run()
try:
if self.loaded:
self.load(self.loaded)
except Exception as e:
pass
break
except Exception as e:
print(f"wasm3: {e} => retry")
time.sleep(0.1)
def init(self):
return self._run_cmd(f":init\n")
def version(self):
return self._run_cmd(f":version\n")
def load(self, fn):
self.loaded = None
res = self._run_cmd(f":load {fn}\n")
self.loaded = fn
return res
def invoke(self, cmd):
return self._run_cmd(" ".join(map(str, cmd)) + "\n")
def _run_cmd(self, cmd):
if self.autorestart and not self._is_running():
self.restart()
self._flush_input()
#print(f"wasm3: {cmd.strip()}")
self._write(cmd)
return self._read_until("wasm3> ")
def _read_until(self, token):
buff = ""
tout = time.time() + self.timeout
error = None
while time.time() < tout:
try:
data = self.q.get(timeout=0.1)
if data == None:
error = "Crashed"
break
buff = buff + data.decode("utf-8")
idx = buff.rfind(token)
if idx >= 0:
return buff[0:idx].strip()
except Empty:
pass
else:
error = "Timeout"
self.terminate()
raise Exception(error)
def _write(self, data):
self.p.stdin.write(data.encode("utf-8"))
self.p.stdin.flush()
def _is_running(self):
return self.p and (self.p.poll() == None)
def _flush_input(self):
while not self.q.empty():
self.q.get()
def terminate(self):
self.p.stdin.close()
self.p.terminate()
self.p.wait(timeout=1.0)
self.p = None
#
# Actual test
#
wasm3 = Wasm3(args.exec, args.engine)
print("Version: " + wasm3.version())
blacklist = Blacklist([
"float_exprs.wast:* f32.nonarithmetic_nan_bitpattern*",
"imports.wast:*",
"names.wast:630 *", # name that starts with '\0'
])
stats = dotdict(total_run=0, skipped=0, failed=0, crashed=0, timeout=0, success=0, missing=0)
# Convert some trap names from the original spec
trapmap = {
"unreachable": "unreachable executed"
}
def runInvoke(test):
test.cmd = [test.action.field]
displayArgs = []
for arg in test.action.args:
test.cmd.append(arg['value'])
displayArgs.append(formatValue(arg['value'], arg['type']))
test_id = f"{test.source} {test.wasm} {test.cmd[0]}({', '.join(test.cmd[1:])})"
if test_id in blacklist and not args.all:
warning(f"Skipped {test_id} (blacklisted)")
stats.skipped += 1
return
if args.verbose:
print(f"Running {test_id}")
stats.total_run += 1
output = ""
actual = None
actual_val = None
force_fail = False
try:
output = wasm3.invoke(test.cmd)
except Exception as e:
actual = f"<{e}>"
force_fail = True
# Parse the actual output
if not actual:
result = re.findall(r'Result: (.*?)$', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "result " + result[-1]
actual_val = result[0]
if not actual:
result = re.findall(r'Error: \[trap\] (.*?) \(', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "trap " + result[-1]
if not actual:
result = re.findall(r'Error: (.*?)$', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "error " + result[-1]
if not actual:
actual = "<No Result>"
force_fail = True
if actual == "error no operation ()":
actual = "<Not Implemented>"
stats.missing += 1
force_fail = True
elif actual == "<Crashed>":
stats.crashed += 1
force_fail = True
elif actual == "<Timeout>":
stats.timeout += 1
force_fail = True
# Prepare the expected result
expect = None
if "expected" in test:
if len(test.expected) == 0:
expect = "result <Empty Stack>"
elif len(test.expected) == 1:
t = test.expected[0]['type']
value = str(test.expected[0]['value'])
expect = "result " + value
if actual_val != None:
if (t == "f32" or t == "f64") and (value == "<Canonical NaN>" or value == "<Arithmetic NaN>"):
val = binaryToFloat(actual_val, t)
#warning(f"{actual_val} => {val}")
if math.isnan(val):
actual = "<Some NaN>"
expect = "<Some NaN>"
else:
expect = "result " + formatValue(value, t)
actual = "result " + formatValue(actual_val, t)
else:
warning(f"Test {test.source} specifies multiple results")
expect = "result <Multiple>"
elif "expected_trap" in test:
if test.expected_trap in trapmap:
test.expected_trap = trapmap[test.expected_trap]
expect = "trap " + str(test.expected_trap)
elif "expected_anything" in test:
expect = "<Anything>"
else:
expect = "<Unknown>"
def showTestResult():
print(" ----------------------")
print(f"Test: {ansi.HEADER}{test_id}{ansi.ENDC}")
print(f"Args: {', '.join(displayArgs)}")
print(f"Expected: {ansi.OKGREEN}{expect}{ansi.ENDC}")
print(f"Actual: {ansi.WARNING}{actual}{ansi.ENDC}")
if args.show_logs and len(output):
print(f"Log:")
print(output)
log.write(f"{test.source}\t|\t{test.wasm} {test.action.field}({', '.join(displayArgs)})\t=>\t\t")
if actual == expect or (expect == "<Anything>" and not force_fail):
stats.success += 1
log.write(f"OK: {actual}\n")
if args.line:
showTestResult()
else:
stats.failed += 1
log.write(f"FAIL: {actual}, should be: {expect}\n")
if args.silent: return
showTestResult()
#sys.exit(1)
if args.file:
jsonFiles = args.file
else:
jsonFiles = glob.glob(os.path.join(".", "core", "*.json"))
jsonFiles = list(map(lambda x: os.path.relpath(x, scriptDir), jsonFiles))
jsonFiles.sort()
for fn in jsonFiles:
with open(fn) as f:
data = json.load(f)
wast_source = filename(data["source_filename"])
wasm_module = ""
print(f"Running {fn}")
wasm3.init()
for cmd in data["commands"]:
test = dotdict()
test.line = int(cmd["line"])
test.source = wast_source + ":" + str(test.line)
test.wasm = wasm_module
test.type = cmd["type"]
if test.type == "module":
wasm_module = cmd["filename"]
if args.verbose:
print(f"Loading {wasm_module}")
try:
wasm_fn = os.path.join(pathname(fn), wasm_module)
wasm3.load(wasm_fn)
except Exception as e:
pass #fatal(str(e))
elif ( test.type == "action" or
test.type == "assert_return" or
test.type == "assert_trap" or
test.type == "assert_exhaustion" or
test.type == "assert_return_canonical_nan" or
test.type == "assert_return_arithmetic_nan"):
if args.line and test.line != args.line:
continue
if test.type == "action":
test.expected_anything = True
elif test.type == "assert_return":
test.expected = cmd["expected"]
elif test.type == "assert_return_canonical_nan":
test.expected = cmd["expected"]
test.expected[0]["value"] = "<Canonical NaN>"
elif test.type == "assert_return_arithmetic_nan":
test.expected = cmd["expected"]
test.expected[0]["value"] = "<Arithmetic NaN>"
elif test.type == "assert_trap":
test.expected_trap = cmd["text"]
elif test.type == "assert_exhaustion":
test.expected_trap = "stack overflow"
else:
stats.skipped += 1
warning(f"Skipped {test.source} ({test.type} not implemented)")
continue
test.action = dotdict(cmd["action"])
if test.action.type == "invoke":
# TODO: invoking in modules not implemented
if test.action.module:
stats.skipped += 1
warning(f"Skipped {test.source} (invoke in module)")
continue
test.action.field = escape_str(test.action.field)
runInvoke(test)
else:
stats.skipped += 1
warning(f"Skipped {test.source} (unknown action type '{test.action.type}')")
# These are irrelevant
elif (test.type == "assert_invalid" or
test.type == "assert_malformed" or
test.type == "assert_uninstantiable"):
pass
# Others - report as skipped
else:
stats.skipped += 1
warning(f"Skipped {test.source} ('{test.type}' not implemented)")
if (stats.failed + stats.success) != stats.total_run:
warning("Statistics summary invalid")
pprint(stats)
if stats.failed > 0:
failed = (stats.failed*100)/stats.total_run
print(f"{ansi.FAIL}=======================")
print(f" FAILED: {failed:.2f}%")
if stats.crashed > 0:
print(f" Crashed: {stats.crashed}")
print(f"======================={ansi.ENDC}")
sys.exit(1)
elif stats.success > 0:
print(f"{ansi.OKGREEN}=======================")
print(f" {stats.success}/{stats.total_run} tests OK")
if stats.skipped > 0:
print(f"{ansi.WARNING} ({stats.skipped} tests skipped){ansi.OKGREEN}")
print(f"======================={ansi.ENDC}")
|
#!/usr/bin/env python3
"""
Project title: CollembolAI
Authors: Stephan Weißbach, Stanislav Sys, Clément Schneider
Original repository: https://github.com/stasys-hub/Collembola_AI.git
Module title: output_inference_images.py
Purpose: draws bounding boxes from annotations on pictures. If provided with
         ground truth, it will also specify the correctness of predictions
Dependencies: See ReadMe
Last Update: 18.02.2022
"""
from PIL import Image, ImageFont, ImageDraw
import os
from utils.cocoutils import coco2df
def draw_coco_bbox(
coco,
out_dir,
coco_dir,
eval_mode=False,
prefix="annotated",
line_width=10,
fontsize=80,
fontYshift=-70,
):
"""
Detectron2 module for writing annotated pictures was not so explicit to me, and default output not so pretty.
This function will draw the annotation on the pictures of a coco dataset. The dataset can be provided as a coco instance,
or as a dataframe resulting from coco2df. Modified pictures are written to the out_dir, with a name prefix.
To adjust display, simply change line_width (= box line), font_size (= label font). Labels text can be shifted vertically
with fontYshift.
"""
# define some colors for bounding boxes
with open(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "colors.txt"), "r"
) as colorfile:
colors = [color.replace("\n", "") for color in colorfile]
Image.MAX_IMAGE_PIXELS = None
fnt = ImageFont.truetype(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "FreeMono.ttf"),
fontsize,
)
    # Convert a COCO instance to a dataframe; if `coco` is already a dataframe
    # (e.g. the output of coco2df), fall back to using it directly.
    try:
        coco_df = coco2df(coco)
    except Exception:
        coco_df = coco
# create label for bounding box
if eval_mode:
coco_df["label"] = [
f"{" ".join(row["category_name"].split("__")[0].split("_"))} {round(row["score"], 2)} {"true detection" if not row["is_false_positive"] else "false detection"}"
for _, row in coco_df.iterrows()
]
else:
coco_df["label"] = [
f"{" ".join(row["category_name"].split("__")[0].split("_"))} {round(row["score"], 2)}"
for _, row in coco_df.iterrows()
]
resh = lambda x: ((x[0], x[1]), (x[0] + x[2], x[1] + x[3]))
coco_df["coordinates"] = coco_df["bbox"].apply(resh)
    # Build a dictionary so that every class maps to one color,
    # cycling through the palette when there are more classes than colors.
colormap = {}
for idx, classlabel in enumerate(coco_df["category_name"].unique()):
colormap[classlabel] = colors[idx % len(colors)]
# add a color column
for idx, row in coco_df.iterrows():
coco_df.loc[idx, "color"] = colormap[row["category_name"]]
for img_name in coco_df.file_name.unique():
source_img = Image.open(f"{coco_dir}/{img_name}")
draw = ImageDraw.Draw(source_img)
for row in coco_df[coco_df["file_name"] == img_name][
["label", "coordinates", "color"]
].values:
draw.rectangle(row[1], outline=row[2], width=line_width)
draw.text(
(row[1][0][0], row[1][0][1] + fontYshift), row[0], font=fnt, fill=row[2]
)
print(f"Writing {out_dir}/{prefix}_{img_name}")
source_img.save(f"{out_dir}/{prefix}_{img_name}", "JPEG")
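
# Usage sketch referenced in the docstring above (an illustration, not part of
# the original module). It fabricates a one-row prediction dataframe for an
# image passed on the command line and draws it. The column names follow what
# draw_coco_bbox() reads; the demo class name, score and box are made up, and
# running it still requires the colors.txt and FreeMono.ttf files shipped next
# to this module.
if __name__ == "__main__":
    import sys
    import pandas as pd

    image_dir, image_name = sys.argv[1], sys.argv[2]
    demo_predictions = pd.DataFrame(
        [
            {
                "file_name": image_name,
                "category_name": "demo_class",
                "score": 0.9,
                "bbox": [50, 50, 200, 150],  # x, y, width, height
            }
        ]
    )
    # coco2df() will typically reject a plain dataframe, so draw_coco_bbox()
    # falls back to using it directly (see the try/except above).
    draw_coco_bbox(demo_predictions, out_dir=".", coco_dir=image_dir, eval_mode=False)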
| #!/usr/bin/env python3
"""
Project title: CollembolAI
Authors: Stephan Weißbach, Stanislav Sys, Clément Schneider
Original repository: https://github.com/stasys-hub/Collembola_AI.git
Module title: output_inference_images
.py
Purpose: draws bounding boxes from annotation on pictures. If provided with
groundtruth, it will also specifiy correctness of predictions
Dependencies: See ReadMe
Last Update: 18.02.2022
"""
from PIL import Image, ImageFont, ImageDraw
import os
from utils.cocoutils import coco2df
def draw_coco_bbox(
coco,
out_dir,
coco_dir,
eval_mode=False,
prefix="annotated",
line_width=10,
fontsize=80,
fontYshift=-70,
):
"""
Detectron2 module for writing annotated pictures was not so explicit to me, and default output not so pretty.
This function will draw the annotation on the pictures of a coco dataset. The dataset can be provided as a coco instance,
or as a dataframe resulting from coco2df. Modified pictures are written to the out_dir, with a name prefix.
To adjust display, simply change line_width (= box line), font_size (= label font). Labels text can be shifted vertically
with fontYshift.
"""
# define some colors for bounding boxes
with open(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "colors.txt"), "r"
) as colorfile:
colors = [color.replace("\n", "") for color in colorfile]
Image.MAX_IMAGE_PIXELS = None
fnt = ImageFont.truetype(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "FreeMono.ttf"),
fontsize,
)
# convert result dataframe to coco
try:
coco_df = coco2df(coco)
except:
coco_df = coco
# create label for bounding box
if eval_mode:
coco_df["label"] = [
f"{' '.join(row['category_name'].split('__')[0].split('_'))} {round(row['score'], 2)} {'true detection' if not row['is_false_positive'] else 'false detection'}"
for _, row in coco_df.iterrows()
]
else:
coco_df["label"] = [
f"{' '.join(row['category_name'].split('__')[0].split('_'))} {round(row['score'], 2)}"
for _, row in coco_df.iterrows()
]
resh = lambda x: ((x[0], x[1]), (x[0] + x[2], x[1] + x[3]))
coco_df["coordinates"] = coco_df["bbox"].apply(resh)
# sample colors randomly
# create dictionary so that every class maps to one color
colormap = {}
for idx, classlabel in enumerate(coco_df["category_name"].unique()):
colormap[classlabel] = colors[idx % len(colors)]
# add a color column
for idx, row in coco_df.iterrows():
coco_df.loc[idx, "color"] = colormap[row["category_name"]]
for img_name in coco_df.file_name.unique():
source_img = Image.open(f"{coco_dir}/{img_name}")
draw = ImageDraw.Draw(source_img)
for row in coco_df[coco_df["file_name"] == img_name][
["label", "coordinates", "color"]
].values:
draw.rectangle(row[1], outline=row[2], width=line_width)
draw.text(
(row[1][0][0], row[1][0][1] + fontYshift), row[0], font=fnt, fill=row[2]
)
print(f"Writing {out_dir}/{prefix}_{img_name}")
source_img.save(f"{out_dir}/{prefix}_{img_name}", "JPEG")
|
import os
import os.path
import json
import pathlib
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import (
KeywordQueryEvent,
ItemEnterEvent,
PreferencesEvent,
PreferencesUpdateEvent,
)
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.item.ExtensionSmallResultItem import ExtensionSmallResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.HideWindowAction import HideWindowAction
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
from fuzzywuzzy import process, fuzz
class Utils:
@staticmethod
def get_path(filename, from_home=False):
base_dir = pathlib.Path.home() if from_home else pathlib.Path(
__file__).parent.absolute()
return os.path.join(base_dir, filename)
class Code:
open_command_paths = ["/opt/vscodium-bin"]
def get_installed_path(self):
for path in self.open_command_paths:
if os.path.exists(path):
return path
return False
def is_installed(self):
return bool(self.installed_path)
def get_recents(self):
recents = []
storage = json.load(
open(Utils.get_path(".config/VSCodium/storage.json", True), "r"))
openedPaths = storage["openedPathsList"]["entries"]
for path in openedPaths:
folder = "folderUri" in path
uri = path["folderUri"] if folder else path["fileUri"]
label = path["label"] if "label" in path else uri.split("/")[-1]
recents.append({
"folder": folder,
"uri": uri,
"label": label
})
return recents
def open_vscode(self, recent):
if not self.is_installed():
return
option = "--folder-uri" if recent["folder"] else "--file-uri"
os.system(f"{self.installed_path} {option} {recent["uri"]}")
def __init__(self):
self.installed_path = self.get_installed_path()
class CodeExtension(Extension):
keyword = None
code = None
def __init__(self):
super(CodeExtension, self).__init__()
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
self.subscribe(ItemEnterEvent, ItemEnterEventListener())
self.subscribe(PreferencesEvent, PreferencesEventListener())
self.subscribe(PreferencesUpdateEvent, PreferencesUpdateEventListener())
self.code = Code()
def get_ext_result_items(self, query):
query = query.lower() if query else ""
recents = self.code.get_recents()
items = []
data = []
label_matches = process.extract(query, choices=map(lambda c: c["label"], recents), limit=20, scorer=fuzz.partial_ratio)
uri_matches = process.extract(query, choices=map(lambda c: c["uri"], recents), limit=20, scorer=fuzz.partial_ratio)
for match in label_matches:
recent = next((c for c in recents if c["label"] == match[0]), None)
if (recent is not None and match[1] > 95):
data.append(recent)
for match in uri_matches:
recent = next((c for c in recents if c["uri"] == match[0]), None)
existing = next((c for c in data if c["uri"] == recent["uri"]), None)
if (recent is not None and existing is None):
data.append(recent)
for recent in data[:20]:
items.append(
ExtensionSmallResultItem(
icon=Utils.get_path(
f"images/{"folder" if recent["folder"] else "file"}.svg"),
name=recent["label"],
on_enter=ExtensionCustomAction(recent),
)
)
return items
class KeywordQueryEventListener(EventListener):
def on_event(self, event, extension):
items = []
if not extension.code.is_installed():
items.append(
ExtensionResultItem(
icon=Utils.get_path("images/icon.svg"),
name="No VSCodium?",
description="Can't find the VSCodium's `codium` command in your system :(",
highlightable=False,
on_enter=HideWindowAction(),
)
)
return RenderResultListAction(items)
argument = event.get_argument() or ""
items.extend(extension.get_ext_result_items(argument))
return RenderResultListAction(items)
class ItemEnterEventListener(EventListener):
def on_event(self, event, extension):
recent = event.get_data()
extension.code.open_vscode(recent)
class PreferencesEventListener(EventListener):
def on_event(self, event, extension):
extension.keyword = event.preferences["code_kw"]
class PreferencesUpdateEventListener(EventListener):
def on_event(self, event, extension):
if event.id == "code_kw":
extension.keyword = event.new_value
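
# Illustrative sketch (not part of the original extension): the recents list
# returned by Code.get_recents() is ranked with fuzzywuzzy's partial-ratio
# scorer, the same way get_ext_result_items() does above. This standalone
# helper shows that matching step on plain dictionaries, without any Ulauncher
# state; the 95-point cutoff mirrors the threshold used in the class.
def _demo_match_recents(query, recents, limit=20, cutoff=95):
    matches = process.extract(
        query,
        choices=[entry["label"] for entry in recents],
        limit=limit,
        scorer=fuzz.partial_ratio,
    )
    ranked = []
    for label, score in matches:
        if score <= cutoff:
            continue
        for entry in recents:
            if entry["label"] == label and entry not in ranked:
                ranked.append(entry)
    return ranked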
if __name__ == "__main__":
CodeExtension().run()
| import os
import os.path
import json
import pathlib
from types import prepare_class
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import (
KeywordQueryEvent,
ItemEnterEvent,
PreferencesEvent,
PreferencesUpdateEvent,
)
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.item.ExtensionSmallResultItem import ExtensionSmallResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.HideWindowAction import HideWindowAction
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
from fuzzywuzzy import process, fuzz
class Utils:
@staticmethod
def get_path(filename, from_home=False):
base_dir = pathlib.Path.home() if from_home else pathlib.Path(
__file__).parent.absolute()
return os.path.join(base_dir, filename)
class Code:
open_command_paths = ["/opt/vscodium-bin"]
def get_installed_path(self):
for path in self.open_command_paths:
if os.path.exists(path):
return path
return False
def is_installed(self):
return bool(self.installed_path)
def get_recents(self):
recents = []
storage = json.load(
open(Utils.get_path(".config/VSCodium/storage.json", True), "r"))
openedPaths = storage["openedPathsList"]["entries"]
for path in openedPaths:
folder = "folderUri" in path
uri = path["folderUri"] if folder else path["fileUri"]
label = path["label"] if "label" in path else uri.split("/")[-1]
recents.append({
"folder": folder,
"uri": uri,
"label": label
})
return recents
def open_vscode(self, recent):
if not self.is_installed():
return
option = "--folder-uri" if recent["folder"] else "--file-uri"
os.system(f"{self.installed_path} {option} {recent['uri']}")
def __init__(self):
self.installed_path = self.get_installed_path()
class CodeExtension(Extension):
keyword = None
code = None
def __init__(self):
super(CodeExtension, self).__init__()
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
self.subscribe(ItemEnterEvent, ItemEnterEventListener())
self.subscribe(PreferencesEvent, PreferencesEventListener())
self.subscribe(PreferencesUpdateEvent, PreferencesUpdateEventListener())
self.code = Code()
def get_ext_result_items(self, query):
query = query.lower() if query else ""
recents = self.code.get_recents()
items = []
data = []
label_matches = process.extract(query, choices=map(lambda c: c["label"], recents), limit=20, scorer=fuzz.partial_ratio)
uri_matches = process.extract(query, choices=map(lambda c: c["uri"], recents), limit=20, scorer=fuzz.partial_ratio)
for match in label_matches:
recent = next((c for c in recents if c["label"] == match[0]), None)
if (recent is not None and match[1] > 95):
data.append(recent)
for match in uri_matches:
recent = next((c for c in recents if c["uri"] == match[0]), None)
existing = next((c for c in data if c["uri"] == recent["uri"]), None)
if (recent is not None and existing is None):
data.append(recent)
for recent in data[:20]:
items.append(
ExtensionSmallResultItem(
icon=Utils.get_path(
f"images/{'folder' if recent['folder'] else 'file'}.svg"),
name=recent["label"],
on_enter=ExtensionCustomAction(recent),
)
)
return items
class KeywordQueryEventListener(EventListener):
def on_event(self, event, extension):
items = []
if not extension.code.is_installed():
items.append(
ExtensionResultItem(
icon=Utils.get_path("images/icon.svg"),
name="No VSCodium?",
description="Can't find the VSCodium's `codium` command in your system :(",
highlightable=False,
on_enter=HideWindowAction(),
)
)
return RenderResultListAction(items)
argument = event.get_argument() or ""
items.extend(extension.get_ext_result_items(argument))
return RenderResultListAction(items)
class ItemEnterEventListener(EventListener):
def on_event(self, event, extension):
recent = event.get_data()
extension.code.open_vscode(recent)
class PreferencesEventListener(EventListener):
def on_event(self, event, extension):
extension.keyword = event.preferences["code_kw"]
class PreferencesUpdateEventListener(EventListener):
def on_event(self, event, extension):
if event.id == "code_kw":
extension.keyword = event.new_value
if __name__ == "__main__":
CodeExtension().run()
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Optional, Union
import os
import shutil
import tempfile
from synthtool import _tracked_paths, metadata, shell
from synthtool.log import logger
from synthtool.sources import git
GOOGLEAPIS_URL: str = git.make_repo_clone_url("googleapis/googleapis")
GOOGLEAPIS_PRIVATE_URL: str = git.make_repo_clone_url("googleapis/googleapis-private")
DISCOVERY_ARTIFACT_MANAGER_URL: str = git.make_repo_clone_url(
"googleapis/discovery-artifact-manager"
)
LOCAL_GOOGLEAPIS: Optional[str] = os.environ.get("SYNTHTOOL_GOOGLEAPIS")
LOCAL_DISCOVERY_ARTIFACT_MANAGER: Optional[str] = os.environ.get(
"SYNTHTOOL_DISCOVERY_ARTIFACT_MANAGER"
)
class GAPICBazel:
"""A synthtool component that can produce libraries using bazel build.
"""
def __init__(self):
self._ensure_dependencies_installed()
self._googleapis = None
self._googleapis_private = None
self._discovery_artifact_manager = None
def py_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "python", **kwargs)
def go_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "go", **kwargs)
def node_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "nodejs", **kwargs)
def csharp_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "csharp", **kwargs)
def php_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "php", **kwargs)
def java_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "java", **kwargs)
def ruby_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "ruby", **kwargs)
def _generate_code(
self,
service: str,
version: str,
language: str,
*,
private: bool = False,
discogapic: bool = False,
proto_path: Union[str, Path] = None,
output_dir: Union[str, Path] = None,
bazel_target: str = None,
include_protos: bool = False,
proto_output_path: Union[str, Path] = None,
):
# Determine which googleapis repo to use
if discogapic:
api_definitions_repo = self._clone_discovery_artifact_manager()
api_definitions_repo_name = "discovery-artifact-manager"
elif private:
api_definitions_repo = self._clone_googleapis_private()
api_definitions_repo_name = "googleapis_private"
else:
api_definitions_repo = self._clone_googleapis()
api_definitions_repo_name = "googleapis"
# Sanity check: We should have a googleapis repo; if we do not,
# something went wrong, and we should abort.
if not api_definitions_repo:
raise RuntimeError(
f"Unable to generate {service}, the sources repository repository"
"is unavailable."
)
# Calculate proto_path if necessary.
if not bazel_target or include_protos:
# If bazel_target is not specified explicitly, we will need
# proto_path to calculate it. If include_protos is True,
# we will need the proto_path to copy the protos.
if not proto_path:
if bazel_target:
# Calculate proto_path from the full bazel target, which is
                    # in the format "//proto_path:target_name".
proto_path = bazel_target.split(":")[0][2:]
else:
# If bazel_target is not specified, assume the protos are
                    # simply under google/cloud, where most of the protos
# usually are.
proto_path = f"google/cloud/{service}/{version}"
protos = Path(proto_path)
if protos.is_absolute():
protos = protos.relative_to("/")
# Determine bazel target based on per-language patterns
# Java: google-cloud-{{assembly_name}}-{{version}}-java
# Go: gapi-cloud-{{assembly_name}}-{{version}}-go
# Python: {{assembly_name}}-{{version}}-py
# PHP: google-cloud-{{assembly_name}}-{{version}}-php
# Node.js: {{assembly_name}}-{{version}}-nodejs
# Ruby: google-cloud-{{assembly_name}}-{{version}}-ruby
# C#: google-cloud-{{assembly_name}}-{{version}}-csharp
if not bazel_target:
# Determine where the protos we are generating actually live.
# We can sometimes (but not always) determine this from the service
# and version; in other cases, the user must provide it outright.
parts = list(protos.parts)
while len(parts) > 0 and parts[0] != "google":
parts.pop(0)
if len(parts) == 0:
raise RuntimeError(
f"Cannot determine bazel_target from proto_path {protos}."
"Please set bazel_target explicitly."
)
if language == "python":
suffix = f"{service}-{version}-py"
elif language == "nodejs":
suffix = f"{service}-{version}-nodejs"
elif language == "go":
suffix = f"gapi-{"-".join(parts[1:])}-go"
else:
suffix = f"{"-".join(parts)}-{language}"
bazel_target = f"//{os.path.sep.join(parts)}:{suffix}"
# Sanity check: Do we have protos where we think we should?
if not (api_definitions_repo / protos).exists():
raise FileNotFoundError(
f"Unable to find directory for protos: {(api_definitions_repo / protos)}."
)
if not tuple((api_definitions_repo / protos).glob("*.proto")):
raise FileNotFoundError(
f"Directory {(api_definitions_repo / protos)} exists, but no protos found."
)
        if not (api_definitions_repo / protos / "BUILD.bazel").exists():
raise FileNotFoundError(
f"File {(api_definitions_repo / protos / "BUILD.bazel")} does not exist."
)
# Ensure the desired output directory exists.
# If none was provided, create a temporary directory.
if not output_dir:
output_dir = tempfile.mkdtemp()
output_dir = Path(output_dir).resolve()
# Let's build some stuff now.
cwd = os.getcwd()
os.chdir(str(api_definitions_repo))
bazel_run_args = [
"bazel",
"--max_idle_secs=240",
"build",
bazel_target,
]
logger.debug(f"Generating code for: {bazel_target}.")
shell.run(bazel_run_args)
        # We've got a tar file!
# its location: bazel-bin/google/cloud/language/v1/language-v1-nodejs.tar.gz
# bazel_target: //google/cloud/language/v1:language-v1-nodejs
tar_file = (
f"bazel-bin{os.path.sep}{bazel_target[2:].replace(":", os.path.sep)}.tar.gz"
)
tar_run_args = [
"tar",
"-C",
str(output_dir),
"--strip-components=1",
"-xzf",
tar_file,
]
shell.run(tar_run_args)
# Get the *.protos files and put them in a protos dir in the output
if include_protos:
proto_files = protos.glob("**/*.proto")
# By default, put the protos at the root in a folder named 'protos'.
# Specific languages can be cased here to put them in a more language
# appropriate place.
if not proto_output_path:
proto_output_path = output_dir / "protos"
if language == "python":
                    # place protos alongside the *_pb2.py files
proto_output_path = (
output_dir / f"google/cloud/{service}_{version}/proto"
)
else:
proto_output_path = Path(output_dir / proto_output_path)
os.makedirs(proto_output_path, exist_ok=True)
for i in proto_files:
logger.debug(f"Copy: {i} to {proto_output_path / i.name}")
shutil.copyfile(i, proto_output_path / i.name)
logger.success(f"Placed proto files into {proto_output_path}.")
os.chdir(cwd)
# Sanity check: Does the output location have code in it?
# If not, complain.
if not tuple(output_dir.iterdir()):
raise RuntimeError(
f"Code generation seemed to succeed, but {output_dir} is empty."
)
# Huzzah, it worked.
logger.success(f"Generated code into {output_dir}.")
# Record this in the synthtool metadata.
metadata.add_client_destination(
source=api_definitions_repo_name,
api_name=service,
api_version=version,
language=language,
generator="bazel",
)
_tracked_paths.add(output_dir)
return output_dir
def _clone_googleapis(self):
if self._googleapis:
return self._googleapis
if LOCAL_GOOGLEAPIS:
self._googleapis = Path(LOCAL_GOOGLEAPIS).expanduser()
logger.debug(f"Using local googleapis at {self._googleapis}")
else:
logger.debug("Cloning googleapis.")
self._googleapis = git.clone(GOOGLEAPIS_URL)
return self._googleapis
def _clone_googleapis_private(self):
if self._googleapis_private:
return self._googleapis_private
if LOCAL_GOOGLEAPIS:
self._googleapis_private = Path(LOCAL_GOOGLEAPIS).expanduser()
logger.debug(
f"Using local googleapis at {self._googleapis_private} for googleapis-private"
)
else:
logger.debug("Cloning googleapis-private.")
self._googleapis_private = git.clone(GOOGLEAPIS_PRIVATE_URL)
return self._googleapis_private
def _clone_discovery_artifact_manager(self):
if self._discovery_artifact_manager:
return self._discovery_artifact_manager
if LOCAL_DISCOVERY_ARTIFACT_MANAGER:
self._discovery_artifact_manager = Path(
LOCAL_DISCOVERY_ARTIFACT_MANAGER
).expanduser()
logger.debug(
f"Using local discovery_artifact_manager at {self._discovery_artifact_manager} for googleapis-private"
)
else:
logger.debug("Cloning discovery-artifact-manager.")
self._discovery_artifact_manager = git.clone(DISCOVERY_ARTIFACT_MANAGER_URL)
return self._discovery_artifact_manager
def _ensure_dependencies_installed(self):
logger.debug("Ensuring dependencies.")
dependencies = ["bazel", "zip", "unzip", "tar"]
failed_dependencies = []
for dependency in dependencies:
return_code = shell.run(["which", dependency], check=False).returncode
if return_code:
failed_dependencies.append(dependency)
if failed_dependencies:
raise EnvironmentError(
f"Dependencies missing: {", ".join(failed_dependencies)}"
)
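
# Usage sketch referenced in the GAPICBazel docstring above. It mirrors how a
# synth.py script would typically drive this class, but the service name,
# version and bazel_target are illustrative assumptions, and actually running
# it requires git, bazel and network access to clone googleapis.
def _demo_generate_python_client():
    gapic = GAPICBazel()
    # The Python target follows the "{assembly_name}-{version}-py" pattern
    # listed in _generate_code(); passing bazel_target explicitly skips the
    # proto_path-based guessing.
    return gapic.py_library(
        service="language",
        version="v1",
        bazel_target="//google/cloud/language/v1:language-v1-py",
    )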
| # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Optional, Union
import os
import shutil
import tempfile
from synthtool import _tracked_paths, metadata, shell
from synthtool.log import logger
from synthtool.sources import git
GOOGLEAPIS_URL: str = git.make_repo_clone_url("googleapis/googleapis")
GOOGLEAPIS_PRIVATE_URL: str = git.make_repo_clone_url("googleapis/googleapis-private")
DISCOVERY_ARTIFACT_MANAGER_URL: str = git.make_repo_clone_url(
"googleapis/discovery-artifact-manager"
)
LOCAL_GOOGLEAPIS: Optional[str] = os.environ.get("SYNTHTOOL_GOOGLEAPIS")
LOCAL_DISCOVERY_ARTIFACT_MANAGER: Optional[str] = os.environ.get(
"SYNTHTOOL_DISCOVERY_ARTIFACT_MANAGER"
)
class GAPICBazel:
"""A synthtool component that can produce libraries using bazel build.
"""
def __init__(self):
self._ensure_dependencies_installed()
self._googleapis = None
self._googleapis_private = None
self._discovery_artifact_manager = None
def py_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "python", **kwargs)
def go_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "go", **kwargs)
def node_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "nodejs", **kwargs)
def csharp_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "csharp", **kwargs)
def php_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "php", **kwargs)
def java_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "java", **kwargs)
def ruby_library(self, service: str, version: str, **kwargs) -> Path:
return self._generate_code(service, version, "ruby", **kwargs)
def _generate_code(
self,
service: str,
version: str,
language: str,
*,
private: bool = False,
discogapic: bool = False,
proto_path: Union[str, Path] = None,
output_dir: Union[str, Path] = None,
bazel_target: str = None,
include_protos: bool = False,
proto_output_path: Union[str, Path] = None,
):
# Determine which googleapis repo to use
if discogapic:
api_definitions_repo = self._clone_discovery_artifact_manager()
api_definitions_repo_name = "discovery-artifact-manager"
elif private:
api_definitions_repo = self._clone_googleapis_private()
api_definitions_repo_name = "googleapis_private"
else:
api_definitions_repo = self._clone_googleapis()
api_definitions_repo_name = "googleapis"
# Sanity check: We should have a googleapis repo; if we do not,
# something went wrong, and we should abort.
if not api_definitions_repo:
raise RuntimeError(
f"Unable to generate {service}, the sources repository repository"
"is unavailable."
)
# Calculate proto_path if necessary.
if not bazel_target or include_protos:
# If bazel_target is not specified explicitly, we will need
# proto_path to calculate it. If include_protos is True,
# we will need the proto_path to copy the protos.
if not proto_path:
if bazel_target:
# Calculate proto_path from the full bazel target, which is
# in the format "//proto_path:target_name
proto_path = bazel_target.split(":")[0][2:]
else:
# If bazel_target is not specified, assume the protos are
# simply under google/cloud, where the most of the protos
# usually are.
proto_path = f"google/cloud/{service}/{version}"
protos = Path(proto_path)
if protos.is_absolute():
protos = protos.relative_to("/")
# Determine bazel target based on per-language patterns
# Java: google-cloud-{{assembly_name}}-{{version}}-java
# Go: gapi-cloud-{{assembly_name}}-{{version}}-go
# Python: {{assembly_name}}-{{version}}-py
# PHP: google-cloud-{{assembly_name}}-{{version}}-php
# Node.js: {{assembly_name}}-{{version}}-nodejs
# Ruby: google-cloud-{{assembly_name}}-{{version}}-ruby
# C#: google-cloud-{{assembly_name}}-{{version}}-csharp
if not bazel_target:
# Determine where the protos we are generating actually live.
# We can sometimes (but not always) determine this from the service
# and version; in other cases, the user must provide it outright.
parts = list(protos.parts)
while len(parts) > 0 and parts[0] != "google":
parts.pop(0)
if len(parts) == 0:
raise RuntimeError(
f"Cannot determine bazel_target from proto_path {protos}."
"Please set bazel_target explicitly."
)
if language == "python":
suffix = f"{service}-{version}-py"
elif language == "nodejs":
suffix = f"{service}-{version}-nodejs"
elif language == "go":
suffix = f"gapi-{'-'.join(parts[1:])}-go"
else:
suffix = f"{'-'.join(parts)}-{language}"
bazel_target = f"//{os.path.sep.join(parts)}:{suffix}"
# Sanity check: Do we have protos where we think we should?
if not (api_definitions_repo / protos).exists():
raise FileNotFoundError(
f"Unable to find directory for protos: {(api_definitions_repo / protos)}."
)
if not tuple((api_definitions_repo / protos).glob("*.proto")):
raise FileNotFoundError(
f"Directory {(api_definitions_repo / protos)} exists, but no protos found."
)
if not (api_definitions_repo / protos / "BUILD.bazel"):
raise FileNotFoundError(
f"File {(api_definitions_repo / protos / 'BUILD.bazel')} does not exist."
)
# Ensure the desired output directory exists.
# If none was provided, create a temporary directory.
if not output_dir:
output_dir = tempfile.mkdtemp()
output_dir = Path(output_dir).resolve()
# Let's build some stuff now.
cwd = os.getcwd()
os.chdir(str(api_definitions_repo))
bazel_run_args = [
"bazel",
"--max_idle_secs=240",
"build",
bazel_target,
]
logger.debug(f"Generating code for: {bazel_target}.")
shell.run(bazel_run_args)
# We've got tar file!
# its location: bazel-bin/google/cloud/language/v1/language-v1-nodejs.tar.gz
# bazel_target: //google/cloud/language/v1:language-v1-nodejs
tar_file = (
f"bazel-bin{os.path.sep}{bazel_target[2:].replace(':', os.path.sep)}.tar.gz"
)
tar_run_args = [
"tar",
"-C",
str(output_dir),
"--strip-components=1",
"-xzf",
tar_file,
]
shell.run(tar_run_args)
# Get the *.protos files and put them in a protos dir in the output
if include_protos:
proto_files = protos.glob("**/*.proto")
# By default, put the protos at the root in a folder named 'protos'.
# Specific languages can be cased here to put them in a more language
# appropriate place.
if not proto_output_path:
proto_output_path = output_dir / "protos"
if language == "python":
# place protos alongsize the *_pb2.py files
proto_output_path = (
output_dir / f"google/cloud/{service}_{version}/proto"
)
else:
proto_output_path = Path(output_dir / proto_output_path)
os.makedirs(proto_output_path, exist_ok=True)
for i in proto_files:
logger.debug(f"Copy: {i} to {proto_output_path / i.name}")
shutil.copyfile(i, proto_output_path / i.name)
logger.success(f"Placed proto files into {proto_output_path}.")
os.chdir(cwd)
# Sanity check: Does the output location have code in it?
# If not, complain.
if not tuple(output_dir.iterdir()):
raise RuntimeError(
f"Code generation seemed to succeed, but {output_dir} is empty."
)
# Huzzah, it worked.
logger.success(f"Generated code into {output_dir}.")
# Record this in the synthtool metadata.
metadata.add_client_destination(
source=api_definitions_repo_name,
api_name=service,
api_version=version,
language=language,
generator="bazel",
)
_tracked_paths.add(output_dir)
return output_dir
def _clone_googleapis(self):
if self._googleapis:
return self._googleapis
if LOCAL_GOOGLEAPIS:
self._googleapis = Path(LOCAL_GOOGLEAPIS).expanduser()
logger.debug(f"Using local googleapis at {self._googleapis}")
else:
logger.debug("Cloning googleapis.")
self._googleapis = git.clone(GOOGLEAPIS_URL)
return self._googleapis
def _clone_googleapis_private(self):
if self._googleapis_private:
return self._googleapis_private
if LOCAL_GOOGLEAPIS:
self._googleapis_private = Path(LOCAL_GOOGLEAPIS).expanduser()
logger.debug(
f"Using local googleapis at {self._googleapis_private} for googleapis-private"
)
else:
logger.debug("Cloning googleapis-private.")
self._googleapis_private = git.clone(GOOGLEAPIS_PRIVATE_URL)
return self._googleapis_private
def _clone_discovery_artifact_manager(self):
if self._discovery_artifact_manager:
return self._discovery_artifact_manager
if LOCAL_DISCOVERY_ARTIFACT_MANAGER:
self._discovery_artifact_manager = Path(
LOCAL_DISCOVERY_ARTIFACT_MANAGER
).expanduser()
logger.debug(
f"Using local discovery_artifact_manager at {self._discovery_artifact_manager} for googleapis-private"
)
else:
logger.debug("Cloning discovery-artifact-manager.")
self._discovery_artifact_manager = git.clone(DISCOVERY_ARTIFACT_MANAGER_URL)
return self._discovery_artifact_manager
def _ensure_dependencies_installed(self):
logger.debug("Ensuring dependencies.")
dependencies = ["bazel", "zip", "unzip", "tar"]
failed_dependencies = []
for dependency in dependencies:
return_code = shell.run(["which", dependency], check=False).returncode
if return_code:
failed_dependencies.append(dependency)
if failed_dependencies:
raise EnvironmentError(
f"Dependencies missing: {', '.join(failed_dependencies)}"
)
|
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import OrderedDict
from typing import Optional, Any, List
class Config:
indent: int = 4
line_break: str = os.linesep
def indent(level: int) -> str:
return ' ' * Config.indent * level
def indent_line(line: str, level: int) -> str:
return f"{indent(level)}{line}" if line else ''
def indent_code(code_lines: List[str], level: int = 0) -> str:
lines = []
for code in code_lines:
lines.extend(code.split(Config.line_break) if code is not None else [])
return Config.line_break.join([indent_line(line, level) for line in lines])
def empty_lines(num: int):
return ''.join([Config.line_break] * num)
class Item:
def __init__(self, name: str, description: Optional[str]):
self.name: str = name
self.description: Optional[str] = description
def to_description(self, level: int) -> Optional[str]:
if self.description is not None:
return indent_code([self.description], level)
return None
def to_code(self, level: int) -> str:
raise NotImplementedError
class Property(Item):
def __init__(self,
name: str,
type_str: str,
description: Optional[str] = None,
default: Any = None):
super().__init__(name, description)
self.type_str = type_str
self.default = default
def to_getter_setter_code(self, level) -> str:
"""
Returns: getter and setter functions generated by a property.
"""
name = self.name
lines = [("@property", 0),
(f"def {name}(self):", 0),
(f"return self._{name}", 1),
(empty_lines(0), 0),
(f"def set_{name}(self, {name}: {self.to_code(0)}):", 0),
(f"self.set_fields(_{name}={self.to_field_value()})", 1),
(empty_lines(0), 0)]
return indent_code([indent_line(*line) for line in lines], level)
def to_init_code(self, level: int) -> str:
return indent_line(f"self._{self.name}: {self.to_code(0)} = "
f"{repr(self.default)}", level)
def to_description(self, level: int) -> Optional[str]:
if self.description is not None and self.description.strip() != '':
type_str = f'{self.to_code(0)}'
type_str = f' ({type_str})' if type_str.strip() != '' else type_str
return indent_line(f"{self.name}{type_str}: "
f"{self.description}", level)
return None
def to_field_value(self):
raise NotImplementedError
class ClassAttributeItem(Property):
def to_code(self, level: int = 0) -> str:
return self.type_str
def to_init_code(self, level: int) -> str:
type_code = f'{self.to_code(0)}'
type_ = f': {type_code}' if type_code.strip() != '' else ''
return indent_line(f"{self.name}{type_} = {self.default}", level)
def to_field_value(self):
pass
class BasicItem(Property):
TYPES = {'int', 'float', 'str', 'bool'}
def to_code(self, level: int = 0) -> str:
return f"typing.Optional[{self.type_str}]"
def to_field_value(self):
if self.type_str in self.TYPES:
return self.name
return f"{self.name}.tid"
class CompositeItem(Property):
TYPES = {'List'}
def __init__(self,
name: str,
type_str: str,
item_type: str,
description: Optional[str] = None,
default: Any = None):
super().__init__(name, type_str, description, default)
self.item_type = item_type
def to_code(self, level: int = 0) -> str:
# TODO: Assumes only one type of elements are allowed in the list,
# allow multiple types
# items = list(OrderedDict([(item, None)
# for item in self.items]).keys())
# item_type_str = f"{", ".join(self.item_type)}"
# if len(self.items) > 1:
# item_type_str = f"typing.Union[{item_type_str}]"
return f"typing.Optional[{self.type_str}[{self.item_type}]]"
def to_field_value(self):
item_value_str = BasicItem('item', self.item_type).to_field_value()
return f"[{item_value_str} for item in {self.name}]"
class DefinitionItem(Item):
def __init__(self, name: str,
class_type: str,
init_args: Optional[str] = None,
properties: Optional[List[Property]] = None,
class_attributes: Optional[List[Property]] = None,
description: Optional[str] = None):
super().__init__(name, description)
self.class_type = class_type
self.properties: List[Property] = \
[] if properties is None else properties
self.class_attributes = [] if class_attributes is None \
else class_attributes
self.description = description if description else None
self.init_args = init_args if init_args is not None else ''
self.init_args = self.init_args.replace('=', ' = ')
def to_init_code(self, level: int) -> str:
return indent_line(f"def __init__(self, {self.init_args}):", level)
def to_code(self, level: int) -> str:
super_args = ', '.join([item.split(':')[0].strip()
for item in self.init_args.split(',')])
raw_desc = self.to_description(1)
desc: str = '' if raw_desc is None else raw_desc
lines = [
empty_lines(1),
f"__all__.extend('{self.name}')",
empty_lines(1),
f"class {self.name}({self.class_type}):",
]
lines += [desc] if desc.strip() else []
lines += [item.to_init_code(1) for item in self.class_attributes]
lines += [empty_lines(0)]
lines += [self.to_init_code(1),
indent_line(f"super().__init__({super_args})", 2)]
lines += [item.to_init_code(2) for item in self.properties]
lines += [empty_lines(0)]
lines += [item.to_getter_setter_code(1) for item in self.properties]
return indent_code(lines, level)
@staticmethod
def to_item_descs(items, title):
item_descs = [item.to_description(0) for item in items]
item_descs = [item for item in item_descs if item is not None]
if len(item_descs) > 0:
item_descs = [indent_line(title, 1)] + \
[indent_line(desc, 2) for desc in item_descs]
return item_descs
def to_description(self, level: int) -> Optional[str]:
class_desc = [] if self.description is None else [self.description]
item_descs = self.to_item_descs(self.properties, 'Args:')
att_descs = self.to_item_descs(self.class_attributes, 'Attr:')
descs = class_desc + item_descs + att_descs
if len(descs) == 0:
return ""
quotes = indent_line('"""', 0)
return indent_code([quotes] + descs + [quotes], level)
class FileItem:
def __init__(self,
entry_item: DefinitionItem,
entry_file: str,
ignore_errors: Optional[List[str]],
description: Optional[str],
imports: Optional[List[str]]):
self.description = description
self.ignore_errors = [] if not ignore_errors else ignore_errors
self.imports = [] if not imports else list(set(imports))
self.entry_item = entry_item
self.entry_file_exists = os.path.exists(entry_file)
def to_code(self, level: int) -> str:
lines: List[str] = []
if not self.entry_file_exists:
lines = [self.to_description(0),
self.to_import_code(0),
empty_lines(1), '__all__ = []']
lines.append(self.entry_item.to_code(0))
return indent_code(lines, level)
def to_description(self, level):
quotes = '"""'
lines = self.ignore_errors + [quotes, self.description, quotes]
return indent_code(lines, level)
def to_import_code(self, level):
imports_set: OrderedDict[str] = {}
for import_ in sorted(self.imports):
imports_set[f"import {import_}"] = None
return indent_code(list(imports_set), level)
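
# Illustrative sketch (not part of the original module): compose the classes
# above to render the source of one generated entry class. The entry name,
# base class and property below are made up for demonstration.
def _demo_render_entry() -> str:
    token = DefinitionItem(
        name="Token",
        class_type="Annotation",
        init_args="begin: int, end: int",
        properties=[BasicItem("pos_tag", "str", description="Part-of-speech tag.")],
        description="A demo entry covering a single token.",
    )
    # to_code() stitches together the class header, __init__ and the
    # getter/setter pair produced by Property.to_getter_setter_code().
    return token.to_code(0)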
| # Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import OrderedDict
from typing import Optional, Any, List
class Config:
indent: int = 4
line_break: str = os.linesep
def indent(level: int) -> str:
return ' ' * Config.indent * level
def indent_line(line: str, level: int) -> str:
return f"{indent(level)}{line}" if line else ''
def indent_code(code_lines: List[str], level: int = 0) -> str:
lines = []
for code in code_lines:
lines.extend(code.split(Config.line_break) if code is not None else [])
return Config.line_break.join([indent_line(line, level) for line in lines])
def empty_lines(num: int):
return ''.join([Config.line_break] * num)
class Item:
def __init__(self, name: str, description: Optional[str]):
self.name: str = name
self.description: Optional[str] = description
def to_description(self, level: int) -> Optional[str]:
if self.description is not None:
return indent_code([self.description], level)
return None
def to_code(self, level: int) -> str:
raise NotImplementedError
class Property(Item):
def __init__(self,
name: str,
type_str: str,
description: Optional[str] = None,
default: Any = None):
super().__init__(name, description)
self.type_str = type_str
self.default = default
def to_getter_setter_code(self, level) -> str:
"""
Returns: getter and setter functions generated by a property.
"""
name = self.name
lines = [("@property", 0),
(f"def {name}(self):", 0),
(f"return self._{name}", 1),
(empty_lines(0), 0),
(f"def set_{name}(self, {name}: {self.to_code(0)}):", 0),
(f"self.set_fields(_{name}={self.to_field_value()})", 1),
(empty_lines(0), 0)]
return indent_code([indent_line(*line) for line in lines], level)
def to_init_code(self, level: int) -> str:
return indent_line(f"self._{self.name}: {self.to_code(0)} = "
f"{repr(self.default)}", level)
def to_description(self, level: int) -> Optional[str]:
if self.description is not None and self.description.strip() != '':
type_str = f'{self.to_code(0)}'
type_str = f' ({type_str})' if type_str.strip() != '' else type_str
return indent_line(f"{self.name}{type_str}: "
f"{self.description}", level)
return None
def to_field_value(self):
raise NotImplementedError
class ClassAttributeItem(Property):
def to_code(self, level: int = 0) -> str:
return self.type_str
def to_init_code(self, level: int) -> str:
type_code = f'{self.to_code(0)}'
type_ = f': {type_code}' if type_code.strip() != '' else ''
return indent_line(f"{self.name}{type_} = {self.default}", level)
def to_field_value(self):
pass
class BasicItem(Property):
TYPES = {'int', 'float', 'str', 'bool'}
def to_code(self, level: int = 0) -> str:
return f"typing.Optional[{self.type_str}]"
def to_field_value(self):
if self.type_str in self.TYPES:
return self.name
return f"{self.name}.tid"
class CompositeItem(Property):
TYPES = {'List'}
def __init__(self,
name: str,
type_str: str,
item_type: str,
description: Optional[str] = None,
default: Any = None):
super().__init__(name, type_str, description, default)
self.item_type = item_type
def to_code(self, level: int = 0) -> str:
# TODO: Assumes only one type of elements are allowed in the list,
# allow multiple types
# items = list(OrderedDict([(item, None)
# for item in self.items]).keys())
# item_type_str = f"{', '.join(self.item_type)}"
# if len(self.items) > 1:
# item_type_str = f"typing.Union[{item_type_str}]"
return f"typing.Optional[{self.type_str}[{self.item_type}]]"
def to_field_value(self):
item_value_str = BasicItem('item', self.item_type).to_field_value()
return f"[{item_value_str} for item in {self.name}]"
class DefinitionItem(Item):
def __init__(self, name: str,
class_type: str,
init_args: Optional[str] = None,
properties: Optional[List[Property]] = None,
class_attributes: Optional[List[Property]] = None,
description: Optional[str] = None):
super().__init__(name, description)
self.class_type = class_type
self.properties: List[Property] = \
[] if properties is None else properties
self.class_attributes = [] if class_attributes is None \
else class_attributes
self.description = description if description else None
self.init_args = init_args if init_args is not None else ''
self.init_args = self.init_args.replace('=', ' = ')
def to_init_code(self, level: int) -> str:
return indent_line(f"def __init__(self, {self.init_args}):", level)
def to_code(self, level: int) -> str:
super_args = ', '.join([item.split(':')[0].strip()
for item in self.init_args.split(',')])
raw_desc = self.to_description(1)
desc: str = '' if raw_desc is None else raw_desc
lines = [
empty_lines(1),
f"__all__.extend('{self.name}')",
empty_lines(1),
f"class {self.name}({self.class_type}):",
]
lines += [desc] if desc.strip() else []
lines += [item.to_init_code(1) for item in self.class_attributes]
lines += [empty_lines(0)]
lines += [self.to_init_code(1),
indent_line(f"super().__init__({super_args})", 2)]
lines += [item.to_init_code(2) for item in self.properties]
lines += [empty_lines(0)]
lines += [item.to_getter_setter_code(1) for item in self.properties]
return indent_code(lines, level)
@staticmethod
def to_item_descs(items, title):
item_descs = [item.to_description(0) for item in items]
item_descs = [item for item in item_descs if item is not None]
if len(item_descs) > 0:
item_descs = [indent_line(title, 1)] + \
[indent_line(desc, 2) for desc in item_descs]
return item_descs
def to_description(self, level: int) -> Optional[str]:
class_desc = [] if self.description is None else [self.description]
item_descs = self.to_item_descs(self.properties, 'Args:')
att_descs = self.to_item_descs(self.class_attributes, 'Attr:')
descs = class_desc + item_descs + att_descs
if len(descs) == 0:
return ""
quotes = indent_line('"""', 0)
return indent_code([quotes] + descs + [quotes], level)
class FileItem:
def __init__(self,
entry_item: DefinitionItem,
entry_file: str,
ignore_errors: Optional[List[str]],
description: Optional[str],
imports: Optional[List[str]]):
self.description = description
self.ignore_errors = [] if not ignore_errors else ignore_errors
self.imports = [] if not imports else list(set(imports))
self.entry_item = entry_item
self.entry_file_exists = os.path.exists(entry_file)
def to_code(self, level: int) -> str:
lines: List[str] = []
if not self.entry_file_exists:
lines = [self.to_description(0),
self.to_import_code(0),
empty_lines(1), '__all__ = []']
lines.append(self.entry_item.to_code(0))
return indent_code(lines, level)
def to_description(self, level):
quotes = '"""'
lines = self.ignore_errors + [quotes, self.description, quotes]
return indent_code(lines, level)
def to_import_code(self, level):
imports_set: OrderedDict[str] = {}
for import_ in sorted(self.imports):
imports_set[f"import {import_}"] = None
return indent_code(list(imports_set), level)
|
#!/usr/bin/env python3
import redis
import argparse
import hashlib
from getpass import getpass
r = redis.StrictRedis(host="localhost", port=6379)
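# Storage layout used by this script (inferred from the commands below):
#   service:<name>  -> Redis hash with the fields public, display, website,
#                      api_key (blake2b digest of the raw key), precache, ondemand
#   services        -> Redis set indexing all known service names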
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--add', action='store_true', help='Adds a service')
group.add_argument('--check', action='store_true', help='Retrieve and print service details')
group.add_argument('--delete', action='store_true', help='Delete a service entry')
group.add_argument('--update', action='store_true', help='Update a service entry')
group.add_argument('--list', action='store_true', help='List all users')
group.add_argument('--stats', action='store_true', help='Statistics for all users')
parser.add_argument('service', nargs='?', default=None, type=str, help='Service username')
args = parser.parse_args()
if not args.service and not (args.list or args.stats):
from sys import exit
parser.print_help()
exit(1)
def hash_key(x: str):
m = hashlib.blake2b()
m.update(x.encode("utf-8"))
return m.digest()
def exists(key: str):
existing = r.exists(f"service:{key}") and r.sismember('services', key)
if not existing:
print(f"{key} not found")
else:
print(f"{key} exists")
return existing
def existing_users():
return [l.decode("utf-8") for l in r.smembers('services')]
def interactive_add():
public = "N/A"
public_opts = ["Y", "N"]
while public not in public_opts:
public = input("Public information? (Y/N): ")
display = input("Display name: ")
website = input("Website: ")
api_key = None
while not api_key:
api_key = getpass("API Key (hidden, will be hashed): ")
options = {
"public": public,
"display": display,
"website": website,
"api_key": hash_key(api_key),
"precache": 0,
"ondemand": 0
}
return options
def interactive_update():
public = "N/A"
public_opts = ["Y", "N", ""]
while public not in public_opts:
public = input("Public information? (Y/N): ")
display = input("Display name: ")
website = input("Website: ")
api_key = getpass("API Key (hidden, will be hashed): ")
options = dict()
if public:
options["public"] = public
if display:
options["display"] = display
if website:
options["website"] = website
if api_key:
options["api_key"] = hash_key(api_key)
return options
def display(user):
options = r.hgetall(f"service:{user}")
options = {k.decode("utf-8"): v for k,v in options.items()}
options = {k: v if k=="api_key" else v.decode("utf-8") for k,v in options.items()}
print(options)
def add(user):
print("Creating new entry.")
options = interactive_add()
r.hmset(f"service:{user}", options)
r.sadd("services", user)
print(f"User {user} created:")
display(user)
def update(user):
print("Updating entry. Leave a field blank to skip.")
options = interactive_update()
if options:
r.hmset(f"service:{user}", options)
print(f"User {user} updated:")
else:
print(f"No changes to {user}:")
display(user)
def delete(user):
print("Deleting entry.")
r.delete(f"service:{user}")
r.srem('services', user)
user_exists = exists(user)
    if user_exists:
        print("Failed to delete")
    else:
        print("Deletion successful")
def statistics(users):
for user in users:
stats = r.hgetall(f"service:{user}")
stats = {k.decode("utf-8"): v for k,v in stats.items()}
stats = {k: v if k=="api_key" else v.decode("utf-8") for k,v in stats.items()}
print(user)
print(f"\t{"PUBLIC" if stats["public"]=="Y" else "PRIVATE"}\n"
f"\tprecache: {stats.get("precache") or 0}"
f"\tondemand: {stats.get("ondemand") or 0}"
)
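
# Illustrative sketch (not part of the original script): how a consuming
# service could check a presented API key against the stored digest. The
# helper name and flow are assumptions; only hash_key() and the
# "service:<name>" hash layout come from the code above.
def _demo_verify_api_key(service_name: str, presented_key: str) -> bool:
    stored_digest = r.hget(f"service:{service_name}", "api_key")
    return stored_digest is not None and stored_digest == hash_key(presented_key)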
def main():
if args.list:
print("Services in database:\n", existing_users())
elif args.stats:
statistics(existing_users())
else:
user = args.service
user_exists = exists(user)
if not user_exists:
if args.add:
add(user)
else:
print("Services in database:\n", existing_users())
else:
if args.check:
display(user)
elif args.delete:
delete(user)
elif args.update:
update(user)
else:
                raise NotImplementedError
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import redis
import argparse
import hashlib
from getpass import getpass
r = redis.StrictRedis(host="localhost", port=6379)
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--add', action='store_true', help='Adds a service')
group.add_argument('--check', action='store_true', help='Retrieve and print service details')
group.add_argument('--delete', action='store_true', help='Delete a service entry')
group.add_argument('--update', action='store_true', help='Update a service entry')
group.add_argument('--list', action='store_true', help='List all services')
group.add_argument('--stats', action='store_true', help='Statistics for all services')
parser.add_argument('service', nargs='?', default=None, type=str, help='Service username')
args = parser.parse_args()
if not args.service and not (args.list or args.stats):
from sys import exit
parser.print_help()
exit(1)
def hash_key(x: str):
m = hashlib.blake2b()
m.update(x.encode("utf-8"))
return m.digest()
def exists(key: str):
existing = r.exists(f"service:{key}") and r.sismember('services', key)
if not existing:
print(f"{key} not found")
else:
print(f"{key} exists")
return existing
def existing_users():
return [l.decode("utf-8") for l in r.smembers('services')]
def interactive_add():
public = "N/A"
public_opts = ["Y", "N"]
while public not in public_opts:
public = input("Public information? (Y/N): ")
display = input("Display name: ")
website = input("Website: ")
api_key = None
while not api_key:
api_key = getpass("API Key (hidden, will be hashed): ")
options = {
"public": public,
"display": display,
"website": website,
"api_key": hash_key(api_key),
"precache": 0,
"ondemand": 0
}
return options
def interactive_update():
public = "N/A"
public_opts = ["Y", "N", ""]
while public not in public_opts:
public = input("Public information? (Y/N): ")
display = input("Display name: ")
website = input("Website: ")
api_key = getpass("API Key (hidden, will be hashed): ")
options = dict()
if public:
options["public"] = public
if display:
options["display"] = display
if website:
options["website"] = website
if api_key:
options["api_key"] = hash_key(api_key)
return options
def display(user):
options = r.hgetall(f"service:{user}")
options = {k.decode("utf-8"): v for k,v in options.items()}
options = {k: v if k=="api_key" else v.decode("utf-8") for k,v in options.items()}
print(options)
def add(user):
print("Creating new entry.")
options = interactive_add()
r.hmset(f"service:{user}", options)
r.sadd("services", user)
print(f"User {user} created:")
display(user)
def update(user):
print("Updating entry. Leave a field blank to skip.")
options = interactive_update()
if options:
r.hmset(f"service:{user}", options)
print(f"User {user} updated:")
else:
print(f"No changes to {user}:")
display(user)
def delete(user):
print("Deleting entry.")
r.delete(f"service:{user}")
r.srem('services', user)
user_exists = exists(user)
if user_exists:
print("Failure in deleting")
else:
print("Deleting successfull")
def statistics(users):
for user in users:
stats = r.hgetall(f"service:{user}")
stats = {k.decode("utf-8"): v for k,v in stats.items()}
stats = {k: v if k=="api_key" else v.decode("utf-8") for k,v in stats.items()}
print(user)
print(f"\t{'PUBLIC' if stats['public']=='Y' else 'PRIVATE'}\n"
f"\tprecache: {stats.get('precache') or 0}"
f"\tondemand: {stats.get('ondemand') or 0}"
)
def main():
if args.list:
print("Services in database:\n", existing_users())
elif args.stats:
statistics(existing_users())
else:
user = args.service
user_exists = exists(user)
if not user_exists:
if args.add:
add(user)
else:
print("Services in database:\n", existing_users())
else:
if args.check:
display(user)
elif args.delete:
delete(user)
elif args.update:
update(user)
else:
raise NotImplementedError
if __name__ == '__main__':
main()
|
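The service manager above stores only a blake2b digest of each API key (via hash_key) inside the service:<name> hash. As a hedged illustration of how a consumer could later verify a submitted key against that stored digest, here is a minimal sketch; the Redis key and field names mirror the script above, while verify_api_key itself is hypothetical and not part of the original code:

import hashlib
import hmac

import redis

r = redis.StrictRedis(host="localhost", port=6379)

def verify_api_key(service: str, candidate: str) -> bool:
    # Fetch the raw blake2b digest stored by the management CLI above.
    stored = r.hget(f"service:{service}", "api_key")
    if stored is None:
        return False
    digest = hashlib.blake2b(candidate.encode("utf-8")).digest()
    # Constant-time comparison avoids leaking information through timing.
    return hmac.compare_digest(stored, digest)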
import datetime
import shutil
import services.inventory
import workflow
import pandas as pd
import os
import file_system
import file_system.images as images
import json
from file_system.file_system_object import FileSystemObject
from services import inventory, library
from tabulate import tabulate
import cv2
TEMP_FOLDER = "tmp/eval"
RECYCLE_BIN = "tmp/recycle_bin"
def inventory_menu():
library_id = prompt_for_library()
while True:
print("\n")
print("###############################################")
print("Digital Library Utility - Inventory Management ")
print("###############################################")
print("[0] Return to Main Menu")
print("[1] Add/Update (Refresh) Inventory")
print("[3] View Inventory")
print("[4] Reconcile (Library) Inventory")
print("[5] Update Inventory Compare Scores")
print("[6] Manage Duplicate Inventory")
print("[7] Restore files from Recycle Bin")
print("[8] Classify Inventory")
choice = input("> ")
if choice.isnumeric() and int(choice) in range(10):
if int(choice) == 0:
workflow.main_menu()
elif int(choice) == 1: # add/update inventory
refresh_inventory(library_id=library_id)
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 3: # view inventory by library
display_library_inventory(library_id)
elif int(choice) == 4: # reconcile inventory
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 5: # reconcile inventory with compare score calculation
reconcile_inventory(library_id=library_id, calculate_compare_score=True)
elif int(choice) == 6: # manage duplicate inventory
refresh_inventory(library_id=library_id)
reconcile_inventory(library_id=library_id, calculate_compare_score=True)
get_comparable_inventory(library_id=library_id)
move_files_to_recycle_bin(library_id=library_id)
clear_eval_folder(TEMP_FOLDER)
refresh_inventory(library_id=library_id)
elif int(choice) == 7:
restore_from_recycle_bin()
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 8:
display_library_inventory(library_id)
update_classification(library_id)
else:
print("Selection not valid. Please try again.")
def refresh_inventory(library_id):
src = get_library_base_path(library_id)
exclusion_list = ['.map', 'venv', '.pyc', '__pycache__', '.DS_Store', 'ignore', '.idea', 'git']
restricted_list = []
data = file_system.search(search_path=src,
recursive=True,
exclusion_list=exclusion_list,
restricted_list=restricted_list)
for idx, item in enumerate(data):
data[idx]['library_id'] = library_id
if not data[idx]['is_hidden']:
inventory.refresh_inventory(**data[idx])
def prompt_for_library():
workflow.workflow_library.display_user_libraries()
prompt = input("Select Library ID: ")
if lib := services.library.get_library(prompt):
return lib.library_id
print(f"{prompt} is not a valid Library ID")
return prompt_for_library()
def get_library_base_path(library_id):
lib = library.get_library(library_id)
return lib.base_path
def update_inventory_compare_scores(inventory_id, full_path):
fso = FileSystemObject(full_path).to_dict()
if fso and fso['is_found'] and not fso['is_hidden']:
fso['inventory_removed_date'] = None
inv = inventory.get_inventory_item(inventory_id)
if not inv.compare_score or inv.compare_score == 0 or inv.compare_score_dt < inv.modified_dt:
fso['compare_score'] = (update_compare_score(full_path, size=fso['size']))
fso['compare_score_dt'] = datetime.datetime.now()
inventory.update_inventory(inventory_id, **fso)
else:
data = {'inventory_removed_date': datetime.datetime.now()}
inventory.update_inventory(inventory_id, **data)
def update_compare_score(full_path, size):
return images.calculate_compare_score(full_path, size=size)
def get_inventory(library_id):
return inventory.get_library_inventory(library_id=library_id)
def display_all_inventory():
results = inventory.get_all_inventory()
df = pd.DataFrame(results)
# df = df.drop(['_sa_instance_state'], axis=1)
df = df.sort_values(by=['library_id', 'directory', 'full_path'])
print(tabulate(df.head(500), headers='keys', tablefmt='psql'))
def display_library_inventory(library_id):
if results := inventory.get_library_inventory(library_id):
df = pd.DataFrame(results)
# df = df.drop(['_sa_instance_state'], axis=1)
df = df.sort_values(by=['library_id', 'directory', 'full_path'])
print(tabulate(df.head(500), headers='keys', tablefmt='psql'))
else:
return None
def reconcile_inventory(library_id, calculate_compare_score: bool = False):
# Purpose: Identify files/folders that no longer exist and update DB accordingly
# library_id = prompt_for_library()
results = inventory.get_library_inventory(library_id)
for idx, item in enumerate(results):
if results[idx]['file']:
src_path = results[idx]['full_path']
inventory_id = results[idx]['inventory_id']
fso = FileSystemObject(src_path).to_dict()
if fso and fso['is_found'] and not fso['is_hidden']:
data = {
'inventory_removed_date': None,
'inventory_removed_reason': None,
'is_missing': False
}
else:
data = {'inventory_removed_date': datetime.datetime.now(),
'is_missing': True
}
inventory.update_inventory(inventory_id, **data)
if calculate_compare_score:
update_inventory_compare_scores(inventory_id, src_path)
def restore_from_recycle_bin():
path = RECYCLE_BIN
for root, folders, files in os.walk(path, topdown=True):
for file in files:
recycled_file = os.path.splitext(file)[0]
src = os.path.join(root, file)
original_file = services.inventory.get_inventory_item(recycled_file)
dest = original_file.full_path
shutil.move(src, dest)
def get_comparable_inventory(library_id):
try:
if data := inventory.get_comparable_inventory(library_id):
df = pd.DataFrame(data)
# df = df.drop(['_sa_instance_state'], axis=1)
df["file"] = df["file"].str.lower()
df['compare_score_frequency'] = df.groupby('compare_score')['compare_score'].transform('count')
df = df[df.groupby('compare_score')['compare_score'].transform('count') > 1]
df = df[['inventory_id', 'library_id', 'directory', 'full_path', 'file', 'file_extension',
'size', 'created_dt', 'modified_dt',
'compare_score_dt', 'compare_score', 'compare_score_frequency']]
# df.sort_values(by=['compare_score', 'size'])
# print(tabulate(df, headers='keys', tablefmt='psql'))
group_duplicates(df)
clear_eval_folder(TEMP_FOLDER)
else:
print("No duplicates were found.")
except Exception as error:
print(f"An unexpected error has occurred: {error}")
def group_duplicates(df: pd.DataFrame):
distinct_scores = list(df['compare_score'].unique())
count = len(distinct_scores)
for counter, score in enumerate(distinct_scores, 1):
sample = df[df["compare_score"] == score]
sample = pd.DataFrame(sample, columns=['inventory_id', 'file', 'file_extension', 'full_path', 'directory',
'size', 'created_dt', 'modified_dt'])
sample.reset_index(drop=True, inplace=True)
print("###############################################")
print(f"Potential Duplicate Group {counter} of {count}")
print(f"Compare Score: {score}")
print("###############################################")
evaluate_duplicates_by_group(sample)
def evaluate_duplicates_by_group(sample: pd.DataFrame):
clear_eval_folder(path=TEMP_FOLDER)
group = []
# print(tabulate(sample.head(), headers='keys', tablefmt='psql'))
for idx, row in sample.iterrows():
group.append(row['inventory_id'])
inventory_id = row['inventory_id']
created = row['created_dt']
modified = row['modified_dt']
size = row['size']
src = row['full_path']
dest = f'{TEMP_FOLDER}/' + inventory_id + row['file_extension']
print(f"InventoryID: {inventory_id} | File: {row["file"]} | Created: {created} | "
f"Modified: {modified} | Size: {size}")
shutil.copy2(src, dest)
if retain := input("Enter Inventory IDs you wish to keep (separate by comma): ").split(","):
for idx, item in enumerate(retain):
retain[idx] = item.strip()
for inv_id in group:
if inv_id not in retain:
reason = input(f"Enter reason for removal of {inv_id}: ")
services.inventory.remove_inventory_item(inv_id.strip(), reason.strip())
def move_files_to_recycle_bin(library_id):
reconcile_inventory(library_id, calculate_compare_score=False)
if data := inventory.get_removed_inventory(library_id):
for idx, item in enumerate(data):
src = data[idx]['full_path']
inventory_id = data[idx]['inventory_id']
file_extension = data[idx]['file_extension']
dest = f'{RECYCLE_BIN}/' + inventory_id + file_extension
try:
shutil.move(src, dest)
except FileNotFoundError:
print("A FileNotFound error has occurred.")
def remove_inventory(group: list, retain: list):
for idx, item in enumerate(retain):
retain[idx] = item.strip()
for inv_id in group:
if inv_id not in retain:
reason = input(f"Enter reason for removal of {inv_id}: ")
services.inventory.remove_inventory_item(inv_id.strip(), reason.strip())
def clear_eval_folder(path: str):
mypath = path
for root, dirs, files in os.walk(mypath):
for file in files:
os.remove(os.path.join(root, file))
def select_inventory_item():
return input("Input Inventory ID: ")
def get_inventory_item(inventory_id):
return services.inventory.get_inventory_item(inventory_id=inventory_id)
def update_classification(library_id, incl_assignment: bool = False):
inv = workflow.workflow_inventory.get_inventory(library_id=library_id)
try:
for file in inv:
inventory_id = file['inventory_id']
if file['is_image']:
# inv = services.inventory.get_inventory_item(inventory_id=inventory_id).to_dict()
cv2.imshow(file['file'], cv2.imread(file['full_path']))
cv2.waitKey(1)
if file['classification']:
print(f"Current Tags: {file["classification"]["tags"]}")
tag_values = [item.strip() for item in input("Input Tags (separated by comma): ").split(',')]
data = {
'inventory_id': inventory_id,
'classification': {'tags': tag_values},
'model_assignment': input("Model Assignment Name: ") if incl_assignment else file['model_assignment']
}
services.inventory.update_inventory_classification(**data)
cv2.destroyAllWindows()
cv2.destroyAllWindows()
except:
raise
def update_classification_from_model(inventory_id, tags: str):
file = workflow.workflow_inventory.get_inventory_item(inventory_id).to_dict()
classification = file['classification']['tags'] if file['classification'] else []
classification.append(tags)
classification = list(set(classification))
data = {
'inventory_id': inventory_id,
'classification': {'tags': classification}
}
services.inventory.update_inventory_classification(**data)
# for image in inv:
# inventory_id = image['inventory_id']
#
# try:
# if inv := services.inventory.get_inventory_item(inventory_id=inventory_id).to_dict():
# cv2.imshow(image['file'], image['full_path'])
# # cv2.imwrite("tests/samples/ml/test/output.jpg", image)
# cv2.waitKey(0)
# # cv2.destroyAllWindows()
# if inv['classification']:
# print(f"Current Tags: {inv["classification"]["tags"]}")
#
# tag_values = [item.strip() for item in input("Input Tags (separated by comma): ").split(',')]
# data = {
# 'inventory_id': inventory_id,
# 'classification': {'tags': tag_values},
# 'model_assignment': input("Model Assignment Name: ") if incl_assignment else inv['model_assignment']
# }
# services.inventory.update_inventory_classification(**data)
#
# cv2.destroyAllWindows()
# except:
# raise
#5351dd023ef1440393b81ec0acbe2f4a
| import datetime
import shutil
import services.inventory
import workflow
import pandas as pd
import os
import file_system
import file_system.images as images
import json
from file_system.file_system_object import FileSystemObject
from services import inventory, library
from tabulate import tabulate
import cv2
TEMP_FOLDER = "tmp/eval"
RECYCLE_BIN = "tmp/recycle_bin"
def inventory_menu():
library_id = prompt_for_library()
while True:
print("\n")
print("###############################################")
print("Digital Library Utility - Inventory Management ")
print("###############################################")
print("[0] Return to Main Menu")
print("[1] Add/Update (Refresh) Inventory")
print("[3] View Inventory")
print("[4] Reconcile (Library) Inventory")
print("[5] Update Inventory Compare Scores")
print("[6] Manage Duplicate Inventory")
print("[7] Restore files from Recycle Bin")
print("[8] Classify Inventory")
choice = input("> ")
if choice.isnumeric() and int(choice) in range(10):
if int(choice) == 0:
workflow.main_menu()
elif int(choice) == 1: # add/update inventory
refresh_inventory(library_id=library_id)
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 3: # view inventory by library
display_library_inventory(library_id)
elif int(choice) == 4: # reconcile inventory
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 5: # reconcile inventory with compare score calculation
reconcile_inventory(library_id=library_id, calculate_compare_score=True)
elif int(choice) == 6: # manage duplicate inventory
refresh_inventory(library_id=library_id)
reconcile_inventory(library_id=library_id, calculate_compare_score=True)
get_comparable_inventory(library_id=library_id)
move_files_to_recycle_bin(library_id=library_id)
clear_eval_folder(TEMP_FOLDER)
refresh_inventory(library_id=library_id)
elif int(choice) == 7:
restore_from_recycle_bin()
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 8:
display_library_inventory(library_id)
update_classification(library_id)
else:
print("Selection not valid. Please try again.")
def refresh_inventory(library_id):
src = get_library_base_path(library_id)
exclusion_list = ['.map', 'venv', '.pyc', '__pycache__', '.DS_Store', 'ignore', '.idea', 'git']
restricted_list = []
data = file_system.search(search_path=src,
recursive=True,
exclusion_list=exclusion_list,
restricted_list=restricted_list)
for idx, item in enumerate(data):
data[idx]['library_id'] = library_id
if not data[idx]['is_hidden']:
inventory.refresh_inventory(**data[idx])
def prompt_for_library():
workflow.workflow_library.display_user_libraries()
prompt = input("Select Library ID: ")
if lib := services.library.get_library(prompt):
return lib.library_id
print(f"{prompt} is not a valid Library ID")
return prompt_for_library()
def get_library_base_path(library_id):
lib = library.get_library(library_id)
return lib.base_path
def update_inventory_compare_scores(inventory_id, full_path):
fso = FileSystemObject(full_path).to_dict()
if fso and fso['is_found'] and not fso['is_hidden']:
fso['inventory_removed_date'] = None
inv = inventory.get_inventory_item(inventory_id)
if not inv.compare_score or inv.compare_score == 0 or inv.compare_score_dt < inv.modified_dt:
fso['compare_score'] = (update_compare_score(full_path, size=fso['size']))
fso['compare_score_dt'] = datetime.datetime.now()
inventory.update_inventory(inventory_id, **fso)
else:
data = {'inventory_removed_date': datetime.datetime.now()}
inventory.update_inventory(inventory_id, **data)
def update_compare_score(full_path, size):
return images.calculate_compare_score(full_path, size=size)
def get_inventory(library_id):
return inventory.get_library_inventory(library_id=library_id)
def display_all_inventory():
results = inventory.get_all_inventory()
df = pd.DataFrame(results)
# df = df.drop(['_sa_instance_state'], axis=1)
df = df.sort_values(by=['library_id', 'directory', 'full_path'])
print(tabulate(df.head(500), headers='keys', tablefmt='psql'))
def display_library_inventory(library_id):
if results := inventory.get_library_inventory(library_id):
df = pd.DataFrame(results)
# df = df.drop(['_sa_instance_state'], axis=1)
df = df.sort_values(by=['library_id', 'directory', 'full_path'])
print(tabulate(df.head(500), headers='keys', tablefmt='psql'))
else:
return None
def reconcile_inventory(library_id, calculate_compare_score: bool = False):
# Purpose: Identify files/folders that no longer exist and update DB accordingly
# library_id = prompt_for_library()
results = inventory.get_library_inventory(library_id)
for idx, item in enumerate(results):
if results[idx]['file']:
src_path = results[idx]['full_path']
inventory_id = results[idx]['inventory_id']
fso = FileSystemObject(src_path).to_dict()
if fso and fso['is_found'] and not fso['is_hidden']:
data = {
'inventory_removed_date': None,
'inventory_removed_reason': None,
'is_missing': False
}
else:
data = {'inventory_removed_date': datetime.datetime.now(),
'is_missing': True
}
inventory.update_inventory(inventory_id, **data)
if calculate_compare_score:
update_inventory_compare_scores(inventory_id, src_path)
def restore_from_recycle_bin():
path = RECYCLE_BIN
for root, folders, files in os.walk(path, topdown=True):
for file in files:
recycled_file = os.path.splitext(file)[0]
src = os.path.join(root, file)
original_file = services.inventory.get_inventory_item(recycled_file)
dest = original_file.full_path
shutil.move(src, dest)
def get_comparable_inventory(library_id):
try:
if data := inventory.get_comparable_inventory(library_id):
df = pd.DataFrame(data)
# df = df.drop(['_sa_instance_state'], axis=1)
df["file"] = df["file"].str.lower()
df['compare_score_frequency'] = df.groupby('compare_score')['compare_score'].transform('count')
df = df[df.groupby('compare_score')['compare_score'].transform('count') > 1]
df = df[['inventory_id', 'library_id', 'directory', 'full_path', 'file', 'file_extension',
'size', 'created_dt', 'modified_dt',
'compare_score_dt', 'compare_score', 'compare_score_frequency']]
# df.sort_values(by=['compare_score', 'size'])
# print(tabulate(df, headers='keys', tablefmt='psql'))
group_duplicates(df)
clear_eval_folder(TEMP_FOLDER)
else:
print("No duplicates were found.")
except Exception as error:
print(f"An unexpected error has occurred: {error}")
def group_duplicates(df: pd.DataFrame):
distinct_scores = list(df['compare_score'].unique())
count = len(distinct_scores)
for counter, score in enumerate(distinct_scores, 1):
sample = df[df["compare_score"] == score]
sample = pd.DataFrame(sample, columns=['inventory_id', 'file', 'file_extension', 'full_path', 'directory',
'size', 'created_dt', 'modified_dt'])
sample.reset_index(drop=True, inplace=True)
print("###############################################")
print(f"Potential Duplicate Group {counter} of {count}")
print(f"Compare Score: {score}")
print("###############################################")
evaluate_duplicates_by_group(sample)
def evaluate_duplicates_by_group(sample: pd.DataFrame):
clear_eval_folder(path=TEMP_FOLDER)
group = []
# print(tabulate(sample.head(), headers='keys', tablefmt='psql'))
for idx, row in sample.iterrows():
group.append(row['inventory_id'])
inventory_id = row['inventory_id']
created = row['created_dt']
modified = row['modified_dt']
size = row['size']
src = row['full_path']
dest = f'{TEMP_FOLDER}/' + inventory_id + row['file_extension']
print(f"InventoryID: {inventory_id} | File: {row['file']} | Created: {created} | "
f"Modified: {modified} | Size: {size}")
shutil.copy2(src, dest)
if retain := input("Enter Inventory IDs you wish to keep (separate by comma): ").split(","):
for idx, item in enumerate(retain):
retain[idx] = item.strip()
for inv_id in group:
if inv_id not in retain:
reason = input(f"Enter reason for removal of {inv_id}: ")
services.inventory.remove_inventory_item(inv_id.strip(), reason.strip())
def move_files_to_recycle_bin(library_id):
reconcile_inventory(library_id, calculate_compare_score=False)
if data := inventory.get_removed_inventory(library_id):
for idx, item in enumerate(data):
src = data[idx]['full_path']
inventory_id = data[idx]['inventory_id']
file_extension = data[idx]['file_extension']
dest = f'{RECYCLE_BIN}/' + inventory_id + file_extension
try:
shutil.move(src, dest)
except FileNotFoundError:
print("A FileNotFound error has occurred.")
def remove_inventory(group: list, retain: list):
for idx, item in enumerate(retain):
retain[idx] = item.strip()
for inv_id in group:
if inv_id not in retain:
reason = input(f"Enter reason for removal of {inv_id}: ")
services.inventory.remove_inventory_item(inv_id.strip(), reason.strip())
def clear_eval_folder(path: str):
mypath = path
for root, dirs, files in os.walk(mypath):
for file in files:
os.remove(os.path.join(root, file))
def select_inventory_item():
return input("Input Inventory ID: ")
def get_inventory_item(inventory_id):
return services.inventory.get_inventory_item(inventory_id=inventory_id)
def update_classification(library_id, incl_assignment: bool = False):
inv = workflow.workflow_inventory.get_inventory(library_id=library_id)
try:
for file in inv:
inventory_id = file['inventory_id']
if file['is_image']:
# inv = services.inventory.get_inventory_item(inventory_id=inventory_id).to_dict()
cv2.imshow(file['file'], cv2.imread(file['full_path']))
cv2.waitKey(1)
if file['classification']:
print(f"Current Tags: {file['classification']['tags']}")
tag_values = [item.strip() for item in input("Input Tags (separated by comma): ").split(',')]
data = {
'inventory_id': inventory_id,
'classification': {'tags': tag_values},
'model_assignment': input("Model Assignment Name: ") if incl_assignment else file['model_assignment']
}
services.inventory.update_inventory_classification(**data)
cv2.destroyAllWindows()
cv2.destroyAllWindows()
except:
raise
def update_classification_from_model(inventory_id, tags: str):
file = workflow.workflow_inventory.get_inventory_item(inventory_id).to_dict()
classification = file['classification']['tags'] if file['classification'] else []
classification.append(tags)
classification = list(set(classification))
data = {
'inventory_id': inventory_id,
'classification': {'tags': classification}
}
services.inventory.update_inventory_classification(**data)
# for image in inv:
# inventory_id = image['inventory_id']
#
# try:
# if inv := services.inventory.get_inventory_item(inventory_id=inventory_id).to_dict():
# cv2.imshow(image['file'], image['full_path'])
# # cv2.imwrite("tests/samples/ml/test/output.jpg", image)
# cv2.waitKey(0)
# # cv2.destroyAllWindows()
# if inv['classification']:
# print(f"Current Tags: {inv['classification']['tags']}")
#
# tag_values = [item.strip() for item in input("Input Tags (separated by comma): ").split(',')]
# data = {
# 'inventory_id': inventory_id,
# 'classification': {'tags': tag_values},
# 'model_assignment': input("Model Assignment Name: ") if incl_assignment else inv['model_assignment']
# }
# services.inventory.update_inventory_classification(**data)
#
# cv2.destroyAllWindows()
# except:
# raise
#5351dd023ef1440393b81ec0acbe2f4a
|
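get_comparable_inventory above flags potential duplicates by keeping only rows whose compare_score occurs more than once. A self-contained sketch of that pandas groupby/transform pattern on made-up data (column names follow the workflow above; the values are illustrative only):

import pandas as pd

# Toy inventory: the first two files share a compare_score, the third is unique.
df = pd.DataFrame({
    "inventory_id": ["a1", "b2", "c3"],
    "file": ["img_001.jpg", "img_001 (copy).jpg", "notes.txt"],
    "compare_score": [1234, 1234, 9999],
})

# Count how many rows share each score, then keep only scores seen at least twice.
df["compare_score_frequency"] = df.groupby("compare_score")["compare_score"].transform("count")
duplicates = df[df["compare_score_frequency"] > 1]
print(duplicates[["inventory_id", "file", "compare_score", "compare_score_frequency"]])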
from bilibili import bilibili
import datetime
import time
import asyncio
import traceback
import os
import configloader
import utils
from printer import Printer
class Tasks:
def __init__(self):
fileDir = os.path.dirname(os.path.realpath('__file__'))
file_user = fileDir + "/conf/user.conf"
self.dic_user = configloader.load_user(file_user)
# Claim the daily bag reward
async def Daily_bag(self):
response = await bilibili().get_dailybag()
json_response = await response.json()
for i in range(0, len(json_response['data']['bag_list'])):
Printer().printer(f"获得-{json_response["data"]["bag_list"][i]["bag_name"]}-成功", "Info", "green")
def CurrentTime(self):
currenttime = str(int(time.mktime(datetime.datetime.now().timetuple())))
return currenttime
# Daily sign-in
async def DoSign(self):
response = await bilibili().get_dosign()
temp = await response.json(content_type=None)
Printer().printer(f"签到状态:{temp["msg"]}", "Info", "green")
# Claim the daily task reward
async def Daily_Task(self):
response2 = await bilibili().get_dailytask()
json_response2 = await response2.json()
Printer().printer(f"双端观看直播:{json_response2["msg"]}", "Info", "green")
# Fan support group sign-in
async def link_sign(self):
response = await bilibili().get_grouplist()
json_response = await response.json(content_type=None)
check = len(json_response['data']['list'])
group_id_list = []
owner_uid_list = []
for i in range(0, check):
group_id = json_response['data']['list'][i]['group_id']
owner_uid = json_response['data']['list'][i]['owner_uid']
group_id_list.append(group_id)
owner_uid_list.append(owner_uid)
for (i1, i2) in zip(group_id_list, owner_uid_list):
response = await bilibili().assign_group(i1, i2)
json_response = await response.json(content_type=None)
if json_response['code'] == 0:
if (json_response['data']['status']) == 1:
Printer().printer(f"应援团{i1}已应援过", "Info", "green")
if (json_response['data']['status']) == 0:
Printer().printer(f"应援团{i1}应援成功,获得{json_response["data"]["add_num"]}点亲密度", "Info", "green")
else:
Printer().printer(f"应援团{i1}应援失败,{json_response}", "Error", "red")
async def send_gift(self):
if self.dic_user['gift']['on/off'] == '1':
argvs, x = await utils.fetch_bag_list(printer=False)
for i in range(0, len(argvs)):
giftID = argvs[i][0]
giftNum = argvs[i][1]
bagID = argvs[i][2]
roomID = self.dic_user['gift']['send_to_room']
await utils.send_gift_web(roomID, giftID, giftNum, bagID)
if not argvs:
Printer().printer(f"没有将要过期的礼物~", "Info", "green")
async def auto_send_gift(self):
if self.dic_user['auto-gift']['on/off'] == "1":
a = await utils.fetch_medal(printer=False)
res = await bilibili().gift_list()
json_res = await res.json()
temp_dic = {}
for j in range(0, len(json_res['data'])):
price = json_res['data'][j]['price']
id = json_res['data'][j]['id']
temp_dic[id] = price
x, temp = await utils.fetch_bag_list(printer=False)
roomid = a[0]
today_feed = a[1]
day_limit = a[2]
left_num = int(day_limit) - int(today_feed)
calculate = 0
for i in range(0, len(temp)):
gift_id = int(temp[i][0])
gift_num = int(temp[i][1])
bag_id = int(temp[i][2])
expire = int(temp[i][3])
if (gift_id != 4 and gift_id != 3 and gift_id != 9 and gift_id != 10) and expire != 0:
if (gift_num * (temp_dic[gift_id] / 100) < left_num):
calculate = calculate + temp_dic[gift_id] / 100 * gift_num
tmp2 = temp_dic[gift_id] / 100 * gift_num
await utils.send_gift_web(roomid, gift_id, gift_num, bag_id)
left_num = left_num - tmp2
elif left_num - temp_dic[gift_id] / 100 >= 0:
tmp = (left_num) / (temp_dic[gift_id] / 100)
tmp1 = (temp_dic[gift_id] / 100) * int(tmp)
calculate = calculate + tmp1
await utils.send_gift_web(roomid, gift_id, tmp, bag_id)
left_num = left_num - tmp1
Printer().printer(f"自动送礼共送出亲密度为{int(calculate)}的礼物", "Info", "green")
async def doublegain_coin2silver(self):
if self.dic_user['doublegain_coin2silver']['on/off'] == "1":
response0 = await bilibili().request_doublegain_coin2silver()
json_response0 = await response0.json()
response1 = await bilibili().request_doublegain_coin2silver()
json_response1 = await response1.json()
print(json_response0['msg'], json_response1['msg'])
async def sliver2coin(self):
if self.dic_user['coin']['on/off'] == '1':
response1 = await bilibili().silver2coin_app()
json_response1 = await response1.json()
Printer().printer(f"银瓜子兑换硬币状态:{json_response1["msg"]}", "Info", "green")
async def run(self):
while 1:
try:
Printer().printer(f"开始执行每日任务", "Info", "green")
await self.DoSign()
await self.Daily_bag()
await self.Daily_Task()
await self.link_sign()
await self.send_gift()
await self.sliver2coin()
await self.doublegain_coin2silver()
await self.auto_send_gift()
await utils.reconnect()
await asyncio.sleep(21600)
except:
await asyncio.sleep(10)
Printer().printer(traceback.format_exc(), "Error", "red")
| from bilibili import bilibili
import datetime
import time
import asyncio
import traceback
import os
import configloader
import utils
from printer import Printer
class Tasks:
def __init__(self):
fileDir = os.path.dirname(os.path.realpath('__file__'))
file_user = fileDir + "/conf/user.conf"
self.dic_user = configloader.load_user(file_user)
# Claim the daily bag reward
async def Daily_bag(self):
response = await bilibili().get_dailybag()
json_response = await response.json()
for i in range(0, len(json_response['data']['bag_list'])):
Printer().printer(f"获得-{json_response['data']['bag_list'][i]['bag_name']}-成功", "Info", "green")
def CurrentTime(self):
currenttime = str(int(time.mktime(datetime.datetime.now().timetuple())))
return currenttime
# Daily sign-in
async def DoSign(self):
response = await bilibili().get_dosign()
temp = await response.json(content_type=None)
Printer().printer(f"签到状态:{temp['msg']}", "Info", "green")
# Claim the daily task reward
async def Daily_Task(self):
response2 = await bilibili().get_dailytask()
json_response2 = await response2.json()
Printer().printer(f"双端观看直播:{json_response2['msg']}", "Info", "green")
# Fan support group sign-in
async def link_sign(self):
response = await bilibili().get_grouplist()
json_response = await response.json(content_type=None)
check = len(json_response['data']['list'])
group_id_list = []
owner_uid_list = []
for i in range(0, check):
group_id = json_response['data']['list'][i]['group_id']
owner_uid = json_response['data']['list'][i]['owner_uid']
group_id_list.append(group_id)
owner_uid_list.append(owner_uid)
for (i1, i2) in zip(group_id_list, owner_uid_list):
response = await bilibili().assign_group(i1, i2)
json_response = await response.json(content_type=None)
if json_response['code'] == 0:
if (json_response['data']['status']) == 1:
Printer().printer(f"应援团{i1}已应援过", "Info", "green")
if (json_response['data']['status']) == 0:
Printer().printer(f"应援团{i1}应援成功,获得{json_response['data']['add_num']}点亲密度", "Info", "green")
else:
Printer().printer(f"应援团{i1}应援失败,{json_response}", "Error", "red")
async def send_gift(self):
if self.dic_user['gift']['on/off'] == '1':
argvs, x = await utils.fetch_bag_list(printer=False)
for i in range(0, len(argvs)):
giftID = argvs[i][0]
giftNum = argvs[i][1]
bagID = argvs[i][2]
roomID = self.dic_user['gift']['send_to_room']
await utils.send_gift_web(roomID, giftID, giftNum, bagID)
if not argvs:
Printer().printer(f"没有将要过期的礼物~", "Info", "green")
async def auto_send_gift(self):
if self.dic_user['auto-gift']['on/off'] == "1":
a = await utils.fetch_medal(printer=False)
res = await bilibili().gift_list()
json_res = await res.json()
temp_dic = {}
for j in range(0, len(json_res['data'])):
price = json_res['data'][j]['price']
id = json_res['data'][j]['id']
temp_dic[id] = price
x, temp = await utils.fetch_bag_list(printer=False)
roomid = a[0]
today_feed = a[1]
day_limit = a[2]
left_num = int(day_limit) - int(today_feed)
calculate = 0
for i in range(0, len(temp)):
gift_id = int(temp[i][0])
gift_num = int(temp[i][1])
bag_id = int(temp[i][2])
expire = int(temp[i][3])
if (gift_id != 4 and gift_id != 3 and gift_id != 9 and gift_id != 10) and expire != 0:
if (gift_num * (temp_dic[gift_id] / 100) < left_num):
calculate = calculate + temp_dic[gift_id] / 100 * gift_num
tmp2 = temp_dic[gift_id] / 100 * gift_num
await utils.send_gift_web(roomid, gift_id, gift_num, bag_id)
left_num = left_num - tmp2
elif left_num - temp_dic[gift_id] / 100 >= 0:
tmp = (left_num) / (temp_dic[gift_id] / 100)
tmp1 = (temp_dic[gift_id] / 100) * int(tmp)
calculate = calculate + tmp1
await utils.send_gift_web(roomid, gift_id, tmp, bag_id)
left_num = left_num - tmp1
Printer().printer(f"自动送礼共送出亲密度为{int(calculate)}的礼物", "Info", "green")
async def doublegain_coin2silver(self):
if self.dic_user['doublegain_coin2silver']['on/off'] == "1":
response0 = await bilibili().request_doublegain_coin2silver()
json_response0 = await response0.json()
response1 = await bilibili().request_doublegain_coin2silver()
json_response1 = await response1.json()
print(json_response0['msg'], json_response1['msg'])
async def sliver2coin(self):
if self.dic_user['coin']['on/off'] == '1':
response1 = await bilibili().silver2coin_app()
json_response1 = await response1.json()
Printer().printer(f"银瓜子兑换硬币状态:{json_response1['msg']}", "Info", "green")
async def run(self):
while 1:
try:
Printer().printer(f"开始执行每日任务", "Info", "green")
await self.DoSign()
await self.Daily_bag()
await self.Daily_Task()
await self.link_sign()
await self.send_gift()
await self.sliver2coin()
await self.doublegain_coin2silver()
await self.auto_send_gift()
await utils.reconnect()
await asyncio.sleep(21600)
except:
await asyncio.sleep(10)
Printer().printer(traceback.format_exc(), "Error", "red")
|
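run() above is a periodic task loop: execute the daily jobs, sleep for six hours, and back off for ten seconds whenever anything raises. A stripped-down, library-free sketch of the same pattern (daily_job is a stand-in for the real bilibili calls, not part of the original code):

import asyncio
import traceback

async def daily_job():
    # Placeholder for the real tasks (sign-in, daily bag, gifts, ...).
    print("running daily tasks")

async def run_forever(interval: float = 21600, retry_delay: float = 10):
    while True:
        try:
            await daily_job()
            await asyncio.sleep(interval)
        except Exception:
            traceback.print_exc()
            await asyncio.sleep(retry_delay)

# asyncio.run(run_forever())  # uncomment to start the loop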
#!/usr/bin/python3
# encoding='utf-8'
# author:weibk
# @time:2021/9/23 19:10
import pymysql
import random
con = pymysql.connect(host="localhost",
user="root",
password="123456",
database="db",
charset="utf8")
cursor = con.cursor(cursor=pymysql.cursors.DictCursor)
print("*****************************")
print("* 中国工商银行 *")
print("* 账户管理系统 *")
print("* V1.0 *")
print("*****************************")
print("* *")
print("* 1.开户 *")
print("* 2.存款 *")
print("* 3.取款 *")
print("* 4.转账 *")
print("* 5.查询 *")
print("* 6.退出 *")
print("*****************************")
BANK_NAME = "中国工商银行"
MONEY_INIT = 0
# Look up account information by account number
def getinfo(account):
cursor.execute('select * from bank_user where account=%s', (account,))
result = cursor.fetchone()
return result
# Create a new user (open an account)
def useradd():
# Check whether the user table is full
s = cursor.execute("select * from bank_user")
if s == 100:
return 3
# Check whether the username already exists
while True:
username = input("请输入您的姓名:")
cursor.execute("select username from bank_user")
uname = cursor.fetchall()
for item in uname:
if username == item['username']:
return 2
break
password = input("请设置一个密码:")
print("请您填写地址:")
country = input("\t请输入您所在的国家:")
province = input("\t请输入您所在的城市:")
street = input("\t请输入您所在的街道:")
house_number = input("\t请输入您的门牌号:")
# Check whether the generated account number already exists; regenerate if so
while True:
account = str(random.randint(10, 99)) + str(
random.randint(10, 99)) + str(
random.randint(10, 99)) + str(random.randint(10, 99))
cursor.execute("select account from bank_user")
uname = cursor.fetchall()
for item in uname:
if account == item['account']:
continue
else:
break
cursor.execute("insert into bank_user values "
"(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
(repr(account), repr(username), repr(password),
repr(country), repr(province),
repr(street), repr(house_number),
repr(BANK_NAME), repr(MONEY_INIT)))
con.commit()
cursor.execute("select * from bank_user where account=%s", (account,))
info1 = cursor.fetchone()
return info1
# Login helper
def login():
while True:
acc = int(input("请输入您的账号"))
cursor.execute("select account from bank_user")
uname = cursor.fetchall()
for item in uname:
if acc == item['account']:
while True:
pwd = input("请输入密码:")
cursor.execute("select * from bank_user where "
"account=%s", (acc,))
info1 = cursor.fetchone()
if pwd == info1['password']:
return {"flag": 1, 'info': info1}
else:
return 2
else:
continue
return 3
while True:
step = input("请选择业务:")
if step == "1":
info = useradd()
print(type(info))
# If the account was created successfully, print the user info
if isinstance(info, dict):
profile = '''
用户信息
---------------
账号:%s
姓名:%s
密码:%s
地址:%s-%s-%s-%s
余额:%s
开户行:%s
---------------
'''
print("恭喜你开户成功!!,您的信息如下:")
print(profile % (info['account'], info['username'],
info['password'], info['country'],
info['province'], info['street'],
info['house_number'], info['bank'],
info['balance']))
elif info == 2:
print("该用户已存在")
continue
elif info == 3:
print("用户库已满暂不支持开户业务")
continue
elif step == "2":
flag = login()
if isinstance(flag, dict):
bank = flag['info']
yue = bank['balance']
print(f"你好,{bank["username"]}登录成功!账户当前余额为{yue}")
# Deposit after a successful login
while True:
cunkuan = input("请输入您要存的金额:")
if cunkuan == 'Q' or cunkuan == 'q':
break
elif cunkuan.isdigit():
cunkuan = int(cunkuan)
else:
print('存款请输入正数,输入Q/q可退出业务')
continue
yue += cunkuan
print(f"存款成功!余额为{yue}")
cursor.execute("update bank_user set balance=%s where "
"account=%s", (yue, bank['account']))
con.commit()
break
elif flag == 2:
print("密码错误!")
continue
elif flag == 3:
print("账号不存在!")
continue
elif step == "3":
flag = login()
if isinstance(flag, dict):
bank = flag['info']
yue = bank['balance']
# Check whether the balance is zero
if yue == 0:
print(f"你好,{bank["username"]},您的余额为0,不能使用取款业务")
continue
else:
print(f"你好,{bank["username"]},登录成功!账户当前余额为{yue}")
while True:
qukuan = input("请输入您要取的金额:")
if qukuan == 'Q' or qukuan == 'q':
break
elif qukuan.isdigit():
qukuan = int(qukuan)
else:
print('取款请输入正数,输入Q/q可退出业务')
# Check whether the balance is sufficient
if yue < qukuan:
print('您的余额不足')
break
else:
yue -= qukuan
print(f"取款成功!余额为{yue}")
cursor.execute("update bank_user set balance=%s where "
"account=%s", (yue, bank['account']))
con.commit()
break
elif flag == 2:
print("密码错误!")
continue
elif flag == 3:
print("账号不存在!")
continue
elif step == "4":
flag = login()
if isinstance(flag, dict):
bank = flag['info']
yue = bank['balance']
acc1 = bank['account']
# A zero balance cannot be transferred
if yue == 0:
print(f"你好,{bank["username"]},您的余额为0,不能使用转账业务")
continue
else:
print(f"你好,{bank["username"]},登录成功!账户当前余额为{yue}")
while True:
acc2 = input("请输入您要转账的账户:")
# Check whether the destination account exists
y = cursor.execute(
"select * from bank_user where account=%s", (acc2,))
x = cursor.fetchone()
if y == 1:
# Make sure the source and destination accounts differ
if acc2 != acc1:
zhuan = input("请输入您要转的金额:")
if zhuan == 'Q' or zhuan == 'q':
break
elif zhuan.isdigit():
zhuan = int(zhuan)
else:
print('转账请输入正数,输入Q/q可退出业务')
# Check the balance
if yue < zhuan:
print("您的余额不足,输入Q/q可退出业务")
break
else:
# Debit the source account
yue -= zhuan
print(f"转账成功!您的余额为{yue}")
cursor.execute(
"update bank_user set balance=%s where "
"account=%s", (yue, acc1))
con.commit()
# Credit the destination account
x['balance'] += zhuan
cursor.execute(
"update bank_user set balance=%s where "
"account=%s", (x['balance'], acc2))
con.commit()
break
else:
print('不能给自己转账,输入Q/q可退出业务')
continue
else:
print("您输入的账号不存在,输入Q/q可退出业务")
continue
elif flag == 2:
print("密码错误!")
continue
elif flag == 3:
print("账号不存在!")
continue
elif step == "5":
flag = login()
if isinstance(flag, dict):
bank = flag['info']
print(f"登录成功!账户当前信息如下:")
profile = '''
用户信息
---------------
账号:%s
姓名:%s
密码:%s
地址:%s-%s-%s-%s
开户行:%s
余额:%s
---------------
'''
print(profile % (bank['account'], bank['username'],
bank['password'], bank['country'],
bank['province'], bank['street'],
bank['house_number'], bank['bank'],
bank['balance']))
elif flag == 2:
print("密码错误!")
continue
elif flag == 3:
print("账号不存在!")
continue
elif step == "6":
break
con.commit()
cursor.close()
con.close()
| #!/usr/bin/python3
# encoding='utf-8'
# author:weibk
# @time:2021/9/23 19:10
import pymysql
import random
con = pymysql.connect(host="localhost",
user="root",
password="123456",
database="db",
charset="utf8")
cursor = con.cursor(cursor=pymysql.cursors.DictCursor)
print("*****************************")
print("* 中国工商银行 *")
print("* 账户管理系统 *")
print("* V1.0 *")
print("*****************************")
print("* *")
print("* 1.开户 *")
print("* 2.存款 *")
print("* 3.取款 *")
print("* 4.转账 *")
print("* 5.查询 *")
print("* 6.退出 *")
print("*****************************")
BANK_NAME = "中国工商银行"
MONEY_INIT = 0
# Look up account information by account number
def getinfo(account):
cursor.execute('select * from bank_user where account=%s', (account,))
result = cursor.fetchone()
return result
# Create a new user (open an account)
def useradd():
# Check whether the user table is full
s = cursor.execute("select * from bank_user")
if s == 100:
return 3
# Check whether the username already exists
while True:
username = input("请输入您的姓名:")
cursor.execute("select username from bank_user")
uname = cursor.fetchall()
for item in uname:
if username == item['username']:
return 2
break
password = input("请设置一个密码:")
print("请您填写地址:")
country = input("\t请输入您所在的国家:")
province = input("\t请输入您所在的城市:")
street = input("\t请输入您所在的街道:")
house_number = input("\t请输入您的门牌号:")
# Check whether the generated account number already exists; regenerate if so
while True:
account = str(random.randint(10, 99)) + str(
random.randint(10, 99)) + str(
random.randint(10, 99)) + str(random.randint(10, 99))
cursor.execute("select account from bank_user")
uname = cursor.fetchall()
for item in uname:
if account == item['account']:
continue
else:
break
cursor.execute("insert into bank_user values "
"(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
(repr(account), repr(username), repr(password),
repr(country), repr(province),
repr(street), repr(house_number),
repr(BANK_NAME), repr(MONEY_INIT)))
con.commit()
cursor.execute("select * from bank_user where account=%s", (account,))
info1 = cursor.fetchone()
return info1
# Login helper
def login():
while True:
acc = int(input("请输入您的账号"))
cursor.execute("select account from bank_user")
uname = cursor.fetchall()
for item in uname:
if acc == item['account']:
while True:
pwd = input("请输入密码:")
cursor.execute("select * from bank_user where "
"account=%s", (acc,))
info1 = cursor.fetchone()
if pwd == info1['password']:
return {"flag": 1, 'info': info1}
else:
return 2
else:
continue
return 3
while True:
step = input("请选择业务:")
if step == "1":
info = useradd()
print(type(info))
# If the account was created successfully, print the user info
if isinstance(info, dict):
profile = '''
用户信息
---------------
账号:%s
姓名:%s
密码:%s
地址:%s-%s-%s-%s
余额:%s
开户行:%s
---------------
'''
print("恭喜你开户成功!!,您的信息如下:")
print(profile % (info['account'], info['username'],
info['password'], info['country'],
info['province'], info['street'],
info['house_number'], info['bank'],
info['balance']))
elif info == 2:
print("该用户已存在")
continue
elif info == 3:
print("用户库已满暂不支持开户业务")
continue
elif step == "2":
flag = login()
if isinstance(flag, dict):
bank = flag['info']
yue = bank['balance']
print(f"你好,{bank['username']}登录成功!账户当前余额为{yue}")
# Deposit after a successful login
while True:
cunkuan = input("请输入您要存的金额:")
if cunkuan == 'Q' or cunkuan == 'q':
break
elif cunkuan.isdigit():
cunkuan = int(cunkuan)
else:
print('存款请输入正数,输入Q/q可退出业务')
continue
yue += cunkuan
print(f"存款成功!余额为{yue}")
cursor.execute("update bank_user set balance=%s where "
"account=%s", (yue, bank['account']))
con.commit()
break
elif flag == 2:
print("密码错误!")
continue
elif flag == 3:
print("账号不存在!")
continue
elif step == "3":
flag = login()
if isinstance(flag, dict):
bank = flag['info']
yue = bank['balance']
# Check whether the balance is zero
if yue == 0:
print(f"你好,{bank['username']},您的余额为0,不能使用取款业务")
continue
else:
print(f"你好,{bank['username']},登录成功!账户当前余额为{yue}")
while True:
qukuan = input("请输入您要取的金额:")
if qukuan == 'Q' or qukuan == 'q':
break
elif qukuan.isdigit():
qukuan = int(qukuan)
else:
print('取款请输入正数,输入Q/q可退出业务')
# Check whether the balance is sufficient
if yue < qukuan:
print('您的余额不足')
break
else:
yue -= qukuan
print(f"取款成功!余额为{yue}")
cursor.execute("update bank_user set balance=%s where "
"account=%s", (yue, bank['account']))
con.commit()
break
elif flag == 2:
print("密码错误!")
continue
elif flag == 3:
print("账号不存在!")
continue
elif step == "4":
flag = login()
if isinstance(flag, dict):
bank = flag['info']
yue = bank['balance']
acc1 = bank['account']
# A zero balance cannot be transferred
if yue == 0:
print(f"你好,{bank['username']},您的余额为0,不能使用转账业务")
continue
else:
print(f"你好,{bank['username']},登录成功!账户当前余额为{yue}")
while True:
acc2 = input("请输入您要转账的账户:")
# Check whether the destination account exists
y = cursor.execute(
"select * from bank_user where account=%s", (acc2,))
x = cursor.fetchone()
if y == 1:
# Make sure the source and destination accounts differ
if acc2 != acc1:
zhuan = input("请输入您要转的金额:")
if zhuan == 'Q' or zhuan == 'q':
break
elif zhuan.isdigit():
zhuan = int(zhuan)
else:
print('转账请输入正数,输入Q/q可退出业务')
# Check the balance
if yue < zhuan:
print("您的余额不足,输入Q/q可退出业务")
break
else:
# Debit the source account
yue -= zhuan
print(f"转账成功!您的余额为{yue}")
cursor.execute(
"update bank_user set balance=%s where "
"account=%s", (yue, acc1))
con.commit()
# Credit the destination account
x['balance'] += zhuan
cursor.execute(
"update bank_user set balance=%s where "
"account=%s", (x['balance'], acc2))
con.commit()
break
else:
print('不能给自己转账,输入Q/q可退出业务')
continue
else:
print("您输入的账号不存在,输入Q/q可退出业务")
continue
elif flag == 2:
print("密码错误!")
continue
elif flag == 3:
print("账号不存在!")
continue
elif step == "5":
flag = login()
if isinstance(flag, dict):
bank = flag['info']
print(f"登录成功!账户当前信息如下:")
profile = '''
用户信息
---------------
账号:%s
姓名:%s
密码:%s
地址:%s-%s-%s-%s
开户行:%s
余额:%s
---------------
'''
print(profile % (bank['account'], bank['username'],
bank['password'], bank['country'],
bank['province'], bank['street'],
bank['house_number'], bank['bank'],
bank['balance']))
elif flag == 2:
print("密码错误!")
continue
elif flag == 3:
print("账号不存在!")
continue
elif step == "6":
break
con.commit()
cursor.close()
con.close()
|
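One pitfall in the banking script above: the INSERT wraps every value in repr() before binding, so the stored fields contain literal quote characters and later comparisons against raw input are unlikely to match. pymysql's parameter binding already quotes values safely, so plain Python values are enough. A minimal sketch under that assumption (the bank_user column layout is taken from the script above; the sample values are made up):

import pymysql

con = pymysql.connect(host="localhost", user="root", password="123456",
                      database="db", charset="utf8")
with con.cursor() as cursor:
    # Let the driver do the quoting: pass the values themselves, not repr() of them.
    cursor.execute(
        "insert into bank_user values (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
        ("10203040", "alice", "secret", "China", "Beijing",
         "Main Street", "42", "中国工商银行", 0),
    )
con.commit()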
from classes.Humanoid import Humanoid
class Player(Humanoid):
def __init__(self, name, room, dmg=1, hp=10):
super().__init__(name, room, dmg, hp)
self.equipped = None
def __str__(self):
return f'{self.name}: ' + '{\n' + f'\t[\n\t\thp: {self.hp}/{self.max_hp},\n\t\tdmg: {self.dmg}\n\tequipped: {self.equipped}\n]'
def use_item(self, item):
item.use(self)
def print_inventory(self):
if len(self.inventory):
print(f'\n{self.name}\'s Inventory:')
for inventory_item in self.inventory:
print(f'\t{inventory_item.name}: {inventory_item.description}')
else:
print(
'\nYou have no items in your inventory.\n\nTry roaming around to find some items.')
def take_damage(self, attacker):
if not self.blocking:
self.hp -= attacker.dmg
if self.hp <= 0:
print('You have died. Better luck next time!')
else:
print(
f'You were hit with {attacker.dmg} damage. ({self.hp}/{self.max_hp})')
else:
self.blocking = False
print(f'You blocked {attacker.name}\'s attack!')
def equip(self, item):
if item.equippable:
item.use(self)
| from classes.Humanoid import Humanoid
class Player(Humanoid):
def __init__(self, name, room, dmg=1, hp=10):
super().__init__(name, room, dmg, hp)
self.equipped = None
def __str__(self):
return f'{self.name}: ' + '{\n' + f'\t[\n\t\thp: {self.hp}/{self.max_hp},\n\t\tdmg: {self.dmg}\n\tequipped: {self.equipped}\n]'
def use_item(self, item):
item.use(self)
def print_inventory(self):
if len(self.inventory):
print(f'\n{self.name}\'s Inventory:')
for inventory_item in self.inventory:
print(f'\t{inventory_item.name}: {inventory_item.description}')
else:
print(
'\nYou have no items in your inventory.\n\nTry roaming around to find some items.')
def take_damage(self, attacker):
if not self.blocking:
self.hp -= attacker.dmg
if self.hp <= 0:
print('You have died. Better luck next time!')
else:
print(
f'You were hit with {attacker.dmg} damage. ({self.hp}/{self.max_hp})')
else:
self.blocking = False
print(f'You blocked {attacker.name}\'s attack!')
def equip(self, item):
if item.equippable:
item.use(self)
|
import subprocess
import sys
import os
DEFAULT_ARGS=[]
if (os.path.exists("build")):
dl=[]
for r,ndl,fl in os.walk("build"):
r=r.replace("\\","/").strip("/")+"/"
for d in ndl:
dl.insert(0,r+d)
for f in fl:
os.remove(r+f)
for k in dl:
os.rmdir(k)
else:
os.mkdir("build")
if (os.name=="nt"):
cd=os.getcwd()
os.chdir("build")
if ("--release" in sys.argv):
if (subprocess.run(["cl","/Wv:18","/c","/permissive-","/Zc:preprocessor","/GS","/utf-8","/W3","/Zc:wchar_t","/Gm-","/sdl","/Zc:inline","/fp:precise","/D","NDEBUG","/D","_WINDOWS","/D","_UNICODE","/D","UNICODE","/errorReport:none","/WX","/Zc:forScope","/Gd","/Oi","/FC","/EHsc","/nologo","/diagnostics:column","/GL","/Gy","/Zi","/O2","/Oi","/MD","/I","../src/include","../src/main.c","../src/hilbert_curve_fft_compression/*.c"]).returncode!=0 or subprocess.run(["link","*.obj","/OUT:hilbert_curve_fft_compression.exe","/DYNAMICBASE","kernel32.lib","user32.lib","gdi32.lib","winspool.lib","comdlg32.lib","advapi32.lib","shell32.lib","ole32.lib","oleaut32.lib","uuid.lib","odbc32.lib","odbccp32.lib","/MACHINE:X64","/SUBSYSTEM:CONSOLE","/ERRORREPORT:none","/NOLOGO","/TLBID:1","/WX","/LTCG","/OPT:REF","/INCREMENTAL:NO","/OPT:ICF"]).returncode!=0):
os.chdir(cd)
sys.exit(1)
else:
if (subprocess.run(["cl","/Wv:18","/c","/permissive-","/Zc:preprocessor","/GS","/utf-8","/W3","/Zc:wchar_t","/Gm-","/sdl","/Zc:inline","/fp:precise","/D","_DEBUG","/D","_WINDOWS","/D","_UNICODE","/D","UNICODE","/errorReport:none","/WX","/Zc:forScope","/Gd","/Oi","/FC","/EHsc","/nologo","/diagnostics:column","/ZI","/Od","/RTC1","/MDd","/I","../src/include","../src/main.c","../src/hilbert_curve_fft_compression/*.c"]).returncode!=0 or subprocess.run(["link","*.obj","/OUT:hilbert_curve_fft_compression.exe","/DYNAMICBASE","kernel32.lib","user32.lib","gdi32.lib","winspool.lib","comdlg32.lib","advapi32.lib","shell32.lib","ole32.lib","oleaut32.lib","uuid.lib","odbc32.lib","odbccp32.lib","/MACHINE:X64","/SUBSYSTEM:CONSOLE","/ERRORREPORT:none","/NOLOGO","/TLBID:1","/WX","/DEBUG","/INCREMENTAL"]).returncode!=0):
os.chdir(cd)
sys.exit(1)
os.chdir(cd)
if ("--run" in sys.argv):
subprocess.run(["build/hilbert_curve_fft_compression.exe"]+DEFAULT_ARGS)
else:
if ("--release" in sys.argv):
fl=[]
for r,_,cfl in os.walk("src"):
r=r.replace("\\","/").strip("/")+"/"
for f in cfl:
if (f[-2:]==".c"):
fl.append(f"build/{(r+f).replace("/","$")}.o")
if (subprocess.run(["gcc","-Wall","-lm","-Werror","-O3","-c",r+f,"-o",f"build/{(r+f).replace("/","$")}.o","-Isrc/include"]).returncode!=0):
sys.exit(1)
if (subprocess.run(["gcc","-o","build/hilbert_curve_fft_compression"]+fl+["-lm"]).returncode!=0):
sys.exit(1)
else:
fl=[]
for r,_,cfl in os.walk("src"):
r=r.replace("\\","/").strip("/")+"/"
for f in cfl:
if (f[-2:]==".c"):
fl.append(f"build/{(r+f).replace("/","$")}.o")
if (subprocess.run(["gcc","-Wall","-lm","-Werror","-O0","-c",r+f,"-o",f"build/{(r+f).replace("/","$")}.o","-Isrc/include"]).returncode!=0):
sys.exit(1)
if (subprocess.run(["gcc","-o","build/hilbert_curve_fft_compression"]+fl+["-lm"]).returncode!=0):
sys.exit(1)
if ("--run" in sys.argv):
subprocess.run(["build/hilbert_curve_fft_compression"]+DEFAULT_ARGS)
| import subprocess
import sys
import os
DEFAULT_ARGS=[]
if (os.path.exists("build")):
dl=[]
for r,ndl,fl in os.walk("build"):
r=r.replace("\\","/").strip("/")+"/"
for d in ndl:
dl.insert(0,r+d)
for f in fl:
os.remove(r+f)
for k in dl:
os.rmdir(k)
else:
os.mkdir("build")
if (os.name=="nt"):
cd=os.getcwd()
os.chdir("build")
if ("--release" in sys.argv):
if (subprocess.run(["cl","/Wv:18","/c","/permissive-","/Zc:preprocessor","/GS","/utf-8","/W3","/Zc:wchar_t","/Gm-","/sdl","/Zc:inline","/fp:precise","/D","NDEBUG","/D","_WINDOWS","/D","_UNICODE","/D","UNICODE","/errorReport:none","/WX","/Zc:forScope","/Gd","/Oi","/FC","/EHsc","/nologo","/diagnostics:column","/GL","/Gy","/Zi","/O2","/Oi","/MD","/I","../src/include","../src/main.c","../src/hilbert_curve_fft_compression/*.c"]).returncode!=0 or subprocess.run(["link","*.obj","/OUT:hilbert_curve_fft_compression.exe","/DYNAMICBASE","kernel32.lib","user32.lib","gdi32.lib","winspool.lib","comdlg32.lib","advapi32.lib","shell32.lib","ole32.lib","oleaut32.lib","uuid.lib","odbc32.lib","odbccp32.lib","/MACHINE:X64","/SUBSYSTEM:CONSOLE","/ERRORREPORT:none","/NOLOGO","/TLBID:1","/WX","/LTCG","/OPT:REF","/INCREMENTAL:NO","/OPT:ICF"]).returncode!=0):
os.chdir(cd)
sys.exit(1)
else:
if (subprocess.run(["cl","/Wv:18","/c","/permissive-","/Zc:preprocessor","/GS","/utf-8","/W3","/Zc:wchar_t","/Gm-","/sdl","/Zc:inline","/fp:precise","/D","_DEBUG","/D","_WINDOWS","/D","_UNICODE","/D","UNICODE","/errorReport:none","/WX","/Zc:forScope","/Gd","/Oi","/FC","/EHsc","/nologo","/diagnostics:column","/ZI","/Od","/RTC1","/MDd","/I","../src/include","../src/main.c","../src/hilbert_curve_fft_compression/*.c"]).returncode!=0 or subprocess.run(["link","*.obj","/OUT:hilbert_curve_fft_compression.exe","/DYNAMICBASE","kernel32.lib","user32.lib","gdi32.lib","winspool.lib","comdlg32.lib","advapi32.lib","shell32.lib","ole32.lib","oleaut32.lib","uuid.lib","odbc32.lib","odbccp32.lib","/MACHINE:X64","/SUBSYSTEM:CONSOLE","/ERRORREPORT:none","/NOLOGO","/TLBID:1","/WX","/DEBUG","/INCREMENTAL"]).returncode!=0):
os.chdir(cd)
sys.exit(1)
os.chdir(cd)
if ("--run" in sys.argv):
subprocess.run(["build/hilbert_curve_fft_compression.exe"]+DEFAULT_ARGS)
else:
if ("--release" in sys.argv):
fl=[]
for r,_,cfl in os.walk("src"):
r=r.replace("\\","/").strip("/")+"/"
for f in cfl:
if (f[-2:]==".c"):
fl.append(f"build/{(r+f).replace('/','$')}.o")
if (subprocess.run(["gcc","-Wall","-lm","-Werror","-O3","-c",r+f,"-o",f"build/{(r+f).replace('/','$')}.o","-Isrc/include"]).returncode!=0):
sys.exit(1)
if (subprocess.run(["gcc","-o","build/hilbert_curve_fft_compression"]+fl+["-lm"]).returncode!=0):
sys.exit(1)
else:
fl=[]
for r,_,cfl in os.walk("src"):
r=r.replace("\\","/").strip("/")+"/"
for f in cfl:
if (f[-2:]==".c"):
fl.append(f"build/{(r+f).replace('/','$')}.o")
if (subprocess.run(["gcc","-Wall","-lm","-Werror","-O0","-c",r+f,"-o",f"build/{(r+f).replace('/','$')}.o","-Isrc/include"]).returncode!=0):
sys.exit(1)
if (subprocess.run(["gcc","-o","build/hilbert_curve_fft_compression"]+fl+["-lm"]).returncode!=0):
sys.exit(1)
if ("--run" in sys.argv):
subprocess.run(["build/hilbert_curve_fft_compression"]+DEFAULT_ARGS)
|
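The build script above checks subprocess.run(...).returncode != 0 by hand after every compiler invocation. An equivalent, slightly terser pattern is to pass check=True and catch CalledProcessError; a minimal sketch (the gcc flags and paths here are placeholders, not the full command lines used above):

import subprocess
import sys

try:
    # check=True raises CalledProcessError on a non-zero exit status.
    subprocess.run(["gcc", "-Wall", "-Werror", "-c", "src/main.c", "-o", "build/main.o"],
                   check=True)
except subprocess.CalledProcessError as error:
    print(f"build step failed with exit code {error.returncode}", file=sys.stderr)
    sys.exit(1)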
from flask import send_file
from python_helper import Constant as c
from python_helper import EnvironmentHelper, log
from python_framework import ResourceManager, FlaskUtil, HttpStatus, LogConstant
from queue_manager_api import QueueManager
import ModelAssociation
app = ResourceManager.initialize(__name__, ModelAssociation.MODEL, managerList=[
QueueManager()
])
@app.route(f'{app.api.baseUrl}/audios/<string:key>')
def getAudio(key=None):
log.info(getAudio, f'{LogConstant.CONTROLLER_SPACE}{FlaskUtil.safellyGetVerb()}{c.SPACE_DASH_SPACE}{FlaskUtil.safellyGetUrl()}')
try:
dto = app.api.resource.service.speak.findAudioByKey(key)
path = f'''{dto.path.split(f'src{EnvironmentHelper.OS_SEPARATOR}')[-1]}{EnvironmentHelper.OS_SEPARATOR}{dto.name}{c.DOT}{dto.extension}'''
return send_file(
path,
mimetype="audio/mp3",
as_attachment=False
), HttpStatus.OK
except Exception as exception:
MESSAGE_KEY = 'message'
responseDto = {MESSAGE_KEY: 'Audio not found'}
log.error(getAudio, responseDto.get(MESSAGE_KEY), exception=exception)
return responseDto, 404
| from flask import send_file
from python_helper import Constant as c
from python_helper import EnvironmentHelper, log
from python_framework import ResourceManager, FlaskUtil, HttpStatus, LogConstant
from queue_manager_api import QueueManager
import ModelAssociation
app = ResourceManager.initialize(__name__, ModelAssociation.MODEL, managerList=[
QueueManager()
])
@app.route(f'{app.api.baseUrl}/audios/<string:key>')
def getAudio(key=None):
log.info(getAudio, f'{LogConstant.CONTROLLER_SPACE}{FlaskUtil.safellyGetVerb()}{c.SPACE_DASH_SPACE}{FlaskUtil.safellyGetUrl()}')
try:
dto = app.api.resource.service.speak.findAudioByKey(key)
path = f'''{dto.path.split(f'src{EnvironmentHelper.OS_SEPARATOR}')[-1]}{EnvironmentHelper.OS_SEPARATOR}{dto.name}{c.DOT}{dto.extension}'''
return send_file(
path,
mimetype="audio/mp3",
as_attachment=False
), HttpStatus.OK
except Exception as exception:
MESSAGE_KEY = 'message'
responseDto = {MESSAGE_KEY: 'Audio not found'}
log.error(getAudio, responseDto.get(MESSAGE_KEY), exception=exception)
return responseDto, 404
|
#!/usr/bin/env python
import json
import os
import re
import sys
import tarfile
from urllib.request import urlretrieve
def main():
# Read in given out_file and create target directory for file download
with open(sys.argv[1]) as fh:
params = json.load(fh)
target_directory = params['output_data'][0]['extra_files_path']
os.mkdir(target_directory)
# Process parameters for metadata and file download
url = params['param_dict']['url'].rstrip("/") + "/" + params['param_dict']['file_name'].lstrip("/")
m = re.search(r"(.*?)(merged|refseq)?_vep_(\d+?)_", params['param_dict']['file_name'])
version = str(m.group(3))
cache_type = m.group(2) if m.group(2) else "default"
species = m.group(1).rstrip("_")
display_name = f"{species.capitalize().replace("_", " ")} {params["param_dict"]["dbkey"]} (V{version}{"" if cache_type == "default" else ", " + cache_type.capitalize()})"
# Download and extract given cache archive, remove archive afterwards
final_file, headers = urlretrieve(url, os.path.join(target_directory, params['param_dict']['file_name']))
tar = tarfile.open(final_file, "r:gz")
tar.extractall(target_directory)
tar.close()
os.remove(final_file)
# Construct metadata for the new data table entry
data_manager_dict = {
'data_tables': {
'vep_versioned_annotation_cache': [
{
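                    # Caution: str.strip(".tar.gz") strips any of the characters ".targz" from both ends, not the literal suffix.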
'value': params['param_dict']['file_name'].strip(".tar.gz"),
'dbkey': params['param_dict']['dbkey'],
'version': version,
'cachetype': cache_type,
'name': display_name,
'species': species,
'path': './%s' % params['param_dict']['file_name'].strip(".tar.gz")
}
]
}
}
# Save metadata to out_file
with open(sys.argv[1], 'w') as fh:
json.dump(data_manager_dict, fh, sort_keys=True)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
import json
import os
import re
import sys
import tarfile
from urllib.request import urlretrieve
def main():
# Read in given out_file and create target directory for file download
with open(sys.argv[1]) as fh:
params = json.load(fh)
target_directory = params['output_data'][0]['extra_files_path']
os.mkdir(target_directory)
# Process parameters for metadata and file download
url = params['param_dict']['url'].rstrip("/") + "/" + params['param_dict']['file_name'].lstrip("/")
m = re.search(r"(.*?)(merged|refseq)?_vep_(\d+?)_", params['param_dict']['file_name'])
version = str(m.group(3))
cache_type = m.group(2) if m.group(2) else "default"
species = m.group(1).rstrip("_")
display_name = f"{species.capitalize().replace('_', ' ')} {params['param_dict']['dbkey']} (V{version}{'' if cache_type == 'default' else ', ' + cache_type.capitalize()})"
# Download and extract given cache archive, remove archive afterwards
final_file, headers = urlretrieve(url, os.path.join(target_directory, params['param_dict']['file_name']))
tar = tarfile.open(final_file, "r:gz")
tar.extractall(target_directory)
tar.close()
os.remove(final_file)
# Construct metadata for the new data table entry
data_manager_dict = {
'data_tables': {
'vep_versioned_annotation_cache': [
{
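                    # Caution: str.strip(".tar.gz") strips any of the characters ".targz" from both ends, not the literal suffix.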
'value': params['param_dict']['file_name'].strip(".tar.gz"),
'dbkey': params['param_dict']['dbkey'],
'version': version,
'cachetype': cache_type,
'name': display_name,
'species': species,
'path': './%s' % params['param_dict']['file_name'].strip(".tar.gz")
}
]
}
}
# Save metadata to out_file
with open(sys.argv[1], 'w') as fh:
json.dump(data_manager_dict, fh, sort_keys=True)
if __name__ == "__main__":
main()
|
# RT Lib - Setting
from typing import (
TYPE_CHECKING, TypedDict, Optional, Union, Literal, Dict, Tuple, List,
overload, get_origin, get_args
)
from discord.ext import commands
import discord
from collections import defaultdict
from aiohttp import ClientSession
from functools import partial
from datetime import datetime
from ujson import dumps
from time import time
from pytz import utc
from . import websocket
from .slash import Option
if TYPE_CHECKING:
from .typed import RT
class CommandRunData(TypedDict):
command: str
kwargs: Dict[str, Union[str, int, float, bool]]
guild_id: Union[int, Literal[0]]
category: str
user_id: int
ip: str
class Setting:
@overload
def __init__(
_, mode: str, name: Optional[str] = None,
help_command: Tuple[str, str] = None, **kwargs
):
...
def __new__(cls, mode, name=None, help_command=None, **kwargs):
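        # NOTE: the early return below makes this decorator a no-op; the remaining body is unreachable.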
return lambda func: func
self = super().__new__(cls)
self.mode, self.name, self.kwargs = mode, name, kwargs
self.help_command = help_command
def _decorator(func):
func._setting = self
return func
return _decorator
class Context:
"ダッシュボードから呼ばれたコマンドで実行されるContextです。"
def __init__(
self, cog: "SettingManager", data: CommandRunData,
command: commands.Command, **kwargs
):
        # Convert IDs from strings to integers.
for key, value in list(data.items()):
if key.endswith("id"):
data[key] = int(value)
        # Set up the instance attributes.
self.data = data
self.setting_manager = cog
self.bot: "RT" = self.setting_manager.bot
self.guild: Optional[discord.Guild] = self.bot.get_guild(data["guild_id"])
self.created_at: datetime = datetime.now(utc)
self.edited_at = None
self.__setting_context__ = True
self.channel: Optional[
Union[discord.abc.GuildChannel, discord.DMChannel]
] = (
self.guild.get_channel(data["kwargs"].pop(
"channel_id", data["kwargs"].pop(
"channel", data["kwargs"].pop("Channel", 0)
)
))
if data["category"].endswith("guild")
else self.bot.get_user(data["user_id"])
)
self.author: Union[discord.User, discord.Member] = (
self.guild.get_member(data["user_id"]) if self.guild
else self.bot.get_user(data["user_id"])
)
        for key in list(kwargs):
            setattr(self, key, kwargs.pop(key, None))
self.command = command
self.cog = command.cog
self.voice_client: Optional[discord.VoiceClient] = \
getattr(self.guild, "voice_client", None)
self.prefix = "r2!" if self.bot.test else "rt!"
self.me: Union[discord.Member, discord.ClientUser] = \
getattr(self.guild, "me", self.bot.user)
self.message = self
self.reply = self.send
async def trigger_typing(self):
...
async def send(
self, content: str = None, embed: discord.Embed = None, *args, **kwargs
):
"返信をします。"
content = self.bot.cogs["Language"].get_text(
embed if embed else content, self.author.id
)
if isinstance(content, discord.Embed):
content = content.to_dict()
async with self.setting_manager.session.post(
f"{self.bot.get_url()}/api/settings/reply/{self.data["ip"]}",
json={"data": content}
) as r:
self.bot.print(
"[SettingManager]", "[Reply]",
f"Response: {await r.text()}, Content: {content}"
)
@overload
async def reply(
self, content: str = None, embed: discord.Embed = None, *args, **kwargs
):
...
async def delete(self) -> None:
...
class SettingManager(commands.Cog):
SUPPORTED_DISCORD_ANNOTATIONS = (
"Member", "User", "TextChannel", "VoiceChannel", "StageChannel",
"Thread", "Role"
)
SUPPORTED_ANNOTATIONS = (str, int, float, bool)
def __init__(self, bot: "RT"):
self.bot = bot
self.data: Dict[
str, Tuple[commands.Command, Setting]
] = {}
self.before = {}
@property
def session(self) -> ClientSession:
if not hasattr(self, "_session"):
self._session = ClientSession(
loop=self.bot.loop, json_serialize=partial(
dumps, ensure_ascii=False
)
)
return self._session
def get_parsed_args(self, annotation: object) -> Union[str, List[str]]:
"渡されたオブジェクトから設定項目の型の名前を判定し返します。"
if isinstance(annotation, Option):
annotation = annotation.annotation
if annotation in self.SUPPORTED_ANNOTATIONS:
return annotation.__name__
elif getattr(annotation, "__name__", "") in self.SUPPORTED_DISCORD_ANNOTATIONS:
return annotation.__name__.replace("Text", "").replace("Voice", "") \
.replace("Stage", "").replace("Thread", "Channel").replace("User", "Member")
elif (origin := get_origin(annotation)) == Union:
return ["Union"] + [self.get_parsed_args(arg) for arg in get_args(annotation)]
elif origin == Literal:
return ["Literal"] + list(get_args(annotation))
else:
return "str"
def reset(self):
self.data = {}
def add_command(self, command: commands.Command) -> None:
self.data[command.qualified_name] = (command, command.callback._setting)
@commands.Cog.listener()
async def on_command_add(self, command: commands.Command):
if hasattr(command.callback, "_setting"):
self.add_command(command)
@commands.Cog.listener("on_update_api")
async def update(self):
"APIにBotにあるコマンドの設定のJSONデータを送る。"
# バックエンド用のデータを作る。
data = defaultdict(dict)
for command, setting in self.data.values():
kwargs = {
parameter.name: (
ant := self.get_parsed_args(parameter.annotation),
"" if parameter.default == parameter.empty
else parameter.default,
parameter.kind == parameter.KEYWORD_ONLY \
and ant == "str"
) for parameter in command.clean_params.values()
}
kwargs.update({
key: (self.get_parsed_args(value), "", False)
for key, value in setting.kwargs.items()
})
data[setting.mode][command.qualified_name] = {
"help": (
self.bot.cogs["BotGeneral"].get_help_url(*setting.help_command)
if setting.help_command
else self.bot.cogs["BotGeneral"].get_command_url(command)
), "kwargs": kwargs, "sub_category": getattr(
command.parent, "name", None
), "headding": (
command.extras.get("headding")
or command.__original_kwargs__.get("headding")
), "display_name": setting.name or command.name
}
        # Send the data.
async with self.bot.session.post(
f"{self.bot.get_url()}/api/settings/commands/update",
json=data
) as r:
self.bot.print("[SettingManager]", "[Updater]", time(), await r.text())
self.before = data
@websocket.websocket("/api/settings/websocket", auto_connect=True, reconnect=True)
async def setting_websocket(self, ws: websocket.WebSocket, _):
        # This lets the bot respond immediately when a user updates a setting from the dashboard.
await ws.send("on_ready")
@setting_websocket.event("on_post")
async def post(self, ws: websocket.WebSocket, data: CommandRunData):
if isinstance(data, dict):
self.bot.loop.create_task(
self.run_command(self.data[data["command"]][0], data),
name=f"UpdateSetting[{data.get("command")}]: {data.get("user_id")}"
)
await ws.send("on_posted")
@setting_websocket.event("on_posted")
async def posted(self, ws: websocket.WebSocket, _):
await self.setting_websocket(ws, None)
async def run_command(self, command: commands.Command, data: CommandRunData):
"コマンドを走らせます。"
ctx = None
try:
            # Assemble the command message.
content = f"{self.bot.command_prefix[0]}{command.qualified_name}"
for parameter in command.clean_params.values():
tentative = f' "{data['kwargs'].get(parameter.name, '')}"'
if parameter.kind == parameter.KEYWORD_ONLY:
tentative = f" {tentative[2:-1]}"
content += tentative
            # Check that the command can run, then execute it with this custom Context.
ctx = Context(self, data, command)
ctx.content = content
ctx._state = self.bot.http
parsed_ctx = await self.bot.get_context(ctx)
ctx.view = parsed_ctx.view
ctx.args, ctx.kwargs = parsed_ctx.args, parsed_ctx.kwargs
for name in dir(parsed_ctx):
if not name.startswith(
(
"__", "send", "reply", "trigger", "typing", "created",
"channel", "message", "guild"
)
):
setattr(ctx, name, getattr(parsed_ctx, name))
return await self.bot.invoke(ctx.message)
except Exception as e:
if ctx:
self.bot.dispatch("command_error", ctx, e)
def cog_unload(self):
if hasattr(self, "_session"):
self.bot.loop.create_task(self._session.close())
def setup(bot):
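    # NOTE: the early return below means bot.add_cog is never executed, so this cog is not registered.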
return
bot.add_cog(SettingManager(bot))
| # RT Lib - Setting
from typing import (
TYPE_CHECKING, TypedDict, Optional, Union, Literal, Dict, Tuple, List,
overload, get_origin, get_args
)
from discord.ext import commands
import discord
from collections import defaultdict
from aiohttp import ClientSession
from functools import partial
from datetime import datetime
from ujson import dumps
from time import time
from pytz import utc
from . import websocket
from .slash import Option
if TYPE_CHECKING:
from .typed import RT
class CommandRunData(TypedDict):
command: str
kwargs: Dict[str, Union[str, int, float, bool]]
guild_id: Union[int, Literal[0]]
category: str
user_id: int
ip: str
class Setting:
@overload
def __init__(
_, mode: str, name: Optional[str] = None,
help_command: Tuple[str, str] = None, **kwargs
):
...
def __new__(cls, mode, name=None, help_command=None, **kwargs):
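        # NOTE: the early return below makes this decorator a no-op; the remaining body is unreachable.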
return lambda func: func
self = super().__new__(cls)
self.mode, self.name, self.kwargs = mode, name, kwargs
self.help_command = help_command
def _decorator(func):
func._setting = self
return func
return _decorator
class Context:
"ダッシュボードから呼ばれたコマンドで実行されるContextです。"
def __init__(
self, cog: "SettingManager", data: CommandRunData,
command: commands.Command, **kwargs
):
        # Convert IDs from strings to integers.
for key, value in list(data.items()):
if key.endswith("id"):
data[key] = int(value)
        # Set up the instance attributes.
self.data = data
self.setting_manager = cog
self.bot: "RT" = self.setting_manager.bot
self.guild: Optional[discord.Guild] = self.bot.get_guild(data["guild_id"])
self.created_at: datetime = datetime.now(utc)
self.edited_at = None
self.__setting_context__ = True
self.channel: Optional[
Union[discord.abc.GuildChannel, discord.DMChannel]
] = (
self.guild.get_channel(data["kwargs"].pop(
"channel_id", data["kwargs"].pop(
"channel", data["kwargs"].pop("Channel", 0)
)
))
if data["category"].endswith("guild")
else self.bot.get_user(data["user_id"])
)
self.author: Union[discord.User, discord.Member] = (
self.guild.get_member(data["user_id"]) if self.guild
else self.bot.get_user(data["user_id"])
)
        for key in list(kwargs):
            setattr(self, key, kwargs.pop(key, None))
self.command = command
self.cog = command.cog
self.voice_client: Optional[discord.VoiceClient] = \
getattr(self.guild, "voice_client", None)
self.prefix = "r2!" if self.bot.test else "rt!"
self.me: Union[discord.Member, discord.ClientUser] = \
getattr(self.guild, "me", self.bot.user)
self.message = self
self.reply = self.send
async def trigger_typing(self):
...
async def send(
self, content: str = None, embed: discord.Embed = None, *args, **kwargs
):
"返信をします。"
content = self.bot.cogs["Language"].get_text(
embed if embed else content, self.author.id
)
if isinstance(content, discord.Embed):
content = content.to_dict()
async with self.setting_manager.session.post(
f"{self.bot.get_url()}/api/settings/reply/{self.data['ip']}",
json={"data": content}
) as r:
self.bot.print(
"[SettingManager]", "[Reply]",
f"Response: {await r.text()}, Content: {content}"
)
@overload
async def reply(
self, content: str = None, embed: discord.Embed = None, *args, **kwargs
):
...
async def delete(self) -> None:
...
class SettingManager(commands.Cog):
SUPPORTED_DISCORD_ANNOTATIONS = (
"Member", "User", "TextChannel", "VoiceChannel", "StageChannel",
"Thread", "Role"
)
SUPPORTED_ANNOTATIONS = (str, int, float, bool)
def __init__(self, bot: "RT"):
self.bot = bot
self.data: Dict[
str, Tuple[commands.Command, Setting]
] = {}
self.before = {}
@property
def session(self) -> ClientSession:
if not hasattr(self, "_session"):
self._session = ClientSession(
loop=self.bot.loop, json_serialize=partial(
dumps, ensure_ascii=False
)
)
return self._session
def get_parsed_args(self, annotation: object) -> Union[str, List[str]]:
"渡されたオブジェクトから設定項目の型の名前を判定し返します。"
if isinstance(annotation, Option):
annotation = annotation.annotation
if annotation in self.SUPPORTED_ANNOTATIONS:
return annotation.__name__
elif getattr(annotation, "__name__", "") in self.SUPPORTED_DISCORD_ANNOTATIONS:
return annotation.__name__.replace("Text", "").replace("Voice", "") \
.replace("Stage", "").replace("Thread", "Channel").replace("User", "Member")
elif (origin := get_origin(annotation)) == Union:
return ["Union"] + [self.get_parsed_args(arg) for arg in get_args(annotation)]
elif origin == Literal:
return ["Literal"] + list(get_args(annotation))
else:
return "str"
def reset(self):
self.data = {}
def add_command(self, command: commands.Command) -> None:
self.data[command.qualified_name] = (command, command.callback._setting)
@commands.Cog.listener()
async def on_command_add(self, command: commands.Command):
if hasattr(command.callback, "_setting"):
self.add_command(command)
@commands.Cog.listener("on_update_api")
async def update(self):
"APIにBotにあるコマンドの設定のJSONデータを送る。"
# バックエンド用のデータを作る。
data = defaultdict(dict)
for command, setting in self.data.values():
kwargs = {
parameter.name: (
ant := self.get_parsed_args(parameter.annotation),
"" if parameter.default == parameter.empty
else parameter.default,
parameter.kind == parameter.KEYWORD_ONLY \
and ant == "str"
) for parameter in command.clean_params.values()
}
kwargs.update({
key: (self.get_parsed_args(value), "", False)
for key, value in setting.kwargs.items()
})
data[setting.mode][command.qualified_name] = {
"help": (
self.bot.cogs["BotGeneral"].get_help_url(*setting.help_command)
if setting.help_command
else self.bot.cogs["BotGeneral"].get_command_url(command)
), "kwargs": kwargs, "sub_category": getattr(
command.parent, "name", None
), "headding": (
command.extras.get("headding")
or command.__original_kwargs__.get("headding")
), "display_name": setting.name or command.name
}
        # Send the data.
async with self.bot.session.post(
f"{self.bot.get_url()}/api/settings/commands/update",
json=data
) as r:
self.bot.print("[SettingManager]", "[Updater]", time(), await r.text())
self.before = data
@websocket.websocket("/api/settings/websocket", auto_connect=True, reconnect=True)
async def setting_websocket(self, ws: websocket.WebSocket, _):
        # This lets the bot respond immediately when a user updates a setting from the dashboard.
await ws.send("on_ready")
@setting_websocket.event("on_post")
async def post(self, ws: websocket.WebSocket, data: CommandRunData):
if isinstance(data, dict):
self.bot.loop.create_task(
self.run_command(self.data[data["command"]][0], data),
name=f"UpdateSetting[{data.get('command')}]: {data.get('user_id')}"
)
await ws.send("on_posted")
@setting_websocket.event("on_posted")
async def posted(self, ws: websocket.WebSocket, _):
await self.setting_websocket(ws, None)
async def run_command(self, command: commands.Command, data: CommandRunData):
"コマンドを走らせます。"
ctx = None
try:
            # Assemble the command message.
content = f"{self.bot.command_prefix[0]}{command.qualified_name}"
for parameter in command.clean_params.values():
tentative = f' "{data["kwargs"].get(parameter.name, "")}"'
if parameter.kind == parameter.KEYWORD_ONLY:
tentative = f" {tentative[2:-1]}"
content += tentative
            # Check that the command can run, then execute it with this custom Context.
ctx = Context(self, data, command)
ctx.content = content
ctx._state = self.bot.http
parsed_ctx = await self.bot.get_context(ctx)
ctx.view = parsed_ctx.view
ctx.args, ctx.kwargs = parsed_ctx.args, parsed_ctx.kwargs
for name in dir(parsed_ctx):
if not name.startswith(
(
"__", "send", "reply", "trigger", "typing", "created",
"channel", "message", "guild"
)
):
setattr(ctx, name, getattr(parsed_ctx, name))
return await self.bot.invoke(ctx.message)
except Exception as e:
if ctx:
self.bot.dispatch("command_error", ctx, e)
def cog_unload(self):
if hasattr(self, "_session"):
self.bot.loop.create_task(self._session.close())
def setup(bot):
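    # NOTE: the early return below means bot.add_cog is never executed, so this cog is not registered.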
return
bot.add_cog(SettingManager(bot))
|
from django.shortcuts import render, get_object_or_404
from .models import Post, Comment
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .forms import EmailPostForm, CommentForm, SearchForm
from django.core.mail import send_mail
from taggit.models import Tag
from django.db.models import Count
from django.contrib.postgres.search import (SearchVector, SearchQuery, SearchRank)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView, FormView, CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.utils.text import slugify
@login_required()
def post_list(request, tag_slug=None):
#posts = Post.published.all()
object_list = Post.published.all()
tag = None
if tag_slug:
tag = get_object_or_404(Tag, slug=tag_slug)
object_list = object_list.filter(tags__in=[tag])
paginator = Paginator(object_list, 2)
page = request.GET.get('page')
try:
        posts = paginator.page(page)  # ask the paginator for the requested page
    except PageNotAnInteger:
        posts = paginator.page(1)  # if the page number is not an integer, serve the first page
except EmptyPage:
posts = paginator.page(paginator.num_pages)
return render(request, 'blog/post/list.html', {'posts': posts, 'page': page, 'tag': tag}) #return http response
class PostListView(LoginRequiredMixin, ListView):
queryset = Post.published.all()
context_object_name = 'posts'
paginate_by = 2
template_name = 'blog/post/list.html'
def get_queryset(self):
qs = super().get_queryset()
tag_slug = self.kwargs.get('tag_slug')
if tag_slug:
tag = get_object_or_404(Tag, slug=tag_slug)
qs = qs.filter(tags__in=[tag])
self.tag = tag
else:
self.tag = None
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.tag:
context['tag'] = self.tag
return context
@login_required()
def post_detail(request, year, month, day, post):
post = get_object_or_404(Post, slug=post, status='published', publish__year=year, publish__month=month, publish__day=day)
comments = post.comments.filter(active=True)
new_comment = None
if request.method == 'POST':
comment_form = CommentForm(data=request.POST)
if comment_form.is_valid():
new_comment = comment_form.save(commit=False)
new_comment.post = post
new_comment.save()
else:
comment_form = CommentForm()
    post_tags_ids = post.tags.values_list('id', flat=True)  # get the tag ids as a flat sequence, e.g. (1, 2, 3)
similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4]
return render(request, 'blog/post/detail.html', {'post': post, 'comments': comments, 'new_comment': new_comment, 'comment_form': comment_form, 'similar_posts': similar_posts})
class PostDetailView(LoginRequiredMixin, FormView):
form_class = CommentForm
template_name = 'blog/post/detail.html'
def get_initial(self):
pk = self.kwargs.get('pk')
slug = self.kwargs.get('slug')
self.post = get_object_or_404(Post, pk=pk, slug=slug)
self.comments = self.post.comments.filter(active=True)
self.new_comment = None
        post_tags_ids = self.post.tags.values_list('id', flat=True)  # get the tag ids as a flat sequence, e.g. (1, 2, 3)
similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=self.post.id)
self.similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4]
return super().get_initial()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['post'] = self.post
context['comments'] = self.comments
context['similar_posts'] = self.similar_posts
return context
def form_valid(self, form):
new_comment = form.save(commit=False)
new_comment.post = self.post
new_comment.save()
context = self.get_context_data()
context['new_comment'] = new_comment
return render(self.request, self.template_name, context=context)
@login_required()
def post_share(request, post_id):
post = get_object_or_404(Post, id=post_id, status='published')
sent = False
if request.method == 'POST':
        form = EmailPostForm(request.POST)  # form bound with the submitted data
if form.is_valid():
cd = form.cleaned_data
post_url = request.build_absolute_uri(post.get_absolute_url())
subject = f"{cd["name"]} recommends you read {post.title}" #f itu untuk format bisa terima variable name, title
message = (f"Read {post.title} at {post_url}\n\n"
f"{cd["name"]} comments: {cd["comments"]}")
send_mail(subject, message, 'django.patronus@gmail.com', [cd['to'],])
sent = True
else:
        form = EmailPostForm()  # a fresh, empty form
return render(request, 'blog/post/share.html', { 'post' : post, 'form' : form, 'sent' : sent })
@login_required()
def post_search(request):
    query = None
    results = []
    if 'query' in request.GET:
form = SearchForm(request.GET)
if form.is_valid():
query = form.cleaned_data['query']
            #results = Post.published.annotate(search = SearchVector('title', 'body')).filter(search=query)  # SearchVector allows searching across multiple fields
search_vector = SearchVector('title', 'body')
search_query = SearchQuery(query)
results = Post.published.annotate(search=search_vector, rank=SearchRank(search_vector, search_query)).filter(search=search_query).order_by('-rank')
else:
form = SearchForm()
query = None
results = []
return render(request, 'blog/post/search.html', {'form': form, 'query': query, 'results': results})
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['title', 'body', 'tags']
template_name = 'blog/post/post_form.html'
def form_valid(self, form):
form.instance.author = self.request.user
form.instance.status = 'published'
form.instance.slug = slugify(form.instance.title, allow_unicode=True)
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UpdateView):
model = Post
fields = ['title', 'body', 'tags']
template_name = 'blog/post/post_form.html'
query_pk_and_slug = True
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(author = self.request.user)
def form_valid(self, form):
form.instance.slug = slugify(form.instance.title, allow_unicode=True)
return super().form_valid(form)
class PostDeleteView(LoginRequiredMixin, DeleteView):
model = Post
template_name = 'blog/post/post_confirm_delete.html'
success_url = reverse_lazy('blog:post_list')
query_pk_and_slug = True
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(author = self.request.user) | from django.shortcuts import render, get_object_or_404
from .models import Post, Comment
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .forms import EmailPostForm, CommentForm, SearchForm
from django.core.mail import send_mail
from taggit.models import Tag
from django.db.models import Count
from django.contrib.postgres.search import (SearchVector, SearchQuery, SearchRank)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView, FormView, CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.utils.text import slugify
@login_required()
def post_list(request, tag_slug=None):
#posts = Post.published.all()
object_list = Post.published.all()
tag = None
if tag_slug:
tag = get_object_or_404(Tag, slug=tag_slug)
object_list = object_list.filter(tags__in=[tag])
paginator = Paginator(object_list, 2)
page = request.GET.get('page')
try:
        posts = paginator.page(page)  # ask the paginator for the requested page
    except PageNotAnInteger:
        posts = paginator.page(1)  # if the page number is not an integer, serve the first page
except EmptyPage:
posts = paginator.page(paginator.num_pages)
return render(request, 'blog/post/list.html', {'posts': posts, 'page': page, 'tag': tag}) #return http response
class PostListView(LoginRequiredMixin, ListView):
queryset = Post.published.all()
context_object_name = 'posts'
paginate_by = 2
template_name = 'blog/post/list.html'
def get_queryset(self):
qs = super().get_queryset()
tag_slug = self.kwargs.get('tag_slug')
if tag_slug:
tag = get_object_or_404(Tag, slug=tag_slug)
qs = qs.filter(tags__in=[tag])
self.tag = tag
else:
self.tag = None
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.tag:
context['tag'] = self.tag
return context
@login_required()
def post_detail(request, year, month, day, post):
post = get_object_or_404(Post, slug=post, status='published', publish__year=year, publish__month=month, publish__day=day)
comments = post.comments.filter(active=True)
new_comment = None
if request.method == 'POST':
comment_form = CommentForm(data=request.POST)
if comment_form.is_valid():
new_comment = comment_form.save(commit=False)
new_comment.post = post
new_comment.save()
else:
comment_form = CommentForm()
    post_tags_ids = post.tags.values_list('id', flat=True)  # get the tag ids as a flat sequence, e.g. (1, 2, 3)
similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4]
return render(request, 'blog/post/detail.html', {'post': post, 'comments': comments, 'new_comment': new_comment, 'comment_form': comment_form, 'similar_posts': similar_posts})
class PostDetailView(LoginRequiredMixin, FormView):
form_class = CommentForm
template_name = 'blog/post/detail.html'
def get_initial(self):
pk = self.kwargs.get('pk')
slug = self.kwargs.get('slug')
self.post = get_object_or_404(Post, pk=pk, slug=slug)
self.comments = self.post.comments.filter(active=True)
self.new_comment = None
        post_tags_ids = self.post.tags.values_list('id', flat=True)  # get the tag ids as a flat sequence, e.g. (1, 2, 3)
similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=self.post.id)
self.similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4]
return super().get_initial()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['post'] = self.post
context['comments'] = self.comments
context['similar_posts'] = self.similar_posts
return context
def form_valid(self, form):
new_comment = form.save(commit=False)
new_comment.post = self.post
new_comment.save()
context = self.get_context_data()
context['new_comment'] = new_comment
return render(self.request, self.template_name, context=context)
@login_required()
def post_share(request, post_id):
post = get_object_or_404(Post, id=post_id, status='published')
sent = False
if request.method == 'POST':
        form = EmailPostForm(request.POST)  # form bound with the submitted data
if form.is_valid():
cd = form.cleaned_data
post_url = request.build_absolute_uri(post.get_absolute_url())
subject = f"{cd['name']} recommends you read {post.title}" #f itu untuk format bisa terima variable name, title
message = (f"Read {post.title} at {post_url}\n\n"
f"{cd['name']} comments: {cd['comments']}")
send_mail(subject, message, 'django.patronus@gmail.com', [cd['to'],])
sent = True
else:
        form = EmailPostForm()  # a fresh, empty form
return render(request, 'blog/post/share.html', { 'post' : post, 'form' : form, 'sent' : sent })
@login_required()
def post_search(request):
    query = None
    results = []
    if 'query' in request.GET:
form = SearchForm(request.GET)
if form.is_valid():
query = form.cleaned_data['query']
            #results = Post.published.annotate(search = SearchVector('title', 'body')).filter(search=query)  # SearchVector allows searching across multiple fields
search_vector = SearchVector('title', 'body')
search_query = SearchQuery(query)
results = Post.published.annotate(search=search_vector, rank=SearchRank(search_vector, search_query)).filter(search=search_query).order_by('-rank')
else:
form = SearchForm()
query = None
results = []
return render(request, 'blog/post/search.html', {'form': form, 'query': query, 'results': results})
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['title', 'body', 'tags']
template_name = 'blog/post/post_form.html'
def form_valid(self, form):
form.instance.author = self.request.user
form.instance.status = 'published'
form.instance.slug = slugify(form.instance.title, allow_unicode=True)
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UpdateView):
model = Post
fields = ['title', 'body', 'tags']
template_name = 'blog/post/post_form.html'
query_pk_and_slug = True
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(author = self.request.user)
def form_valid(self, form):
form.instance.slug = slugify(form.instance.title, allow_unicode=True)
return super().form_valid(form)
class PostDeleteView(LoginRequiredMixin, DeleteView):
model = Post
template_name = 'blog/post/post_confirm_delete.html'
success_url = reverse_lazy('blog:post_list')
query_pk_and_slug = True
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(author = self.request.user) |
# Import libraries
from arcgis import gis
import logging
import json
#carole was here again
#Kerry test
secrets = r"H:\secrets\maphub_config.json"
# this is one method to
def readConfig(configFile):
# returns list of parameters
# with key 'name'
"""
    Reads the config file into a dictionary.
"""
logging.debug("Loading config")
with open(configFile) as json_file:
try:
d = json.load(json_file)
except:
print ("failed to parse configuration")
else:
return d
logging.debug("Config Loaded")
sites = readConfig(secrets)
for site in sites:
if site['name'].lower() == 'bc maphub':
params = site['params']
mh = gis.GIS(params['mapurl'],params['usr'],params['password'])
contents = mh.content.search(query="owner:{}".format(params['usr']))
for item in contents:
print (f"Name:{item["name"]} Id: {item["id"]}")
| # Import libraries
from arcgis import gis
import logging
import json
#carole was here again
#Kerry test
secrets = r"H:\secrets\maphub_config.json"
# this is one method to
def readConfig(configFile):
# returns list of parameters
# with key 'name'
"""
    Reads the config file into a dictionary.
"""
logging.debug("Loading config")
with open(configFile) as json_file:
try:
d = json.load(json_file)
except:
print ("failed to parse configuration")
else:
return d
logging.debug("Config Loaded")
sites = readConfig(secrets)
for site in sites:
if site['name'].lower() == 'bc maphub':
params = site['params']
mh = gis.GIS(params['mapurl'],params['usr'],params['password'])
contents = mh.content.search(query="owner:{}".format(params['usr']))
for item in contents:
print (f"Name:{item['name']} Id: {item['id']}")
|
import requests
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.error import HTTPError
import re
import json
from base64 import b64encode
def get_playlists(spotify_url):
with open('MY_SECRETS.json', 'r') as f:
spotify_key = json.load(f)['SPOTIFY_KEY']
playlist_id = spotify_url.split('/')[-1].split('?')[0]
r = requests.get(f"https://api.spotify.com/v1/playlists/{playlist_id}", headers={'Authorization': f'Bearer {spotify_key}'})
if r.status_code == 400 or r.status_code == 401:
raise TypeError('Invalid Spotify Token')
returned_tracks = {}
playlist_name = r.json()['name']
r = requests.get(f"https://api.spotify.com/v1/playlists/{playlist_id}/tracks", headers={'Authorization': f'Bearer {spotify_key}'})
data = r.json()
tracks = data['items']
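    # Follow the paginated 'next' links until every track in the playlist has been collected.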
while data['next']:
r = requests.get(data['next'], headers={'Authorization': f'Bearer {spotify_key}'})
data = r.json()
tracks = tracks + data['items']
for track in tracks:
song_name = track['track']['name']
artists = []
for artist in track['track']['artists']:
artists.append(artist['name'])
artist_name = ' '.join(artists)
try:
query_string = urlencode({'search_query': artist_name + ' ' + song_name})
htm_content = urlopen('http://www.youtube.com/results?' + query_string)
search_results = re.findall(r'/watch\?v=(.{11})', htm_content.read().decode())
returned_tracks.update({f'{song_name}': f'http://www.youtube.com/watch?v={search_results[0]}'})
except HTTPError:
print(f'Couldn\'t download "{song_name}", continuing')
continue
return playlist_name, returned_tracks
def get_access_token():
with open('MY_SECRETS.json', 'r') as f:
load_file = json.load(f)
spotify_client_id = load_file['spotify_client_id']
spotify_client_secret = load_file['spotify_client_secret']
headers = {
'Authorization': f'Basic {b64encode(f'{spotify_client_id}:{spotify_client_secret}'.encode()).decode()}',
}
data = {
'grant_type': 'client_credentials'
}
r = requests.post('https://accounts.spotify.com/api/token', headers=headers, data=data)
token = r.json()['access_token']
updated_dict = {
"spotify_client_id": f"{spotify_client_id}",
"spotify_client_secret": f"{spotify_client_secret}",
"SPOTIFY_KEY": token
}
with open('MY_SECRETS.json', 'w') as f:
json.dump(updated_dict, f)
| import requests
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.error import HTTPError
import re
import json
from base64 import b64encode
def get_playlists(spotify_url):
with open('MY_SECRETS.json', 'r') as f:
spotify_key = json.load(f)['SPOTIFY_KEY']
playlist_id = spotify_url.split('/')[-1].split('?')[0]
r = requests.get(f"https://api.spotify.com/v1/playlists/{playlist_id}", headers={'Authorization': f'Bearer {spotify_key}'})
if r.status_code == 400 or r.status_code == 401:
raise TypeError('Invalid Spotify Token')
returned_tracks = {}
playlist_name = r.json()['name']
r = requests.get(f"https://api.spotify.com/v1/playlists/{playlist_id}/tracks", headers={'Authorization': f'Bearer {spotify_key}'})
data = r.json()
tracks = data['items']
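    # Follow the paginated 'next' links until every track in the playlist has been collected.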
while data['next']:
r = requests.get(data['next'], headers={'Authorization': f'Bearer {spotify_key}'})
data = r.json()
tracks = tracks + data['items']
for track in tracks:
song_name = track['track']['name']
artists = []
for artist in track['track']['artists']:
artists.append(artist['name'])
artist_name = ' '.join(artists)
try:
query_string = urlencode({'search_query': artist_name + ' ' + song_name})
htm_content = urlopen('http://www.youtube.com/results?' + query_string)
search_results = re.findall(r'/watch\?v=(.{11})', htm_content.read().decode())
returned_tracks.update({f'{song_name}': f'http://www.youtube.com/watch?v={search_results[0]}'})
except HTTPError:
print(f'Couldn\'t download "{song_name}", continuing')
continue
return playlist_name, returned_tracks
def get_access_token():
with open('MY_SECRETS.json', 'r') as f:
load_file = json.load(f)
spotify_client_id = load_file['spotify_client_id']
spotify_client_secret = load_file['spotify_client_secret']
headers = {
'Authorization': f'Basic {b64encode(f"{spotify_client_id}:{spotify_client_secret}".encode()).decode()}',
}
data = {
'grant_type': 'client_credentials'
}
r = requests.post('https://accounts.spotify.com/api/token', headers=headers, data=data)
token = r.json()['access_token']
updated_dict = {
"spotify_client_id": f"{spotify_client_id}",
"spotify_client_secret": f"{spotify_client_secret}",
"SPOTIFY_KEY": token
}
with open('MY_SECRETS.json', 'w') as f:
json.dump(updated_dict, f)
|
import os
import yaml
import pandas as pd
import xml.etree.ElementTree as ET
from types import SimpleNamespace
from sklearn.model_selection import train_test_split
from utils.experiment_utils import create_linspace
from utils.preprocess import *
SOURCE_PATH = './source_data'
DATA_PATH = './data'
CONFIG_PATH = './conf'
DATASETS = ['ami', 'emoevent', 'haternet', 'hateval2019', 'mex-a3t', 'universal_joy', 'tass2019', 'detoxis']
class Colors:
BLACK = '\033[1;30m'
RED = '\033[1;31m'
GREEN = '\033[1;32m'
YELLOW = '\033[1;33m'
BLUE = '\033[1;34m'
PURPLE = '\033[1;35m'
CYAN = '\033[1;36m'
WHITE = '\033[1;37m'
ENDC = '\033[0m'
def colored(text, color):
return f'{color}{text}{Colors.ENDC}'
def write_split_files(dataset, trn, dev, tst):
trn.to_csv(os.path.join(DATA_PATH, dataset, 'train_es.tsv'), index=False, sep='\t', mode='w')
dev.to_csv(os.path.join(DATA_PATH, dataset, 'dev_es.tsv'), index=False, sep='\t', mode='w')
tst.to_csv(os.path.join(DATA_PATH, dataset, 'test_es.tsv'), index=False, sep='\t', mode='w')
def prepare_files():
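    # Builds train/dev/test splits for each source corpus and writes them as tab-separated files under DATA_PATH.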
seed = 100
test_ratio = 0.2
# EmoEvent and HaterNet
filename = 'original_es.tsv'
data = {'emoevent': pd.read_csv(os.path.join(SOURCE_PATH, 'emoevent', filename), sep='\t'),
'haternet': pd.read_csv(os.path.join(SOURCE_PATH, 'haternet', filename), sep=';\\|\\|;',
names=['id', 'text', 'hateful'],
header=None,
engine="python")}
labels = {'emoevent': 'offensive',
'haternet': 'hateful'}
for dataset in data:
data[dataset].text = basic_text_normalization(data[dataset].text)
y = data[dataset][labels[dataset]]
trn, tst = train_test_split(data[dataset], shuffle=True, test_size=test_ratio, stratify=y, random_state=seed)
y = trn[labels[dataset]]
trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=y, random_state=seed)
write_split_files(dataset, trn, dev, tst)
print(f'Dataset: {dataset} --> N. Instances: {data[dataset].shape[0]} --> Train, Dev., Test: '
f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')
# HatEval 2019
dataset = 'hateval2019'
n_instances = {}
for phase in ['train', 'dev', 'test']:
data = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'original_{phase}_es.csv'), sep=',')
data.text = basic_text_normalization(data.text)
data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\t', mode='w')
n_instances[phase] = data.shape[0]
print(f'Dataset: {dataset} --> N. Instances: {sum(n_instances.values())} --> Train, Dev., Test: '
f'{n_instances['train']}, {n_instances['dev']}, {n_instances['test']}')
# MEX-A3T
dataset = 'mex-a3t'
columns = ['text', 'aggressiveness']
trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_train.tsv'), sep='\t', names=columns)
tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_test.tsv'), sep='\t', names=columns)
trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.aggressiveness, random_state=seed)
for subset in [trn, dev, tst]:
subset.text = basic_text_normalization(subset.text)
write_split_files(dataset, trn, dev, tst)
print(f'Dataset: {dataset} --> N. Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: '
f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')
# TASS 2019
dataset = 'tass2019'
n_instances = {}
for phase in ['train', 'dev', 'test']:
phase_data = pd.DataFrame()
for country in ['ES', 'CR', 'MX', 'PE', 'UY']:
root = ET.parse(os.path.join(SOURCE_PATH, dataset, f'TASS2019_country_{country}_{phase}.xml')).getroot()
tweets = []
for item in root.iter('tweet'):
tweet = {'country': country}
for tweet_field in item.iter():
if tweet_field.tag not in ['tweet', 'sentiment', 'polarity']:
tweet[tweet_field.tag] = tweet_field.text
tweets.append(tweet)
phase_data = phase_data.append(tweets)
new_cols = {'tweetid': 'tweet_id', 'content': 'text', 'user': 'user_id', 'value': 'polarity'}
phase_data.rename(columns=new_cols, inplace=True)
phase_data = phase_data[['tweet_id', 'user_id', 'country', 'date', 'text', 'polarity']]
phase_data.text = basic_text_normalization(phase_data.text)
phase_data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\t', mode='w')
n_instances[phase] = phase_data.shape[0]
print(f'Dataset: {dataset} --> N. Instances: {sum(n_instances.values())} --> Train, Dev., Test: '
f'{n_instances['train']}, {n_instances['dev']}, {n_instances['test']}')
# Universal Joy
dataset = 'universal_joy'
trn_data = {}
for filename in ['small', 'large', 'combi']:
trn_data[filename] = pd.read_csv(os.path.join(SOURCE_PATH, dataset, filename + '.csv'))
trn_data[filename] = trn_data[filename][trn_data[filename].language == 'es']
trn_data[filename].text = trn_data[filename].text.apply(universal_joy_cleaning)
    # Apparently, the Spanish comments in 'large' and 'combi' are the same, and 'small' is created from a subset of those
trn = pd.concat(trn_data.values(), axis=0, ignore_index=True)
trn.drop_duplicates(inplace=True, subset='text')
    # There is no overlap between training, validation and test (and none of them contain duplicates)
dev = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'val.csv'))
dev.drop_duplicates(inplace=True, subset='text')
tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'test.csv'))
tst.drop_duplicates(inplace=True, subset='text')
# The test set approximately represents 12.5% of the total data
# print(tst.shape[0]/(trn.shape[0] + dev.shape[0] + tst.shape[0]))
# DETOXIS
dataset = 'detoxis'
trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'train.csv'), sep=',')
tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'test.csv'), sep=',')
trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.toxicity_level, random_state=seed)
for subset in [trn, dev, tst]:
subset.rename(columns={'comment': 'text'}, inplace=True)
subset.text = basic_text_normalization(subset.text)
write_split_files(dataset, trn, dev, tst)
print(f'Dataset: {dataset} --> N. Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: '
f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')
def read_datasets(datasets, tasks, lang='es'):
data = {}
for dataset in datasets:
if dataset not in DATASETS:
raise Exception(f'Dataset {dataset} is not in the list of available datasets!')
data[dataset] = {
'trn': pd.read_csv(os.path.join(DATA_PATH, dataset, f'train_{lang}.tsv'), sep='\t'),
'dev': pd.read_csv(os.path.join(DATA_PATH, dataset, f'dev_{lang}.tsv'), sep='\t'),
'tst': pd.read_csv(os.path.join(DATA_PATH, dataset, f'test_{lang}.tsv'), sep='\t')
}
for phase in data[dataset]:
data[dataset][phase] = data[dataset][phase][['text'] + tasks[dataset]]
return data
def create_namespace_from_dict(dic, name=None):
for k, v in dic.items():
if isinstance(v, dict):
dic[k] = create_namespace_from_dict(v, k)
ns = SimpleNamespace(**dic)
ns.__name__ = name
return ns
def process_config(dic, name=None):
for k, v in dic.items():
if k not in ['transfer_learning', 'optimization']:
if isinstance(v, dict):
dic[k] = process_config(v, k)
elif isinstance(v, list):
for vi in v:
if isinstance(vi, dict):
dic[k] += create_linspace(vi)
dic[k] = dic[k][1:]
else:
dic[k] = [v]
return dic
def load_config(config_file):
with open(os.path.join(CONFIG_PATH, config_file), 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return process_config(config) # create_namespace_from_dict(config)
def log(string, indent=0):
start = '\t' * indent
print(f'{start}{string}')
| import os
import yaml
import pandas as pd
import xml.etree.ElementTree as ET
from types import SimpleNamespace
from sklearn.model_selection import train_test_split
from utils.experiment_utils import create_linspace
from utils.preprocess import *
SOURCE_PATH = './source_data'
DATA_PATH = './data'
CONFIG_PATH = './conf'
DATASETS = ['ami', 'emoevent', 'haternet', 'hateval2019', 'mex-a3t', 'universal_joy', 'tass2019', 'detoxis']
class Colors:
BLACK = '\033[1;30m'
RED = '\033[1;31m'
GREEN = '\033[1;32m'
YELLOW = '\033[1;33m'
BLUE = '\033[1;34m'
PURPLE = '\033[1;35m'
CYAN = '\033[1;36m'
WHITE = '\033[1;37m'
ENDC = '\033[0m'
def colored(text, color):
return f'{color}{text}{Colors.ENDC}'
def write_split_files(dataset, trn, dev, tst):
trn.to_csv(os.path.join(DATA_PATH, dataset, 'train_es.tsv'), index=False, sep='\t', mode='w')
dev.to_csv(os.path.join(DATA_PATH, dataset, 'dev_es.tsv'), index=False, sep='\t', mode='w')
tst.to_csv(os.path.join(DATA_PATH, dataset, 'test_es.tsv'), index=False, sep='\t', mode='w')
def prepare_files():
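    # Builds train/dev/test splits for each source corpus and writes them as tab-separated files under DATA_PATH.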
seed = 100
test_ratio = 0.2
# EmoEvent and HaterNet
filename = 'original_es.tsv'
data = {'emoevent': pd.read_csv(os.path.join(SOURCE_PATH, 'emoevent', filename), sep='\t'),
'haternet': pd.read_csv(os.path.join(SOURCE_PATH, 'haternet', filename), sep=';\\|\\|;',
names=['id', 'text', 'hateful'],
header=None,
engine="python")}
labels = {'emoevent': 'offensive',
'haternet': 'hateful'}
for dataset in data:
data[dataset].text = basic_text_normalization(data[dataset].text)
y = data[dataset][labels[dataset]]
trn, tst = train_test_split(data[dataset], shuffle=True, test_size=test_ratio, stratify=y, random_state=seed)
y = trn[labels[dataset]]
trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=y, random_state=seed)
write_split_files(dataset, trn, dev, tst)
print(f'Dataset: {dataset} --> N. Instances: {data[dataset].shape[0]} --> Train, Dev., Test: '
f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')
# HatEval 2019
dataset = 'hateval2019'
n_instances = {}
for phase in ['train', 'dev', 'test']:
data = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'original_{phase}_es.csv'), sep=',')
data.text = basic_text_normalization(data.text)
data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\t', mode='w')
n_instances[phase] = data.shape[0]
print(f'Dataset: {dataset} --> N. Instances: {sum(n_instances.values())} --> Train, Dev., Test: '
f'{n_instances["train"]}, {n_instances["dev"]}, {n_instances["test"]}')
# MEX-A3T
dataset = 'mex-a3t'
columns = ['text', 'aggressiveness']
trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_train.tsv'), sep='\t', names=columns)
tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'original_test.tsv'), sep='\t', names=columns)
trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.aggressiveness, random_state=seed)
for subset in [trn, dev, tst]:
subset.text = basic_text_normalization(subset.text)
write_split_files(dataset, trn, dev, tst)
print(f'Dataset: {dataset} --> N. Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: '
f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')
# TASS 2019
dataset = 'tass2019'
n_instances = {}
for phase in ['train', 'dev', 'test']:
phase_data = pd.DataFrame()
for country in ['ES', 'CR', 'MX', 'PE', 'UY']:
root = ET.parse(os.path.join(SOURCE_PATH, dataset, f'TASS2019_country_{country}_{phase}.xml')).getroot()
tweets = []
for item in root.iter('tweet'):
tweet = {'country': country}
for tweet_field in item.iter():
if tweet_field.tag not in ['tweet', 'sentiment', 'polarity']:
tweet[tweet_field.tag] = tweet_field.text
tweets.append(tweet)
phase_data = phase_data.append(tweets)
new_cols = {'tweetid': 'tweet_id', 'content': 'text', 'user': 'user_id', 'value': 'polarity'}
phase_data.rename(columns=new_cols, inplace=True)
phase_data = phase_data[['tweet_id', 'user_id', 'country', 'date', 'text', 'polarity']]
phase_data.text = basic_text_normalization(phase_data.text)
phase_data.to_csv(os.path.join(DATA_PATH, dataset, f'{phase}_es.tsv'), index=False, sep='\t', mode='w')
n_instances[phase] = phase_data.shape[0]
print(f'Dataset: {dataset} --> N. Instances: {sum(n_instances.values())} --> Train, Dev., Test: '
f'{n_instances["train"]}, {n_instances["dev"]}, {n_instances["test"]}')
# Universal Joy
dataset = 'universal_joy'
trn_data = {}
for filename in ['small', 'large', 'combi']:
trn_data[filename] = pd.read_csv(os.path.join(SOURCE_PATH, dataset, filename + '.csv'))
trn_data[filename] = trn_data[filename][trn_data[filename].language == 'es']
trn_data[filename].text = trn_data[filename].text.apply(universal_joy_cleaning)
    # Apparently, the Spanish comments in 'large' and 'combi' are the same, and 'small' is created from a subset of those
trn = pd.concat(trn_data.values(), axis=0, ignore_index=True)
trn.drop_duplicates(inplace=True, subset='text')
    # There is no overlap between training, validation and test (and none of them contain duplicates)
dev = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'val.csv'))
dev.drop_duplicates(inplace=True, subset='text')
tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, 'test.csv'))
tst.drop_duplicates(inplace=True, subset='text')
# The test set approximately represents 12.5% of the total data
# print(tst.shape[0]/(trn.shape[0] + dev.shape[0] + tst.shape[0]))
# DETOXIS
dataset = 'detoxis'
trn = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'train.csv'), sep=',')
tst = pd.read_csv(os.path.join(SOURCE_PATH, dataset, f'test.csv'), sep=',')
trn, dev = train_test_split(trn, shuffle=True, test_size=test_ratio, stratify=trn.toxicity_level, random_state=seed)
for subset in [trn, dev, tst]:
subset.rename(columns={'comment': 'text'}, inplace=True)
subset.text = basic_text_normalization(subset.text)
write_split_files(dataset, trn, dev, tst)
print(f'Dataset: {dataset} --> N. Instances: {trn.shape[0] + dev.shape[0] + tst.shape[0]} --> Train, Dev., Test: '
f'{trn.shape[0]}, {dev.shape[0]}, {tst.shape[0]}')
def read_datasets(datasets, tasks, lang='es'):
data = {}
for dataset in datasets:
if dataset not in DATASETS:
raise Exception(f'Dataset {dataset} is not in the list of available datasets!')
data[dataset] = {
'trn': pd.read_csv(os.path.join(DATA_PATH, dataset, f'train_{lang}.tsv'), sep='\t'),
'dev': pd.read_csv(os.path.join(DATA_PATH, dataset, f'dev_{lang}.tsv'), sep='\t'),
'tst': pd.read_csv(os.path.join(DATA_PATH, dataset, f'test_{lang}.tsv'), sep='\t')
}
for phase in data[dataset]:
data[dataset][phase] = data[dataset][phase][['text'] + tasks[dataset]]
return data
def create_namespace_from_dict(dic, name=None):
for k, v in dic.items():
if isinstance(v, dict):
dic[k] = create_namespace_from_dict(v, k)
ns = SimpleNamespace(**dic)
ns.__name__ = name
return ns
def process_config(dic, name=None):
for k, v in dic.items():
if k not in ['transfer_learning', 'optimization']:
if isinstance(v, dict):
dic[k] = process_config(v, k)
elif isinstance(v, list):
for vi in v:
if isinstance(vi, dict):
dic[k] += create_linspace(vi)
dic[k] = dic[k][1:]
else:
dic[k] = [v]
return dic
def load_config(config_file):
with open(os.path.join(CONFIG_PATH, config_file), 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return process_config(config) # create_namespace_from_dict(config)
def log(string, indent=0):
start = '\t' * indent
print(f'{start}{string}')
|
import contextlib
import json
import os
import pprint
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import time
from cytoolz import (
merge,
valmap,
)
from eth_utils.curried import (
apply_formatter_if,
is_bytes,
is_checksum_address,
is_dict,
is_same_address,
remove_0x_prefix,
to_hex,
to_text,
to_wei,
)
from webu import Webu
from webu.utils.module_testing.emitter_contract import (
EMITTER_ABI,
EMITTER_BYTECODE,
EMITTER_ENUM,
)
from webu.utils.module_testing.math_contract import (
MATH_ABI,
MATH_BYTECODE,
)
COINBASE = '0xdc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd'
COINBASE_PK = '0x58d23b55bc9cdce1f18c2500f40ff4ab7245df9a89505e9b1fa4851f623d241d'
KEYFILE_DATA = '{"address":"dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd","crypto":{"cipher":"aes-128-ctr","ciphertext":"52e06bc9397ea9fa2f0dae8de2b3e8116e92a2ecca9ad5ff0061d1c449704e98","cipherparams":{"iv":"aa5d0a5370ef65395c1a6607af857124"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"9fdf0764eb3645ffc184e166537f6fe70516bf0e34dc7311dea21f100f0c9263"},"mac":"4e0b51f42b865c15c485f4faefdd1f01a38637e5247f8c75ffe6a8c0eba856f6"},"id":"5a6124e0-10f1-4c1c-ae3e-d903eacb740a","version":3}' # noqa: E501
KEYFILE_PW = 'webupy-test'
KEYFILE_FILENAME = 'UTC--2017-08-24T19-42-47.517572178Z--dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd' # noqa: E501
RAW_TXN_ACCOUNT = '0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6'
UNLOCKABLE_PRIVATE_KEY = '0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01'
UNLOCKABLE_ACCOUNT = '0x12efdc31b1a8fa1a1e756dfd8a1601055c971e13'
UNLOCKABLE_ACCOUNT_PW = KEYFILE_PW
GENESIS_DATA = {
"nonce": "0xdeadbeefdeadbeef",
"timestamp": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501
"extraData": "0x7765623370792d746573742d636861696e",
"gasLimit": "0x47d5cc",
"difficulty": "0x01",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501
"coinbase": "0x3333333333333333333333333333333333333333",
"alloc": {
remove_0x_prefix(COINBASE): {
'balance': str(to_wei(1000000000, 'huc')),
},
remove_0x_prefix(RAW_TXN_ACCOUNT): {
'balance': str(to_wei(10, 'huc')),
},
remove_0x_prefix(UNLOCKABLE_ACCOUNT): {
'balance': str(to_wei(10, 'huc')),
},
},
"config": {
"chainId": 131277322940537, # the string 'webupy' as an integer
"homesteadBlock": 0,
"eip155Block": 0,
"eip158Block": 0
},
}
def ensure_path_exists(dir_path):
"""
Make sure that a path exists
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return True
return False
@contextlib.contextmanager
def tempdir():
dir_path = tempfile.mkdtemp()
try:
yield dir_path
finally:
shutil.rmtree(dir_path)
def get_open_port():
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
port = sock.getsockname()[1]
sock.close()
return str(port)
def get_ghuc_binary():
from ghuc.install import (
get_executable_path,
install_ghuc,
)
if 'GETH_BINARY' in os.environ:
return os.environ['GETH_BINARY']
elif 'GETH_VERSION' in os.environ:
ghuc_version = os.environ['GETH_VERSION']
_ghuc_binary = get_executable_path(ghuc_version)
if not os.path.exists(_ghuc_binary):
install_ghuc(ghuc_version)
assert os.path.exists(_ghuc_binary)
return _ghuc_binary
else:
return 'ghuc'
def wait_for_popen(proc, timeout):
start = time.time()
while time.time() < start + timeout:
if proc.poll() is None:
time.sleep(0.01)
else:
break
def kill_proc_gracefully(proc):
if proc.poll() is None:
proc.send_signal(signal.SIGINT)
wait_for_popen(proc, 13)
if proc.poll() is None:
proc.terminate()
wait_for_popen(proc, 5)
if proc.poll() is None:
proc.kill()
wait_for_popen(proc, 2)
def wait_for_socket(ipc_path, timeout=30):
start = time.time()
while time.time() < start + timeout:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
except (FileNotFoundError, socket.error):
time.sleep(0.01)
else:
break
@contextlib.contextmanager
def graceful_kill_on_exit(proc):
try:
yield proc
finally:
kill_proc_gracefully(proc)
@contextlib.contextmanager
def get_ghuc_process(ghuc_binary,
datadir,
genesis_file_path,
ghuc_ipc_path,
ghuc_port):
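    # Initialize the data directory from the genesis file before launching the node.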
init_datadir_command = (
ghuc_binary,
'--datadir', datadir,
'init',
genesis_file_path,
)
subprocess.check_output(
init_datadir_command,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
run_ghuc_command = (
ghuc_binary,
'--datadir', datadir,
'--ipcpath', ghuc_ipc_path,
'--ethash.dagsondisk', '1',
'--gcmode', 'archive',
'--nodiscover',
'--port', ghuc_port,
'--coinbase', COINBASE[2:],
)
popen_proc = subprocess.Popen(
run_ghuc_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
)
with popen_proc as proc:
with graceful_kill_on_exit(proc) as graceful_proc:
yield graceful_proc
output, errors = proc.communicate()
print(
"Ghuc Process Exited:\n"
"stdout:{0}\n\n"
"stderr:{1}\n\n".format(
to_text(output),
to_text(errors),
)
)
def write_config_json(config, datadir):
bytes_to_hex = apply_formatter_if(is_bytes, to_hex)
config_json_dict = valmap(bytes_to_hex, config)
config_path = os.path.join(datadir, 'config.json')
with open(config_path, 'w') as config_file:
config_file.write(json.dumps(config_json_dict))
config_file.write('\n')
def generate_go_happyuc_fixture(destination_dir):
with contextlib.ExitStack() as stack:
datadir = stack.enter_context(tempdir())
keystore_dir = os.path.join(datadir, 'keystore')
ensure_path_exists(keystore_dir)
keyfile_path = os.path.join(keystore_dir, KEYFILE_FILENAME)
with open(keyfile_path, 'w') as keyfile:
keyfile.write(KEYFILE_DATA)
genesis_file_path = os.path.join(datadir, 'genesis.json')
with open(genesis_file_path, 'w') as genesis_file:
genesis_file.write(json.dumps(GENESIS_DATA))
ghuc_ipc_path_dir = stack.enter_context(tempdir())
ghuc_ipc_path = os.path.join(ghuc_ipc_path_dir, 'ghuc.ipc')
ghuc_port = get_open_port()
ghuc_binary = get_ghuc_binary()
with get_ghuc_process(
ghuc_binary=ghuc_binary,
datadir=datadir,
genesis_file_path=genesis_file_path,
ghuc_ipc_path=ghuc_ipc_path,
ghuc_port=ghuc_port):
wait_for_socket(ghuc_ipc_path)
webu = Webu(Webu.IPCProvider(ghuc_ipc_path))
chain_data = setup_chain_state(webu)
# close ghuc by exiting context
# must be closed before copying data dir
verify_chain_state(webu, chain_data)
# verify that chain state is still valid after closing
# and re-opening ghuc
with get_ghuc_process(
ghuc_binary=ghuc_binary,
datadir=datadir,
genesis_file_path=genesis_file_path,
ghuc_ipc_path=ghuc_ipc_path,
ghuc_port=ghuc_port):
wait_for_socket(ghuc_ipc_path)
webu = Webu(Webu.IPCProvider(ghuc_ipc_path))
verify_chain_state(webu, chain_data)
static_data = {
'raw_txn_account': RAW_TXN_ACCOUNT,
'keyfile_pw': KEYFILE_PW,
}
config = merge(chain_data, static_data)
pprint.pprint(config)
write_config_json(config, datadir)
shutil.copytree(datadir, destination_dir)
def verify_chain_state(webu, chain_data):
receipt = webu.eth.getTransactionReceipt(chain_data['mined_txn_hash'])
latest = webu.eth.getBlock('latest')
assert receipt.blockNumber <= latest.number
def mine_transaction_hash(webu, txn_hash):
start_time = time.time()
webu.miner.start(1)
while time.time() < start_time + 60:
receipt = webu.eth.getTransactionReceipt(txn_hash)
if receipt is not None:
webu.miner.stop()
return receipt
else:
time.sleep(0.1)
else:
raise ValueError("Math contract deploy transaction not mined during wait period")
def mine_block(webu):
origin_block_number = webu.eth.blockNumber
start_time = time.time()
webu.miner.start(1)
while time.time() < start_time + 60:
block_number = webu.eth.blockNumber
if block_number > origin_block_number:
webu.miner.stop()
return block_number
else:
time.sleep(0.1)
else:
raise ValueError("No block mined during wait period")
def deploy_contract(webu, name, factory):
webu.personal.unlockAccount(webu.eth.coinbase, KEYFILE_PW)
deploy_txn_hash = factory.deploy({'from': webu.eth.coinbase})
print('{0}_CONTRACT_DEPLOY_HASH: '.format(name.upper()), deploy_txn_hash)
deploy_receipt = mine_transaction_hash(webu, deploy_txn_hash)
print('{0}_CONTRACT_DEPLOY_TRANSACTION_MINED'.format(name.upper()))
contract_address = deploy_receipt['contractAddress']
assert is_checksum_address(contract_address)
print('{0}_CONTRACT_ADDRESS:'.format(name.upper()), contract_address)
return deploy_receipt
def setup_chain_state(webu):
coinbase = webu.eth.coinbase
assert is_same_address(coinbase, COINBASE)
#
# Math Contract
#
math_contract_factory = webu.eth.contract(
abi=MATH_ABI,
bytecode=MATH_BYTECODE,
)
math_deploy_receipt = deploy_contract(webu, 'math', math_contract_factory)
assert is_dict(math_deploy_receipt)
#
# Emitter Contract
#
emitter_contract_factory = webu.eth.contract(
abi=EMITTER_ABI,
bytecode=EMITTER_BYTECODE,
)
emitter_deploy_receipt = deploy_contract(webu, 'emitter', emitter_contract_factory)
emitter_contract = emitter_contract_factory(emitter_deploy_receipt['contractAddress'])
txn_hash_with_log = emitter_contract.transact({
'from': webu.eth.coinbase,
}).logDouble(which=EMITTER_ENUM['LogDoubleWithIndex'], arg0=12345, arg1=54321)
print('TXN_HASH_WITH_LOG:', txn_hash_with_log)
txn_receipt_with_log = mine_transaction_hash(webu, txn_hash_with_log)
block_with_log = webu.eth.getBlock(txn_receipt_with_log['blockHash'])
print('BLOCK_HASH_WITH_LOG:', block_with_log['hash'])
#
# Empty Block
#
empty_block_number = mine_block(webu)
print('MINED_EMPTY_BLOCK')
empty_block = webu.eth.getBlock(empty_block_number)
assert is_dict(empty_block)
assert not empty_block['transactions']
print('EMPTY_BLOCK_HASH:', empty_block['hash'])
#
# Block with Transaction
#
webu.personal.unlockAccount(coinbase, KEYFILE_PW)
webu.miner.start(1)
mined_txn_hash = webu.eth.sendTransaction({
'from': coinbase,
'to': coinbase,
'value': 1,
'gas': 21000,
'gas_price': webu.eth.gasPrice,
})
mined_txn_receipt = mine_transaction_hash(webu, mined_txn_hash)
print('MINED_TXN_HASH:', mined_txn_hash)
block_with_txn = webu.eth.getBlock(mined_txn_receipt['blockHash'])
print('BLOCK_WITH_TXN_HASH:', block_with_txn['hash'])
ghuc_fixture = {
'math_deploy_txn_hash': math_deploy_receipt['transactionHash'],
'math_address': math_deploy_receipt['contractAddress'],
'emitter_deploy_txn_hash': emitter_deploy_receipt['transactionHash'],
'emitter_address': emitter_deploy_receipt['contractAddress'],
'txn_hash_with_log': txn_hash_with_log,
'block_hash_with_log': block_with_log['hash'],
'empty_block_hash': empty_block['hash'],
'mined_txn_hash': mined_txn_hash,
'block_with_txn_hash': block_with_txn['hash'],
}
return ghuc_fixture
if __name__ == '__main__':
fixture_dir = sys.argv[1]
generate_go_happyuc_fixture(fixture_dir)
|
import contextlib
import json
import os
import pprint
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import time
from cytoolz import (
merge,
valmap,
)
from eth_utils.curried import (
apply_formatter_if,
is_bytes,
is_checksum_address,
is_dict,
is_same_address,
remove_0x_prefix,
to_hex,
to_text,
to_wei,
)
from webu import Webu
from webu.utils.module_testing.emitter_contract import (
EMITTER_ABI,
EMITTER_BYTECODE,
EMITTER_ENUM,
)
from webu.utils.module_testing.math_contract import (
MATH_ABI,
MATH_BYTECODE,
)
COINBASE = '0xdc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd'
COINBASE_PK = '0x58d23b55bc9cdce1f18c2500f40ff4ab7245df9a89505e9b1fa4851f623d241d'
KEYFILE_DATA = '{"address":"dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd","crypto":{"cipher":"aes-128-ctr","ciphertext":"52e06bc9397ea9fa2f0dae8de2b3e8116e92a2ecca9ad5ff0061d1c449704e98","cipherparams":{"iv":"aa5d0a5370ef65395c1a6607af857124"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"9fdf0764eb3645ffc184e166537f6fe70516bf0e34dc7311dea21f100f0c9263"},"mac":"4e0b51f42b865c15c485f4faefdd1f01a38637e5247f8c75ffe6a8c0eba856f6"},"id":"5a6124e0-10f1-4c1c-ae3e-d903eacb740a","version":3}' # noqa: E501
KEYFILE_PW = 'webupy-test'
KEYFILE_FILENAME = 'UTC--2017-08-24T19-42-47.517572178Z--dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd' # noqa: E501
RAW_TXN_ACCOUNT = '0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6'
UNLOCKABLE_PRIVATE_KEY = '0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01'
UNLOCKABLE_ACCOUNT = '0x12efdc31b1a8fa1a1e756dfd8a1601055c971e13'
UNLOCKABLE_ACCOUNT_PW = KEYFILE_PW
GENESIS_DATA = {
"nonce": "0xdeadbeefdeadbeef",
"timestamp": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501
"extraData": "0x7765623370792d746573742d636861696e",
"gasLimit": "0x47d5cc",
"difficulty": "0x01",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501
"coinbase": "0x3333333333333333333333333333333333333333",
"alloc": {
remove_0x_prefix(COINBASE): {
'balance': str(to_wei(1000000000, 'huc')),
},
remove_0x_prefix(RAW_TXN_ACCOUNT): {
'balance': str(to_wei(10, 'huc')),
},
remove_0x_prefix(UNLOCKABLE_ACCOUNT): {
'balance': str(to_wei(10, 'huc')),
},
},
"config": {
"chainId": 131277322940537, # the string 'webupy' as an integer
"homesteadBlock": 0,
"eip155Block": 0,
"eip158Block": 0
},
}
def ensure_path_exists(dir_path):
"""
Make sure that a path exists
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return True
return False
@contextlib.contextmanager
def tempdir():
dir_path = tempfile.mkdtemp()
try:
yield dir_path
finally:
shutil.rmtree(dir_path)
def get_open_port():
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
port = sock.getsockname()[1]
sock.close()
return str(port)
def get_ghuc_binary():
from ghuc.install import (
get_executable_path,
install_ghuc,
)
if 'GETH_BINARY' in os.environ:
return os.environ['GETH_BINARY']
elif 'GETH_VERSION' in os.environ:
ghuc_version = os.environ['GETH_VERSION']
_ghuc_binary = get_executable_path(ghuc_version)
if not os.path.exists(_ghuc_binary):
install_ghuc(ghuc_version)
assert os.path.exists(_ghuc_binary)
return _ghuc_binary
else:
return 'ghuc'
def wait_for_popen(proc, timeout):
start = time.time()
while time.time() < start + timeout:
if proc.poll() is None:
time.sleep(0.01)
else:
break
def kill_proc_gracefully(proc):
if proc.poll() is None:
proc.send_signal(signal.SIGINT)
wait_for_popen(proc, 13)
if proc.poll() is None:
proc.terminate()
wait_for_popen(proc, 5)
if proc.poll() is None:
proc.kill()
wait_for_popen(proc, 2)
def wait_for_socket(ipc_path, timeout=30):
start = time.time()
while time.time() < start + timeout:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
except (FileNotFoundError, socket.error):
time.sleep(0.01)
else:
break
@contextlib.contextmanager
def graceful_kill_on_exit(proc):
try:
yield proc
finally:
kill_proc_gracefully(proc)
@contextlib.contextmanager
def get_ghuc_process(ghuc_binary,
datadir,
genesis_file_path,
ghuc_ipc_path,
ghuc_port):
init_datadir_command = (
ghuc_binary,
'--datadir', datadir,
'init',
genesis_file_path,
)
subprocess.check_output(
init_datadir_command,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
run_ghuc_command = (
ghuc_binary,
'--datadir', datadir,
'--ipcpath', ghuc_ipc_path,
'--ethash.dagsondisk', '1',
'--gcmode', 'archive',
'--nodiscover',
'--port', ghuc_port,
'--coinbase', COINBASE[2:],
)
popen_proc = subprocess.Popen(
run_ghuc_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
)
with popen_proc as proc:
with graceful_kill_on_exit(proc) as graceful_proc:
yield graceful_proc
output, errors = proc.communicate()
print(
"Ghuc Process Exited:\n"
"stdout:{0}\n\n"
"stderr:{1}\n\n".format(
to_text(output),
to_text(errors),
)
)
def write_config_json(config, datadir):
bytes_to_hex = apply_formatter_if(is_bytes, to_hex)
config_json_dict = valmap(bytes_to_hex, config)
config_path = os.path.join(datadir, 'config.json')
with open(config_path, 'w') as config_file:
config_file.write(json.dumps(config_json_dict))
config_file.write('\n')
def generate_go_happyuc_fixture(destination_dir):
with contextlib.ExitStack() as stack:
datadir = stack.enter_context(tempdir())
keystore_dir = os.path.join(datadir, 'keystore')
ensure_path_exists(keystore_dir)
keyfile_path = os.path.join(keystore_dir, KEYFILE_FILENAME)
with open(keyfile_path, 'w') as keyfile:
keyfile.write(KEYFILE_DATA)
genesis_file_path = os.path.join(datadir, 'genesis.json')
with open(genesis_file_path, 'w') as genesis_file:
genesis_file.write(json.dumps(GENESIS_DATA))
ghuc_ipc_path_dir = stack.enter_context(tempdir())
ghuc_ipc_path = os.path.join(ghuc_ipc_path_dir, 'ghuc.ipc')
ghuc_port = get_open_port()
ghuc_binary = get_ghuc_binary()
with get_ghuc_process(
ghuc_binary=ghuc_binary,
datadir=datadir,
genesis_file_path=genesis_file_path,
ghuc_ipc_path=ghuc_ipc_path,
ghuc_port=ghuc_port):
wait_for_socket(ghuc_ipc_path)
webu = Webu(Webu.IPCProvider(ghuc_ipc_path))
chain_data = setup_chain_state(webu)
# close ghuc by exiting context
# must be closed before copying data dir
verify_chain_state(webu, chain_data)
# verify that chain state is still valid after closing
# and re-opening ghuc
with get_ghuc_process(
ghuc_binary=ghuc_binary,
datadir=datadir,
genesis_file_path=genesis_file_path,
ghuc_ipc_path=ghuc_ipc_path,
ghuc_port=ghuc_port):
wait_for_socket(ghuc_ipc_path)
webu = Webu(Webu.IPCProvider(ghuc_ipc_path))
verify_chain_state(webu, chain_data)
static_data = {
'raw_txn_account': RAW_TXN_ACCOUNT,
'keyfile_pw': KEYFILE_PW,
}
config = merge(chain_data, static_data)
pprint.pprint(config)
write_config_json(config, datadir)
shutil.copytree(datadir, destination_dir)
def verify_chain_state(webu, chain_data):
receipt = webu.eth.getTransactionReceipt(chain_data['mined_txn_hash'])
latest = webu.eth.getBlock('latest')
assert receipt.blockNumber <= latest.number
def mine_transaction_hash(webu, txn_hash):
start_time = time.time()
webu.miner.start(1)
while time.time() < start_time + 60:
receipt = webu.eth.getTransactionReceipt(txn_hash)
if receipt is not None:
webu.miner.stop()
return receipt
else:
time.sleep(0.1)
else:
raise ValueError("Math contract deploy transaction not mined during wait period")
def mine_block(webu):
origin_block_number = webu.eth.blockNumber
start_time = time.time()
webu.miner.start(1)
while time.time() < start_time + 60:
block_number = webu.eth.blockNumber
if block_number > origin_block_number:
webu.miner.stop()
return block_number
else:
time.sleep(0.1)
else:
raise ValueError("No block mined during wait period")
def deploy_contract(webu, name, factory):
webu.personal.unlockAccount(webu.eth.coinbase, KEYFILE_PW)
deploy_txn_hash = factory.deploy({'from': webu.eth.coinbase})
print('{0}_CONTRACT_DEPLOY_HASH: '.format(name.upper()), deploy_txn_hash)
deploy_receipt = mine_transaction_hash(webu, deploy_txn_hash)
print('{0}_CONTRACT_DEPLOY_TRANSACTION_MINED'.format(name.upper()))
contract_address = deploy_receipt['contractAddress']
assert is_checksum_address(contract_address)
print('{0}_CONTRACT_ADDRESS:'.format(name.upper()), contract_address)
return deploy_receipt
def setup_chain_state(webu):
coinbase = webu.eth.coinbase
assert is_same_address(coinbase, COINBASE)
#
# Math Contract
#
math_contract_factory = webu.eth.contract(
abi=MATH_ABI,
bytecode=MATH_BYTECODE,
)
math_deploy_receipt = deploy_contract(webu, 'math', math_contract_factory)
assert is_dict(math_deploy_receipt)
#
# Emitter Contract
#
emitter_contract_factory = webu.eth.contract(
abi=EMITTER_ABI,
bytecode=EMITTER_BYTECODE,
)
emitter_deploy_receipt = deploy_contract(webu, 'emitter', emitter_contract_factory)
emitter_contract = emitter_contract_factory(emitter_deploy_receipt['contractAddress'])
txn_hash_with_log = emitter_contract.transact({
'from': webu.eth.coinbase,
}).logDouble(which=EMITTER_ENUM['LogDoubleWithIndex'], arg0=12345, arg1=54321)
print('TXN_HASH_WITH_LOG:', txn_hash_with_log)
txn_receipt_with_log = mine_transaction_hash(webu, txn_hash_with_log)
block_with_log = webu.eth.getBlock(txn_receipt_with_log['blockHash'])
print('BLOCK_HASH_WITH_LOG:', block_with_log['hash'])
#
# Empty Block
#
empty_block_number = mine_block(webu)
print('MINED_EMPTY_BLOCK')
empty_block = webu.eth.getBlock(empty_block_number)
assert is_dict(empty_block)
assert not empty_block['transactions']
print('EMPTY_BLOCK_HASH:', empty_block['hash'])
#
# Block with Transaction
#
webu.personal.unlockAccount(coinbase, KEYFILE_PW)
webu.miner.start(1)
mined_txn_hash = webu.eth.sendTransaction({
'from': coinbase,
'to': coinbase,
'value': 1,
'gas': 21000,
'gas_price': webu.eth.gasPrice,
})
mined_txn_receipt = mine_transaction_hash(webu, mined_txn_hash)
print('MINED_TXN_HASH:', mined_txn_hash)
block_with_txn = webu.eth.getBlock(mined_txn_receipt['blockHash'])
print('BLOCK_WITH_TXN_HASH:', block_with_txn['hash'])
ghuc_fixture = {
'math_deploy_txn_hash': math_deploy_receipt['transactionHash'],
'math_address': math_deploy_receipt['contractAddress'],
'emitter_deploy_txn_hash': emitter_deploy_receipt['transactionHash'],
'emitter_address': emitter_deploy_receipt['contractAddress'],
'txn_hash_with_log': txn_hash_with_log,
'block_hash_with_log': block_with_log['hash'],
'empty_block_hash': empty_block['hash'],
'mined_txn_hash': mined_txn_hash,
'block_with_txn_hash': block_with_txn['hash'],
}
return ghuc_fixture
if __name__ == '__main__':
fixture_dir = sys.argv[1]
generate_go_happyuc_fixture(fixture_dir)
|
import os
from collections import OrderedDict
from typing import Tuple, List, Callable
from fs_s3fs import S3FS
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from skimage.exposure import match_histograms
from datetime import datetime
from eolearn.core import EOPatch
def augment(
lr: np.ndarray,
hr: np.ndarray,
flip: bool = True,
rotate: bool = True,
distribution_shift: bool = False,
distribution_scale: bool = False,
permute_timestamps: bool = True,
max_distribution_shift: float = 0.25,
max_distribution_scale_diff: float = 0.25,
proba_of_original: float = 0.67
) -> Tuple[np.ndarray, np.ndarray]:
"""
Performs a series of image augmentations with specified probability.
:param lr: array of low-resolution images, shape is `CxTxHxW`
:param hr: array of high-resolution images, shape is `CxHxW`
:param flip: whether to randomly flip height or width of arrays
:param rotate: whether to randomly rotate the arrays
:param distribution_shift: add an offset to the distribution
:param distribution_scale: scale the channels distribution
:param permute_timestamps: permute timestamps (not desired for HRN)
:param max_distribution_shift: set max distribution shift used in distribution shift augmentation
:param max_distribution_scale_diff: set max distribution scale used in distribution scale augmentation
    :param proba_of_original: set probability of not modifying the original patch, e.g. 1 means no augmentations
:returns: augmented lr and hr arrays
"""
# Base probability which, after `n_aug_conditions`, reduces to `proba_of_original`
n_aug_conditions = sum(1. for aug_op in (flip, rotate, distribution_shift, distribution_scale, permute_timestamps)
if aug_op)
rng_threshold = proba_of_original ** (1. / n_aug_conditions)
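    # Worked example: with all five augmentations enabled and proba_of_original=0.67,
    # rng_threshold = 0.67 ** (1 / 5) ≈ 0.923, so each augmentation fires with
    # probability ≈ 0.077 and the chance that none fires is ≈ 0.923 ** 5 ≈ 0.67,
    # i.e. the requested probability of keeping the original patch.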
if flip and np.random.random() > rng_threshold:
flip_axis = np.random.choice([-2, -1])
lr = np.flip(lr, axis=flip_axis)
hr = np.flip(hr, axis=flip_axis)
if rotate and np.random.random() > rng_threshold:
k = np.random.choice(np.arange(-2, 3))
lr = np.rot90(lr, k=k, axes=(-2, -1))
hr = np.rot90(hr, k=k, axes=(-2, -1))
if distribution_shift and np.random.random() > rng_threshold:
d_shift = (np.random.random() - 0.5) * max_distribution_shift
lr = lr + d_shift
hr = hr + d_shift
if distribution_scale and np.random.random() > rng_threshold:
d_scale = 1. + (np.random.random() - 0.5) * max_distribution_scale_diff
lr_mean = np.mean(lr, axis=(-2, -1))[..., None, None]
hr_mean = np.mean(hr, axis=(-2, -1))[..., None, None]
lr = (lr - lr_mean) * d_scale + lr_mean
hr = (hr - hr_mean) * d_scale + hr_mean
if permute_timestamps and np.random.random() > rng_threshold:
# expects lr in `CxTxHxW` shape
indices = np.random.permutation(lr.shape[1])
lr = lr[:, indices]
return lr, hr
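# Minimal usage sketch for `augment` (the array shapes are illustrative
# assumptions, not values used elsewhere in this module):
#   >>> lr = np.random.rand(4, 8, 32, 32)   # CxTxHxW
#   >>> hr = np.random.rand(4, 96, 96)      # CxHxW
#   >>> lr_aug, hr_aug = augment(lr, hr)
#   >>> lr_aug.shape, hr_aug.shape
#   ((4, 8, 32, 32), (4, 96, 96))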
def pad_to_k(feat: np.ndarray, k: int = 16, pad_to_front: bool = True) -> np.ndarray:
""" Create an array with first dimension equal to k, filling with 0s in front or at back """
n_pad = k - len(feat)
if n_pad < 0:
raise ValueError(f'Can not pad when length of features: {len(feat)} is longer than k: {k}')
(_, h, w, c) = feat.shape
if pad_to_front:
feat = np.concatenate((np.zeros(shape=(n_pad, h, w, c)), feat))
else:
feat = np.concatenate((feat, np.zeros(shape=(n_pad, h, w, c))))
return feat
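# Example of `pad_to_k` (shapes are illustrative): a stack of 3 frames padded at
# the back to k=5 keeps the data and appends two all-zero frames:
#   >>> feat = np.ones((3, 32, 32, 4))
#   >>> pad_to_k(feat, k=5, pad_to_front=False).shape
#   (5, 32, 32, 4)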
class ImageSet(OrderedDict):
"""
An OrderedDict derived class to group the assets of an imageset, with a pretty-print functionality.
"""
def __init__(self, *args, **kwargs):
super(ImageSet, self).__init__(*args, **kwargs)
def __repr__(self):
dict_info = f"{"name":>10} : {self["name"]}"
for name, v in self.items():
if hasattr(v, 'shape'):
dict_info += f"\n{name:>10} : {v.shape} {v.__class__.__name__} ({v.dtype})"
else:
dict_info += f"\n{name:>10} : {v.__class__.__name__} ({v})"
return dict_info
def read_imageset(imset_file: str,
filesystem: S3FS = None,
normalize: bool = True,
country_norm_df: pd.DataFrame = None,
norm_deimos_npz: np.lib.npyio.NpzFile = None,
norm_s2_npz: np.lib.npyio.NpzFile = None,
n_views: int = 16,
padding: str = 'zeros',
histogram_matching: bool = False) -> ImageSet:
"""
Retrieves all assets from the given directory.
:param imset_file: name of npz file with sample imageset
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
                    frames, `n_views` timeframes from the lrs sequence are taken in reverse order, i.e. last is first
:param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
options are `zeros`, where 0 frames are prepended to features, or `repeat` where random repeats of
timeframes are taken
:param histogram_matching: whether to match the histogram between the HR and the corresponding LR image
"""
assert padding in ['zeros', 'repeat']
# Read asset names
npz = np.load(filesystem.openbin(imset_file), allow_pickle=True) if filesystem else np.load(imset_file,
allow_pickle=True)
features = npz['features']
hr = npz['labels']
if normalize:
country = npz['countries']
country_stats = country_norm_df[country_norm_df.country == str(country)]
norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values
norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values
features = (features - norm_median) / norm_std
deimos_p1 = norm_deimos_npz['p1']
deimos_p99 = norm_deimos_npz['p99']
s2_p1 = norm_s2_npz['p1']
s2_p99 = norm_s2_npz['p99']
hr = (hr - deimos_p1) / (deimos_p99 - deimos_p1)
features = (features - s2_p1) / (s2_p99 - s2_p1)
alphas = np.ones(n_views)
if histogram_matching:
hr = match_histograms(hr, features[-1], multichannel=True)
n_feature_timestamps = len(features)
if n_feature_timestamps < n_views:
if padding == 'zeros':
features = pad_to_k(features, n_views, pad_to_front=False)
alphas[n_feature_timestamps:] = 0
elif padding == 'repeat':
n_pad = n_views - n_feature_timestamps
padded = features[-1:].repeat(n_pad, axis=0)
features = np.concatenate((features, padded))
else:
features = features[-n_views:, ...]
# Tensor is `CxTxHxW`
features = np.moveaxis(features, -1, 0)
hr = np.moveaxis(hr, 2, 0)
imageset = ImageSet(name=os.path.basename(imset_file),
timestamp_deimos=str(npz['timetamps_deimos'].item()),
lr=features,
hr=hr,
alphas=alphas)
return imageset
class ImagesetDataset(Dataset):
""" Derived Dataset class for loading many imagesets from a list of directories.
:param imset_dir: name of directory containing files
:param imset_npz_files: list of filenames that constitute the dataset
:param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True`
if you are training HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS
                       (`BxCxTxHxW`)
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param channels_feats: which channels (i.e. indices) are extracted from lrs sequence
:param channels_labels: which channels (i.e. indices) are extracted from hr image
:param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
                    frames, `n_views` timeframes from the lrs sequence are taken in reverse order, i.e. last is first
:param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
options are `zeros`, where 0 frames are appended to features, or `repeat` where random repeats of
timeframes are taken
:param transform: function executed on lr and hr arrays as augmentation
:param histogram_matching: whether to match the histogram between the HR and the corresponding LR image
"""
def __init__(
self,
imset_dir: str,
imset_npz_files: list,
time_first: bool,
filesystem: object = None,
normalize: bool = True,
country_norm_df: object = None,
norm_deimos_npz: np.ndarray = None,
norm_s2_npz: np.ndarray = None,
channels_feats: List[int] = [0, 1, 2, 3],
channels_labels: List[int] = [0, 1, 2, 3],
n_views: int = 16,
padding: str = 'zeros',
transform: Callable = None,
histogram_matching: bool = False
):
super().__init__()
self.imset_dir = imset_dir
self.filesystem = filesystem
self.imset_npz_files = imset_npz_files
self.time_first = time_first
self.normalize = normalize
self.country_norm_df = country_norm_df
self.norm_deimos_npz = norm_deimos_npz
self.norm_s2_npz = norm_s2_npz
self.channels_feats = channels_feats
self.channels_labels = channels_labels
self.n_views = n_views
self.padding = padding
self.transform = transform
self.histogram_matching = histogram_matching
def __len__(self):
return len(self.imset_npz_files)
def __getitem__(self, index: int) -> ImageSet:
""" Returns an ImageSet dict of all assets in the directory of the given index."""
if isinstance(index, int):
imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index])
else:
raise KeyError('Index must be of type `int`.')
imset = read_imageset(
imset_file=imset_file,
filesystem=self.filesystem,
normalize=self.normalize,
country_norm_df=self.country_norm_df,
norm_deimos_npz=self.norm_deimos_npz,
norm_s2_npz=self.norm_s2_npz,
n_views=self.n_views,
padding=self.padding,
histogram_matching=self.histogram_matching
)
lr = imset['lr'][self.channels_feats]
hr = imset['hr'][self.channels_labels]
if self.transform is not None:
lr, hr = self.transform(lr, hr)
if self.time_first:
lr = np.swapaxes(lr, 0, 1)
imset['lr'] = torch.from_numpy(lr.copy())
imset['hr'] = torch.from_numpy(hr.copy())
imset['alphas'] = torch.from_numpy(imset['alphas'])
return imset
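# Hedged usage sketch for ImagesetDataset; the directory, file name and batch
# size are placeholders, not values from this repository:
#   >>> dataset = ImagesetDataset(
#   ...     imset_dir="train_npz", imset_npz_files=["sample_0.npz"],
#   ...     time_first=True, normalize=False, transform=augment)
#   >>> loader = torch.utils.data.DataLoader(dataset, batch_size=1)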
def filter_cloudy_s2(eop, max_cc):
idxs = []
for i, _ in enumerate(eop.timestamp):
if (eop.mask['CLM'][i, ...].mean() <= max_cc) and (eop.mask['IS_DATA'].mean() == 1):
idxs.append(i)
eop.data['BANDS'] = eop.data['BANDS'][idxs, ...]
eop.data['CLP'] = eop.data['CLP'][idxs, ...]
eop.mask['CLM'] = eop.mask['CLM'][idxs, ...]
eop.mask['IS_DATA'] = eop.mask['IS_DATA'][idxs, ...]
eop.timestamp = list(np.array(eop.timestamp)[idxs])
return eop
def timestamps_within_date(timestamps, start_date, end_date):
    timestamps = [ts.replace(tzinfo=None) for ts in timestamps]  # Drop the tzinfo that is present in Batch-produced timestamps
return [i for i, ts in enumerate(timestamps) if ts >= start_date and ts < end_date]
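# Example (dates are illustrative): return the indices of timestamps that fall
# inside [start_date, end_date):
#   >>> timestamps_within_date(
#   ...     [datetime(2021, 3, 1), datetime(2021, 6, 1)],
#   ...     datetime(2021, 1, 1), datetime(2021, 4, 1))
#   [0]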
def read_imageset_eopatch(imset_file: str,
start_date: datetime,
end_date: datetime,
country: str,
filesystem: S3FS = None,
normalize: bool = True,
country_norm_df: pd.DataFrame = None,
norm_s2_npz: np.lib.npyio.NpzFile = None,
n_views: int = 16,
padding: str = 'zeros', histogram_matching: bool = False) -> ImageSet:
"""
Retrieves all assets from the given directory.
:param imset_file: name of npz file with sample imageset
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param start_date: specifies the start of the temporal range of the stack of images used for prediction
:param end_date: specifies the end of the temporal range of the stack of images used for prediction
:param country: specifies the name of the country so it can be matched with the country_norm_df
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
                    frames, `n_views` timeframes from the lrs sequence are taken in reverse order, i.e. last is first
:param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
options are `zeros`, where 0 frames are prepended to features, or `repeat` where random repeats of
timeframes are taken
"""
assert padding in ['zeros', 'repeat']
eopatch = EOPatch.load(imset_file, filesystem=filesystem, lazy_loading=True)
noncloudy = filter_cloudy_s2(eopatch, max_cc=0.1)
ts_idxs = timestamps_within_date(noncloudy.timestamp, start_date, end_date)
features = noncloudy.data['BANDS'][ts_idxs, ...] / 10000
filtered_ts = [eopatch.timestamp[tsi] for tsi in ts_idxs]
if normalize:
country_stats = country_norm_df[country_norm_df.country == str(country)]
norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values
norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values
features = (features - norm_median) / norm_std
s2_p1 = norm_s2_npz['p1']
s2_p99 = norm_s2_npz['p99']
features = (features - s2_p1) / (s2_p99 - s2_p1)
alphas = np.ones(n_views)
if histogram_matching:
hr = match_histograms(hr, features[-1], multichannel=True)
n_feature_timestamps = len(features)
if n_feature_timestamps < n_views:
if padding == 'zeros':
features = pad_to_k(features, n_views, pad_to_front=False)
alphas[n_feature_timestamps:] = 0
elif padding == 'repeat':
n_pad = n_views - n_feature_timestamps
padded = features[-1:].repeat(n_pad, axis=0)
features = np.concatenate((features, padded))
else:
features = features[-n_views:, ...]
# Tensor is `CxTxHxW`
features = np.moveaxis(features, -1, 0)
imageset = ImageSet(name=os.path.basename(imset_file),
lr=features,
alphas=alphas,
ts=filtered_ts[::-1])
return imageset
class EopatchPredictionDataset(Dataset):
""" Derived Dataset class for loading many imagesets from a list of directories.
:param imset_dir: name of directory containing files
:param imset_npz_files: list of filenames that constitute the dataset
:param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True`
if you are training HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS
                       (`BxCxTxHxW`)
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param start_date: specifies the start of the temporal range of the stack of images used for prediction
:param end_date: specifies the end of the temporal range of the stack of images used for prediction
:param country: specifies the name of the country so it can be matched with the country_norm_df
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param channels_feats: which channels (i.e. indices) are extracted from lrs sequence
:param channels_labels: which channels (i.e. indices) are extracted from hr image
:param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
                    frames, `n_views` timeframes from the lrs sequence are taken in reverse order, i.e. last is first
:param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
options are `zeros`, where 0 frames are appended to features, or `repeat` where random repeats of
timeframes are taken
:param transform: function executed on lr and hr arrays as augmentation
"""
def __init__(
self,
imset_dir: str,
imset_npz_files: list,
time_first: bool,
start_date: datetime,
end_date: datetime,
country: str,
filesystem: object = None,
normalize: bool = True,
country_norm_df: object = None,
norm_deimos_npz: np.ndarray = None,
norm_s2_npz: np.ndarray = None,
channels_feats: List[int] = [0, 1, 2, 3],
n_views: int = 16,
padding: str = 'zeros',
histogram_matching: bool = False
):
super().__init__()
self.imset_dir = imset_dir
self.filesystem = filesystem
self.imset_npz_files = imset_npz_files
self.time_first = time_first
self.normalize = normalize
self.country_norm_df = country_norm_df
self.norm_deimos_npz = norm_deimos_npz
self.norm_s2_npz = norm_s2_npz
self.channels_feats = channels_feats
self.n_views = n_views
self.padding = padding
self.start_date = start_date
self.end_date = end_date
self.histogram_matching = histogram_matching
self.country = country
def __len__(self):
return len(self.imset_npz_files)
def __getitem__(self, index: int) -> ImageSet:
""" Returns an ImageSet dict of all assets in the directory of the given index."""
if isinstance(index, int):
imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index])
else:
raise KeyError('Index must be of type `int`.')
imset = read_imageset_eopatch(
imset_file=imset_file,
filesystem=self.filesystem,
normalize=self.normalize,
country_norm_df=self.country_norm_df,
norm_deimos_npz=self.norm_deimos_npz,
norm_s2_npz=self.norm_s2_npz,
n_views=self.n_views,
padding=self.padding,
start_date=self.start_date,
end_date=self.end_date,
country=self.country,
histogram_matching=self.histogram_matching,
)
lr = imset['lr'][self.channels_feats]
if self.time_first:
lr = np.swapaxes(lr, 0, 1)
imset['lr'] = torch.from_numpy(lr.copy())
imset['alphas'] = torch.from_numpy(imset['alphas'])
return imset
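# Hedged usage sketch for EopatchPredictionDataset; the patch names, dates and
# country below are placeholders, not values from this repository:
#   >>> pred_dataset = EopatchPredictionDataset(
#   ...     imset_dir="eopatches", imset_npz_files=["eopatch-0000"],
#   ...     time_first=True, start_date=datetime(2021, 4, 1),
#   ...     end_date=datetime(2021, 7, 1), country="Lithuania", normalize=False)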
|
import os
from collections import OrderedDict
from typing import Tuple, List, Callable
from fs_s3fs import S3FS
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from skimage.exposure import match_histograms
from datetime import datetime
from eolearn.core import EOPatch
def augment(
lr: np.ndarray,
hr: np.ndarray,
flip: bool = True,
rotate: bool = True,
distribution_shift: bool = False,
distribution_scale: bool = False,
permute_timestamps: bool = True,
max_distribution_shift: float = 0.25,
max_distribution_scale_diff: float = 0.25,
proba_of_original: float = 0.67
) -> Tuple[np.ndarray, np.ndarray]:
"""
Performs a series of image augmentations with specified probability.
:param lr: array of low-resolution images, shape is `CxTxHxW`
:param hr: array of high-resolution images, shape is `CxHxW`
:param flip: whether to randomly flip height or width of arrays
:param rotate: whether to randomly rotate the arrays
:param distribution_shift: add an offset to the distribution
:param distribution_scale: scale the channels distribution
:param permute_timestamps: permute timestamps (not desired for HRN)
:param max_distribution_shift: set max distribution shift used in distribution shift augmentation
:param max_distribution_scale_diff: set max distribution scale used in distribution scale augmentation
    :param proba_of_original: set probability of not modifying the original patch, e.g. 1 means no augmentations
:returns: augmented lr and hr arrays
"""
# Base probability which, after `n_aug_conditions`, reduces to `proba_of_original`
n_aug_conditions = sum(1. for aug_op in (flip, rotate, distribution_shift, distribution_scale, permute_timestamps)
if aug_op)
rng_threshold = proba_of_original ** (1. / n_aug_conditions)
if flip and np.random.random() > rng_threshold:
flip_axis = np.random.choice([-2, -1])
lr = np.flip(lr, axis=flip_axis)
hr = np.flip(hr, axis=flip_axis)
if rotate and np.random.random() > rng_threshold:
k = np.random.choice(np.arange(-2, 3))
lr = np.rot90(lr, k=k, axes=(-2, -1))
hr = np.rot90(hr, k=k, axes=(-2, -1))
if distribution_shift and np.random.random() > rng_threshold:
d_shift = (np.random.random() - 0.5) * max_distribution_shift
lr = lr + d_shift
hr = hr + d_shift
if distribution_scale and np.random.random() > rng_threshold:
d_scale = 1. + (np.random.random() - 0.5) * max_distribution_scale_diff
lr_mean = np.mean(lr, axis=(-2, -1))[..., None, None]
hr_mean = np.mean(hr, axis=(-2, -1))[..., None, None]
lr = (lr - lr_mean) * d_scale + lr_mean
hr = (hr - hr_mean) * d_scale + hr_mean
if permute_timestamps and np.random.random() > rng_threshold:
# expects lr in `CxTxHxW` shape
indices = np.random.permutation(lr.shape[1])
lr = lr[:, indices]
return lr, hr
def pad_to_k(feat: np.ndarray, k: int = 16, pad_to_front: bool = True) -> np.ndarray:
""" Create an array with first dimension equal to k, filling with 0s in front or at back """
n_pad = k - len(feat)
if n_pad < 0:
raise ValueError(f'Can not pad when length of features: {len(feat)} is longer than k: {k}')
(_, h, w, c) = feat.shape
if pad_to_front:
feat = np.concatenate((np.zeros(shape=(n_pad, h, w, c)), feat))
else:
feat = np.concatenate((feat, np.zeros(shape=(n_pad, h, w, c))))
return feat
class ImageSet(OrderedDict):
"""
An OrderedDict derived class to group the assets of an imageset, with a pretty-print functionality.
"""
def __init__(self, *args, **kwargs):
super(ImageSet, self).__init__(*args, **kwargs)
def __repr__(self):
dict_info = f"{'name':>10} : {self['name']}"
for name, v in self.items():
if hasattr(v, 'shape'):
dict_info += f"\n{name:>10} : {v.shape} {v.__class__.__name__} ({v.dtype})"
else:
dict_info += f"\n{name:>10} : {v.__class__.__name__} ({v})"
return dict_info
def read_imageset(imset_file: str,
filesystem: S3FS = None,
normalize: bool = True,
country_norm_df: pd.DataFrame = None,
norm_deimos_npz: np.lib.npyio.NpzFile = None,
norm_s2_npz: np.lib.npyio.NpzFile = None,
n_views: int = 16,
padding: str = 'zeros',
histogram_matching: bool = False) -> ImageSet:
"""
Retrieves all assets from the given directory.
:param imset_file: name of npz file with sample imageset
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first
:param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
options are `zeros`, where 0 frames are prepended to features, or `repeat` where random repeats of
timeframes are taken
:param histogram_matching: whether to match the histogram between the HR and the corresponding LR image
"""
assert padding in ['zeros', 'repeat']
# Read asset names
npz = np.load(filesystem.openbin(imset_file), allow_pickle=True) if filesystem else np.load(imset_file,
allow_pickle=True)
features = npz['features']
hr = npz['labels']
if normalize:
country = npz['countries']
country_stats = country_norm_df[country_norm_df.country == str(country)]
norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values
norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values
features = (features - norm_median) / norm_std
deimos_p1 = norm_deimos_npz['p1']
deimos_p99 = norm_deimos_npz['p99']
s2_p1 = norm_s2_npz['p1']
s2_p99 = norm_s2_npz['p99']
hr = (hr - deimos_p1) / (deimos_p99 - deimos_p1)
features = (features - s2_p1) / (s2_p99 - s2_p1)
alphas = np.ones(n_views)
if histogram_matching:
hr = match_histograms(hr, features[-1], multichannel=True)
n_feature_timestamps = len(features)
if n_feature_timestamps < n_views:
if padding == 'zeros':
features = pad_to_k(features, n_views, pad_to_front=False)
alphas[n_feature_timestamps:] = 0
elif padding == 'repeat':
n_pad = n_views - n_feature_timestamps
padded = features[-1:].repeat(n_pad, axis=0)
features = np.concatenate((features, padded))
else:
features = features[-n_views:, ...]
# Tensor is `CxTxHxW`
features = np.moveaxis(features, -1, 0)
hr = np.moveaxis(hr, 2, 0)
imageset = ImageSet(name=os.path.basename(imset_file),
timestamp_deimos=str(npz['timetamps_deimos'].item()),
lr=features,
hr=hr,
alphas=alphas)
return imageset
class ImagesetDataset(Dataset):
""" Derived Dataset class for loading many imagesets from a list of directories.
:param imset_dir: name of directory containing files
:param imset_npz_files: list of filenames that constitute the dataset
:param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True`
if you are training HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS
(`BxTxCxHxW`)
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param channels_feats: which channels (i.e. indices) are extracted from lrs sequence
:param channels_labels: which channels (i.e. indices) are extracted from hr image
:param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first
:param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
options are `zeros`, where 0 frames are appended to features, or `repeat` where random repeats of
timeframes are taken
:param transform: function executed on lr and hr arrays as augmentation
:param histogram_matching: whether to match the histogram between the HR and the corresponding LR image
"""
def __init__(
self,
imset_dir: str,
imset_npz_files: list,
time_first: bool,
filesystem: object = None,
normalize: bool = True,
country_norm_df: object = None,
norm_deimos_npz: np.ndarray = None,
norm_s2_npz: np.ndarray = None,
channels_feats: List[int] = [0, 1, 2, 3],
channels_labels: List[int] = [0, 1, 2, 3],
n_views: int = 16,
padding: str = 'zeros',
transform: Callable = None,
histogram_matching: bool = False
):
super().__init__()
self.imset_dir = imset_dir
self.filesystem = filesystem
self.imset_npz_files = imset_npz_files
self.time_first = time_first
self.normalize = normalize
self.country_norm_df = country_norm_df
self.norm_deimos_npz = norm_deimos_npz
self.norm_s2_npz = norm_s2_npz
self.channels_feats = channels_feats
self.channels_labels = channels_labels
self.n_views = n_views
self.padding = padding
self.transform = transform
self.histogram_matching = histogram_matching
def __len__(self):
return len(self.imset_npz_files)
def __getitem__(self, index: int) -> ImageSet:
""" Returns an ImageSet dict of all assets in the directory of the given index."""
if isinstance(index, int):
imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index])
else:
raise KeyError('Index must be of type `int`.')
imset = read_imageset(
imset_file=imset_file,
filesystem=self.filesystem,
normalize=self.normalize,
country_norm_df=self.country_norm_df,
norm_deimos_npz=self.norm_deimos_npz,
norm_s2_npz=self.norm_s2_npz,
n_views=self.n_views,
padding=self.padding,
histogram_matching=self.histogram_matching
)
lr = imset['lr'][self.channels_feats]
hr = imset['hr'][self.channels_labels]
if self.transform is not None:
lr, hr = self.transform(lr, hr)
if self.time_first:
lr = np.swapaxes(lr, 0, 1)
imset['lr'] = torch.from_numpy(lr.copy())
imset['hr'] = torch.from_numpy(hr.copy())
imset['alphas'] = torch.from_numpy(imset['alphas'])
return imset
def filter_cloudy_s2(eop, max_cc):
idxs = []
for i, _ in enumerate(eop.timestamp):
if (eop.mask['CLM'][i, ...].mean() <= max_cc) and (eop.mask['IS_DATA'].mean() == 1):
idxs.append(i)
eop.data['BANDS'] = eop.data['BANDS'][idxs, ...]
eop.data['CLP'] = eop.data['CLP'][idxs, ...]
eop.mask['CLM'] = eop.mask['CLM'][idxs, ...]
eop.mask['IS_DATA'] = eop.mask['IS_DATA'][idxs, ...]
eop.timestamp = list(np.array(eop.timestamp)[idxs])
return eop
def timestamps_within_date(timestamps, start_date, end_date):
    timestamps = [ts.replace(tzinfo=None) for ts in timestamps]  # Drop the tzinfo that is present in Batch-produced timestamps
return [i for i, ts in enumerate(timestamps) if ts >= start_date and ts < end_date]
def read_imageset_eopatch(imset_file: str,
start_date: datetime,
end_date: datetime,
country: str,
filesystem: S3FS = None,
normalize: bool = True,
country_norm_df: pd.DataFrame = None,
norm_s2_npz: np.lib.npyio.NpzFile = None,
n_views: int = 16,
padding: str = 'zeros', histogram_matching: bool = False) -> ImageSet:
"""
Retrieves all assets from the given directory.
:param imset_file: name of npz file with sample imageset
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param start_date: specifies the start of the temporal range of the stack of images used for prediction
:param end_date: specifies the end of the temporal range of the stack of images used for prediction
:param country: specifies the name of the country so it can be matched with the country_norm_df
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first
:param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
options are `zeros`, where 0 frames are prepended to features, or `repeat` where random repeats of
timeframes are taken
"""
assert padding in ['zeros', 'repeat']
eopatch = EOPatch.load(imset_file, filesystem=filesystem, lazy_loading=True)
noncloudy = filter_cloudy_s2(eopatch, max_cc=0.1)
ts_idxs = timestamps_within_date(noncloudy.timestamp, start_date, end_date)
features = noncloudy.data['BANDS'][ts_idxs, ...] / 10000
filtered_ts = [eopatch.timestamp[tsi] for tsi in ts_idxs]
if normalize:
country_stats = country_norm_df[country_norm_df.country == str(country)]
norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values
norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values
features = (features - norm_median) / norm_std
s2_p1 = norm_s2_npz['p1']
s2_p99 = norm_s2_npz['p99']
features = (features - s2_p1) / (s2_p99 - s2_p1)
alphas = np.ones(n_views)
if histogram_matching:
hr = match_histograms(hr, features[-1], multichannel=True)
n_feature_timestamps = len(features)
if n_feature_timestamps < n_views:
if padding == 'zeros':
features = pad_to_k(features, n_views, pad_to_front=False)
alphas[n_feature_timestamps:] = 0
elif padding == 'repeat':
n_pad = n_views - n_feature_timestamps
padded = features[-1:].repeat(n_pad, axis=0)
features = np.concatenate((features, padded))
else:
features = features[-n_views:, ...]
# Tensor is `CxTxHxW`
features = np.moveaxis(features, -1, 0)
imageset = ImageSet(name=os.path.basename(imset_file),
lr=features,
alphas=alphas,
ts=filtered_ts[::-1])
return imageset
class EopatchPredictionDataset(Dataset):
""" Derived Dataset class for loading many imagesets from a list of directories.
:param imset_dir: name of directory containing files
:param imset_npz_files: list of filenames that constitute the dataset
:param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True`
if you are training HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS
(`BxTxCxHxW`)
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param start_date: specifies the start of the temporal range of the stack of images used for prediction
:param end_date: specifies the end of the temporal range of the stack of images used for prediction
:param country: specifies the name of the country so it can be matched with the country_norm_df
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param channels_feats: which channels (i.e. indices) are extracted from lrs sequence
:param channels_labels: which channels (i.e. indices) are extracted from hr image
:param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
frames, `n_views` timeframes from the lrs sequence are taken in reverted order, i.e. last is first
:param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
options are `zeros`, where 0 frames are appended to features, or `repeat` where random repeats of
timeframes are taken
:param transform: function executed on lr and hr arrays as augmentation
"""
def __init__(
self,
imset_dir: str,
imset_npz_files: list,
time_first: bool,
start_date: datetime,
end_date: datetime,
country: str,
filesystem: object = None,
normalize: bool = True,
country_norm_df: object = None,
norm_deimos_npz: np.ndarray = None,
norm_s2_npz: np.ndarray = None,
channels_feats: List[int] = [0, 1, 2, 3],
n_views: int = 16,
padding: str = 'zeros',
histogram_matching: bool = False
):
super().__init__()
self.imset_dir = imset_dir
self.filesystem = filesystem
self.imset_npz_files = imset_npz_files
self.time_first = time_first
self.normalize = normalize
self.country_norm_df = country_norm_df
self.norm_deimos_npz = norm_deimos_npz
self.norm_s2_npz = norm_s2_npz
self.channels_feats = channels_feats
self.n_views = n_views
self.padding = padding
self.start_date = start_date
self.end_date = end_date
self.histogram_matching = histogram_matching
self.country = country
def __len__(self):
return len(self.imset_npz_files)
def __getitem__(self, index: int) -> ImageSet:
""" Returns an ImageSet dict of all assets in the directory of the given index."""
if isinstance(index, int):
imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index])
else:
raise KeyError('Index must be of type `int`.')
imset = read_imageset_eopatch(
imset_file=imset_file,
filesystem=self.filesystem,
normalize=self.normalize,
country_norm_df=self.country_norm_df,
norm_deimos_npz=self.norm_deimos_npz,
norm_s2_npz=self.norm_s2_npz,
n_views=self.n_views,
padding=self.padding,
start_date=self.start_date,
end_date=self.end_date,
country=self.country,
histogram_matching=self.histogram_matching,
)
lr = imset['lr'][self.channels_feats]
if self.time_first:
lr = np.swapaxes(lr, 0, 1)
imset['lr'] = torch.from_numpy(lr.copy())
imset['alphas'] = torch.from_numpy(imset['alphas'])
return imset
|
# RT - Twitter
from typing import TYPE_CHECKING, Union, Dict, Tuple, List
from discord.ext import commands
import discord
from tweepy.asynchronous import AsyncStream
from tweepy import API, OAuthHandler
from tweepy.errors import NotFound
from tweepy.models import Status
from jishaku.functools import executor_function
from asyncio import Event
if TYPE_CHECKING:
from asyncio import AbstractEventLoop
from tweepy.models import Status
from aiomysql import Pool
from rtlib import Backend
class DataManager:
TABLE = "TwitterNotification"
DEFAULT_MAX = 5
def __init__(self, loop: "AbstractEventLoop", pool: "Pool"):
self.pool = pool
loop.create_task(self._prepare_table())
async def _prepare_table(self):
        # Prepare the notification table.
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
await cursor.execute(
f"""CREATE TABLE IF NOT EXISTS {self.TABLE} (
GuildID BIGINT, ChannelID BIGINT, UserName TEXT
);"""
)
await self._update_users(cursor)
self.ready.set()
async def _read(self, cursor, channel, username):
await cursor.execute(
f"SELECT * FROM {self.TABLE} WHERE ChannelID = %s AND UserName = %s;",
(channel.id, username)
)
return await cursor.fetchone()
async def write(self, channel: discord.TextChannel, username: str) -> None:
"設定を保存します。"
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
assert not await self._read(cursor, channel, username), "既に設定されています。"
await cursor.execute(
f"SELECT * FROM {self.TABLE} WHERE GuildID = %s;",
(channel.guild.id,)
)
assert len(await cursor.fetchall()) <= self.DEFAULT_MAX, "追加しすぎです。"
await cursor.execute(
f"INSERT INTO {self.TABLE} VALUES (%s, %s, %s);",
(channel.guild.id, channel.id, username)
)
async def delete(self, channel: discord.TextChannel, username: str) -> None:
"設定を削除します。"
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
assert await self._read(cursor, channel, username), "その設定はありません。"
await cursor.execute(
f"DELETE FROM {self.TABLE} WHERE ChannelID = %s AND UserName = %s;",
(channel.id, username)
)
async def _update_users(self, cursor):
await cursor.execute(
f"SELECT ChannelID, UserName FROM {self.TABLE};"
)
self.users = {
username: channel_id
for channel_id, username in await cursor.fetchall()
}
async def update_users(self) -> List[Tuple[int, str]]:
"設定のキャッシュを更新します。"
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
await self._update_users(cursor)
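# Note: the table stores one (GuildID, ChannelID, UserName) row per setting, `write`
# enforces a per-guild limit derived from DEFAULT_MAX, and `self.users` caches
# UserName -> ChannelID so the stream callback can look channels up cheaply.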
class TwitterNotification(commands.Cog, DataManager, AsyncStream):
TWITTERID_HEADERS = {
"authority": "tweeterid.com",
"sec-ch-ua": "^\\^Microsoft",
"accept": "*/*",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"x-requested-with": "XMLHttpRequest",
"sec-ch-ua-mobile": "?0",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36 Edg/93.0.961.38",
"sec-ch-ua-platform": "^\\^Windows^\\^",
"origin": "https://tweeterid.com",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://tweeterid.com/",
"accept-language": "ja,en;q=0.9,en-GB;q=0.8,en-US;q=0.7",
}
BASE_URL = "https://twitter.com/{}/status/{}"
def __init__(self, bot: "Backend"):
self.bot = bot
self.users: Dict[str, int] = {}
self.ready = Event()
oauth = OAuthHandler(
self.bot.secret["twitter"]["consumer_key"],
self.bot.secret["twitter"]["consumer_secret"]
)
oauth.set_access_token(
self.bot.secret["twitter"]["access_token"],
self.bot.secret["twitter"]["access_token_secret"]
)
self.api = API(oauth)
super(commands.Cog, self).__init__(self.bot.loop, self.bot.mysql.pool)
super(DataManager, self).__init__(**self.bot.secret["twitter"])
self.connected = False
self.cache: Dict[str, str] = {}
self.bot.loop.create_task(self.start_stream())
def filter(self, *args, **kwargs):
        # Overridden so that `self.connected` tracks the stream state.
self.connected = True
super().filter(*args, **kwargs)
def disconnect(self, *args, **kwargs):
        # Overridden so that `self.connected` tracks the stream state.
self.connected = False
super().disconnect(*args, **kwargs)
def get_url(self, status: Union[Status, Tuple[str, int]]) -> str:
"渡されたStatusからツイートのURLを取得します。"
return self.BASE_URL.format(
status.user.screen_name, status.id_str
) if isinstance(status, Status) else self.BASE_URL.format(*status)
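    # For example, a (screen_name, status_id) tuple such as ("tasuren1", 1) would be
    # formatted as "https://twitter.com/tasuren1/status/1" (values illustrative only).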
async def on_status(self, status: "Status"):
        # Called whenever a new tweet arrives on the stream.
if status.user.screen_name in self.users:
            # Notify only when the tweet comes from a watched user.
if not (channel := self.bot.get_channel(
self.users[status.user.screen_name]
)):
                # If the notification channel no longer exists, drop the setting.
                # `delete` only touches `channel.id`, so a lightweight discord.Object
                # wrapper around the stored channel ID is sufficient here.
                return await self.delete(
                    discord.Object(self.users[status.user.screen_name]),
                    status.user.screen_name
                )
            # Add a link button that opens the tweet.
view = discord.ui.View(timeout=1)
view.add_item(discord.ui.Button(
label="Tweetを見る", url=self.get_url(status)
))
            # Adjust the message text.
if hasattr(status, "retweeted_status") and status.retweeted_status:
                # Retweet
status.text = status.text.replace(
"RT @", "🔁 Retweeted @", 1
)
elif hasattr(status, "quoted_status") and status.quoted_status:
                # Quote retweet
status.text = "🔁 Retweeted [Original]({})\n{}".format(
self.get_url(status.quoted_status), status.text
)
elif (hasattr(status, "in_reply_to_status_id")
and status.in_reply_to_status_id):
                # Reply
status.text = "⤴ Replied [Original]({})\n{}".format(
self.get_url((
status.in_reply_to_screen_name,
status.in_reply_to_status_id
)), status.text
)
            # Replace "@" with a full-width "＠" so the text cannot trigger mentions.
            status.text = status.text.replace("@", "＠")
try:
                # Send the notification via the channel webhook.
await channel.webhook_send(
content=status.text,
username=status.user.screen_name + \
("✅" if status.user.verified else "") \
+ " - RT Twitter Notification",
avatar_url=(
"" if status.user.default_profile_image
else status.user.profile_image_url_https
), view=view
)
except discord.Forbidden:
await channel.send(
"Twitter通知をしようとしましたが権限がないため通知に失敗しました。\n" \
"チャンネルのWebhookを管理できるように権限を付与してください。\n" \
"またRTにはたくさんの機能があり全てを動かすのなら管理者権限を付与する方が手っ取り早いです。"
)
except Exception as e:
await channel.send(
f"Twitter通知をしようとしましたが失敗しました。\nエラーコード:`{e}`"
)
@executor_function
def get_user_id(self, username: str) -> str:
"ユーザー名からユーザーのIDを取得します。※これは子ルーチン関数です。"
return self.api.get_user(screen_name=username).id_str
async def start_stream(self, disconnect: bool = False) -> None:
"Twitterのストリームを開始します。"
if disconnect and self.connected:
self.disconnect()
if hasattr(self, "ready"):
await self.ready.wait()
del self.ready
if self.users:
follow = []
for username in list(self.users):  # iterate over a copy; entries may be deleted below
try:
follow.append(await self.get_user_id(username))
except NotFound:
channel = self.bot.get_channel(self.users[username])
await self.delete(channel, username)
del self.users[username]
await channel.send(
"Twitter通知をしようとしましたがエラーが発生しました。\n" \
+ f"{username.replace("@", "@")}のユーザーが見つかりませんでした。"
)
self.filter(follow=follow)
def cog_unload(self):
if self.connected:
self.disconnect()
@commands.group(
aliases=["ツイッター", "tw"], extras={
"headding": {"ja": "Twitter通知", "en": "Twitter Notification"},
"parent": "ServerUseful"
}
)
async def twitter(self, ctx):
"""!lang ja
--------
Twitterの指定したユーザーのツイートを指定したチャンネルに通知させます。
Aliases
-------
tw, ツイッター
!lang en
--------
Notify the specified channel of tweets from the specified user on Twitter.
Aliases
-------
tw"""
if not ctx.invoked_subcommand:
await ctx.reply("使用方法が違います。 / It is used in different ways.")
@twitter.command("set", aliases=["s", "設定"])
@commands.has_permissions(manage_channels=True, manage_webhooks=True)
@commands.cooldown(1, 60, commands.BucketType.channel)
async def set_(self, ctx, onoff: bool, *, username):
"""!lang ja
--------
Twitterの通知を設定します。
このコマンドを実行したチャンネルに指定したユーザーのツイートの通知が来るようになります。
Parameters
----------
onoff : bool
onまたはoffで通知を有効にするか無効にするかです。
username : str
通知する対象のユーザーの名前です。
`@`から始まるものです。
Examples
--------
`rt!twitter set on tasuren1`
RTの開発者のtasurenのTwitterの通知を有効にします。
Aliases
-------
s, 設定
!lang en
--------
Sets up Twitter notifications.
The channel where this command is executed will receive notifications of tweets from the specified user.
Parameters
----------
onoff : bool
Enables or disables notifications with on or off.
username : str
The name of the user to be notified.
It must start with `@`.
Examples
--------
`rt!twitter set on tasuren1`
Enables Twitter notifications for the RT developer tasuren.
Aliases
-------
s"""
await ctx.trigger_typing()
try:
if onoff:
await self.get_user_id(username)
await self.write(ctx.channel, username)
else:
await self.delete(ctx.channel, username)
except AssertionError:
await ctx.reply(
{"ja": "既に設定されています。\nまたは設定しすぎです。",
"en": "The username is already set.\nOr it is set too high."} \
if onoff else {
"ja": "設定されていません。",
"en": "The username is not set yet."
}
)
except NotFound:
await ctx.reply(
{"ja": "そのユーザーが見つかりませんでした。",
"en": "The user is not found."}
)
else:
await self.update_users()
await self.start_stream(True)
await ctx.reply("Ok")
@twitter.command("list", aliases=["l", "一覧"])
async def list_(self, ctx):
"""!lang ja
--------
設定しているTwitter通知のリストを表示します。
Aliases
-------
l, 一覧
!lang en
--------
Displays twitter notification settings
Aliases
-------
l"""
await ctx.reply(
embed=discord.Embed(
title="Twitter",
description="\n".join(
f"<#{channel_id}>:{username}"
for username, channel_id in self.users.items()
)
)
)
def setup(bot):
bot.add_cog(TwitterNotification(bot)) | # RT - Twitter
from typing import TYPE_CHECKING, Union, Dict, Tuple, List
from discord.ext import commands
import discord
from tweepy.asynchronous import AsyncStream
from tweepy import API, OAuthHandler
from tweepy.errors import NotFound
from tweepy.models import Status
from jishaku.functools import executor_function
from asyncio import Event
if TYPE_CHECKING:
from asyncio import AbstractEventLoop
from tweepy.models import Status
from aiomysql import Pool
from rtlib import Backend
class DataManager:
TABLE = "TwitterNotification"
DEFAULT_MAX = 5
def __init__(self, loop: "AbstractEventLoop", pool: "Pool"):
self.pool = pool
loop.create_task(self._prepare_table())
async def _prepare_table(self):
# テーブルを準備します。
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
await cursor.execute(
f"""CREATE TABLE IF NOT EXISTS {self.TABLE} (
GuildID BIGINT, ChannelID BIGINT, UserName TEXT
);"""
)
await self._update_users(cursor)
self.ready.set()
async def _read(self, cursor, channel, username):
await cursor.execute(
f"SELECT * FROM {self.TABLE} WHERE ChannelID = %s AND UserName = %s;",
(channel.id, username)
)
return await cursor.fetchone()
async def write(self, channel: discord.TextChannel, username: str) -> None:
"設定を保存します。"
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
assert not await self._read(cursor, channel, username), "既に設定されています。"
await cursor.execute(
f"SELECT * FROM {self.TABLE} WHERE GuildID = %s;",
(channel.guild.id,)
)
assert len(await cursor.fetchall()) <= self.DEFAULT_MAX, "追加しすぎです。"
await cursor.execute(
f"INSERT INTO {self.TABLE} VALUES (%s, %s, %s);",
(channel.guild.id, channel.id, username)
)
async def delete(self, channel: discord.TextChannel, username: str) -> None:
"設定を削除します。"
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
assert await self._read(cursor, channel, username), "その設定はありません。"
await cursor.execute(
f"DELETE FROM {self.TABLE} WHERE ChannelID = %s AND UserName = %s;",
(channel.id, username)
)
async def _update_users(self, cursor):
await cursor.execute(
f"SELECT ChannelID, UserName FROM {self.TABLE};"
)
self.users = {
username: channel_id
for channel_id, username in await cursor.fetchall()
}
async def update_users(self) -> List[Tuple[int, str]]:
"設定のキャッシュを更新します。"
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
await self._update_users(cursor)
class TwitterNotification(commands.Cog, DataManager, AsyncStream):
TWITTERID_HEADERS = {
"authority": "tweeterid.com",
"sec-ch-ua": "^\\^Microsoft",
"accept": "*/*",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"x-requested-with": "XMLHttpRequest",
"sec-ch-ua-mobile": "?0",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36 Edg/93.0.961.38",
"sec-ch-ua-platform": "^\\^Windows^\\^",
"origin": "https://tweeterid.com",
"sec-fetch-site": "same-origin",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://tweeterid.com/",
"accept-language": "ja,en;q=0.9,en-GB;q=0.8,en-US;q=0.7",
}
BASE_URL = "https://twitter.com/{}/status/{}"
def __init__(self, bot: "Backend"):
self.bot = bot
self.users: Dict[str, int] = {}
self.ready = Event()
oauth = OAuthHandler(
self.bot.secret["twitter"]["consumer_key"],
self.bot.secret["twitter"]["consumer_secret"]
)
oauth.set_access_token(
self.bot.secret["twitter"]["access_token"],
self.bot.secret["twitter"]["access_token_secret"]
)
self.api = API(oauth)
super(commands.Cog, self).__init__(self.bot.loop, self.bot.mysql.pool)
super(DataManager, self).__init__(**self.bot.secret["twitter"])
self.connected = False
self.cache: Dict[str, str] = {}
self.bot.loop.create_task(self.start_stream())
def filter(self, *args, **kwargs):
# connectedを使えるようにするためにオーバーライドした関数です。
self.connected = True
super().filter(*args, **kwargs)
def disconnect(self, *args, **kwargs):
# connectedを使えるようにするためにオーバーライドした関数です。
self.connected = False
super().disconnect(*args, **kwargs)
def get_url(self, status: Union[Status, Tuple[str, int]]) -> str:
"渡されたStatusからツイートのURLを取得します。"
return self.BASE_URL.format(
status.user.screen_name, status.id_str
) if isinstance(status, Status) else self.BASE_URL.format(*status)
async def on_status(self, status: "Status"):
# ツイートを取得した際に呼ばれる関数です。
if status.user.screen_name in self.users:
# 通知対象のユーザーのツイートなら通知を行います。
if not (channel := self.bot.get_channel(
self.users[status.user.screen_name]
)):
# もし通知するチャンネルが見当たらない場合はその設定を削除する。
return await self.delete(
self.users[status.user.screen_name], status.user.screen_name
)
# Tweetに飛ぶリンクボタンを追加しておく。
view = discord.ui.View(timeout=1)
view.add_item(discord.ui.Button(
label="Tweetを見る", url=self.get_url(status)
))
# メッセージを調整する。
if hasattr(status, "retweeted_status") and status.retweeted_status:
# リツイート
status.text = status.text.replace(
"RT @", "🔁 Retweeted @", 1
)
elif hasattr(status, "quoted_status") and status.quoted_status:
# 引用リツイート
status.text = "🔁 Retweeted [Original]({})\n{}".format(
self.get_url(status.quoted_status), status.text
)
elif (hasattr(status, "in_reply_to_status_id")
and status.in_reply_to_status_id):
# 返信
status.text = "⤴ Replied [Original]({})\n{}".format(
self.get_url((
status.in_reply_to_screen_name,
status.in_reply_to_status_id
)), status.text
)
# メンションが飛ばないように@は全角に置き換えておく。
status.text = status.text.replace("@", "@")
try:
# 通知の送信を行う。
await channel.webhook_send(
content=status.text,
username=status.user.screen_name + \
("✅" if status.user.verified else "") \
+ " - RT Twitter Notification",
avatar_url=(
"" if status.user.default_profile_image
else status.user.profile_image_url_https
), view=view
)
except discord.Forbidden:
await channel.send(
"Twitter通知をしようとしましたが権限がないため通知に失敗しました。\n" \
"チャンネルのWebhookを管理できるように権限を付与してください。\n" \
"またRTにはたくさんの機能があり全てを動かすのなら管理者権限を付与する方が手っ取り早いです。"
)
except Exception as e:
await channel.send(
f"Twitter通知をしようとしましたが失敗しました。\nエラーコード:`{e}`"
)
@executor_function
def get_user_id(self, username: str) -> str:
"ユーザー名からユーザーのIDを取得します。※これは子ルーチン関数です。"
return self.api.get_user(screen_name=username).id_str
async def start_stream(self, disconnect: bool = False) -> None:
"Twitterのストリームを開始します。"
if disconnect and self.connected:
self.disconnect()
if hasattr(self, "ready"):
await self.ready.wait()
del self.ready
if self.users:
follow = []
for username in list(self.users):  # iterate over a copy; entries may be deleted below
try:
follow.append(await self.get_user_id(username))
except NotFound:
channel = self.bot.get_channel(self.users[username])
await self.delete(channel, username)
del self.users[username]
await channel.send(
"Twitter通知をしようとしましたがエラーが発生しました。\n" \
+ f"{username.replace('@', '@')}のユーザーが見つかりませんでした。"
)
self.filter(follow=follow)
def cog_unload(self):
if self.connected:
self.disconnect()
@commands.group(
aliases=["ツイッター", "tw"], extras={
"headding": {"ja": "Twitter通知", "en": "Twitter Notification"},
"parent": "ServerUseful"
}
)
async def twitter(self, ctx):
"""!lang ja
--------
Twitterの指定したユーザーのツイートを指定したチャンネルに通知させます。
Aliases
-------
tw, ツイッター
!lang en
--------
Notify the specified channel of tweets from the specified user on Twitter.
Aliases
-------
tw"""
if not ctx.invoked_subcommand:
await ctx.reply("使用方法が違います。 / It is used in different ways.")
@twitter.command("set", aliases=["s", "設定"])
@commands.has_permissions(manage_channels=True, manage_webhooks=True)
@commands.cooldown(1, 60, commands.BucketType.channel)
async def set_(self, ctx, onoff: bool, *, username):
"""!lang ja
--------
Twitterの通知を設定します。
このコマンドを実行したチャンネルに指定したユーザーのツイートの通知が来るようになります。
Parameters
----------
onoff : bool
onまたはoffで通知を有効にするか無効にするかです。
username : str
通知する対象のユーザーの名前です。
`@`から始まるものです。
Examples
--------
`rt!twitter set on tasuren1`
RTの開発者のtasurenのTwitterの通知を有効にします。
Aliases
-------
s, 設定
!lang en
--------
Sets up Twitter notifications.
The channel where this command is executed will receive notifications of tweets from the specified user.
Parameters
----------
onoff : bool
Enables or disables notifications with on or off.
username : str
The name of the user to be notified.
It must start with `@`.
Examples
--------
`rt!twitter set on tasuren1`
Enables Twitter notifications for the RT developer tasuren.
Aliases
-------
s"""
await ctx.trigger_typing()
try:
if onoff:
await self.get_user_id(username)
await self.write(ctx.channel, username)
else:
await self.delete(ctx.channel, username)
except AssertionError:
await ctx.reply(
{"ja": "既に設定されています。\nまたは設定しすぎです。",
"en": "The username is already set.\nOr it is set too high."} \
if onoff else {
"ja": "設定されていません。",
"en": "The username is not set yet."
}
)
except NotFound:
await ctx.reply(
{"ja": "そのユーザーが見つかりませんでした。",
"en": "The user is not found."}
)
else:
await self.update_users()
await self.start_stream(True)
await ctx.reply("Ok")
@twitter.command("list", aliases=["l", "一覧"])
async def list_(self, ctx):
"""!lang ja
--------
設定しているTwitter通知のリストを表示します。
Aliases
-------
l, 一覧
!lang en
--------
Displays twitter notification settings
Aliases
-------
l"""
await ctx.reply(
embed=discord.Embed(
title="Twitter",
description="\n".join(
f"<#{channel_id}>:{username}"
for username, channel_id in self.users.items()
)
)
)
def setup(bot):
bot.add_cog(TwitterNotification(bot)) |
""" Module containing a class for encapsulating the settings of the tree search
"""
import os
import yaml
from aizynthfinder.utils.logging import logger
from aizynthfinder.utils.paths import data_path
from aizynthfinder.mcts.policy import Policy
from aizynthfinder.mcts.stock import Stock, MongoDbInchiKeyQuery
class Configuration:
"""
Encapsulating the settings of the tree search, including the policy,
the stock and various parameters.
All the parameters can be retrieved as attributes of the Configuration
object, e.g.
.. code-block::
config.max_transforms # The maximum number of transforms
config.iteration_limit # The maximum number of iterations
On instantiation it will read default parameters from a config.yml
file located in the `data` folder of the package.
"""
def __init__(self):
self._properties = {}
filename = os.path.join(data_path(), "config.yml")
with open(filename, "r") as fileobj:
_config = yaml.load(fileobj.read(), Loader=yaml.SafeLoader)
self._update_from_config(_config)
self.stock = Stock()
self.policy = Policy(self)
self._logger = logger()
def __eq__(self, other):
return self._properties == other._properties
@classmethod
def from_file(cls, filename):
"""
Loads a configuration from a yaml file.
The parameters not set in the yaml file are taken from the default values.
The policies and stocks specified in the yaml file are directly loaded.
:param filename: the path to a yaml file
:type filename: str
:return: a Configuration object with settings from the yaml file
:rtype: Configuration
"""
config_obj = Configuration()
with open(filename, "r") as fileobj:
_config = yaml.load(fileobj.read(), Loader=yaml.SafeLoader)
config_obj._update_from_config(_config)
for key, policy_spec in _config.get("policy", {}).get("files", {}).items():
modelfile, templatefile = policy_spec
config_obj.policy.load_policy(modelfile, templatefile, key)
for key, stockfile in _config.get("stock", {}).get("files", {}).items():
config_obj.stock.load_stock(stockfile, key)
if "mongodb" in _config.get("stock", {}):
query_obj = MongoDbInchiKeyQuery(**(_config["stock"]["mongodb"] or {}))
config_obj.stock.load_stock(query_obj, "mongodb_stock")
return config_obj
def update(self, **settings):
""" Update the configuration using dictionary of parameters
"""
for setting, value in settings.items():
setattr(self, setting, value)
self._logger.info(f"Setting {setting.replace("_", " ")} to {value}")
def _update_from_config(self, config):
self._properties.update(config.get("finder", {}).get("properties", {}))
self._properties.update(config.get("policy", {}).get("properties", {}))
self._properties.update(config.get("properties", {}))
self.__dict__.update(self._properties)
| """ Module containing a class for encapsulating the settings of the tree search
"""
import os
import yaml
from aizynthfinder.utils.logging import logger
from aizynthfinder.utils.paths import data_path
from aizynthfinder.mcts.policy import Policy
from aizynthfinder.mcts.stock import Stock, MongoDbInchiKeyQuery
class Configuration:
"""
Encapsulating the settings of the tree search, including the policy,
the stock and various parameters.
All the parameters can be retrieved as attributes of the Configuration
object, e.g.
.. code-block::
config.max_transforms # The maximum number of transforms
config.iteration_limit # The maximum number of iterations
On instantiation it will read default parameters from a config.yml
file located in the `data` folder of the package.
"""
def __init__(self):
self._properties = {}
filename = os.path.join(data_path(), "config.yml")
with open(filename, "r") as fileobj:
_config = yaml.load(fileobj.read(), Loader=yaml.SafeLoader)
self._update_from_config(_config)
self.stock = Stock()
self.policy = Policy(self)
self._logger = logger()
def __eq__(self, other):
return self._properties == other._properties
@classmethod
def from_file(cls, filename):
"""
Loads a configuration from a yaml file.
The parameters not set in the yaml file are taken from the default values.
The policies and stocks specified in the yaml file are directly loaded.
:param filename: the path to a yaml file
:type filename: str
:return: a Configuration object with settings from the yaml file
:rtype: Configuration
"""
config_obj = Configuration()
with open(filename, "r") as fileobj:
_config = yaml.load(fileobj.read(), Loader=yaml.SafeLoader)
config_obj._update_from_config(_config)
for key, policy_spec in _config.get("policy", {}).get("files", {}).items():
modelfile, templatefile = policy_spec
config_obj.policy.load_policy(modelfile, templatefile, key)
for key, stockfile in _config.get("stock", {}).get("files", {}).items():
config_obj.stock.load_stock(stockfile, key)
if "mongodb" in _config.get("stock", {}):
query_obj = MongoDbInchiKeyQuery(**(_config["stock"]["mongodb"] or {}))
config_obj.stock.load_stock(query_obj, "mongodb_stock")
return config_obj
def update(self, **settings):
""" Update the configuration using dictionary of parameters
"""
for setting, value in settings.items():
setattr(self, setting, value)
self._logger.info(f"Setting {setting.replace('_', ' ')} to {value}")
def _update_from_config(self, config):
self._properties.update(config.get("finder", {}).get("properties", {}))
self._properties.update(config.get("policy", {}).get("properties", {}))
self._properties.update(config.get("properties", {}))
self.__dict__.update(self._properties)
|
"""API ROUTER"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from flask import jsonify, Blueprint
from gfwanalysis.errors import WHRCBiomassError
from gfwanalysis.middleware import get_geo_by_hash, get_geo_by_use, get_geo_by_wdpa, \
get_geo_by_national, get_geo_by_subnational, get_geo_by_regional
from gfwanalysis.routes.api import error, set_params
from gfwanalysis.serializers import serialize_whrc_biomass
from gfwanalysis.services.analysis.whrc_biomass_service import WHRCBiomassService
from gfwanalysis.validators import validate_geostore
whrc_biomass_endpoints_v1 = Blueprint('whrc_biomass', __name__)
def analyze(geojson, area_ha):
"""Analyze WHRC Biomass"""
logging.info('[ROUTER]: WHRC Getting biomass')
if not geojson:
return error(status=400, detail='A Geojson argument is required')
threshold, start, end, table = set_params()
logging.info(f'[ROUTER]: whrc biomass params {threshold}, {start}, {end}')
try:
data = WHRCBiomassService.analyze(
geojson=geojson,
threshold=threshold)
except WHRCBiomassError as e:
logging.error('[ROUTER]: ' + e.message)
return error(status=500, detail=e.message)
except Exception as e:
logging.error('[ROUTER]: ' + str(e))
return error(status=500, detail='Generic Error')
data['area_ha'] = area_ha
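# Derived metric: biomass density = total biomass / tree-cover area from the
# service response, guarded against division by zero when no tree cover
# passes the threshold.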
data['biomass_density'] = data['biomass'] / data['tree_cover'] if data['tree_cover'] > 0 else 0
# logging.info(f"[Router WHRC Biomass] - response from service: biomass density {data.get("biomass_density")}")
# logging.info(f"[Router WHRC Biomass] - response from service: biomass {data.get("biomass")}")
return jsonify(data=serialize_whrc_biomass(data, 'whrc-biomass')), 200
@whrc_biomass_endpoints_v1.route('/', strict_slashes=False, methods=['GET', 'POST'])
@validate_geostore
@get_geo_by_hash
def get_by_geostore(geojson, area_ha):
"""By Geostore Endpoint"""
logging.info('[ROUTER]: Getting biomass by geostore')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/use/<name>/<id>', strict_slashes=False, methods=['GET'])
@get_geo_by_use
def get_by_use(name, id, geojson, area_ha):
"""Use Endpoint"""
logging.info('[ROUTER]: Getting biomass by use')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/wdpa/<id>', strict_slashes=False, methods=['GET'])
@get_geo_by_wdpa
def get_by_wdpa(id, geojson, area_ha):
"""Wdpa Endpoint"""
logging.info('[ROUTER]: Getting biomass by wdpa')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>', strict_slashes=False, methods=['GET'])
@get_geo_by_national
def get_by_national(iso, geojson, area_ha):
"""National Endpoint"""
logging.info('[ROUTER]: Getting biomass loss by iso')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>/<id1>', strict_slashes=False, methods=['GET'])
@get_geo_by_subnational
def get_by_subnational(iso, id1, geojson, area_ha):
"""Subnational Endpoint"""
logging.info('[ROUTER]: Getting biomass loss by admin1')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>/<id1>/<id2>', strict_slashes=False, methods=['GET'])
@get_geo_by_regional
def get_by_regional(iso, id1, id2, geojson, area_ha):
"""Subnational Endpoint"""
logging.info('[ROUTER]: Getting biomass loss by admin2 ')
return analyze(geojson, area_ha)
| """API ROUTER"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from flask import jsonify, Blueprint
from gfwanalysis.errors import WHRCBiomassError
from gfwanalysis.middleware import get_geo_by_hash, get_geo_by_use, get_geo_by_wdpa, \
get_geo_by_national, get_geo_by_subnational, get_geo_by_regional
from gfwanalysis.routes.api import error, set_params
from gfwanalysis.serializers import serialize_whrc_biomass
from gfwanalysis.services.analysis.whrc_biomass_service import WHRCBiomassService
from gfwanalysis.validators import validate_geostore
whrc_biomass_endpoints_v1 = Blueprint('whrc_biomass', __name__)
def analyze(geojson, area_ha):
"""Analyze WHRC Biomass"""
logging.info('[ROUTER]: WHRC Getting biomass')
if not geojson:
return error(status=400, detail='A Geojson argument is required')
threshold, start, end, table = set_params()
logging.info(f'[ROUTER]: whrc biomass params {threshold}, {start}, {end}')
try:
data = WHRCBiomassService.analyze(
geojson=geojson,
threshold=threshold)
except WHRCBiomassError as e:
logging.error('[ROUTER]: ' + e.message)
return error(status=500, detail=e.message)
except Exception as e:
logging.error('[ROUTER]: ' + str(e))
return error(status=500, detail='Generic Error')
data['area_ha'] = area_ha
data['biomass_density'] = data['biomass'] / data['tree_cover'] if data['tree_cover'] > 0 else 0
# logging.info(f"[Router WHRC Biomass] - response from service: biomass density {data.get('biomass_density')}")
# logging.info(f"[Router WHRC Biomass] - response from service: biomass {data.get('biomass')}")
return jsonify(data=serialize_whrc_biomass(data, 'whrc-biomass')), 200
@whrc_biomass_endpoints_v1.route('/', strict_slashes=False, methods=['GET', 'POST'])
@validate_geostore
@get_geo_by_hash
def get_by_geostore(geojson, area_ha):
"""By Geostore Endpoint"""
logging.info('[ROUTER]: Getting biomass by geostore')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/use/<name>/<id>', strict_slashes=False, methods=['GET'])
@get_geo_by_use
def get_by_use(name, id, geojson, area_ha):
"""Use Endpoint"""
logging.info('[ROUTER]: Getting biomass by use')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/wdpa/<id>', strict_slashes=False, methods=['GET'])
@get_geo_by_wdpa
def get_by_wdpa(id, geojson, area_ha):
"""Wdpa Endpoint"""
logging.info('[ROUTER]: Getting biomass by wdpa')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>', strict_slashes=False, methods=['GET'])
@get_geo_by_national
def get_by_national(iso, geojson, area_ha):
"""National Endpoint"""
logging.info('[ROUTER]: Getting biomass loss by iso')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>/<id1>', strict_slashes=False, methods=['GET'])
@get_geo_by_subnational
def get_by_subnational(iso, id1, geojson, area_ha):
"""Subnational Endpoint"""
logging.info('[ROUTER]: Getting biomass loss by admin1')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>/<id1>/<id2>', strict_slashes=False, methods=['GET'])
@get_geo_by_regional
def get_by_regional(iso, id1, id2, geojson, area_ha):
"""Subnational Endpoint"""
logging.info('[ROUTER]: Getting biomass loss by admin2 ')
return analyze(geojson, area_ha)
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from commands.basecommand import BaseCommand
class Ports(BaseCommand):
def __init__(self):
self.__name__ = 'Ports'
def run_ssh(self, sshc):
res = self._ssh_data_with_header(sshc, '/ip service print detail')
sus_dns, recommendation = self.check_results_ssh(res)
return {'raw_data': res,
'suspicious': sus_dns,
'recommendation': recommendation}
def check_results_ssh(self, res):
sus_ports = []
recommendation = []
def_ports = {'telnet': 23, 'ftp': 21, 'www': 80, 'ssh': 22, 'www-ssl': 443, 'api': 8728, 'winbox': 8291,
'api-ssl': 8729}
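# Compare each service's configured port against the RouterOS defaults above.
# Example with a hypothetical row from _ssh_data_with_header:
#   {'name': 'www', 'port': '8080'} -> reported as changed from 80.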
for item in res:
service = item['name']
if def_ports[service] != int(item['port']):
sus_ports.append(f'The port for {service}, has changed from {def_ports[service]} to {item['port']} - '
f'severity: low')
if (service == 'ssh') and (int(item['port']) == 22):
recommendation.append('The port for ssh protocol is as ssh default port (22)- Mikrotik company '
'recommended to change it')
return sus_ports, recommendation
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from commands.basecommand import BaseCommand
class Ports(BaseCommand):
def __init__(self):
self.__name__ = 'Ports'
def run_ssh(self, sshc):
res = self._ssh_data_with_header(sshc, '/ip service print detail')
sus_dns, recommendation = self.check_results_ssh(res)
return {'raw_data': res,
'suspicious': sus_dns,
'recommendation': recommendation}
def check_results_ssh(self, res):
sus_ports = []
recommendation = []
def_ports = {'telnet': 23, 'ftp': 21, 'www': 80, 'ssh': 22, 'www-ssl': 443, 'api': 8728, 'winbox': 8291,
'api-ssl': 8729}
for item in res:
service = item['name']
if def_ports[service] != int(item['port']):
sus_ports.append(f'The port for {service}, has changed from {def_ports[service]} to {item["port"]} - '
f'severity: low')
if (service == 'ssh') and (int(item['port']) == 22):
recommendation.append('The port for ssh protocol is as ssh default port (22)- Mikrotik company '
'recommended to change it')
return sus_ports, recommendation
|
from .objects import Server, Zone, RRSet, Record, Comment, Cryptokey, Metadata, SearchResult, StatisticItem, \
MapStatisticItem, RingStatisticItem, SimpleStatisticItem, CacheFlushResult
from .exceptions import PDNSApiException, PDNSApiNotFound
import json
from functools import partial
import requests
import logging
logger = logging.getLogger(__name__)
# TODO:
# - Logging
# - TSIGKeys
class APIClient:
def __init__(self, api_host, api_key, tls_verify=True, request_timeout=None):
self._api_url = api_host if 'api/v1' in api_host else f"{api_host}/api/v1"
self._api_key = api_key
self._tls_verify = tls_verify
self._request_timeout = request_timeout
if not self._tls_verify:
logger.warning("Disabling TLS certificate validation.")
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
self.request_headers = {'X-API-Key': self._api_key}
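# Thin HTTP verb helpers: each one is self.request with the method pre-bound,
# so e.g. self.get(path, params=...) issues an authenticated GET.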
self.get = partial(self.request, method='GET')
self.post = partial(self.request, method='POST')
self.put = partial(self.request, method='PUT')
self.patch = partial(self.request, method='PATCH')
self.delete = partial(self.request, method='DELETE')
self.servers = self._set_servers()
self.current_server = self.servers[0]
self.zones = self._set_zones()
def request(self, path: str, method: str, data=None, **kwargs):
url = f"{self._api_url}/{path.lstrip("/")}"
if data is None:
data = {}
response = requests.request(method,
url,
json=data,
headers=self.request_headers,
timeout=self._request_timeout,
verify=self._tls_verify,
**kwargs
)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as e:
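# Translate HTTP errors into library exceptions: 404 becomes PDNSApiNotFound,
# anything else becomes PDNSApiException carrying whatever error message the
# PowerDNS API returned.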
if response.status_code == 404:
raise (PDNSApiNotFound(e)) from None
try:
status_message = response.json()
status_message = status_message.get('error', status_message.get('errors', 'Unknown error'))
except Exception:
status_message = response.text
raise PDNSApiException(response.status_code, status_message) from None
except json.decoder.JSONDecodeError:
return response.text
def _set_servers(self):
new_servers = list()
for server in self.get('servers'):
new_servers.append(Server(**server))
return new_servers
def _set_zones(self):
new_zones = list()
for zone in self.get(f'servers/{self.current_server.id}/zones'):
new_zones.append(Zone(**zone))
return new_zones
def create_zone(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones'
return Zone(**self.post(path, data=zone))
# Zones
def get_zone(self, zone_name):
path = f'servers/{self.current_server.id}/zones/{zone_name}'
zone = Zone(**self.get(path))
new_rrsets = []
for rrset in zone.rrsets:
new_comments = []
new_records = []
rrset = RRSet(**rrset)
for comment in rrset.comments:
new_comments.append(Comment(**comment))
for record in rrset.records:
new_records.append(Record(**record))
rrset.comments = new_comments
rrset.records = new_records
new_rrsets.append(rrset)
zone.rrsets = new_rrsets
return zone
def delete_zone(self, zone_name):
path = f'servers/{self.current_server.id}/zones/{zone_name}'
self.delete(path)
def update_zone_metadata(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones/{zone.name}'
self.put(path, data=zone)
return self.get_zone(zone.name)
def patch_rrsets(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones/{zone.name}'
self.patch(path, data={'rrsets': zone.rrsets})
return self.get_zone(zone.name)
def create_records(self, zone: Zone, rrsets: list):
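# Usage sketch. Field names follow the PowerDNS rrset JSON; that the RRSet and
# Record helpers accept them as keyword arguments is an assumption:
#   rrset = RRSet(name='www.example.org.', type='A', ttl=300,
#                 records=[Record(content='192.0.2.1', disabled=False)])
#   client.create_records(zone, [rrset])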
for rrset in rrsets:
rrset.changetype = 'REPLACE'
zone = Zone(name=zone.name, kind=zone.kind, rrsets=rrsets)
return self.patch_rrsets(zone)
def delete_records(self, zone: Zone, rrsets: list):
for rrset in rrsets:
rrset.changetype = 'DELETE'
zone = Zone(name=zone.name, kind=zone.kind, rrsets=rrsets)
return self.patch_rrsets(zone)
# Cryptokeys
def get_zone_cryptokeys(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys'
cryptkeys_new = []
for cryptokey in self.get(path):
cryptkeys_new.append(Cryptokey(**cryptokey))
return cryptkeys_new
def create_cryptokey(self, zone: Zone, cryptokey: Cryptokey):
path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys'
return self.post(path, data=cryptokey)
def get_cryptokey(self, zone: Zone, key_id):
path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys/{key_id}'
return Cryptokey(**self.get(path))
def put_cryptokey(self, zone: Zone, cryptokey: Cryptokey):
path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys/{cryptokey.id}'
self.put(path, data=cryptokey)
# Metadata
def get_zone_metadata(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata'
metadata_new = []
for metadata in self.get(path):
metadata_new.append(Metadata(**metadata))
return metadata_new
def create_metadata(self, zone: Zone, metadata: Metadata):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata'
self.post(path, data=metadata)
return self.get_zone_metadata(zone)
def get_metadata(self, zone: Zone, metadata_kind):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata_kind}'
return Metadata(**self.get(path))
def put_metadata(self, zone: Zone, metadata: Metadata):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata.kind}'
return Metadata(**self.put(path, data=metadata))
def delete_metadata(self, zone: Zone, metadata: Metadata):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata.kind}'
self.delete(path)
# TSIGKeys
# FIXME TBW
# Searching
def search(self, query: str, max_results: int, object_type: str):
path = f'servers/{self.current_server.id}/search-data'
object_types = ['all', 'zone', 'record', 'comment']
if object_type not in object_types:
raise TypeError(f"object_type must be one of {", ".join(object_types)}")
if not isinstance(max_results, int):
raise TypeError("max_results needs to be an integer.")
payload = {'q': query, 'max': max_results, 'object_type': object_type}
new_results = []
for result in self.get(path, params=payload):
new_results.append(SearchResult(**result))
return new_results
# Statistics
def statistics(self, statistic=None, includerings=True):
path = f'servers/{self.current_server.id}/statistics'
payload = {'statistic': statistic, 'includerings': includerings}
type_map = {
'StatisticItem': StatisticItem,
'MapStatisticItem': MapStatisticItem,
'RingStatisticItem': RingStatisticItem
}
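# The statistics endpoint mixes three item shapes; items whose value is a list
# (map/ring types) are unpacked into SimpleStatisticItem objects below.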
new_statistics = []
for item in self.get(path, params=payload):
if item.get('type') in type_map.keys():
new_statistic = type_map[item.get('type')](**item)
if isinstance(new_statistic.value, list):
new_values = []
for value in new_statistic.value:
new_values.append(SimpleStatisticItem(**value))
new_statistic.value = new_values
if statistic is not None:
return new_statistic
new_statistics.append(new_statistic)
return new_statistics
# Cache
def flush_cache(self, domain: str):
path = f'servers/{self.current_server.id}/cache/flush'
payload = {'domain': domain}
return CacheFlushResult(**self.put(path, params=payload))
| from .objects import Server, Zone, RRSet, Record, Comment, Cryptokey, Metadata, SearchResult, StatisticItem, \
MapStatisticItem, RingStatisticItem, SimpleStatisticItem, CacheFlushResult
from .exceptions import PDNSApiException, PDNSApiNotFound
import json
from functools import partial
import requests
import logging
logger = logging.getLogger(__name__)
# TODO:
# - Logging
# - TSIGKeys
class APIClient:
def __init__(self, api_host, api_key, tls_verify=True, request_timeout=None):
self._api_url = api_host if 'api/v1' in api_host else f"{api_host}/api/v1"
self._api_key = api_key
self._tls_verify = tls_verify
self._request_timeout = request_timeout
if not self._tls_verify:
logger.warning("Disabling TLS certificate validation.")
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
self.request_headers = {'X-API-Key': self._api_key}
self.get = partial(self.request, method='GET')
self.post = partial(self.request, method='POST')
self.put = partial(self.request, method='PUT')
self.patch = partial(self.request, method='PATCH')
self.delete = partial(self.request, method='DELETE')
self.servers = self._set_servers()
self.current_server = self.servers[0]
self.zones = self._set_zones()
def request(self, path: str, method: str, data=None, **kwargs):
url = f"{self._api_url}/{path.lstrip('/')}"
if data is None:
data = {}
response = requests.request(method,
url,
json=data,
headers=self.request_headers,
timeout=self._request_timeout,
verify=self._tls_verify,
**kwargs
)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as e:
if response.status_code == 404:
raise (PDNSApiNotFound(e)) from None
try:
status_message = response.json()
status_message = status_message.get('error', status_message.get('errors', 'Unknown error'))
except Exception:
status_message = response.text
raise PDNSApiException(response.status_code, status_message) from None
except json.decoder.JSONDecodeError:
return response.text
def _set_servers(self):
new_servers = list()
for server in self.get('servers'):
new_servers.append(Server(**server))
return new_servers
def _set_zones(self):
new_zones = list()
for zone in self.get(f'servers/{self.current_server.id}/zones'):
new_zones.append(Zone(**zone))
return new_zones
def create_zone(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones'
return Zone(**self.post(path, data=zone))
# Zones
def get_zone(self, zone_name):
path = f'servers/{self.current_server.id}/zones/{zone_name}'
zone = Zone(**self.get(path))
new_rrsets = []
for rrset in zone.rrsets:
new_comments = []
new_records = []
rrset = RRSet(**rrset)
for comment in rrset.comments:
new_comments.append(Comment(**comment))
for record in rrset.records:
new_records.append(Record(**record))
rrset.comments = new_comments
rrset.records = new_records
new_rrsets.append(rrset)
zone.rrsets = new_rrsets
return zone
def delete_zone(self, zone_name):
path = f'servers/{self.current_server.id}/zones/{zone_name}'
self.delete(path)
def update_zone_metadata(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones/{zone.name}'
self.put(path, data=zone)
return self.get_zone(zone.name)
def patch_rrsets(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones/{zone.name}'
self.patch(path, data={'rrsets': zone.rrsets})
return self.get_zone(zone.name)
def create_records(self, zone: Zone, rrsets: list):
for rrset in rrsets:
rrset.changetype = 'REPLACE'
zone = Zone(name=zone.name, kind=zone.kind, rrsets=rrsets)
return self.patch_rrsets(zone)
def delete_records(self, zone: Zone, rrsets: list):
for rrset in rrsets:
rrset.changetype = 'DELETE'
zone = Zone(name=zone.name, kind=zone.kind, rrsets=rrsets)
return self.patch_rrsets(zone)
# Cryptokeys
def get_zone_cryptokeys(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys'
cryptkeys_new = []
for cryptokey in self.get(path):
cryptkeys_new.append(Cryptokey(**cryptokey))
return cryptkeys_new
def create_cryptokey(self, zone: Zone, cryptokey: Cryptokey):
path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys'
return self.post(path, data=cryptokey)
def get_cryptokey(self, zone: Zone, key_id):
path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys/{key_id}'
return Cryptokey(**self.get(path))
def put_cryptokey(self, zone: Zone, cryptokey: Cryptokey):
path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys/{cryptokey.id}'
self.put(path, data=cryptokey)
# Metadata
def get_zone_metadata(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata'
metadata_new = []
for metadata in self.get(path):
metadata_new.append(Metadata(**metadata))
return metadata_new
def create_metadata(self, zone: Zone, metadata: Metadata):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata'
self.post(path, data=metadata)
return self.get_zone_metadata(zone)
def get_metadata(self, zone: Zone, metadata_kind):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata_kind}'
return Metadata(**self.get(path))
def put_metadata(self, zone: Zone, metadata: Metadata):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata.kind}'
return Metadata(**self.put(path, data=metadata))
def delete_metadata(self, zone: Zone, metadata: Metadata):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata.kind}'
self.delete(path)
# TSIGKeys
# FIXME TBW
# Searching
def search(self, query: str, max_results: int, object_type: str):
path = f'servers/{self.current_server.id}/search-data'
object_types = ['all', 'zone', 'record', 'comment']
if object_type not in object_types:
raise TypeError(f"object_type must be one of {', '.join(object_types)}")
if not isinstance(max_results, int):
raise TypeError("max_results needs to be an integer.")
payload = {'q': query, 'max': max_results, 'object_type': object_type}
new_results = []
for result in self.get(path, params=payload):
new_results.append(SearchResult(**result))
return new_results
# Statistics
def statistics(self, statistic=None, includerings=True):
path = f'servers/{self.current_server.id}/statistics'
payload = {'statistic': statistic, 'includerings': includerings}
type_map = {
'StatisticItem': StatisticItem,
'MapStatisticItem': MapStatisticItem,
'RingStatisticItem': RingStatisticItem
}
new_statistics = []
for item in self.get(path, params=payload):
if item.get('type') in type_map.keys():
new_statistic = type_map[item.get('type')](**item)
if isinstance(new_statistic.value, list):
new_values = []
for value in new_statistic.value:
new_values.append(SimpleStatisticItem(**value))
new_statistic.value = new_values
if statistic is not None:
return new_statistic
new_statistics.append(new_statistic)
return new_statistics
# Cache
def flush_cache(self, domain: str):
path = f'servers/{self.current_server.id}/cache/flush'
payload = {'domain': domain}
return CacheFlushResult(**self.put(path, params=payload))
|
"""
BROS
Copyright 2022-present NAVER Corp.
Apache License v2.0
Do 2nd preprocess on top of the result of the 'preprocess.sh' file.
Reference: https://github.com/microsoft/unilm/blob/master/layoutlm/deprecated/examples/seq_labeling/run_seq_labeling.py
"""
import json
import os
from collections import Counter
from tqdm import tqdm
from transformers import BertTokenizer
MAX_SEQ_LENGTH = 512
MODEL_TYPE = "bert"
VOCA = "bert-base-uncased"
INPUT_PATH = "./data"
OUTPUT_PATH = "../../datasets/funsd"
os.makedirs(OUTPUT_PATH, exist_ok=True)
os.makedirs(os.path.join(OUTPUT_PATH, "preprocessed"), exist_ok=True)
def main():
for dataset_split in ["train", "val"]:
print(f"dataset_split: {dataset_split}")
do_2nd_preprocess(dataset_split)
os.system(f"cp -r {os.path.join(INPUT_PATH, "training_data")} {OUTPUT_PATH}")
os.system(f"cp -r {os.path.join(INPUT_PATH, "testing_data")} {OUTPUT_PATH}")
os.system(f"cp {os.path.join(INPUT_PATH, "labels.txt")} {OUTPUT_PATH}")
def do_2nd_preprocess(dataset_split):
label_fpath = os.path.join(INPUT_PATH, "labels.txt")
labels = get_labels(label_fpath)
tokenizer = BertTokenizer.from_pretrained(VOCA, do_lower_case=True)
cls_token_id = tokenizer.convert_tokens_to_ids("[CLS]")
sep_token_id = tokenizer.convert_tokens_to_ids("[SEP]")
pad_token_id = tokenizer.convert_tokens_to_ids("[PAD]")
ignore_index = -100
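# -100 matches the default ignore_index of torch.nn.CrossEntropyLoss, so padded
# and non-first-subword positions are skipped by the loss.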
if dataset_split == "train":
mode = "train"
elif dataset_split == "val":
mode = "test"
else:
raise ValueError(f"Invalid dataset_split={dataset_split}")
examples = read_examples_from_file(INPUT_PATH, mode)
features = convert_examples_to_features(
examples,
labels,
MAX_SEQ_LENGTH,
tokenizer,
cls_token_at_end=bool(MODEL_TYPE in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if MODEL_TYPE in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(MODEL_TYPE in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(MODEL_TYPE in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if MODEL_TYPE in ["xlnet"] else 0,
pad_token_label_id=ignore_index,
)
# Save image ocr files
image_cnter = Counter()
preprocessed_fnames = []
for example, feature in tqdm(zip(examples, features), total=len(examples)):
# Example: guid, words, labels, boxes, actual_bboxes, file_name, page_size
# Feature: input_ids, input_mask, segment_ids, label_ids,
# boxes, actual_bboxes, file_name, page_size
this_file_name = "{}_{}.json".format(
example.file_name[: example.file_name.rfind(".")],
image_cnter[example.file_name],
)
image_cnter[example.file_name] += 1
data_obj = {}
# meta
data_obj["meta"] = {}
# data_obj["meta"]["image_size"]
# = example.page_size[::-1] + [3] # [height, width, rgb?]
height, width = example.page_size[::-1]
data_obj["meta"]["imageSize"] = {"width": width, "height": height}
data_obj["meta"]["voca"] = VOCA
if mode == "train":
data_obj["meta"]["image_path"] = os.path.join(
"training_data", "images", example.file_name
)
elif mode == "test":
data_obj["meta"]["image_path"] = os.path.join(
"testing_data", "images", example.file_name
)
else:
raise ValueError(f"Unknown mode={mode}")
# words
# text, tokens, boundingBox
data_obj["words"] = []
this_input_ids = []
for word, bb in zip(example.words, example.actual_bboxes):
word_tokens = []
for splitted_word in word.split():
word_tokens.append(
tokenizer.convert_tokens_to_ids(tokenizer.tokenize(splitted_word))
)
tokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(word))
word_obj = {
"text": word,
"tokens": tokens,
"boundingBox": [
[bb[0], bb[1]],
[bb[2], bb[1]],
[bb[2], bb[3]],
[bb[0], bb[3]],
],
}
data_obj["words"].append(word_obj)
this_input_ids.extend(tokens)
if VOCA == "bert-base-uncased":
feature_input_ids = feature.input_ids
assert feature_input_ids[0] == cls_token_id
feature_input_ids = feature_input_ids[
1 : feature_input_ids.index(sep_token_id)
]
assert feature_input_ids == this_input_ids
else:
raise NotImplementedError
# masks, labels
data_obj["parse"] = {}
if VOCA == "bert-base-uncased":
data_obj["parse"]["seq_len"] = sum(feature.input_mask)
data_obj["parse"]["input_ids"] = feature.input_ids
data_obj["parse"]["input_mask"] = feature.input_mask
data_obj["parse"]["label_ids"] = feature.label_ids
else:
raise NotImplementedError
# Save file name to list
preprocessed_fnames.append(os.path.join("preprocessed", this_file_name))
# Save to file
data_obj_file = os.path.join(OUTPUT_PATH, "preprocessed", this_file_name)
with open(data_obj_file, "w", encoding="utf-8") as fp:
json.dump(data_obj, fp, ensure_ascii=False)
# Save file name list file
preprocessed_filelist_file = os.path.join(
OUTPUT_PATH, f"preprocessed_files_{dataset_split}.txt"
)
with open(preprocessed_filelist_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(preprocessed_fnames))
def get_labels(path):
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
def read_examples_from_file(data_dir, mode):
file_path = os.path.join(data_dir, "{}.txt".format(mode))
box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode))
image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode))
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f, open(
box_file_path, encoding="utf-8"
) as fb, open(image_file_path, encoding="utf-8") as fi:
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
for line, bline, iline in zip(f, fb, fi):
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(
InputExample(
guid="{}-{}".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
guid_index += 1
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
else:
splits = line.split("\t")
bsplits = bline.split("\t")
isplits = iline.split("\t")
assert len(splits) == 2
assert len(bsplits) == 2
assert len(isplits) == 4
assert splits[0] == bsplits[0]
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
box = bsplits[-1].replace("\n", "")
box = [int(b) for b in box.split()]
boxes.append(box)
actual_bbox = [int(b) for b in isplits[1].split()]
actual_bboxes.append(actual_bbox)
page_size = [int(i) for i in isplits[2].split()]
file_name = isplits[3].strip()
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(
InputExample(
guid="%s-%d".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return examples
class InputFeatures(object):
"""A single set of features of data."""
def __init__(
self,
input_ids,
input_mask,
segment_ids,
label_ids,
boxes,
actual_bboxes,
file_name,
page_size,
):
assert all(
0 <= coord <= 1000 for box in boxes for coord in box
), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(
boxes
)
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
cls_token_box=[0, 0, 0, 0],
sep_token_box=[1000, 1000, 1000, 1000],
pad_token_box=[0, 0, 0, 0],
pad_token_segment_id=0,
pad_token_label_id=-1,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
):
"""Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
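# Maps label strings to integer ids, e.g. for FUNSD-style BIO tags roughly
# {"O": 0, "B-HEADER": 1, "I-HEADER": 2, ...} (the exact set and order come
# from labels.txt).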
features = []
for (ex_index, example) in enumerate(examples):
file_name = example.file_name
page_size = example.page_size
width, height = page_size
# if ex_index % 10000 == 0:
# print("Writing example {} of {}".format(ex_index, len(examples)))
tokens = []
token_boxes = []
actual_bboxes = []
label_ids = []
for word, label, box, actual_bbox in zip(
example.words, example.labels, example.boxes, example.actual_bboxes
):
word_tokens = tokenizer.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
actual_bboxes.extend([actual_bbox] * len(word_tokens))
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend(
[label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1)
)
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
token_boxes = token_boxes[: (max_seq_length - special_tokens_count)]
actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
token_boxes += [cls_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
token_boxes = [cls_token_box] + token_boxes
actual_bboxes = [[0, 0, width, height]] + actual_bboxes
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = (
[0 if mask_padding_with_zero else 1] * padding_length
) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
token_boxes = ([pad_token_box] * padding_length) + token_boxes
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
token_boxes += [pad_token_box] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(token_boxes) == max_seq_length
# if ex_index < 5:
# print("*** Example ***")
# print("guid: {}".format(example.guid))
# print("tokens: {}".format(" ".join([str(x) for x in tokens])))
# print("input_ids: {}".format(" ".join([str(x) for x in input_ids])))
# print("input_mask: {}".format(" ".join([str(x) for x in input_mask])))
# print("segment_ids: {}".format(" ".join([str(x) for x in segment_ids])))
# print("label_ids: {}".format(" ".join([str(x) for x in label_ids])))
# print("boxes: {}".format(" ".join([str(x) for x in token_boxes])))
# print("actual_bboxes: {}".format(" ".join([str(x) for x in actual_bboxes])))
features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids,
boxes=token_boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return features
if __name__ == "__main__":
main()
| """
BROS
Copyright 2022-present NAVER Corp.
Apache License v2.0
Do 2nd preprocess on top of the result of the 'preprocess.sh' file.
Reference: https://github.com/microsoft/unilm/blob/master/layoutlm/deprecated/examples/seq_labeling/run_seq_labeling.py
"""
import json
import os
from collections import Counter
from tqdm import tqdm
from transformers import BertTokenizer
MAX_SEQ_LENGTH = 512
MODEL_TYPE = "bert"
VOCA = "bert-base-uncased"
INPUT_PATH = "./data"
OUTPUT_PATH = "../../datasets/funsd"
os.makedirs(OUTPUT_PATH, exist_ok=True)
os.makedirs(os.path.join(OUTPUT_PATH, "preprocessed"), exist_ok=True)
def main():
for dataset_split in ["train", "val"]:
print(f"dataset_split: {dataset_split}")
do_2nd_preprocess(dataset_split)
os.system(f"cp -r {os.path.join(INPUT_PATH, 'training_data')} {OUTPUT_PATH}")
os.system(f"cp -r {os.path.join(INPUT_PATH, 'testing_data')} {OUTPUT_PATH}")
os.system(f"cp {os.path.join(INPUT_PATH, 'labels.txt')} {OUTPUT_PATH}")
def do_2nd_preprocess(dataset_split):
label_fpath = os.path.join(INPUT_PATH, "labels.txt")
labels = get_labels(label_fpath)
tokenizer = BertTokenizer.from_pretrained(VOCA, do_lower_case=True)
cls_token_id = tokenizer.convert_tokens_to_ids("[CLS]")
sep_token_id = tokenizer.convert_tokens_to_ids("[SEP]")
pad_token_id = tokenizer.convert_tokens_to_ids("[PAD]")
ignore_index = -100
if dataset_split == "train":
mode = "train"
elif dataset_split == "val":
mode = "test"
else:
raise ValueError(f"Invalid dataset_split={dataset_split}")
examples = read_examples_from_file(INPUT_PATH, mode)
features = convert_examples_to_features(
examples,
labels,
MAX_SEQ_LENGTH,
tokenizer,
cls_token_at_end=bool(MODEL_TYPE in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if MODEL_TYPE in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(MODEL_TYPE in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(MODEL_TYPE in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if MODEL_TYPE in ["xlnet"] else 0,
pad_token_label_id=ignore_index,
)
# Save image ocr files
image_cnter = Counter()
preprocessed_fnames = []
for example, feature in tqdm(zip(examples, features), total=len(examples)):
# Example: guid, words, labels, boxes, actual_bboxes, file_name, page_size
# Feature: input_ids, input_mask, segment_ids, label_ids,
# boxes, actual_bboxes, file_name, page_size
this_file_name = "{}_{}.json".format(
example.file_name[: example.file_name.rfind(".")],
image_cnter[example.file_name],
)
image_cnter[example.file_name] += 1
data_obj = {}
# meta
data_obj["meta"] = {}
# data_obj["meta"]["image_size"]
# = example.page_size[::-1] + [3] # [height, width, rgb?]
height, width = example.page_size[::-1]
data_obj["meta"]["imageSize"] = {"width": width, "height": height}
data_obj["meta"]["voca"] = VOCA
if mode == "train":
data_obj["meta"]["image_path"] = os.path.join(
"training_data", "images", example.file_name
)
elif mode == "test":
data_obj["meta"]["image_path"] = os.path.join(
"testing_data", "images", example.file_name
)
else:
raise ValueError(f"Unknown mode={mode}")
# words
# text, tokens, boundingBox
data_obj["words"] = []
this_input_ids = []
for word, bb in zip(example.words, example.actual_bboxes):
word_tokens = []
for splitted_word in word.split():
word_tokens.append(
tokenizer.convert_tokens_to_ids(tokenizer.tokenize(splitted_word))
)
tokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(word))
word_obj = {
"text": word,
"tokens": tokens,
"boundingBox": [
[bb[0], bb[1]],
[bb[2], bb[1]],
[bb[2], bb[3]],
[bb[0], bb[3]],
],
}
data_obj["words"].append(word_obj)
this_input_ids.extend(tokens)
if VOCA == "bert-base-uncased":
feature_input_ids = feature.input_ids
assert feature_input_ids[0] == cls_token_id
feature_input_ids = feature_input_ids[
1 : feature_input_ids.index(sep_token_id)
]
assert feature_input_ids == this_input_ids
else:
raise NotImplementedError
# masks, labels
data_obj["parse"] = {}
if VOCA == "bert-base-uncased":
data_obj["parse"]["seq_len"] = sum(feature.input_mask)
data_obj["parse"]["input_ids"] = feature.input_ids
data_obj["parse"]["input_mask"] = feature.input_mask
data_obj["parse"]["label_ids"] = feature.label_ids
else:
raise NotImplementedError
# Save file name to list
preprocessed_fnames.append(os.path.join("preprocessed", this_file_name))
# Save to file
data_obj_file = os.path.join(OUTPUT_PATH, "preprocessed", this_file_name)
with open(data_obj_file, "w", encoding="utf-8") as fp:
json.dump(data_obj, fp, ensure_ascii=False)
# Save file name list file
preprocessed_filelist_file = os.path.join(
OUTPUT_PATH, f"preprocessed_files_{dataset_split}.txt"
)
with open(preprocessed_filelist_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(preprocessed_fnames))
def get_labels(path):
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
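# Example (hypothetical labels.txt containing "B-HEADER" and "I-HEADER" on separate lines):
#   get_labels("labels.txt") -> ["O", "B-HEADER", "I-HEADER"]   # "O" is prepended when missing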
class InputExample(object):
"""A single training/test example for token classification."""
    def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size):
        """Constructs an InputExample.
        Args:
            guid: Unique id for the example.
            words: list. The words of the sequence.
            labels: (Optional) list. The labels for each word of the sequence. This should be
                specified for train and dev examples, but not for test examples.
            boxes: list. Per-word bounding boxes scaled to the 0-1000 range.
            actual_bboxes: list. Per-word bounding boxes in original pixel coordinates.
            file_name: Name of the source image file.
            page_size: [width, height] of the source page.
        """
self.guid = guid
self.words = words
self.labels = labels
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
def read_examples_from_file(data_dir, mode):
file_path = os.path.join(data_dir, "{}.txt".format(mode))
box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode))
image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode))
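    # Each line of the three parallel files describes one token (tab-separated):
    #   {mode}.txt:       word <TAB> label
    #   {mode}_box.txt:   word <TAB> "x0 y0 x1 y1"  (coordinates scaled to 0-1000, cf. the
    #                     range check in InputFeatures)
    #   {mode}_image.txt: word <TAB> actual bbox <TAB> page size <TAB> file name
    # Blank lines and "-DOCSTART-" lines mark document boundaries.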
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f, open(
box_file_path, encoding="utf-8"
) as fb, open(image_file_path, encoding="utf-8") as fi:
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
for line, bline, iline in zip(f, fb, fi):
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(
InputExample(
guid="{}-{}".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
guid_index += 1
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
else:
splits = line.split("\t")
bsplits = bline.split("\t")
isplits = iline.split("\t")
assert len(splits) == 2
assert len(bsplits) == 2
assert len(isplits) == 4
assert splits[0] == bsplits[0]
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
box = bsplits[-1].replace("\n", "")
box = [int(b) for b in box.split()]
boxes.append(box)
actual_bbox = [int(b) for b in isplits[1].split()]
actual_bboxes.append(actual_bbox)
page_size = [int(i) for i in isplits[2].split()]
file_name = isplits[3].strip()
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(
InputExample(
                guid="{}-{}".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return examples
class InputFeatures(object):
"""A single set of features of data."""
def __init__(
self,
input_ids,
input_mask,
segment_ids,
label_ids,
boxes,
actual_bboxes,
file_name,
page_size,
):
        assert all(
            0 <= coord <= 1000 for box in boxes for coord in box
        ), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(
            boxes
        )
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
cls_token_box=[0, 0, 0, 0],
sep_token_box=[1000, 1000, 1000, 1000],
pad_token_box=[0, 0, 0, 0],
pad_token_segment_id=0,
pad_token_label_id=-1,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
):
    """Loads a data file into a list of `InputFeatures`.
    `cls_token_at_end` defines the location of the CLS token:
        - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
        - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
    `cls_token_segment_id` defines the segment id associated with the CLS token (0 for BERT, 2 for XLNet)
    """
label_map = {label: i for i, label in enumerate(label_list)}
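    # Layout sketch for the default BERT pattern (hypothetical two-word example; each word
    # happens to stay a single wordpiece):
    #   words      = ["Report", "Date"]
    #   tokens     = [CLS]  report  date  [SEP]  [PAD] ... [PAD]   (right-padded to max_seq_length)
    #   boxes      = cls_token_box, box_report, box_date, sep_token_box, pad_token_box, ...
    #   label_ids  = pad_token_label_id, label_report, label_date, pad_token_label_id, ...
    #   input_mask = 1, 1, 1, 1, 0, ..., 0
    # Sub-word continuations (e.g. "##port") would repeat the word's box but get pad_token_label_id.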
features = []
for (ex_index, example) in enumerate(examples):
file_name = example.file_name
page_size = example.page_size
width, height = page_size
# if ex_index % 10000 == 0:
# print("Writing example {} of {}".format(ex_index, len(examples)))
tokens = []
token_boxes = []
actual_bboxes = []
label_ids = []
for word, label, box, actual_bbox in zip(
example.words, example.labels, example.boxes, example.actual_bboxes
):
word_tokens = tokenizer.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
actual_bboxes.extend([actual_bbox] * len(word_tokens))
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend(
[label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1)
)
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
token_boxes = token_boxes[: (max_seq_length - special_tokens_count)]
actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
token_boxes += [cls_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
token_boxes = [cls_token_box] + token_boxes
actual_bboxes = [[0, 0, width, height]] + actual_bboxes
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = (
[0 if mask_padding_with_zero else 1] * padding_length
) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
token_boxes = ([pad_token_box] * padding_length) + token_boxes
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
token_boxes += [pad_token_box] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(token_boxes) == max_seq_length
# if ex_index < 5:
# print("*** Example ***")
# print("guid: {}".format(example.guid))
# print("tokens: {}".format(" ".join([str(x) for x in tokens])))
# print("input_ids: {}".format(" ".join([str(x) for x in input_ids])))
# print("input_mask: {}".format(" ".join([str(x) for x in input_mask])))
# print("segment_ids: {}".format(" ".join([str(x) for x in segment_ids])))
# print("label_ids: {}".format(" ".join([str(x) for x in label_ids])))
# print("boxes: {}".format(" ".join([str(x) for x in token_boxes])))
# print("actual_bboxes: {}".format(" ".join([str(x) for x in actual_bboxes])))
features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids,
boxes=token_boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return features
if __name__ == "__main__":
main()
|
from database import (
fix_ids,
ImageModel,
CategoryModel,
AnnotationModel,
DatasetModel,
TaskModel,
ExportModel
)
# import pycocotools.mask as mask
import numpy as np
import time
import json
import os
from celery import shared_task
from ..socket import create_socket
from mongoengine import Q
from config import Config
from pathlib import PurePath
def bbox2seg(bbox):
return [bbox[0],bbox[1],bbox[0]+bbox[2],bbox[1],bbox[0]+bbox[2],bbox[1]+bbox[3],bbox[0],bbox[1]+bbox[3]]
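# bbox2seg turns a COCO-style [x, y, width, height] box into a flat 4-corner polygon
# (top-left, top-right, bottom-right, bottom-left), e.g.
#   bbox2seg([10, 20, 30, 40]) -> [10, 20, 40, 20, 40, 60, 10, 60]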
@shared_task
def export_annotations(task_id, dataset_id, categories):
task = TaskModel.objects.get(id=task_id)
dataset = DatasetModel.objects.get(id=dataset_id)
task.update(status="PROGRESS")
socket = create_socket()
task.info("Beginning Export (COCO Format)")
db_categories = CategoryModel.objects(id__in=categories, deleted=False) \
.only(*CategoryModel.COCO_PROPERTIES)
db_images = ImageModel.objects(
deleted=False, dataset_id=dataset.id).only(
*ImageModel.COCO_PROPERTIES)
db_annotations = AnnotationModel.objects(
deleted=False, category_id__in=categories)
total_items = db_categories.count()
coco = {
'images': [],
'categories': [],
'annotations': []
}
total_items += db_images.count()
progress = 0
    # iterate through all categories and upsert
category_names = []
for category in fix_ids(db_categories):
if len(category.get('keypoint_labels', [])) > 0:
category['keypoints'] = category.pop('keypoint_labels', [])
category['skeleton'] = category.pop('keypoint_edges', [])
else:
if 'keypoint_edges' in category:
del category['keypoint_edges']
if 'keypoint_labels' in category:
del category['keypoint_labels']
task.info(f"Adding category: {category.get("name")}")
coco.get('categories').append(category)
category_names.append(category.get('name'))
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
total_annotations = db_annotations.count()
total_images = db_images.count()
for image in db_images:
image = fix_ids(image)
if Config.EXPORT_RELPATH and 'relpath' in image:
image['file_name'] = image['relpath']
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
annotations = db_annotations.filter(image_id=image.get('id'))\
.only(*AnnotationModel.COCO_PROPERTIES)
annotations = fix_ids(annotations)
if len(annotations) == 0:
continue
num_annotations = 0
for annotation in annotations:
has_keypoints = len(annotation.get('keypoints', [])) > 0
has_segmentation = len(annotation.get('segmentation', [])) > 0
if has_keypoints or has_segmentation:
if not has_keypoints:
if 'keypoints' in annotation:
del annotation['keypoints']
else:
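                    # COCO keypoints are stored flat as [x1, y1, v1, x2, y2, v2, ...]; every
                    # third value is the visibility flag, so count the keypoints with v > 0.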
arr = np.array(annotation.get('keypoints', []))
arr = arr[2::3]
annotation['num_keypoints'] = len(arr[arr > 0])
num_annotations += 1
coco.get('annotations').append(annotation)
'''
if num_annotations > 0:
image["num_annotations"]=num_annotations
image["annotated"]=True
'''
task.info(
f"Exporting {num_annotations} annotations for image {image.get("id")}")
coco.get('images').append(image)
    task.info(
        f"Done exporting {total_annotations} annotations and {total_images} images from {dataset.name}")
timestamp = time.time()
directory = f"{dataset.directory}.exports/"
file_path = f"{directory}coco-{timestamp}.json"
if not os.path.exists(directory):
os.makedirs(directory)
task.info(f"Writing export to file {file_path}")
with open(file_path, 'w') as fp:
json.dump(coco, fp)
task.info("Creating export object")
export = ExportModel(dataset_id=dataset.id, path=file_path, tags=[
"COCO", *category_names])
export.save()
task.set_progress(100, socket=socket)
@shared_task
def import_annotations(task_id, dataset_id, coco_json):
task = TaskModel.objects.get(id=task_id)
dataset = DatasetModel.objects.get(id=dataset_id)
# UR added relpath
directory = os.path.join(Config.DATASET_DIRECTORY, dataset.name)
task.update(status="PROGRESS")
socket = create_socket()
task.info("Beginning Import")
images = ImageModel.objects(dataset_id=dataset.id)
categories = CategoryModel.objects
coco_images = coco_json.get('images', [])
coco_annotations = coco_json.get('annotations', [])
coco_categories = coco_json.get('categories', [])
task.info(f"Importing {len(coco_categories)} categories, "
f"{len(coco_images)} images, and "
f"{len(coco_annotations)} annotations")
total_items = sum([
len(coco_categories),
len(coco_annotations),
len(coco_images)
])
progress = 0
task.info("===== Importing Categories =====")
# category id mapping ( file : database )
categories_id = {}
# Create any missing categories
for category in coco_categories:
category_name = category.get('name')
category_id = category.get('id')
category_model = categories.filter(name__iexact=category_name).first()
if category_model is None:
task.warning(
f"{category_name} category not found (creating a new one)")
new_category = CategoryModel(
name=category_name,
keypoint_edges=category.get('skeleton', []),
keypoint_labels=category.get('keypoints', [])
)
new_category.save()
category_model = new_category
dataset.categories.append(new_category.id)
task.info(f"{category_name} category found")
# map category ids
categories_id[category_id] = category_model.id
# update progress
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
dataset.update(set__categories=dataset.categories)
task.info("===== Loading Images =====")
# image id mapping ( file: database )
images_id = {}
categories_by_image = {}
# Find all images
for image in coco_images:
image_id = image.get('id')
image_filename = image.get('file_name')
# update progress
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
# UR added relpath
image_model = images.filter(relpath=image_filename).all()
if len(image_model) == 0:
task.warning(f"Could not find image {image_filename}")
continue
if len(image_model) > 1:
task.error(
f"Too many images found with the same file name: {image_filename}")
continue
task.info(f"Image {image_filename} found")
image_model = image_model[0]
images_id[image_id] = image_model
categories_by_image[image_id] = list()
task.info("===== Import Annotations =====")
for annotation in coco_annotations:
image_id = annotation.get('image_id')
category_id = annotation.get('category_id')
segmentation = annotation.get('segmentation', [])
keypoints = annotation.get('keypoints', [])
# is_crowd = annotation.get('iscrowed', False)
area = annotation.get('area', 0)
bbox = annotation.get('bbox', [0, 0, 0, 0])
isbbox = annotation.get('isbbox', False)
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
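        # Treat the annotation as a segment if it carries polygon data or is flagged as a box,
        # but only when the bbox is non-degenerate (sum(bbox) > 1 filters out all-zero boxes).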
has_segmentation = (len(segmentation) > 0 or isbbox) and sum(bbox) > 1
has_keypoints = len(keypoints) > 0
if not has_segmentation and not has_keypoints:
task.warning(
f"Annotation {annotation.get("id")} has no segmentation, bbox or keypoints")
continue
try:
image_model = images_id[image_id]
category_model_id = categories_id[category_id]
image_categories = categories_by_image[image_id]
except KeyError:
            task.warning(
                f"Could not find image associated with annotation {annotation.get("id")}")
continue
annotation_model = AnnotationModel.objects(
image_id=image_model.id,
category_id=category_model_id,
segmentation=segmentation,
keypoints=keypoints,
            bbox=bbox
).first()
if annotation_model is None:
task.info(f"Creating annotation data ({image_id}, {category_id})")
annotation_model = AnnotationModel(image_id=image_model.id)
annotation_model.category_id = category_model_id
annotation_model.color = annotation.get('color')
annotation_model.metadata = annotation.get('metadata', {})
if has_segmentation:
if len(segmentation) < 1 or len(segmentation[0]) < 1: ## we have an empty segment with a bbox
task.info(f"Creating segment from bbox {bbox}")
segmentation = [bbox2seg(bbox)]
isbbox = True
annotation_model.segmentation = segmentation
annotation_model.area = area
annotation_model.bbox = bbox
if has_keypoints:
annotation_model.keypoints = keypoints
annotation_model.isbbox = isbbox
annotation_model.save()
image_categories.append(category_id)
else:
annotation_model.update(deleted=False, isbbox=isbbox)
task.info(
f"Annotation already exists (i:{image_id}, c:{category_id})")
for image_id in images_id:
image_model = images_id[image_id]
category_ids = categories_by_image[image_id]
all_category_ids = list(image_model.category_ids)
all_category_ids += category_ids
num_annotations = AnnotationModel.objects(
Q(image_id=image_id) & Q(deleted=False) &
(Q(area__gt=0) | Q(keypoints__size__gt=0))
).count()
image_model.update(
set__annotated=True,
set__category_ids=list(set(all_category_ids)),
set__num_annotations=num_annotations
)
task.set_progress(100, socket=socket)
__all__ = ["export_annotations", "import_annotations"]
|
from database import (
fix_ids,
ImageModel,
CategoryModel,
AnnotationModel,
DatasetModel,
TaskModel,
ExportModel
)
# import pycocotools.mask as mask
import numpy as np
import time
import json
import os
from celery import shared_task
from ..socket import create_socket
from mongoengine import Q
from config import Config
from pathlib import PurePath
def bbox2seg(bbox):
return [bbox[0],bbox[1],bbox[0]+bbox[2],bbox[1],bbox[0]+bbox[2],bbox[1]+bbox[3],bbox[0],bbox[1]+bbox[3]]
@shared_task
def export_annotations(task_id, dataset_id, categories):
task = TaskModel.objects.get(id=task_id)
dataset = DatasetModel.objects.get(id=dataset_id)
task.update(status="PROGRESS")
socket = create_socket()
task.info("Beginning Export (COCO Format)")
db_categories = CategoryModel.objects(id__in=categories, deleted=False) \
.only(*CategoryModel.COCO_PROPERTIES)
db_images = ImageModel.objects(
deleted=False, dataset_id=dataset.id).only(
*ImageModel.COCO_PROPERTIES)
db_annotations = AnnotationModel.objects(
deleted=False, category_id__in=categories)
total_items = db_categories.count()
coco = {
'images': [],
'categories': [],
'annotations': []
}
total_items += db_images.count()
progress = 0
    # iterate through all categories and upsert
category_names = []
for category in fix_ids(db_categories):
if len(category.get('keypoint_labels', [])) > 0:
category['keypoints'] = category.pop('keypoint_labels', [])
category['skeleton'] = category.pop('keypoint_edges', [])
else:
if 'keypoint_edges' in category:
del category['keypoint_edges']
if 'keypoint_labels' in category:
del category['keypoint_labels']
task.info(f"Adding category: {category.get('name')}")
coco.get('categories').append(category)
category_names.append(category.get('name'))
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
total_annotations = db_annotations.count()
total_images = db_images.count()
for image in db_images:
image = fix_ids(image)
if Config.EXPORT_RELPATH and 'relpath' in image:
image['file_name'] = image['relpath']
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
annotations = db_annotations.filter(image_id=image.get('id'))\
.only(*AnnotationModel.COCO_PROPERTIES)
annotations = fix_ids(annotations)
if len(annotations) == 0:
continue
num_annotations = 0
for annotation in annotations:
has_keypoints = len(annotation.get('keypoints', [])) > 0
has_segmentation = len(annotation.get('segmentation', [])) > 0
if has_keypoints or has_segmentation:
if not has_keypoints:
if 'keypoints' in annotation:
del annotation['keypoints']
else:
arr = np.array(annotation.get('keypoints', []))
arr = arr[2::3]
annotation['num_keypoints'] = len(arr[arr > 0])
num_annotations += 1
coco.get('annotations').append(annotation)
'''
if num_annotations > 0:
image["num_annotations"]=num_annotations
image["annotated"]=True
'''
task.info(
f"Exporting {num_annotations} annotations for image {image.get('id')}")
coco.get('images').append(image)
    task.info(
        f"Done exporting {total_annotations} annotations and {total_images} images from {dataset.name}")
timestamp = time.time()
directory = f"{dataset.directory}.exports/"
file_path = f"{directory}coco-{timestamp}.json"
if not os.path.exists(directory):
os.makedirs(directory)
task.info(f"Writing export to file {file_path}")
with open(file_path, 'w') as fp:
json.dump(coco, fp)
task.info("Creating export object")
export = ExportModel(dataset_id=dataset.id, path=file_path, tags=[
"COCO", *category_names])
export.save()
task.set_progress(100, socket=socket)
@shared_task
def import_annotations(task_id, dataset_id, coco_json):
task = TaskModel.objects.get(id=task_id)
dataset = DatasetModel.objects.get(id=dataset_id)
# UR added relpath
directory = os.path.join(Config.DATASET_DIRECTORY, dataset.name)
task.update(status="PROGRESS")
socket = create_socket()
task.info("Beginning Import")
images = ImageModel.objects(dataset_id=dataset.id)
categories = CategoryModel.objects
coco_images = coco_json.get('images', [])
coco_annotations = coco_json.get('annotations', [])
coco_categories = coco_json.get('categories', [])
task.info(f"Importing {len(coco_categories)} categories, "
f"{len(coco_images)} images, and "
f"{len(coco_annotations)} annotations")
total_items = sum([
len(coco_categories),
len(coco_annotations),
len(coco_images)
])
progress = 0
task.info("===== Importing Categories =====")
# category id mapping ( file : database )
categories_id = {}
# Create any missing categories
for category in coco_categories:
category_name = category.get('name')
category_id = category.get('id')
category_model = categories.filter(name__iexact=category_name).first()
if category_model is None:
task.warning(
f"{category_name} category not found (creating a new one)")
new_category = CategoryModel(
name=category_name,
keypoint_edges=category.get('skeleton', []),
keypoint_labels=category.get('keypoints', [])
)
new_category.save()
category_model = new_category
dataset.categories.append(new_category.id)
task.info(f"{category_name} category found")
# map category ids
categories_id[category_id] = category_model.id
# update progress
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
dataset.update(set__categories=dataset.categories)
task.info("===== Loading Images =====")
# image id mapping ( file: database )
images_id = {}
categories_by_image = {}
# Find all images
for image in coco_images:
image_id = image.get('id')
image_filename = image.get('file_name')
# update progress
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
# UR added relpath
image_model = images.filter(relpath=image_filename).all()
if len(image_model) == 0:
task.warning(f"Could not find image {image_filename}")
continue
if len(image_model) > 1:
task.error(
f"Too many images found with the same file name: {image_filename}")
continue
task.info(f"Image {image_filename} found")
image_model = image_model[0]
images_id[image_id] = image_model
categories_by_image[image_id] = list()
task.info("===== Import Annotations =====")
for annotation in coco_annotations:
image_id = annotation.get('image_id')
category_id = annotation.get('category_id')
segmentation = annotation.get('segmentation', [])
keypoints = annotation.get('keypoints', [])
# is_crowd = annotation.get('iscrowed', False)
area = annotation.get('area', 0)
bbox = annotation.get('bbox', [0, 0, 0, 0])
isbbox = annotation.get('isbbox', False)
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
has_segmentation = (len(segmentation) > 0 or isbbox) and sum(bbox) > 1
has_keypoints = len(keypoints) > 0
if not has_segmentation and not has_keypoints:
task.warning(
f"Annotation {annotation.get('id')} has no segmentation, bbox or keypoints")
continue
try:
image_model = images_id[image_id]
category_model_id = categories_id[category_id]
image_categories = categories_by_image[image_id]
except KeyError:
            task.warning(
                f"Could not find image associated with annotation {annotation.get('id')}")
continue
annotation_model = AnnotationModel.objects(
image_id=image_model.id,
category_id=category_model_id,
segmentation=segmentation,
keypoints=keypoints,
            bbox=bbox
).first()
if annotation_model is None:
task.info(f"Creating annotation data ({image_id}, {category_id})")
annotation_model = AnnotationModel(image_id=image_model.id)
annotation_model.category_id = category_model_id
annotation_model.color = annotation.get('color')
annotation_model.metadata = annotation.get('metadata', {})
if has_segmentation:
if len(segmentation) < 1 or len(segmentation[0]) < 1: ## we have an empty segment with a bbox
task.info(f"Creating segment from bbox {bbox}")
segmentation = [bbox2seg(bbox)]
isbbox = True
annotation_model.segmentation = segmentation
annotation_model.area = area
annotation_model.bbox = bbox
if has_keypoints:
annotation_model.keypoints = keypoints
annotation_model.isbbox = isbbox
annotation_model.save()
image_categories.append(category_id)
else:
annotation_model.update(deleted=False, isbbox=isbbox)
task.info(
f"Annotation already exists (i:{image_id}, c:{category_id})")
for image_id in images_id:
image_model = images_id[image_id]
category_ids = categories_by_image[image_id]
all_category_ids = list(image_model.category_ids)
all_category_ids += category_ids
num_annotations = AnnotationModel.objects(
Q(image_id=image_id) & Q(deleted=False) &
(Q(area__gt=0) | Q(keypoints__size__gt=0))
).count()
image_model.update(
set__annotated=True,
set__category_ids=list(set(all_category_ids)),
set__num_annotations=num_annotations
)
task.set_progress(100, socket=socket)
__all__ = ["export_annotations", "import_annotations"]
|
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import logging
import re
import subprocess
import tempfile
from pathlib import Path
from typing import Tuple, Optional, Union, List
import git
import yaml
from ogr.parsing import RepoUrl, parse_git_repo
from packit.exceptions import PackitException
logger = logging.getLogger(__name__)
class RepositoryCache:
"""
    Cache for git repositories based on the reference option of `git clone`.
* The cache is located in the specified directory
      and contains a separate git repository for each project.
* Project name is used to match the git project in the cache.
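    Usage sketch (hypothetical cache directory and URL):
        cache = RepositoryCache(cache_path="/tmp/repo-cache", add_new=True)
        repo = cache.get_repo("https://github.com/packit/packit.git", directory="/tmp/packit")
    The first call also populates /tmp/repo-cache/packit, which later clones pass to git
    via the reference option.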
"""
def __init__(self, cache_path: Union[str, Path], add_new=False) -> None:
self.cache_path = (
Path(cache_path) if isinstance(cache_path, str) else cache_path
)
self.add_new = add_new
logger.debug(
f"Instantiation of the repository cache at {self.cache_path}. "
f"New projects will {"not " if not self.add_new else ""}be added."
)
@property
def cached_projects(self) -> List[str]:
"""Project names we have in the cache."""
if not self.cache_path.is_dir():
self.cache_path.mkdir(parents=True)
return [f.name for f in self.cache_path.iterdir() if f.is_dir()]
def _clone(self, **kwargs) -> git.Repo:
"""Wrapper around git function so we are able to check the call in tests more easily."""
return git.repo.Repo.clone_from(**kwargs)
def get_repo(
self,
url: str,
directory: Union[Path, str] = None,
) -> git.Repo:
"""
Clone the repository.
* If we have this repository in a cache, use the cached repo as a reference when cloning.
* If we don't have this repository in a cache and {add_new} is True,
clone the repository to cache first and then use it as a reference.
:param url: will be used to clone the repo
:param directory: target path for cloning the repository
:return: cloned repository
"""
directory = str(directory) if directory else tempfile.mkdtemp()
if is_git_repo(directory=directory):
logger.debug(f"Repo already exists in {directory}.")
return git.repo.Repo(directory)
logger.debug(
f"Cloning repo {url} -> {directory} using repository cache at {self.cache_path}"
)
cached_projects = self.cached_projects
cached_projects_str = "\n".join(f"- {project}" for project in cached_projects)
logger.debug(
f"Repositories in the cache ({len(cached_projects)} project(s)):\n{cached_projects_str}"
)
project_name = RepoUrl.parse(url).repo
reference_repo = self.cache_path.joinpath(project_name)
if project_name not in cached_projects and self.add_new:
logger.debug(f"Creating reference repo: {reference_repo}")
self._clone(url=url, to_path=str(reference_repo), tags=True)
if self.add_new or project_name in cached_projects:
logger.debug(f"Using reference repo: {reference_repo}")
return self._clone(
url=url, to_path=directory, tags=True, reference=str(reference_repo)
)
return self._clone(url=url, to_path=directory, tags=True)
def is_git_repo(directory: Union[Path, str]) -> bool:
"""
Test, if the directory is a git repo.
(Has .git subdirectory?)
"""
return Path(directory, ".git").is_dir()
def get_repo(url: str, directory: Union[Path, str] = None) -> git.Repo:
"""
Use directory as a git repo or clone repo to the tempdir.
"""
directory = str(directory) if directory else tempfile.mkdtemp()
if is_git_repo(directory=directory):
logger.debug(f"Repo already exists in {directory}.")
repo = git.repo.Repo(directory)
else:
logger.debug(f"Cloning repo {url} -> {directory}")
repo = git.repo.Repo.clone_from(url=url, to_path=directory, tags=True)
return repo
def get_namespace_and_repo_name(url: str) -> Tuple[Optional[str], str]:
parsed_git_repo = parse_git_repo(url)
if parsed_git_repo is None or not parsed_git_repo.repo:
raise PackitException(
f"Invalid URL format, can't obtain namespace and repository name: {url}"
)
return parsed_git_repo.namespace, parsed_git_repo.repo
def is_a_git_ref(repo: git.Repo, ref: str) -> bool:
try:
commit = repo.commit(ref)
return bool(commit)
except git.BadName:
return False
def get_default_branch(repository: git.Repo) -> str:
"""
Returns default branch for newly created repos in the parent directory of
passed in repository. Accepts `repository` to ensure the closest override of
git configuration is used.
Args:
repository (git.Repo): Git repository closest to the directory where
the configuration is applied.
Returns:
Default branch for new repos, if not supported or not configured returns
`master`.
"""
config = repository.config_reader()
return config.get_value("init", "defaultBranch", "master")
def git_remote_url_to_https_url(inp: str) -> str:
"""
    Turn the provided git remote URL into an https URL;
    returns an empty string if the input can't be processed.
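    e.g. "git@github.com:packit/ogr.git" -> "https://github.com/packit/ogr.git"
    (a sketch assuming ogr's parse_git_repo resolves the SSH form to hostname/namespace/repo)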
"""
logger.debug(f"Parsing git remote URL {inp!r} and converting it to https-like URL.")
parsed_repo = parse_git_repo(inp)
if not parsed_repo or not parsed_repo.hostname:
        logger.debug(f"{inp!r} is not a URL we recognize.")
return ""
if inp.startswith(("http", "https")):
logger.debug(f"Provided input {inp!r} is an url.")
return inp
optional_suffix = ".git" if inp.endswith(".git") else ""
url_str = "https://{}/{}/{}{}".format(
parsed_repo.hostname, parsed_repo.namespace, parsed_repo.repo, optional_suffix
)
logger.debug(f"URL {inp!r} turned into HTTPS {url_str!r}")
return url_str
def get_current_version_command(
glob_pattern: str, refs: Optional[str] = "tags"
) -> List[str]:
"""
    Returns a command that finds the latest git reference matching the given pattern.
:param glob_pattern: pattern that is used to find latest ref
:param refs: specifies what kind of ref is used; \
default is `"tags"` that searches through all tags (including non-annotated), \
pass `None` to search only annotated tags or `"all"` to search through \
all refs (including branches and remote refs)
:return: command to find latest ref
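    e.g. (with the default refs="tags"):
        get_current_version_command("v*") -> ["git", "describe", "--abbrev=0", "--tags", "--match", "v*"]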
"""
return [
"git",
"describe",
"--abbrev=0",
f"--{refs}" if refs else "",
"--match",
glob_pattern,
]
def create_new_repo(cwd: Path, switches: List[str]):
subprocess.check_call(["git", "init"] + switches + [str(cwd)])
# TODO: Replace with -b / --initial-branch in `git init` when possible
if "--bare" not in switches:
subprocess.check_call(["git", "checkout", "-b", "main"], cwd=cwd)
else:
subprocess.check_call(
["git", "symbolic-ref", "HEAD", "refs/heads/main"], cwd=cwd
)
def git_patch_ish(patch: str) -> str:
"""
Massage patch to look like a Git-style patch, so that it can
be passed to 'git patch-id' in order to calculate a patch-id.
:param patch: Patch to transform.
:return: Transformed patch.
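    Example of the rewrites (hypothetical patch lines, <TAB> marks a literal tab):
        "diff -up a/foo.c b/foo.c"                    -> "diff --git a/foo.c b/foo.c"
        "--- a/foo.c<TAB>2020-01-01 10:00:00.000000"  -> "--- a/foo.c"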
"""
    # Pretend the format is 'diff --git'
pattern = re.compile(r"^diff -\w+ ", flags=re.MULTILINE)
repl = r"diff --git "
patch = re.sub(pattern, repl, patch)
# Remove timestamps from comparison lines
pattern = re.compile(r"^((---|\+\+\+) .+)\t\d{4}.+$", flags=re.MULTILINE)
repl = r"\1"
patch = re.sub(pattern, repl, patch)
# Add missing 'diff --git' lines
if "diff --git " not in patch:
# Timestamps (see above) already need to be removed
# for this substitution pattern to work.
pattern = re.compile(r"(\n--- (.+)\n\+\+\+ (.+)\n)")
repl = r"\ndiff --git \2 \3\1"
patch = re.sub(pattern, repl, patch)
return patch
def get_message_from_metadata(metadata: dict, header: Optional[str] = None) -> str:
if not isinstance(metadata, dict):
raise PackitException(
f"We can save only dictionaries to metadata. Not {metadata}"
)
content = (
yaml.dump(metadata, indent=4, default_flow_style=False) if metadata else ""
)
if not header:
return content
return f"{header}\n\n{content}"
def get_metadata_from_message(commit: git.Commit) -> Optional[dict]:
"""
Tries to load yaml format from the git message.
    We keep dropping lines from the top until
    the rest of the content can be yaml-loaded into a dictionary (yaml object type).
    If nothing is found, we return None.
Reference:
https://gitpython.readthedocs.io/en/stable/reference.html
?highlight=archive#module-git.objects.commit
e.g.:
I)
key: value
another: value
-> {"key": "value", "another": "value"}
II)
    One sentence.
key: value
another: value
-> {"key": "value", "another": "value"}
III)
A lot of
text
before keys.
key: value
another: value
-> {"key": "value", "another": "value"}
IV)
Other values are supported as well:
key:
- first
- second
- third
:param commit: git.Commit object
:return: dict loaded from message if it satisfies the rules above
"""
splitted_message = commit.message.split("\n")
for i in range(len(splitted_message)):
message_part = "\n".join(splitted_message[i:])
try:
loaded_part = yaml.safe_load(message_part)
except yaml.YAMLError:
continue
if isinstance(loaded_part, dict):
return loaded_part
return None
| # Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import logging
import re
import subprocess
import tempfile
from pathlib import Path
from typing import Tuple, Optional, Union, List
import git
import yaml
from ogr.parsing import RepoUrl, parse_git_repo
from packit.exceptions import PackitException
logger = logging.getLogger(__name__)
class RepositoryCache:
"""
    Cache for git repositories based on the reference option of `git clone`.
* The cache is located in the specified directory
      and contains a separate git repository for each project.
* Project name is used to match the git project in the cache.
"""
def __init__(self, cache_path: Union[str, Path], add_new=False) -> None:
self.cache_path = (
Path(cache_path) if isinstance(cache_path, str) else cache_path
)
self.add_new = add_new
logger.debug(
f"Instantiation of the repository cache at {self.cache_path}. "
f"New projects will {'not ' if not self.add_new else ''}be added."
)
@property
def cached_projects(self) -> List[str]:
"""Project names we have in the cache."""
if not self.cache_path.is_dir():
self.cache_path.mkdir(parents=True)
return [f.name for f in self.cache_path.iterdir() if f.is_dir()]
def _clone(self, **kwargs) -> git.Repo:
"""Wrapper around git function so we are able to check the call in tests more easily."""
return git.repo.Repo.clone_from(**kwargs)
def get_repo(
self,
url: str,
directory: Union[Path, str] = None,
) -> git.Repo:
"""
Clone the repository.
* If we have this repository in a cache, use the cached repo as a reference when cloning.
* If we don't have this repository in a cache and {add_new} is True,
clone the repository to cache first and then use it as a reference.
:param url: will be used to clone the repo
:param directory: target path for cloning the repository
:return: cloned repository
"""
directory = str(directory) if directory else tempfile.mkdtemp()
if is_git_repo(directory=directory):
logger.debug(f"Repo already exists in {directory}.")
return git.repo.Repo(directory)
logger.debug(
f"Cloning repo {url} -> {directory} using repository cache at {self.cache_path}"
)
cached_projects = self.cached_projects
cached_projects_str = "\n".join(f"- {project}" for project in cached_projects)
logger.debug(
f"Repositories in the cache ({len(cached_projects)} project(s)):\n{cached_projects_str}"
)
project_name = RepoUrl.parse(url).repo
reference_repo = self.cache_path.joinpath(project_name)
if project_name not in cached_projects and self.add_new:
logger.debug(f"Creating reference repo: {reference_repo}")
self._clone(url=url, to_path=str(reference_repo), tags=True)
if self.add_new or project_name in cached_projects:
logger.debug(f"Using reference repo: {reference_repo}")
return self._clone(
url=url, to_path=directory, tags=True, reference=str(reference_repo)
)
return self._clone(url=url, to_path=directory, tags=True)
def is_git_repo(directory: Union[Path, str]) -> bool:
"""
Test, if the directory is a git repo.
(Has .git subdirectory?)
"""
return Path(directory, ".git").is_dir()
def get_repo(url: str, directory: Union[Path, str] = None) -> git.Repo:
"""
Use directory as a git repo or clone repo to the tempdir.
"""
directory = str(directory) if directory else tempfile.mkdtemp()
if is_git_repo(directory=directory):
logger.debug(f"Repo already exists in {directory}.")
repo = git.repo.Repo(directory)
else:
logger.debug(f"Cloning repo {url} -> {directory}")
repo = git.repo.Repo.clone_from(url=url, to_path=directory, tags=True)
return repo
def get_namespace_and_repo_name(url: str) -> Tuple[Optional[str], str]:
parsed_git_repo = parse_git_repo(url)
if parsed_git_repo is None or not parsed_git_repo.repo:
raise PackitException(
f"Invalid URL format, can't obtain namespace and repository name: {url}"
)
return parsed_git_repo.namespace, parsed_git_repo.repo
def is_a_git_ref(repo: git.Repo, ref: str) -> bool:
try:
commit = repo.commit(ref)
return bool(commit)
except git.BadName:
return False
def get_default_branch(repository: git.Repo) -> str:
"""
Returns default branch for newly created repos in the parent directory of
passed in repository. Accepts `repository` to ensure the closest override of
git configuration is used.
Args:
repository (git.Repo): Git repository closest to the directory where
the configuration is applied.
Returns:
Default branch for new repos, if not supported or not configured returns
`master`.
"""
config = repository.config_reader()
return config.get_value("init", "defaultBranch", "master")
def git_remote_url_to_https_url(inp: str) -> str:
"""
    Turn the provided git remote URL into an https URL;
    returns an empty string if the input can't be processed.
"""
logger.debug(f"Parsing git remote URL {inp!r} and converting it to https-like URL.")
parsed_repo = parse_git_repo(inp)
if not parsed_repo or not parsed_repo.hostname:
        logger.debug(f"{inp!r} is not a URL we recognize.")
return ""
if inp.startswith(("http", "https")):
logger.debug(f"Provided input {inp!r} is an url.")
return inp
optional_suffix = ".git" if inp.endswith(".git") else ""
url_str = "https://{}/{}/{}{}".format(
parsed_repo.hostname, parsed_repo.namespace, parsed_repo.repo, optional_suffix
)
logger.debug(f"URL {inp!r} turned into HTTPS {url_str!r}")
return url_str
def get_current_version_command(
glob_pattern: str, refs: Optional[str] = "tags"
) -> List[str]:
"""
    Returns a command that finds the latest git reference matching the given pattern.
:param glob_pattern: pattern that is used to find latest ref
:param refs: specifies what kind of ref is used; \
default is `"tags"` that searches through all tags (including non-annotated), \
pass `None` to search only annotated tags or `"all"` to search through \
all refs (including branches and remote refs)
:return: command to find latest ref
"""
return [
"git",
"describe",
"--abbrev=0",
f"--{refs}" if refs else "",
"--match",
glob_pattern,
]
def create_new_repo(cwd: Path, switches: List[str]):
subprocess.check_call(["git", "init"] + switches + [str(cwd)])
# TODO: Replace with -b / --initial-branch in `git init` when possible
if "--bare" not in switches:
subprocess.check_call(["git", "checkout", "-b", "main"], cwd=cwd)
else:
subprocess.check_call(
["git", "symbolic-ref", "HEAD", "refs/heads/main"], cwd=cwd
)
def git_patch_ish(patch: str) -> str:
"""
Massage patch to look like a Git-style patch, so that it can
be passed to 'git patch-id' in order to calculate a patch-id.
:param patch: Patch to transform.
:return: Transformed patch.
"""
    # Pretend the format is 'diff --git'
pattern = re.compile(r"^diff -\w+ ", flags=re.MULTILINE)
repl = r"diff --git "
patch = re.sub(pattern, repl, patch)
# Remove timestamps from comparison lines
pattern = re.compile(r"^((---|\+\+\+) .+)\t\d{4}.+$", flags=re.MULTILINE)
repl = r"\1"
patch = re.sub(pattern, repl, patch)
# Add missing 'diff --git' lines
if "diff --git " not in patch:
# Timestamps (see above) already need to be removed
# for this substitution pattern to work.
pattern = re.compile(r"(\n--- (.+)\n\+\+\+ (.+)\n)")
repl = r"\ndiff --git \2 \3\1"
patch = re.sub(pattern, repl, patch)
return patch
def get_message_from_metadata(metadata: dict, header: Optional[str] = None) -> str:
if not isinstance(metadata, dict):
raise PackitException(
f"We can save only dictionaries to metadata. Not {metadata}"
)
content = (
yaml.dump(metadata, indent=4, default_flow_style=False) if metadata else ""
)
if not header:
return content
return f"{header}\n\n{content}"
def get_metadata_from_message(commit: git.Commit) -> Optional[dict]:
"""
Tries to load yaml format from the git message.
    We keep dropping lines from the top until
    the rest of the content can be yaml-loaded into a dictionary (yaml object type).
    If nothing is found, we return None.
Reference:
https://gitpython.readthedocs.io/en/stable/reference.html
?highlight=archive#module-git.objects.commit
e.g.:
I)
key: value
another: value
-> {"key": "value", "another": "value"}
II)
    One sentence.
key: value
another: value
-> {"key": "value", "another": "value"}
III)
A lot of
text
before keys.
key: value
another: value
-> {"key": "value", "another": "value"}
IV)
Other values are supported as well:
key:
- first
- second
- third
:param commit: git.Commit object
:return: dict loaded from message if it satisfies the rules above
"""
splitted_message = commit.message.split("\n")
for i in range(len(splitted_message)):
message_part = "\n".join(splitted_message[i:])
try:
loaded_part = yaml.safe_load(message_part)
except yaml.YAMLError:
continue
if isinstance(loaded_part, dict):
return loaded_part
return None
|