Commit 913644d

quick fixes

1 parent ea51a08 commit 913644d

6 files changed: +230 -73 lines changed

app.py (+9 -7)

@@ -37,9 +37,9 @@
 app.register_blueprint(SWAGGERUI_BLUEPRINT, url_prefix=SWAGGER_URL)


 '''Logging Configuration'''
-logging.basicConfig(filename="logfile.log", level=logging.INFO)
-logging.info("APP: APP started at " + str(datetime.datetime.now()))
-print("logging configured")
+# logging.basicConfig(filename="logfile.log", level=logging.INFO)
+# logging.info("APP: APP started at " + str(datetime.datetime.now()))
+# print("logging configured")


 '''API Endpoints'''
@@ -201,23 +201,25 @@ def debiasing_bam():


 # Debiasing of bias specifications using GBDD and BAM, returning values
-@app.route('/REST/debiasing/full/gbddxbam', methods=['POST'])
+@app.route('/REST/debiasing/gbddxbam', methods=['POST'])
 def debiasing_gbdd_bam():
     logging.info("APP: " + str(datetime.datetime.now()) + " GBDDxBAM Debiasing started")
     content = request.get_json()
     bar = request.args.to_dict()
-    response, status_code = debiasing_controller.debiasing('gbddXbam', content, bar)
+    response, status_code = debiasing_controller.debiasing('gbddxbam', content, bar)

     return response, status_code


 # Debiasing of bias specifications using BAM and GBDD, returning values in full size
-@app.route('/REST/debiasing/full/bamxgbdd', methods=['POST'])
+@app.route('/REST/debiasing/bamxgbdd', methods=['POST'])
 def debiasing_bam_gbdd():
     logging.info("APP: " + str(datetime.datetime.now()) + " BAMxGBDD Debiasing started")
     content = request.get_json()
     bar = request.args.to_dict()
-    response, status_code = debiasing_controller.debiasing('bamXgbdd', content, bar)
+    # print(content)
+    # print(bar)
+    response, status_code = debiasing_controller.debiasing('bamxgbdd', content, bar)

     return response, status_code
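Both debiasing routes lose their `full/` path segment, and the method identifiers passed to the controller are normalized to lowercase. A minimal client sketch against the renamed gbddxbam endpoint; the host and port, the query flags, and the example word lists are assumptions for illustration, not part of the commit:

import requests

# Assumed bias specification: T1/T2/A1/A2 hold space-separated word lists,
# matching how json_to_debias_spec splits the incoming JSON fields.
spec = {
    "T1": "science technology physics",
    "T2": "poetry art dance",
    "A1": "man male he",
    "A2": "woman female she",
}

# Old clients calling /REST/debiasing/full/gbddxbam must drop the 'full/' segment.
resp = requests.post(
    "http://localhost:5000/REST/debiasing/gbddxbam",  # assumed local Flask default port
    json=spec,
    params={"pca": "false", "lex": "false"},  # forwarded to the controller via request.args.to_dict()
)
print(resp.status_code)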

calculation.py (+5 -1)

@@ -174,7 +174,7 @@ def vocab_to_dict(vocab, vecs, lists):
     return res


-def vocabs_to_dicts(vocab, vecs, t1_list, t2_list, a1_list, a2_list, lex=None):
+def vocabs_to_dicts(vocab, vecs, t1_list, t2_list, a1_list, a2_list, aug1_list=None, aug2_list=None, lex=None):
     t1 = vocab_to_dict(vocab, vecs, t1_list)
     t2 = vocab_to_dict(vocab, vecs, t2_list)
     a1 = vocab_to_dict(vocab, vecs, a1_list)
@@ -188,6 +188,10 @@ def vocabs_to_dicts(vocab, vecs, t1_list, t2_list, a1_list, a2_list, lex=None):
         # print('Vocab to dicts --- WORDSIM')
         lex_dict = vocab_to_dict(vocab, vecs, wordsim_vocab)
         return t1, t2, a1, a2, lex_dict
+    if aug1_list is not None and aug2_list is not None:
+        aug1 = vocab_to_dict(vocab, vecs, aug1_list)
+        aug2 = vocab_to_dict(vocab, vecs, aug2_list)
+        return t1, t2, a1, a2, aug1, aug2
     return t1, t2, a1, a2
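Because the two new parameters default to None, existing four-value callers keep working; only when both augmentation lists are supplied does the function return six dictionaries. A toy sketch of both call shapes, assuming vocab_to_dict maps each listed word to its vector (the data here is invented):

# Invented toy data; in the service these come from the loaded embedding space.
vocab = ["science", "art", "man", "woman"]
vecs = [[0.3], [0.4], [0.1], [0.2]]

# Existing call sites without augmentations are unchanged:
t1, t2, a1, a2 = vocabs_to_dicts(vocab, vecs, ["science"], ["art"], ["man"], ["woman"])

# New call sites pass both augmentation lists and unpack six dictionaries:
t1, t2, a1, a2, aug1, aug2 = vocabs_to_dicts(
    vocab, vecs, ["science"], ["art"], ["man"], ["woman"],
    aug1_list=["man"], aug2_list=["woman"])

Note that the lex branches still return before the augmentation check, so a call combining lex with augmentation lists gets the five-value lex shape rather than the augmentation dictionaries.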

debiasing/debiasing_controller.py (+49 -45)

@@ -57,80 +57,84 @@ def debiasing(methods, content, bar):
     else:
         vocab, vecs = calculation.create_vocab_and_vecs(t1, t2, a1, a2, aug1, aug2)

-    t1_deb, t2_deb, a1_deb, a2_deb, new_vecs = [], [], [], [], []
+    t1_deb, t2_deb, a1_deb, a2_deb, aug1_deb, aug2_deb, new_vecs = [], [], [], [], [], [], []
     logging.info("Debiasing-Engine: Specs loaded, starting computing")
     if methods == 'bam':
-        new_vecs, t1_deb, t2_deb, a1_deb, a2_deb = debiasing_bam(equality_sets, vocab, vecs, t1_list, t2_list, a1_list,
-                                                                 a2_list)
+        new_vecs, t1_deb, t2_deb, a1_deb, a2_deb, aug1_deb, aug2_deb = debiasing_bam(equality_sets, vocab, vecs, t1_list, t2_list, a1_list,
+                                                                                     a2_list, aug1_list, aug2_list)
     if methods == 'gbdd':
-        new_vecs, t1_deb, t2_deb, a1_deb, a2_deb = debiasing_gbdd(equality_sets, vocab, vecs, t1_list,
-                                                                  t2_list, a1_list, a2_list)
-    if methods == 'bamXgbdd':
-        new_vecs, t1_deb, t2_deb, a1_deb, a2_deb = debiasing_bam_gbdd(equality_sets, vocab, vecs, t1_list, t2_list,
-                                                                      a1_list, a2_list)
-    if methods == 'gbddXbam':
-        new_vecs, t1_deb, t2_deb, a1_deb, a2_deb = debiasing_gbdd_bam(equality_sets, vocab, vecs, t1_list, t2_list,
-                                                                      a1_list, a2_list)
+        new_vecs, t1_deb, t2_deb, a1_deb, a2_deb, aug1_deb, aug2_deb = debiasing_gbdd(equality_sets, vocab, vecs, t1_list,
+                                                                                      t2_list, a1_list, a2_list, aug1_list, aug2_list)
+    if methods == 'bamxgbdd':
+        new_vecs, t1_deb, t2_deb, a1_deb, a2_deb, aug1_deb, aug2_deb = debiasing_bam_gbdd(equality_sets, vocab, vecs, t1_list, t2_list,
+                                                                                          a1_list, a2_list, aug1_list, aug2_list)
+    if methods == 'gbddxbam':
+        new_vecs, t1_deb, t2_deb, a1_deb, a2_deb, aug1_deb, aug2_deb = debiasing_gbdd_bam(equality_sets, vocab, vecs, t1_list, t2_list,
+                                                                                          a1_list, a2_list, aug1_list, aug2_list)

     if pca == 'true':
         biased_space = calculation.principal_componant_analysis2(vecs)
         debiased_space = calculation.principal_componant_analysis2(new_vecs)
-        t1_pca_bias, t2_pca_bias, a1_pca_bias, a2_pca_bias = calculation.vocabs_to_dicts(vocab, biased_space, t1_list,
-                                                                                         t2_list,
-                                                                                         a1_list, a2_list)
-        t1_pca_deb, t2_pca_deb, a1_pca_deb, a2_pca_deb = calculation.vocabs_to_dicts(vocab, debiased_space, t1_list,
-                                                                                     t2_list,
-                                                                                     a1_list, a2_list)
+        t1_pca_bias, t2_pca_bias, a1_pca_bias, a2_pca_bias, aug1_pca_bias, aug2_pca_bias = calculation.vocabs_to_dicts(vocab, biased_space, t1_list,
+                                                                                                                       t2_list,
+                                                                                                                       a1_list, a2_list, aug1_list, aug2_list)
+        t1_pca_deb, t2_pca_deb, a1_pca_deb, a2_pca_deb, aug1_pca_deb, aug2_pca_deb = calculation.vocabs_to_dicts(vocab, debiased_space, t1_list,
+                                                                                                                 t2_list,
+                                                                                                                 a1_list, a2_list, aug1_list, aug2_list)
         if lex == 'false':
-            response = json_controller.debiasing_json(space, lower, methods, pca, aug1_list, aug2_list, t1, t2, a1, a2,
-                                                      t1_deb, t2_deb, a1_deb, a2_deb, not_found, deleted,
-                                                      t1_pca_bias, t2_pca_bias, a1_pca_bias, a2_pca_bias,
-                                                      t1_pca_deb, t2_pca_deb, a1_pca_deb, a2_pca_deb)
+            response = json_controller.debiasing_json(space, lower, methods, pca, aug1_list, aug2_list,
+                                                      t1, t2, a1, a2, aug1, aug2,
+                                                      t1_deb, t2_deb, a1_deb, a2_deb, aug1_deb, aug2_deb,
+                                                      not_found, deleted,
+                                                      t1_pca_bias, t2_pca_bias, a1_pca_bias, a2_pca_bias, aug1_pca_bias, aug2_pca_bias,
+                                                      t1_pca_deb, t2_pca_deb, a1_pca_deb, a2_pca_deb, aug1_pca_deb, aug2_pca_deb)
         else:
             if lex == 'simlex':
                 lex_dict = calculation.vocab_to_dict(vocab, new_vecs, calculation.simlex_vocab)
             if lex == 'wordsim':
                 lex_dict = calculation.vocab_to_dict(vocab, new_vecs, calculation.wordsim_vocab)
-            response = json_controller.debiasing_json(space, lower, methods, pca, aug1_list, aug2_list, t1, t2, a1, a2,
-                                                      t1_deb, t2_deb, a1_deb, a2_deb, not_found, deleted,
-                                                      t1_pca_bias, t2_pca_bias, a1_pca_bias, a2_pca_bias,
-                                                      t1_pca_deb, t2_pca_deb, a1_pca_deb, a2_pca_deb, lex_dict)
+            response = json_controller.debiasing_json(space, lower, methods, pca, aug1_list, aug2_list,
+                                                      t1, t2, a1, a2, aug1, aug2,
+                                                      t1_deb, t2_deb, a1_deb, a2_deb, aug1_deb, aug2_deb,
+                                                      not_found, deleted,
+                                                      t1_pca_bias, t2_pca_bias, a1_pca_bias, a2_pca_bias, aug1_pca_bias, aug2_pca_bias,
+                                                      t1_pca_deb, t2_pca_deb, a1_pca_deb, a2_pca_deb, aug1_pca_deb, aug2_pca_deb, lex_dict=lex_dict)
     else:
         if lex == 'false':
-            response = json_controller.debiasing_json(space, lower, methods, pca, aug1_list, aug2_list, t1, t2, a1, a2,
-                                                      t1_deb, t2_deb, a1_deb, a2_deb, not_found, deleted)
+            response = json_controller.debiasing_json(space, lower, methods, pca, aug1_list, aug2_list, t1, t2, a1, a2, aug1, aug2,
+                                                      t1_deb, t2_deb, a1_deb, a2_deb, aug1_deb, aug2_deb, not_found, deleted)
         else:
-            response = json_controller.debiasing_json(space, lower, methods, pca, aug1_list, aug2_list, t1, t2, a1, a2,
-                                                      t1_deb, t2_deb, a1_deb, a2_deb, not_found, deleted, lex_dict)
+            response = json_controller.debiasing_json(space, lower, methods, pca, aug1_list, aug2_list, t1, t2, a1, a2, aug1, aug2,
+                                                      t1_deb, t2_deb, a1_deb, a2_deb, aug1_deb, aug2_deb, not_found, deleted, lex_dict=lex_dict)

     logging.info("Debiasing-Engine: Finished")

     return response, 200


-def debiasing_bam(equality_sets, vocab, vecs, t1_list, t2_list, a1_list, a2_list):
+def debiasing_bam(equality_sets, vocab, vecs, t1_list, t2_list, a1_list, a2_list, aug1_list, aug2_list):
     new_vecs, proj_mat = bam.debias_proc(equality_sets, vocab, vecs)
-    t1, t2, a1, a2 = calculation.vocabs_to_dicts(vocab, new_vecs, t1_list, t2_list, a1_list, a2_list)
-    return new_vecs, t1, t2, a1, a2,
+    t1, t2, a1, a2, aug1, aug2 = calculation.vocabs_to_dicts(vocab, new_vecs, t1_list, t2_list, a1_list, a2_list, aug1_list, aug2_list)
+    return new_vecs, t1, t2, a1, a2, aug1, aug2


-def debiasing_gbdd(equality_sets, vocab, vecs, t1_list, t2_list, a1_list, a2_list):
+def debiasing_gbdd(equality_sets, vocab, vecs, t1_list, t2_list, a1_list, a2_list, aug1_list, aug2_list):
     v_b = gbdd.get_bias_direction(equality_sets, vocab, vecs)
     new_vecs = gbdd.debias_direction_linear(v_b, vecs)
-    t1, t2, a1, a2 = calculation.vocabs_to_dicts(vocab, new_vecs, t1_list, t2_list, a1_list, a2_list)
-    return new_vecs, t1, t2, a1, a2
+    t1, t2, a1, a2, aug1, aug2 = calculation.vocabs_to_dicts(vocab, new_vecs, t1_list, t2_list, a1_list, a2_list, aug1_list, aug2_list)
+    return new_vecs, t1, t2, a1, a2, aug1, aug2


-def debiasing_bam_gbdd(equality_sets, vocab, vecs, t1_list, t2_list, a1_list, a2_list):
-    new_vocab, new_vecs = bam.debias_proc(equality_sets, vocab, vecs)
-    v_b = gbdd.get_bias_direction(equality_sets, new_vecs, new_vocab)
-    new_vocab, new_vecs = gbdd.debias_direction_linear(v_b, new_vecs)
-    t1, t2, a1, a2 = calculation.vocabs_to_dicts(new_vocab, new_vecs, t1_list, t2_list, a1_list, a2_list)
-    return new_vecs, t1, t2, a1, a2
+def debiasing_bam_gbdd(equality_sets, vocab, vecs, t1_list, t2_list, a1_list, a2_list, aug1_list, aug2_list):
+    new_vecs, proj_matrix = bam.debias_proc(equality_sets, vocab, vecs)
+    v_b = gbdd.get_bias_direction(equality_sets, vocab, new_vecs)
+    new_vecs = gbdd.debias_direction_linear(v_b, new_vecs)
+    t1, t2, a1, a2, aug1, aug2 = calculation.vocabs_to_dicts(vocab, new_vecs, t1_list, t2_list, a1_list, a2_list, aug1_list, aug2_list)
+    return new_vecs, t1, t2, a1, a2, aug1, aug2


-def debiasing_gbdd_bam(equality_sets, vocab, vecs, t1_list, t2_list, a1_list, a2_list):
-    v_b = gbdd.get_bias_direction(equality_sets, vecs, vocab)
-    new_vocab, new_vecs = gbdd.debias_direction_linear(v_b, vecs)
-    new_vocab, new_vecs = bam.debias_proc(equality_sets, new_vocab, new_vecs)
-    t1, t2, a1, a2 = calculation.vocabs_to_dicts(new_vocab, new_vecs, t1_list, t2_list, a1_list, a2_list)
-    return new_vecs, t1, t2, a1, a2
+def debiasing_gbdd_bam(equality_sets, vocab, vecs, t1_list, t2_list, a1_list, a2_list, aug1_list, aug2_list):
+    v_b = gbdd.get_bias_direction(equality_sets, vocab, vecs)
+    new_vecs = gbdd.debias_direction_linear(v_b, vecs)
+    new_vecs, proj_matrix = bam.debias_proc(equality_sets, vocab, new_vecs)
+    t1, t2, a1, a2, aug1, aug2 = calculation.vocabs_to_dicts(vocab, new_vecs, t1_list, t2_list, a1_list, a2_list, aug1_list, aug2_list)
+    return new_vecs, t1, t2, a1, a2, aug1, aug2
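Besides threading the augmentation lists through, the rewritten composition helpers fix the earlier argument handling: gbdd.get_bias_direction is now consistently called as (equality_sets, vocab, vecs), bam.debias_proc is unpacked as (vectors, projection matrix), and gbdd.debias_direction_linear is treated as returning vectors only. A condensed sketch of the two compositions (a hypothetical summary helper, not part of the commit; bam and gbdd as imported in this module):

def compose_debiasing(order, equality_sets, vocab, vecs):
    # Hypothetical illustration of the two fixed pipelines above.
    if order == 'bamxgbdd':
        # BAM first, then GBDD on the BAM-debiased vectors.
        vecs, _proj = bam.debias_proc(equality_sets, vocab, vecs)
        v_b = gbdd.get_bias_direction(equality_sets, vocab, vecs)
        vecs = gbdd.debias_direction_linear(v_b, vecs)
    elif order == 'gbddxbam':
        # GBDD first, then BAM on the GBDD-debiased vectors.
        v_b = gbdd.get_bias_direction(equality_sets, vocab, vecs)
        vecs = gbdd.debias_direction_linear(v_b, vecs)
        vecs, _proj = bam.debias_proc(equality_sets, vocab, vecs)
    return vecs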

json_controller.py (+29 -19)

@@ -46,6 +46,7 @@ def json_to_debias_spec(content):
     attributes2 = content['A2'].split(' ')
     if 'Augmentations1' in content:
         augments1 = content['Augmentations1'].split(' ')
+        print('IS USED')
     if 'Augmentations2' in content:
         augments2 = content['Augmentations2'].split(' ')
     logging.info("JsonController: Found following bias spec: T1: " + str(target1) + "; T2: " + str(target2) + "; A1: " +
@@ -67,63 +68,72 @@ def bias_evaluation_json(scores, space, lower, t1, t2, a1, a2, not_found, delete


 def debiasing_json(space, lower, method, pca, aug1_list, aug2_list,
-                   t1, t2, a1, a2,
-                   t1_deb, t2_deb, a1_deb, a2_deb, not_found, deleted,
-                   t1_pca_bias=None, t2_pca_bias=None, a1_pca_bias=None, a2_pca_bias=None,
-                   t1_pca_deb=None, t2_pca_deb=None, a1_pca_deb=None, a2_pca_deb=None, lex_dict=None):
+                   t1, t2, a1, a2, aug1, aug2,
+                   t1_deb, t2_deb, a1_deb, a2_deb, aug1_deb, aug2_deb,
+                   not_found, deleted,
+                   t1_pca_bias=None, t2_pca_bias=None, a1_pca_bias=None, a2_pca_bias=None, aug1_pca_bias=None, aug2_pca_bias=None,
+                   t1_pca_deb=None, t2_pca_deb=None, a1_pca_deb=None, a2_pca_deb=None, aug1_pca_deb=None, aug2_pca_deb=None, lex_dict=None):

     t1 = dict_to_json(t1)
     t2 = dict_to_json(t2)
     a1 = dict_to_json(a1)
     a2 = dict_to_json(a2)
+    aug1 = dict_to_json(aug1)
+    aug2 = dict_to_json(aug2)
     t1_deb = dict_to_json(t1_deb)
     t2_deb = dict_to_json(t2_deb)
     a1_deb = dict_to_json(a1_deb)
     a2_deb = dict_to_json(a2_deb)
+    aug1_deb = dict_to_json(aug1_deb)
+    aug2_deb = dict_to_json(aug2_deb)

     if pca == 'false':
         if lex_dict is None:
             response = json.dumps(
                 {"Space": space, "Model": method, "Lower": lower, "PCA": pca,
                  "UsedAugmentations": {"A1": aug1_list, "A2": aug2_list},
-                 "BiasedSpace:": {"T1": t1, "T2": t2, "A1": a1, "A2": a2},
-                 "DebiasedSpace": {"T1": t1_deb, "T2": t2_deb, "A1": a1_deb, "A2": a2_deb},
+                 "BiasedSpace:": {"T1": t1, "T2": t2, "A1": a1, "A2": a2, "Augmentations1": aug1, "Augmentations2": aug2},
+                 "DebiasedSpace": {"T1": t1_deb, "T2": t2_deb, "A1": a1_deb, "A2": a2_deb, "Augmentations1": aug1_deb, "Augmentations2": aug2_deb},
                  "NotFound": not_found, "Deleted": deleted})
         else:
             lex_dict = dict_to_json(lex_dict)
             response = json.dumps(
                 {"Space": space, "Model": method, "Lower": lower, "PCA": pca,
                  "UsedAugmentations": {"A1": aug1_list, "A2": aug2_list},
-                 "BiasedSpace:": {"T1": t1, "T2": t2, "A1": a1, "A2": a2},
-                 "DebiasedSpace": {"T1": t1_deb, "T2": t2_deb, "A1": a1_deb, "A2": a2_deb},
+                 "BiasedSpace:": {"T1": t1, "T2": t2, "A1": a1, "A2": a2, "Augmentations1": aug1, "Augmentations2": aug2},
+                 "DebiasedSpace": {"T1": t1_deb, "T2": t2_deb, "A1": a1_deb, "A2": a2_deb, "Augmentations1": aug1_deb, "Augmentations2": aug2_deb},
                  "NotFound": not_found, "Deleted": deleted, "LexDictionary": lex_dict})
     else:
         t1_pca_bias = dict_to_json(t1_pca_bias)
         t2_pca_bias = dict_to_json(t2_pca_bias)
         a1_pca_bias = dict_to_json(a1_pca_bias)
         a2_pca_bias = dict_to_json(a2_pca_bias)
+        aug1_pca_bias = dict_to_json(aug1_pca_bias)
+        aug2_pca_bias = dict_to_json(aug2_pca_bias)
         t1_pca_deb = dict_to_json(t1_pca_deb)
         t2_pca_deb = dict_to_json(t2_pca_deb)
         a1_pca_deb = dict_to_json(a1_pca_deb)
         a2_pca_deb = dict_to_json(a2_pca_deb)
+        aug1_pca_deb = dict_to_json(aug1_pca_deb)
+        aug2_pca_deb = dict_to_json(aug2_pca_deb)
         if lex_dict is None:
             response = json.dumps(
                 {"Space": space, "Model": method, "Lower": lower, "PCA": pca,
-                 "UsedAugmentations": {"Augmentations1": aug1_list, "Augmentations2": aug2_list},
-                 "BiasedSpace:": {"T1": t1, "T2": t2, "A1": a1, "A2": a2},
-                 "DebiasedSpace": {"T1": t1_deb, "T2": t2_deb, "A1": a1_deb, "A2": a2_deb},
-                 "BiasedSpacePCA": {"T1": t1_pca_bias, "T2": t2_pca_bias, "A1": a1_pca_bias, "A2": a2_pca_bias},
-                 "DebiasedSpacePCA": {"T1": t1_pca_deb, "T2": t2_pca_deb, "A1": a1_pca_deb, "A2": a2_pca_deb},
+                 "UsedAugmentations": {"A1": aug1_list, "A2": aug2_list},
+                 "BiasedSpace:": {"T1": t1, "T2": t2, "A1": a1, "A2": a2, "Augmentations1": aug1, "Augmentations2": aug2},
+                 "DebiasedSpace": {"T1": t1_deb, "T2": t2_deb, "A1": a1_deb, "A2": a2_deb, "Augmentations1": aug1_deb, "Augmentations2": aug2_deb},
+                 "BiasedSpacePCA": {"T1": t1_pca_bias, "T2": t2_pca_bias, "A1": a1_pca_bias, "A2": a2_pca_bias, "Augmentations1": aug1_pca_bias, "Augmentations2": aug2_pca_bias},
+                 "DebiasedSpacePCA": {"T1": t1_pca_deb, "T2": t2_pca_deb, "A1": a1_pca_deb, "A2": a2_pca_deb, "Augmentations1": aug1_pca_deb, "Augmentations2": aug2_pca_deb},
                  "NotFound": not_found, "Deleted": deleted})
         else:
             lex_dict = dict_to_json(lex_dict)
             response = json.dumps(
                 {"Space": space, "Model": method, "Lower": lower, "PCA": pca,
-                 "UsedAugmentations": {"Augmentations1": aug1_list, "Augmentations2": aug2_list},
-                 "BiasedSpace:": {"T1": t1, "T2": t2, "A1": a1, "A2": a2},
-                 "DebiasedSpace": {"T1": t1_deb, "T2": t2_deb, "A1": a1_deb, "A2": a2_deb},
-                 "BiasedSpacePCA": {"T1": t1_pca_bias, "T2": t2_pca_bias, "A1": a1_pca_bias, "A2": a2_pca_bias},
-                 "DebiasedSpacePCA": {"T1": t1_pca_deb, "T2": t2_pca_deb, "A1": a1_pca_deb, "A2": a2_pca_deb},
+                 "UsedAugmentations": {"A1": aug1_list, "A2": aug2_list},
+                 "BiasedSpace:": {"T1": t1, "T2": t2, "A1": a1, "A2": a2, "Augmentations1": aug1, "Augmentations2": aug2},
+                 "DebiasedSpace": {"T1": t1_deb, "T2": t2_deb, "A1": a1_deb, "A2": a2_deb, "Augmentations1": aug1_deb, "Augmentations2": aug2_deb},
+                 "BiasedSpacePCA": {"T1": t1_pca_bias, "T2": t2_pca_bias, "A1": a1_pca_bias, "A2": a2_pca_bias, "Augmentations1": aug1_pca_bias, "Augmentations2": aug2_pca_bias},
+                 "DebiasedSpacePCA": {"T1": t1_pca_deb, "T2": t2_pca_deb, "A1": a1_pca_deb, "A2": a2_pca_deb, "Augmentations1": aug1_pca_deb, "Augmentations2": aug2_pca_deb},
                  "NotFound": not_found, "Deleted": deleted, "LexDictionary": lex_dict})

     return response
@@ -138,7 +148,7 @@ def json_with_vector_data(content):
     attributes2 = content['DebiasedSpace']['A2']
     if 'Augmentations1' in content['DebiasedSpace']:
         augments1 = content['DebiasedSpace']['Augmentations1']
-    if 'augments1' in content['DebiasedSpace']:
+    if 'Augmentations2' in content['DebiasedSpace']:
         augments2 = content['DebiasedSpace']['Augmentations2']
     return target1, target2, attributes1, attributes2, augments1, augments2
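With the augmentation dictionaries serialized next to the targets and attributes, the non-PCA response takes roughly the shape below. This is a hypothetical abbreviation (all values invented, structure read off the json.dumps calls above; note the pre-existing trailing colon inside the "BiasedSpace:" key):

# Hypothetical shape of json.loads(response) for the pca == 'false', no-lex case.
response_shape = {
    "Space": "...", "Model": "gbddxbam", "Lower": "...", "PCA": "false",
    "UsedAugmentations": {"A1": ["..."], "A2": ["..."]},
    "BiasedSpace:": {"T1": "...", "T2": "...", "A1": "...", "A2": "...",
                     "Augmentations1": "...", "Augmentations2": "..."},
    "DebiasedSpace": {"T1": "...", "T2": "...", "A1": "...", "A2": "...",
                      "Augmentations1": "...", "Augmentations2": "..."},
    "NotFound": "...", "Deleted": "...",
}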
