
Commit 68c456b

fixing

1 parent 9493c84 commit 68c456b

14 files changed, +71 -34 lines changed

.flake8

+1 -1

@@ -1,4 +1,4 @@
 [flake8]
 max_line_length=122
-ignore=E305,W504,E126,E401,E721,E722
+ignore=E305,W504,E126,E401,E721,F722
 max-complexity=19
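A note on the swapped code (context, not part of the commit): E722 flags bare "except:" clauses, while F722 is pyflakes' "syntax error in forward annotation". The jaxtyping-style string shape annotations used in this codebase (see data_embedding.py below) are not valid Python expressions, so pyflakes reports them as F722; ignoring it lets such annotations pass lint. A minimal sketch of the kind of annotation involved:

import torch
from jaxtyping import Float

# pyflakes tries to parse the quoted shape spec as a Python expression,
# fails, and reports F722; hence the new ignore entry.
def identity(coords: Float[torch.Tensor, "batch_size x y channels"]) -> Float[torch.Tensor, "batch_size x y channels"]:
    return coords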

.idea/workspace.xml

+12 -4

Some generated files are not rendered by default.

.pre-commit-config.yaml

+6

@@ -5,3 +5,9 @@ repos:
   - id: trailing-whitespace
   - id: end-of-file-fixer
   - id: check-yaml
+
+- repo: https://github.com/hhatto/autopep8
+  rev: v2.0.4
+  hooks:
+  - id: autopep8
+    args: [--in-place, --aggressive, --aggressive, --max-line-length=122]
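Most of the churn in the files below (wrapped signatures, re-broken binary operators, blank lines after docstrings, exploded lists) appears to be what this hook produces. As a rough sketch of the same operation through autopep8's Python API (assuming autopep8 is installed; the options mirror the hook's args):

import autopep8

# Reformat one file's source the way the hook would; fix_code returns
# the fixed text rather than writing in place.
source = open("flood_forecast/trainer.py").read()
fixed = autopep8.fix_code(source, options={"aggressive": 2, "max_line_length": 122})
print(fixed != source)  # True if autopep8 found anything to reformat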

flood_forecast/basic/d_n_linear.py

+4

@@ -6,6 +6,7 @@ class NLinear(nn.Module):
     """
     Normalization-Linear
     """
+
     def __init__(self, forecast_history: int, forecast_length: int, enc_in=128, individual=False, n_targs=1):
         super(NLinear, self).__init__()
         self.seq_len = forecast_history
@@ -43,6 +44,7 @@ class MovingAvg(nn.Module):
     """
     Moving average block to highlight the trend of time series
     """
+
     def __init__(self, kernel_size, stride):
         super(MovingAvg, self).__init__()
         self.kernel_size = kernel_size
@@ -62,6 +64,7 @@ class SeriesDecomp(nn.Module):
     """
     Series decomposition block
     """
+
     def __init__(self, kernel_size):
         super(SeriesDecomp, self).__init__()
         self.moving_avg = MovingAvg(kernel_size, stride=1)
@@ -76,6 +79,7 @@ class DLinear(nn.Module):
     """
     Decomposition-Linear
     """
+
     def __init__(self, forecast_history: int, forecast_length: int, individual, enc_in: int, n_targs=1):
         """Code from
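For context on the classes touched (a sketch under assumptions, not the repository's exact code): SeriesDecomp splits a series into a moving-average trend plus a remainder, and DLinear forecasts each component with its own linear layer. Illustrative decomposition only; the real MovingAvg replicate-pads the sequence ends before averaging, while AvgPool1d here zero-pads:

import torch
import torch.nn as nn

x = torch.randn(8, 96, 1)  # (batch, time, features)
# Moving-average trend via average pooling over the time axis.
trend = nn.AvgPool1d(kernel_size=25, stride=1, padding=12)(x.transpose(1, 2)).transpose(1, 2)
seasonal = x - trend       # remainder after removing the trend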

flood_forecast/custom/custom_opt.py

+1

@@ -317,6 +317,7 @@ class NegativeLogLikelihood(torch.nn.Module):
     target -> True y
     output -> predicted distribution
     """
+
     def __init__(self):
         super().__init__()

flood_forecast/da_rnn/model.py

-1

@@ -37,7 +37,6 @@ def __init__(
             gru_lstm=True,
             probabilistic=False,
             final_act=None):
-
         """For model benchmark information see link on side https://rb.gy/koozff

         :param n_time_series: Number of time series present in input

flood_forecast/deployment/inference.py

+2 -2

@@ -14,8 +14,8 @@


 class InferenceMode(object):
-    def __init__(self, forecast_steps: int, num_prediction_samples: int, model_params, csv_path: Union[str, pd.DataFrame], weight_path,
-                 wandb_proj: str = None, torch_script=False):
+    def __init__(self, forecast_steps: int, num_prediction_samples: int, model_params,
+                 csv_path: Union[str, pd.DataFrame], weight_path, wandb_proj: str = None, torch_script=False):
         """Class to handle inference for models,

         :param forecast_steps: Number of time-steps to forecast (doesn't have to be hours)
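The constructor behaves the same; only the line wrapping changed. A hedged usage sketch (the keyword names come from the signature above; every value and file path below is made up for illustration):

import json
from flood_forecast.deployment.inference import InferenceMode

mode = InferenceMode(
    forecast_steps=336,                                  # hypothetical horizon
    num_prediction_samples=100,
    model_params=json.load(open("model_config.json")),   # hypothetical config file
    csv_path="river_gauge.csv",                          # hypothetical input CSV
    weight_path="model_weights.pth",                     # hypothetical checkpoint
)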

flood_forecast/evaluator.py

+3 -3

@@ -284,12 +284,12 @@ def infer_on_torch_model(
         df_prediction_arr_1 = []

         for i in range(0, len(vals[0])):
-            df_train_and_test, end_tensor, history_length, forecast_start_idx, csv_test_loader, df_prediction = handle_later_ev(model, vals[0][i][1], vals[1][i], model.params, csv_series_id_loader, multi_params, vals[0][i][2], vals[0][i][0], datetime_start=datetime_start) # noqa
+            df_train_and_test, end_tensor, history_length, forecast_start_idx, csv_test_loader, df_prediction = handle_later_ev(model, vals[0][i][1], vals[1][i], model.params, csv_series_id_loader, multi_params, vals[0][i][2], vals[0][i][0], datetime_start=datetime_start)  # noqa
             df_train_and_test_arr.append(df_train_and_test)
             end_tensor_arr.append(end_tensor)
             forecast_start_idx_arr.append(forecast_start_idx)
             df_prediction_arr_1.append(df_prediction)
-        return df_train_and_test_arr, end_tensor_arr, history_length, forecast_start_idx_arr, csv_test_loader, df_prediction_arr_1 # noqa
+        return df_train_and_test_arr, end_tensor_arr, history_length, forecast_start_idx_arr, csv_test_loader, df_prediction_arr_1  # noqa
     else:
         csv_test_loader = CSVTestLoader(
             test_csv_path,
@@ -419,7 +419,7 @@ def handle_evaluation_series_loader(csv_series_id_loader: SeriesIDTestLoader, mo
             multi_params=1
         )
         end_tenor_arr.append(end_tensor)
-    return data, end_tenor_arr, model.params["dataset_params"]["forecast_history"], forecast_start_idx, csv_series_id_loader, [] # noqa
+    return data, end_tenor_arr, model.params["dataset_params"]["forecast_history"], forecast_start_idx, csv_series_id_loader, []  # noqa


 def handle_ci_multi(prediction_samples: torch.Tensor, csv_test_loader: CSVTestLoader, multi_params: int,

flood_forecast/multi_models/crossvivit.py

-1

@@ -171,7 +171,6 @@ def forward(
         :param src: Source sequence. By this point the shape of the code will be
         :type src: Float[torch.Tensor, "batch_t_steps variable_sequence_length model_dim"]
         :param src_pos_emb: Positional embedding of source sequence's tokens of shape [batch_t_steps, variable_sequence_length, model_dim/2]
-
         """

         attention_scores = {}

flood_forecast/preprocessing/pytorch_loaders.py

+6 -5

@@ -234,7 +234,7 @@ def __getitem__(self, idx: int) -> Tuple[Dict, Dict]:
             idx2 = va[self.series_id_col].iloc[0]
             va_returned = va[va.columns.difference([self.series_id_col], sort=False)]
             t = torch.Tensor(va_returned.iloc[idx: self.forecast_history + idx].values)[:, 1:]
-            targ = torch.Tensor(va_returned.iloc[targ_start_idx: targ_start_idx + self.forecast_length].to_numpy())[:, 1:] # noqa
+            targ = torch.Tensor(va_returned.iloc[targ_start_idx: targ_start_idx + self.forecast_length].to_numpy())[:, 1:]  # noqa
             src_list[self.unique_dict[idx2]] = t
             targ_list[self.unique_dict[idx2]] = targ
         return src_list, targ_list
@@ -355,6 +355,7 @@ def __len__(self) -> int:
             len(self.df.index) - self.forecast_history - self.forecast_total - 1
         )

+
 class AEDataloader(CSVDataLoader):
     def __init__(
             self,
@@ -433,7 +434,7 @@ def __init__(self, params: Dict, n_classes: int = 2):
         :param params: The standard dictionary for a dataloader (see CSVDataLoader)
         :type params: Dict
         :param n_classes: The number of classes in the problem
-        """ # noqa
+        """  # noqa
         self.n_classes = n_classes
         params["forecast_history"] = params["sequence_length"]
         params["no_scale"] = True
@@ -455,7 +456,7 @@ def __getitem__(self, idx: int):
         targ_labs = torch.zeros(self.n_classes)
         casted_shit = int(targ.data.tolist())
         if casted_shit > self.n_classes:
-            raise ValueError("The class " + str(casted_shit) + " is greater than the number of classes " + str(self.n_classes)) # noqa
+            raise ValueError("The class " + str(casted_shit) + " is greater than the number of classes " + str(self.n_classes))  # noqa
         targ_labs[casted_shit] = 1
         return src.float(), targ_labs.float().unsqueeze(0)
@@ -624,7 +625,7 @@ def get_item_classification(self, idx: int):
         targ_labs = torch.zeros(self.n_classes)
         casted_shit = int(targ.data.tolist())
         if casted_shit > self.n_classes - 1:  # -1 because counting starts at zero
-            raise ValueError("The class " + str(casted_shit) + " is greater than the number of classes " + str(self.n_classes)) # noqa
+            raise ValueError("The class " + str(casted_shit) + " is greater than the number of classes " + str(self.n_classes))  # noqa
         targ_labs[casted_shit] = 1
         return src.float(), targ_labs.float().unsqueeze(0)
@@ -669,7 +670,7 @@ def __init__(self, series_id_col: str, main_params: dict, return_method: str, fo
         super().__init__(series_id_col, main_params, return_method, return_all)
         print("forecast_total is: " + str(forecast_total))
         self.forecast_total = forecast_total
-        self.csv_test_loaders = [CSVTestLoader(loader_1, forecast_total, **main_params) for loader_1 in self.df_orig_list] # noqa
+        self.csv_test_loaders = [CSVTestLoader(loader_1, forecast_total, **main_params) for loader_1 in self.df_orig_list]  # noqa

     def get_from_start_date_all(self, forecast_start: datetime, series_id: int = None):
         res = []

flood_forecast/pytorch_training.py

-1

@@ -93,7 +93,6 @@ def train_transformer_style(
         forward_params: Dict = {},
         model_filepath: str = "model_save",
         class2=False) -> None:
-
     """Function to train any PyTorchForecast model

     :param model: A properly wrapped PyTorchForecast model

flood_forecast/trainer.py

+28 -10

@@ -13,6 +13,7 @@
     plot_df_test_with_confidence_interval,
     plot_df_test_with_probabilistic_confidence_interval)

+
 def handle_model_evaluation1(test_acc, params: Dict) -> None:
     """Utility function to help handle model evaluation. Primarily used at the moment for forecasting models.

@@ -28,7 +29,7 @@ def handle_model_evaluation1(test_acc, params: Dict) -> None:
     forecast_start_idx = test_acc[2]
     df_prediction_samples = test_acc[3]
     mae = (df_train_and_test.loc[forecast_start_idx:, "preds"] -
-        df_train_and_test.loc[forecast_start_idx:, params["dataset_params"]["target_col"][0]]).abs()
+           df_train_and_test.loc[forecast_start_idx:, params["dataset_params"]["target_col"][0]]).abs()
     inverse_mae = 1 / mae
     i = 0
     for df in df_prediction_samples:
@@ -71,6 +72,7 @@ def handle_model_evaluation1(test_acc, params: Dict) -> None:
                 name=relevant_col))
     wandb.log({"test_plot_all": test_plot_all})

+
 def handle_core_eval(trained_model, params: Dict, model_type: str):
     """_summary_

@@ -89,8 +91,8 @@ def handle_core_eval(trained_model, params: Dict, model_type: str):
             params["inference_params"],
             {})
     if params["dataset_params"]["class"] == "SeriesIDLoader":
-            data = test_acc[1]
-            for i in range(len(data)):
+        data = test_acc[1]
+        for i in range(len(data)):
                 tuple_for_eval = (test_acc[0][i], test_acc[1][i], test_acc[2][i], test_acc[3][i])
                 handle_model_evaluation1(tuple_for_eval, params)
     else:
@@ -146,12 +148,26 @@ def train_function(model_type: str, params: Dict) -> PyTorchForecast:
         trained_model.params["inference_params"]["dataset_params"] = trained_model.params["dataset_params"].copy()
         del trained_model.params["inference_params"]["dataset_params"]["class"]
         # noqa: F501
-        trained_model.params["inference_params"]["dataset_params"]["interpolate_param"] = trained_model.params["inference_params"]["dataset_params"].pop("interpolate")
-        trained_model.params["inference_params"]["dataset_params"]["scaling"] = trained_model.params["inference_params"]["dataset_params"].pop("scaler")
+        trained_model.params["inference_params"]["dataset_params"]["interpolate_param"] = trained_model.params["inference_params"]["dataset_params"].pop(
+            "interpolate")
+        trained_model.params["inference_params"]["dataset_params"]["scaling"] = trained_model.params["inference_params"]["dataset_params"].pop(
+            "scaler")
         if "feature_param" in trained_model.params["dataset_params"]:
-            trained_model.params["inference_params"]["dataset_params"]["feature_params"] = trained_model.params["inference_params"]["dataset_params"].pop("feature_param")
-        delete_params = ["num_workers", "pin_memory", "train_start", "train_end", "valid_start", "valid_end", "test_start", "test_end",
-                         "training_path", "validation_path", "test_path", "batch_size"]
+            trained_model.params["inference_params"]["dataset_params"]["feature_params"] = trained_model.params["inference_params"]["dataset_params"].pop(
+                "feature_param")
+        delete_params = [
+            "num_workers",
+            "pin_memory",
+            "train_start",
+            "train_end",
+            "valid_start",
+            "valid_end",
+            "test_start",
+            "test_end",
+            "training_path",
+            "validation_path",
+            "test_path",
+            "batch_size"]
         for param in delete_params:
             if param in trained_model.params["inference_params"]["dataset_params"]:
                 del trained_model.params["inference_params"]["dataset_params"][param]
@@ -168,21 +184,23 @@ def train_function(model_type: str, params: Dict) -> PyTorchForecast:
             dataset_params)["scaling"]
         params["inference_params"]["dataset_params"].pop('scaler_params', None)
         # TODO Move to other func
-        if params["dataset_params"]["class"] != "GeneralClassificationLoader" and params["dataset_params"]["class"] !="VariableSequenceLength":
+        if params["dataset_params"]["class"] != "GeneralClassificationLoader" and params["dataset_params"]["class"] != "VariableSequenceLength":
             handle_core_eval(trained_model, params, model_type)

     else:
         raise Exception("Please supply valid model type for forecasting or classification")
     return trained_model

+
 def correct_stupid_sklearn_error(training_conf: Dict) -> Dict:
     """Sklearn for whatever reason decided to only allow scaler params in the form of tuples
     this was stupid so now we have to convert JSON list to tuple.

     :param scaling_params: A list of the scaling params
     :type training_conf: Dict
     """
-    training_conf["dataset_params"]["scaler_params"]["feature_range"] = tuple(training_conf["dataset_params"]["scaler_params"]["feature_range"])
+    training_conf["dataset_params"]["scaler_params"]["feature_range"] = tuple(
+        training_conf["dataset_params"]["scaler_params"]["feature_range"])
     if "dataset_params" in training_conf["inference_params"]:
         del training_conf["inference_params"]["dataset_params"]
     print("Fixed dumbass sklearn errors morons should've never changed it")

flood_forecast/transformer_xl/attn.py

+4 -4

@@ -80,8 +80,8 @@ def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):
         kv = keys.transpose(-2, -1) @ (values * normalizer_col_refine[:, :, :, None])
         x = (
             (
-                ((queries @ kv) * normalizer_row[:, :, :, None])
-                * normalizer_row_refine[:, :, :, None]
+                ((queries @ kv) * normalizer_row[:, :, :, None]) *
+                normalizer_row_refine[:, :, :, None]
             )
             .transpose(1, 2)
             .contiguous()
@@ -167,8 +167,8 @@ def flash_attention_forward(self, Q, K, V, mask=None):

         mi_new = torch.maximum(m_block_ij, mi)
         li_new = (
-            torch.exp(mi - mi_new) * li
-            + torch.exp(m_block_ij - mi_new) * l_block_ij
+            torch.exp(mi - mi_new) * li +
+            torch.exp(m_block_ij - mi_new) * l_block_ij
         )

         O_BLOCKS[i] = (li / li_new) * torch.exp(mi - mi_new) * Oi + (
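Context for the second hunk (not part of the commit): the re-wrapped li_new expression is the standard online-softmax running-denominator update from FlashAttention. With running row max $m_i$ and denominator $\ell_i$, a new key/value block with block max $\tilde{m}_{ij}$ and block sum $\tilde{\ell}_{ij}$ folds in as

$$m_i^{\mathrm{new}} = \max(m_i, \tilde{m}_{ij}), \qquad \ell_i^{\mathrm{new}} = e^{m_i - m_i^{\mathrm{new}}} \ell_i + e^{\tilde{m}_{ij} - m_i^{\mathrm{new}}} \tilde{\ell}_{ij},$$

which is exactly the torch.exp(mi - mi_new) * li + torch.exp(m_block_ij - mi_new) * l_block_ij line, with mi_new = torch.maximum(m_block_ij, mi). Only the operator placement changed; the math is untouched.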

flood_forecast/transformer_xl/data_embedding.py

+4 -2

@@ -244,7 +244,8 @@ def __init__(self, channels: int):
         inv_freq = 1.0 / (10000 ** (torch.arange(0, self.channels, 2).float() / self.channels))
         self.register_buffer("inv_freq", inv_freq)

-    def forward(self, coords: Float[torch.Tensor, "batch_size x y channels"]) -> Float[torch.Tensor, "batch_size height width channels"]:
+    def forward(self, coords: Float[torch.Tensor, "batch_size x y channels"]
+                ) -> Float[torch.Tensor, "batch_size height width channels"]:
         """
         Forward pass of the PositionalEncoding2D module.

@@ -285,9 +286,10 @@ def forward(self, coords: Float[torch.Tensor, "batch_size x y channels"]) -> Flo
         emb = torch.zeros((batch_size, height, width, self.channels * 2),
                           device=coords.device).type(coords.type())
         emb[:, :, :, :self.channels] = emb_x
-        emb[:, :, :, self.channels:2*self.channels] = emb_y
+        emb[:, :, :, self.channels:2 * self.channels] = emb_y
         return emb

+
 class NeRF_embedding(nn.Module):
     def __init__(self, n_layers: int = 5):
         super().__init__()
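For context (a sketch under assumptions, not the repository's code): the hunk's emb_x/emb_y halves are per-axis sinusoidal embeddings laid side by side, so the first channels features encode the x coordinate and the next channels encode y. The inv_freq formula below is copied from the __init__ shown above; the grid sizes are made up:

import torch

def sin_embed(pos: torch.Tensor, channels: int) -> torch.Tensor:
    # 1D sinusoidal embedding: interleave sin/cos of position times inverse frequencies.
    inv_freq = 1.0 / (10000 ** (torch.arange(0, channels, 2).float() / channels))
    angles = pos[..., None] * inv_freq                       # (..., channels // 2)
    return torch.cat([angles.sin(), angles.cos()], dim=-1)   # (..., channels)

xs, ys = torch.arange(4).float(), torch.arange(3).float()   # hypothetical 4x3 grid
emb_x = sin_embed(xs, 6)[:, None, :].expand(4, 3, 6)
emb_y = sin_embed(ys, 6)[None, :, :].expand(4, 3, 6)
emb = torch.cat([emb_x, emb_y], dim=-1)                     # (4, 3, 12): [x-half | y-half]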
