Skip to content

Commit f60a791

Browse files
committed
formatting
1 parent c0f554e commit f60a791

7 files changed

+16
-10
lines changed

ConvTSMixer-elec.ipynb

+3-2
Original file line number | Diff line number | Diff line change
@@ -247,7 +247,7 @@
247247
"predictor = estimator.train(\n",
248248
" training_data=dataset_train,\n",
249249
" cache_data=True,\n",
250-
"# shuffle_buffer_length=1024,\n",
250+
" # shuffle_buffer_length=1024,\n",
251251
")"
252252
]
253253
},
@@ -273,7 +273,8 @@
273273
"outputs": [],
274274
"source": [
275275
"evaluator = MultivariateEvaluator(\n",
276-
" quantiles=(np.arange(20) / 20.0)[1:], target_agg_funcs={\"sum\": np.sum},\n",
276+
" quantiles=(np.arange(20) / 20.0)[1:],\n",
277+
" target_agg_funcs={\"sum\": np.sum},\n",
277278
")\n",
278279
"evaluator.aggregation_strategy = aggregate_valid"
279280
]

Transformer-MV-Solar.ipynb

+1-1
Original file line number | Diff line number | Diff line change
@@ -141,7 +141,7 @@
141141
" lags_seq=[1],\n",
142142
" d_model=128,\n",
143143
" prediction_length=dataset.metadata.prediction_length,\n",
144-
" context_length=dataset.metadata.prediction_length*3,\n",
144+
" context_length=dataset.metadata.prediction_length * 3,\n",
145145
" freq=dataset.metadata.freq,\n",
146146
" scaling=\"std\",\n",
147147
" trainer_kwargs=dict(max_epochs=100, accelerator=\"gpu\", devices=\"1\"),\n",

Transformer-exchange.ipynb

+1-1
Original file line number | Diff line number | Diff line change
@@ -129,7 +129,7 @@
129129
" input_size=int(dataset.metadata.feat_static_cat[0].cardinality),\n",
130130
" d_model=32,\n",
131131
" prediction_length=dataset.metadata.prediction_length,\n",
132-
" context_length=dataset.metadata.prediction_length*2,\n",
132+
" context_length=dataset.metadata.prediction_length * 2,\n",
133133
" freq=dataset.metadata.freq,\n",
134134
" scaling=\"std\",\n",
135135
" batch_size=64,\n",

TsT-elec.ipynb

+5-3
Original file line number | Diff line number | Diff line change
@@ -80,11 +80,13 @@
8080
" depth=1,\n",
8181
" dim=64,\n",
8282
" norm_first=True,\n",
83-
" \n",
8483
" patch_size=(3, 3),\n",
8584
" batch_size=64,\n",
8685
" num_batches_per_epoch=100,\n",
87-
" trainer_kwargs=dict(accelerator=\"cuda\", max_epochs=100,),\n",
86+
" trainer_kwargs=dict(\n",
87+
" accelerator=\"cuda\",\n",
88+
" max_epochs=100,\n",
89+
" ),\n",
8890
")"
8991
]
9092
},
@@ -167,7 +169,7 @@
167169
"predictor = estimator.train(\n",
168170
" training_data=dataset_train,\n",
169171
" cache_data=True,\n",
170-
" #shuffle_buffer_length=1024,\n",
172+
" # shuffle_buffer_length=1024,\n",
171173
")"
172174
]
173175
},

TsT-exchange.ipynb

+1-1
Original file line number | Diff line number | Diff line change
@@ -70,7 +70,7 @@
7070
" depth=3,\n",
7171
" dim=32,\n",
7272
" patch_size=(4, 4),\n",
73-
" patch_reverse_mapping_layer = \"mlp\",\n",
73+
" patch_reverse_mapping_layer=\"mlp\",\n",
7474
" batch_size=64,\n",
7575
" num_batches_per_epoch=100,\n",
7676
" trainer_kwargs=dict(accelerator=\"cuda\", max_epochs=100),\n",

TsT-solar.ipynb

+4-1
Original file line number | Diff line number | Diff line change
@@ -82,7 +82,10 @@
8282
" # patch_size=(5, 5),\n",
8383
" batch_size=128,\n",
8484
" num_batches_per_epoch=100,\n",
85-
" trainer_kwargs=dict(accelerator=\"cuda\", max_epochs=100,),\n",
85+
" trainer_kwargs=dict(\n",
86+
" accelerator=\"cuda\",\n",
87+
" max_epochs=100,\n",
88+
" ),\n",
8689
")"
8790
]
8891
},

deepVAR-exchange.ipynb

+1-1
Original file line number | Diff line number | Diff line change
@@ -113,7 +113,7 @@
113113
" num_layers=3,\n",
114114
" dropout_rate=0.1,\n",
115115
" prediction_length=dataset.metadata.prediction_length,\n",
116-
" context_length=dataset.metadata.prediction_length*2,\n",
116+
" context_length=dataset.metadata.prediction_length * 2,\n",
117117
" freq=dataset.metadata.freq,\n",
118118
" scaling=\"std\",\n",
119119
" batch_size=64,\n",

0 commit comments

Comments (0)