 from pytorch_lightning.callbacks import Callback
 from pytorch_lightning.callbacks.early_stopping import EarlyStopping
 from pytorch_lightning.loggers import TensorBoardLogger, CSVLogger
+from scipy.stats import median_abs_deviation
 from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
 import sklearn.metrics
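For reference, scipy.stats.median_abs_deviation (SciPy >= 1.5) returns the median of absolute deviations from the median, a spread measure that is robust to outliers. A minimal sketch of what the new metric sees, with made-up residuals:

    import numpy as np
    from scipy.stats import median_abs_deviation

    residuals = np.array([0.1, -0.3, 0.2, 5.0, -0.1])  # one large outlier
    # default scale=1.0: median(|r - median(r)|); the 5.0 barely moves it
    print(median_abs_deviation(residuals))  # 0.2
    # caveat: for 2-D input the default axis=0 yields one MAD per column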
@@ -236,13 +238,6 @@ def ml_mlp_mul_ms(station_name="종로구"):
                                   batch_size=batch_size)

     def objective(trial):
-        # PyTorch Lightning will try to restore model parameters from previous trials if checkpoint
-        # filenames match. Therefore, the filenames for each trial must be made unique.
-        checkpoint_callback = pl.callbacks.ModelCheckpoint(
-            os.path.join(model_dir, "trial_{}".format(trial.number)), monitor="val_loss",
-            period=10
-        )
-
         model = BaseMLPModel(trial=trial,
                              hparams=hparams,
                              input_size=sample_size * len(train_features),
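(Dropping the per-trial ModelCheckpoint is consistent with the Trainer settings in the next hunk: checkpoint_callback=False means trials write no checkpoints, so there are no filenames left to collide between trials.)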
@@ -269,15 +264,15 @@ def objective(trial):
             logger=True,
             checkpoint_callback=False,
             callbacks=[PyTorchLightningPruningCallback(
-                trial, monitor="valid/MSE")])
+                trial, monitor="valid/MAD")])

         trainer.fit(model)

         # Don't Log
         # hyperparameters = model.hparams
         # trainer.logger.log_hyperparams(hyperparameters)

-        return trainer.callback_metrics.get("valid/MSE")
+        return trainer.callback_metrics.get("valid/MAD")

     if n_trials > 1:
         study = optuna.create_study(direction="minimize")
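The monitor string given to PyTorchLightningPruningCallback and the key read back from trainer.callback_metrics must both match the name the model logs via self.log, here "valid/MAD". A self-contained sketch (dummy objective, not the model above) of the contract the study relies on:

    import optuna

    def dummy_objective(trial):
        x = trial.suggest_float("x", -10, 10)
        return abs(x - 2)  # stand-in for the returned "valid/MAD" scalar

    study = optuna.create_study(direction="minimize")  # MAD, like MSE, is minimized
    study.optimize(dummy_objective, n_trials=20)
    print(study.best_params)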
@@ -363,12 +358,12 @@ def objective(trial):
     test_dataset.to_csv(model.data_dir / ("df_testset_" + target + ".csv"))

     checkpoint_callback = pl.callbacks.ModelCheckpoint(
-        os.path.join(model_dir, "train_{epoch}_{valid/MSE:.2f}"), monitor="valid/MSE",
+        os.path.join(model_dir, "train_{epoch}_{valid/MAD:.2f}"), monitor="valid/MAD",
         period=10
     )

     early_stop_callback = EarlyStopping(
-        monitor='valid/MSE',
+        monitor='valid/MAD',
         min_delta=0.001,
         patience=30,
         verbose=True,
@@ -417,7 +412,7 @@ def __init__(self, *args, **kwargs):
         self.features_nonperiodic = kwargs.get('features_nonperiodic',
                                                ["temp", "wind_spd", "wind_cdir", "wind_sdir",
                                                 "pres", "humid", "prep"])
-        self.metrics = kwargs.get('metrics', ['MAE', 'MSE', 'R2'])
+        self.metrics = kwargs.get('metrics', ['MAE', 'MSE', 'R2', 'MAD'])
         self.num_workers = kwargs.get('num_workers', 1)
         self.output_dir = kwargs.get(
             'output_dir', Path('/mnt/data/MLPMS2Multivariate/'))
@@ -516,15 +511,17 @@ def training_step(self, batch, batch_idx):
         y_hat = _y_hat.detach().cpu().clone().numpy()
         y_raw = _y_raw.detach().cpu().clone().numpy()

-        _mae = mean_absolute_error(y_hat, y)
-        _mse = mean_squared_error(y_hat, y)
-        _r2 = r2_score(y_hat, y)
+        _mae = mean_absolute_error(y, y_hat)
+        _mse = mean_squared_error(y, y_hat)
+        _r2 = r2_score(y, y_hat)
+        _mad = median_abs_deviation(y - y_hat)

         return {
             'loss': _loss,
             'metric': {
                 'MSE': _mse,
                 'MAE': _mae,
+                'MAD': _mad,
                 'R2': _r2
             }
         }
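The argument swap matters: sklearn's signature is metric(y_true, y_pred). MAE and MSE are symmetric in their arguments, but R^2 normalizes by the variance of the first argument, so the old orientation returned a wrong score. A quick self-contained check with made-up numbers:

    import numpy as np
    from sklearn.metrics import r2_score

    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred = np.array([2.4, 2.5, 2.6, 2.7])  # low-variance predictions
    print(r2_score(y_true, y_pred))  # 0.188: correct orientation
    print(r2_score(y_pred, y_true))  # -80.2: the old, swapped orientation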
@@ -546,6 +543,7 @@ def training_epoch_end(self, outputs):
         # self.log('train/loss', tensorboard_logs['train/loss'].item(), prog_bar=True)
         self.log('train/MSE', tensorboard_logs['train/MSE'].item(), on_epoch=True, logger=self.logger)
         self.log('train/MAE', tensorboard_logs['train/MAE'].item(), on_epoch=True, logger=self.logger)
+        self.log('train/MAD', tensorboard_logs['train/MAD'].item(), on_epoch=True, logger=self.logger)
         self.log('train/avg_loss', _log['loss'], on_epoch=True, logger=self.logger)

     def validation_step(self, batch, batch_idx):
@@ -557,15 +555,17 @@ def validation_step(self, batch, batch_idx):
         y_hat = _y_hat.detach().cpu().clone().numpy()
         y_raw = _y_raw.detach().cpu().clone().numpy()

-        _mae = mean_absolute_error(y_hat, y)
-        _mse = mean_squared_error(y_hat, y)
-        _r2 = r2_score(y_hat, y)
+        _mae = mean_absolute_error(y, y_hat)
+        _mse = mean_squared_error(y, y_hat)
+        _r2 = r2_score(y, y_hat)
+        _mad = median_abs_deviation(y - y_hat)

         return {
             'loss': _loss,
             'metric': {
                 'MSE': _mse,
                 'MAE': _mae,
+                'MAD': _mad,
                 'R2': _r2
             }
         }
@@ -586,6 +586,7 @@ def validation_epoch_end(self, outputs):

         self.log('valid/MSE', tensorboard_logs['valid/MSE'].item(), on_epoch=True, logger=self.logger)
         self.log('valid/MAE', tensorboard_logs['valid/MAE'].item(), on_epoch=True, logger=self.logger)
+        self.log('valid/MAD', tensorboard_logs['valid/MAD'].item(), on_epoch=True, logger=self.logger)
         self.log('valid/loss', _log['loss'], on_epoch=True, logger=self.logger)

     def test_step(self, batch, batch_idx):
@@ -598,11 +599,12 @@ def test_step(self, batch, batch_idx):
         y_hat = _y_hat.detach().cpu().clone().numpy()
         y_hat2 = relu_mul(
             np.array(self.test_dataset.inverse_transform(y_hat, dates)))
-        _loss = self.loss(torch.as_tensor(y_hat2).to(device), _y_raw)
+        _loss = self.loss(_y_raw, torch.as_tensor(y_hat2).to(device))

-        _mae = mean_absolute_error(y_hat2, y_raw)
-        _mse = mean_squared_error(y_hat2, y_raw)
-        _r2 = r2_score(y_hat2, y_raw)
+        _mae = mean_absolute_error(y_raw, y_hat2)
+        _mse = mean_squared_error(y_raw, y_hat2)
+        _r2 = r2_score(y_raw, y_hat2)
+        _mad = median_abs_deviation(y_raw - y_hat2)

         return {
             'loss': _loss,
@@ -612,6 +614,7 @@ def test_step(self, batch, batch_idx):
             'metric': {
                 'MSE': _mse,
                 'MAE': _mae,
+                'MAD': _mad,
                 'R2': _r2
             }
         }
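Note that in test_step the metrics, including the new MAD, are computed on inverse-transformed predictions, i.e. in the original units of the target rather than in normalized space. A self-contained sketch, with StandardScaler standing in for the dataset's inverse_transform (an assumption, not this repo's code):

    import numpy as np
    from scipy.stats import median_abs_deviation
    from sklearn.preprocessing import StandardScaler

    scaler = StandardScaler()
    y_raw = np.array([[10.0], [30.0], [25.0], [80.0]])
    y_scaled = scaler.fit_transform(y_raw)
    y_hat = y_scaled + np.array([[0.02], [-0.05], [0.01], [0.10]])  # fake errors
    y_hat2 = np.clip(scaler.inverse_transform(y_hat), 0, None)  # like relu_mul
    print(median_abs_deviation((y_raw - y_hat2).ravel()))  # MAD in raw units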
@@ -660,6 +663,7 @@ def test_epoch_end(self, outputs):

         self.log('test/MSE', tensorboard_logs['test/MSE'].item(), on_epoch=True, logger=self.logger)
         self.log('test/MAE', tensorboard_logs['test/MAE'].item(), on_epoch=True, logger=self.logger)
+        self.log('test/MAD', tensorboard_logs['test/MAD'].item(), on_epoch=True, logger=self.logger)
         self.log('test/loss', avg_loss, on_epoch=True, logger=self.logger)

         self.df_obs = df_obs
@@ -1083,31 +1087,6 @@ def _mccr(x):
         return torch.mean(_mccr(input - target))


-class LogCoshLoss(nn.Module):
-    __constants__ = ['reduction']
-
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
-        """
-        Implement numerically stable log-cosh which is used in Keras
-
-        log(cosh(x)) = logaddexp(x, -x) - log(2)
-                     = abs(x) + log1p(exp(-2 * abs(x))) - log(2)
-
-        Reference:
-        * https://stackoverflow.com/a/57786270
-        """
-        # not to compute log(0), add 1e-24 (small value)
-        def _log_cosh(x):
-            return torch.abs(x) + \
-                torch.log1p(torch.exp(-2 * torch.abs(x))) + \
-                torch.log(torch.full_like(x, 2, dtype=x.dtype))
-
-        return torch.mean(_log_cosh(input - target))
-
-
 def relu_mul(x):
     """[fastest method](https://stackoverflow.com/a/32109519/743078)
     """