[devops] Upgrade to lightning 2.0 (#1514)
* relax torch to greater than 2

* upgrade lightning to 2.0, open version limits of other packages

* upgrade to python3.11

* re-relax python versions

* upgrade to lightning 2+

* fix toml file

* fix poetry

* added kaleido

* downgrade numpy

* fix linting

* added python 3.12

* clean up pyproject.toml dependencies

* update poetry lock

* changed `is True` comparison

* changed bool check

* uncomment test_glocal lines 152 onward

---------

Co-authored-by: MaiBe-ctrl <[email protected]>
Co-authored-by: Maisa Ben Salah <[email protected]>
3 people authored Jun 21, 2024
1 parent 0aafec9 commit 79a6907
Showing 10 changed files with 151 additions and 138 deletions.
neuralprophet/configure.py (4 changes: 2 additions & 2 deletions)

@@ -5,15 +5,15 @@
 import types
 from collections import OrderedDict
 from dataclasses import dataclass, field
-from typing import Callable, Iterable, List, Optional
+from typing import Callable, List, Optional
 from typing import OrderedDict as OrderedDictType
 from typing import Type, Union

 import numpy as np
 import pandas as pd
 import torch

-from neuralprophet import df_utils, np_types, utils, utils_torch
+from neuralprophet import df_utils, np_types, utils_torch
 from neuralprophet.custom_loss_metrics import PinballLoss
 from neuralprophet.hdays_utils import get_holidays_from_country
neuralprophet/custom_loss_metrics.py (14 changes: 7 additions & 7 deletions)

@@ -33,15 +33,15 @@ def forward(self, outputs, target):
         """
         target = target.repeat(1, 1, len(self.quantiles))  # increase the quantile dimension of the targets
         differences = target - outputs
-        base_losses = self.loss_func(outputs, target)  # dimensions - [n_batch, n_forecasts, no. of quantiles]
-        positive_losses = (
-            torch.tensor(self.quantiles, device=target.device).unsqueeze(dim=0).unsqueeze(dim=0) * base_losses
+        base_losses = self.loss_func(outputs, target).float()  # dimensions - [n_batch, n_forecasts, no. of quantiles]
+        quantiles_tensor = (
+            torch.tensor(self.quantiles, device=target.device, dtype=torch.float32).unsqueeze(dim=0).unsqueeze(dim=0)
         )
-        negative_losses = (
-            1 - torch.tensor(self.quantiles, device=target.device).unsqueeze(dim=0).unsqueeze(dim=0)
-        ) * base_losses
+        positive_losses = quantiles_tensor * base_losses
+        negative_losses = (1 - quantiles_tensor) * base_losses
+        differences = differences.float()
         pinball_losses = torch.where(differences >= 0, positive_losses, negative_losses)
-        multiplier = torch.ones(size=(1, 1, len(self.quantiles)), device=target.device)
+        multiplier = torch.ones(size=(1, 1, len(self.quantiles)), device=target.device, dtype=torch.float32)
         multiplier[:, :, 0] = 2
         pinball_losses = multiplier * pinball_losses  # double the loss for the median quantile
         return pinball_losses
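For context, a minimal standalone sketch of the arithmetic the reworked forward() performs, now kept in float32 throughout. The toy shapes and the elementwise L1 base loss are illustrative assumptions; the real class takes its loss_func and quantiles from its configuration.

import torch

quantiles = [0.5, 0.1, 0.9]                      # hypothetical quantile set; median first
outputs = torch.randn(4, 2, len(quantiles))      # [n_batch, n_forecasts, n_quantiles]
target = torch.randn(4, 2, 1).repeat(1, 1, len(quantiles))

differences = (target - outputs).float()
base_losses = torch.abs(differences)             # stand-in for self.loss_func(outputs, target)
quantiles_tensor = (
    torch.tensor(quantiles, dtype=torch.float32).unsqueeze(dim=0).unsqueeze(dim=0)
)
positive_losses = quantiles_tensor * base_losses          # applied where the model under-predicts
negative_losses = (1 - quantiles_tensor) * base_losses    # applied where the model over-predicts
pinball_losses = torch.where(differences >= 0, positive_losses, negative_losses)
multiplier = torch.ones(1, 1, len(quantiles), dtype=torch.float32)
multiplier[:, :, 0] = 2                          # double the loss for the median quantile
print(pinball_losses.mean(), (multiplier * pinball_losses).mean())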
neuralprophet/forecaster.py (14 changes: 9 additions & 5 deletions)

@@ -11,6 +11,7 @@
 import torch
 from matplotlib import pyplot
 from matplotlib.axes import Axes
+from pytorch_lightning.tuner.tuning import Tuner
 from torch.utils.data import DataLoader

 from neuralprophet import configure, df_utils, np_types, time_dataset, time_net, utils, utils_metrics

@@ -2756,6 +2757,8 @@ def _train(
         else:
             self.model = self._init_model()

+        self.model.train_loader = train_loader
+
         # Init the Trainer
         self.trainer, checkpoint_callback = utils.configure_trainer(
             config_train=self.config_train,

@@ -2780,8 +2783,9 @@
             # Set parameters for the learning rate finder
             self.config_train.set_lr_finder_args(dataset_size=dataset_size, num_batches=len(train_loader))
             # Find suitable learning rate
-            lr_finder = self.trainer.tuner.lr_find(
-                self.model,
+            tuner = Tuner(self.trainer)
+            lr_finder = tuner.lr_find(
+                model=self.model,
                 train_dataloaders=train_loader,
                 val_dataloaders=val_loader,
                 **self.config_train.lr_finder_args,

@@ -2802,8 +2806,9 @@
             # Set parameters for the learning rate finder
             self.config_train.set_lr_finder_args(dataset_size=dataset_size, num_batches=len(train_loader))
             # Find suitable learning rate
-            lr_finder = self.trainer.tuner.lr_find(
-                self.model,
+            tuner = Tuner(self.trainer)
+            lr_finder = tuner.lr_find(
+                model=self.model,
                 train_dataloaders=train_loader,
                 **self.config_train.lr_finder_args,
             )

@@ -2831,7 +2836,6 @@ def _train(

         if not metrics_enabled:
             return None
-
         # Return metrics collected in logger as dataframe
         metrics_df = pd.DataFrame(self.metrics_logger.history)
         return metrics_df
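These hunks follow the Lightning 2.x tuning API: trainer.tuner was removed, and lr_find now lives on a standalone Tuner wrapped around the trainer. A minimal sketch of that pattern outside NeuralProphet follows; the Trainer arguments and the function name are placeholders, not the project's configure_trainer() settings.

import pytorch_lightning as pl
from pytorch_lightning.tuner.tuning import Tuner


def find_learning_rate(model: pl.LightningModule, train_loader, val_loader=None) -> float:
    """Run the Lightning 2.x learning-rate finder and return its suggestion."""
    trainer = pl.Trainer(max_epochs=1, logger=False, enable_checkpointing=False)
    tuner = Tuner(trainer)  # 2.x replacement for the removed trainer.tuner attribute
    lr_finder = tuner.lr_find(
        model=model,
        train_dataloaders=train_loader,
        val_dataloaders=val_loader,
    )
    return lr_finder.suggestion()

The lr_finder_args set via set_lr_finder_args presumably map to lr_find options such as min_lr, max_lr, and num_training, which the 2.x signature still accepts.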
neuralprophet/logger.py (1 change: 1 addition & 0 deletions)

@@ -52,6 +52,7 @@ class ProgressBar(TQDMProgressBar):
     def __init__(self, *args, **kwargs):
         self.epochs = kwargs.pop("epochs")
         super().__init__(*args, **kwargs)
+        self.main_progress_bar = super().init_train_tqdm()

     def on_train_epoch_start(self, trainer: "pl.Trainer", *_) -> None:
         self.main_progress_bar.reset(self.epochs)
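A rough sketch of the pattern this touches: a TQDMProgressBar subclass that counts epochs and builds its own bar eagerly, since the 1.x main_progress_bar attribute appears to no longer be provided by the 2.x base class. The class name and the set_description line are illustrative additions, not taken from logger.py.

from pytorch_lightning.callbacks import TQDMProgressBar


class EpochProgressBar(TQDMProgressBar):
    def __init__(self, *args, **kwargs):
        self.epochs = kwargs.pop("epochs")  # total number of epochs to display
        super().__init__(*args, **kwargs)
        # Create the bar up front instead of relying on the attribute removed in 2.x.
        self.main_progress_bar = super().init_train_tqdm()

    def on_train_epoch_start(self, trainer, *_):
        self.main_progress_bar.reset(self.epochs)
        self.main_progress_bar.set_description(f"Epoch {trainer.current_epoch + 1}")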
neuralprophet/time_net.py (6 changes: 5 additions & 1 deletion)

@@ -795,7 +795,8 @@ def training_step(self, batch, batch_idx):
         scheduler.step()

         # Manually track the loss for the lr finder
-        self.trainer.fit_loop.running_loss.append(loss)
+        self.log("train_loss", loss, on_step=False, on_epoch=True, prog_bar=True, logger=True)
+        self.log("reg_loss", reg_loss, on_step=False, on_epoch=True, prog_bar=True, logger=True)

         # Metrics
         if self.metrics_enabled:

@@ -983,6 +984,9 @@ def denormalize(self, ts):
         ts = scale_y * ts + shift_y
         return ts

+    def train_dataloader(self):
+        return self.train_loader
+

 class FlatNet(nn.Module):
     """
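Taken together with the forecaster change, the pattern is: the 1.x trainer.fit_loop.running_loss bookkeeping is gone, so the module logs its losses through self.log, and it exposes the externally attached loader via the train_dataloader hook. A toy LightningModule sketch of those two hooks follows; it is not the real TimeNet, and the layer, loss, and optimizer are placeholders.

import pytorch_lightning as pl
import torch


class TinyModule(pl.LightningModule):
    def __init__(self, train_loader):
        super().__init__()
        self.layer = torch.nn.Linear(1, 1)
        self.train_loader = train_loader  # attached from outside, as _train() does via model.train_loader

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = torch.nn.functional.mse_loss(self.layer(x), y)
        # Logged per epoch for the progress bar and metrics logger, replacing the removed
        # running_loss bookkeeping.
        self.log("train_loss", loss, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)

    def train_dataloader(self):
        # Lets trainer.fit(model) or Tuner.lr_find(model) run without passing dataloaders explicitly.
        return self.train_loader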
neuralprophet/utils.py (7 changes: 1 addition & 6 deletions)

@@ -5,15 +5,14 @@
 import os
 import sys
 from collections import OrderedDict
-from typing import IO, TYPE_CHECKING, BinaryIO, Iterable, Optional, Union
+from typing import IO, TYPE_CHECKING, BinaryIO, Optional, Union

 import numpy as np
 import pandas as pd
 import pytorch_lightning as pl
 import torch

 from neuralprophet import utils_torch
-from neuralprophet.hdays_utils import get_country_holidays
 from neuralprophet.logger import ProgressBar

 if TYPE_CHECKING:

@@ -856,10 +855,6 @@ def configure_trainer(
     """
     config = config.copy()

-    # Enable Learning rate finder if not learning rate provided
-    if config_train.learning_rate is None:
-        config["auto_lr_find"] = True
-
     # Set max number of epochs
     if hasattr(config_train, "epochs"):
         if config_train.epochs is not None:
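The removed block reflects another Lightning 2.x change: the auto_lr_find Trainer argument no longer exists, so configure_trainer can no longer request lr finding through Trainer kwargs; the Tuner call in forecaster.py covers that case instead. A brief sketch of the kwargs-dict pattern under that assumption (the values are placeholders, not the project's defaults):

import pytorch_lightning as pl

config = {
    # Passing auto_lr_find=True here would raise a TypeError on Lightning 2.x,
    # which is why the removed branch above is gone.
    "max_epochs": 40,
    "accelerator": "auto",
    "enable_checkpointing": False,
    "logger": False,
}
trainer = pl.Trainer(**config)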