Commit e5db4bf: fix docs

AlexanderVNikitin committed Mar 14, 2024 (1 parent: c8bef73)

Showing 7 changed files with 48 additions and 17 deletions.
Empty file removed: docs/guides/resources.rst
docs/index.rst (1 addition, 1 deletion)

@@ -1,6 +1,6 @@
 :github_url: https://github.com/

-Time Series Simulator (TSGM) Official Documentation
+Time Series Generative Modeling (TSGM) Official Documentation
 ========================================

 Time Series Generative Modeling (TSGM) is a Python framework for time series data generation. It includes data-driven and model-based approaches to synthetic time-series generation. It uses both generative
tsgm/models/augmentations.py (3 additions, 3 deletions)

@@ -412,7 +412,7 @@ def generate(
             Initial time series to start from for the optimization process, with shape (original_size, d).
             In case y is given, the shape of initial_timeseries is assumed to be (n_classes, original_size, d)
         initial_labels: array or None (default: None)
-            TODO
+            Labels for samples from `initial_timeseries`
     Returns
     -------
         np.array of shape (n_samples, original_size, d) if y is None
@@ -437,8 +437,8 @@ def generate(

         y_new = []
         X_new = []
-        unique_lables = np.unique(initial_labels)
-        for i, label in enumerate(unique_lables):
+        unique_labels = np.unique(initial_labels)
+        for i, label in enumerate(unique_labels):
             logger.debug(f"DTWBA Class {label}...")
             cur_initial_timeseries = initial_timeseries[np.ravel(initial_labels) == label]
             n_samples_per_label = len(cur_initial_timeseries)
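For reference, the renamed variable drives a simple per-class split of the seed data. A standalone sketch of that grouping with toy data (not TSGM's API):

import numpy as np

# Toy stand-ins: 6 seed series of length 16 with 2 features, one label each.
initial_timeseries = np.random.rand(6, 16, 2)
initial_labels = np.array([[0], [0], [1], [1], [2], [2]])

unique_labels = np.unique(initial_labels)
for label in unique_labels:
    # np.ravel flattens the (n, 1) label array so the boolean mask lines up per sample.
    cur_initial_timeseries = initial_timeseries[np.ravel(initial_labels) == label]
    n_samples_per_label = len(cur_initial_timeseries)
    print(f"class {label}: {n_samples_per_label} seed series")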
tsgm/models/cgan.py (23 additions, 7 deletions)

@@ -85,8 +85,12 @@ def _get_random_vector_labels(self, batch_size: int, labels=None) -> tsgm.types.
     def train_step(self, data: tsgm.types.Tensor) -> T.Dict[str, float]:
         """
         Performs a training step using a batch of data, stored in data.
+        :param data: A batch of data in a format batch_size x seq_len x feat_dim
+        :type data: tsgm.types.Tensor
+        :returns: A dictionary with generator (key "g_loss") and discriminator (key "d_loss") losses
+        :rtype: T.Dict[str, float]
         """
         real_data = data
         batch_size = tf.shape(real_data)[0]
@@ -140,11 +144,20 @@ def generate(self, num: int) -> tsgm.types.Tensor:

         :param num: the number of samples to be generated.
         :type num: int
+        :returns: Generated samples
+        :rtype: tsgm.types.Tensor
         """
         random_vector_labels = self._get_random_vector_labels(batch_size=num)
         return self.generator(random_vector_labels)

     def clone(self) -> "GAN":
+        """
+        Clones GAN object
+
+        :returns: The exact copy of the object
+        :rtype: "GAN"
+        """
         copy_model = GAN(self.discriminator, self.generator, latent_dim=self.latent_dim)
         copy_model = copy_model.set_weights(self.get_weights())
         return copy_model
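One behavioral note on the hunk above: keras.Model.set_weights returns None, so reassigning its result means clone() returns None rather than the copy. A corrected sketch (not part of this commit) would mutate in place and return the copy:

def clone(self) -> "GAN":
    copy_model = GAN(self.discriminator, self.generator, latent_dim=self.latent_dim)
    copy_model.set_weights(self.get_weights())  # set_weights mutates in place and returns None
    return copy_model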
@@ -181,6 +194,7 @@ def __init__(self, discriminator: keras.Model, generator: keras.Model, latent_di

     def metrics(self) -> T.List:
         """
         :returns: A list of metrics trackers (e.g., generator's loss and discriminator's loss).
+        :rtype: T.List
         """
         return [self.gen_loss_tracker, self.disc_loss_tracker]

@@ -233,14 +247,13 @@ def _get_output_shape(self, labels: tsgm.types.Tensor) -> int:

     def train_step(self, data: T.Tuple) -> T.Dict[str, float]:
         """
-        Compiles the generator and discriminator models.
-        :param d_optimizer: An optimizer for the GAN's discriminator.
-        :type d_optimizer: keras.Model
-        :param g_optimizer: An optimizer for the GAN's generator.
-        :type generator: keras.Model
-        :param loss_fn: Loss function.
-        :type loss_fn: keras.losses.Loss
+        Performs a training step using a batch of data, stored in data.
+        :param data: A batch of data in a format batch_size x seq_len x feat_dim
+        :type data: tsgm.types.Tensor
+        :returns: A dictionary with generator (key "g_loss") and discriminator (key "d_loss") losses
+        :rtype: T.Dict[str, float]
         """
         real_ts, labels = data
         output_dim = self._get_output_shape(labels)
@@ -319,6 +332,9 @@ def generate(self, labels: tsgm.types.Tensor) -> tsgm.types.Tensor:

         :param labels: labels for the samples to be generated.
         :type labels: tsgm.types.Tensor
+        :returns: generated samples
+        :rtype: tsgm.types.Tensor
         """
         batch_size = labels.shape[0]
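Taken together, the docstrings added in this file pin down one contract: train_step consumes a batch of shape batch_size x seq_len x feat_dim and returns a dict with "d_loss" and "g_loss". A self-contained toy model honoring that contract (stand-in architectures and losses, not TSGM's implementation):

import tensorflow as tf
from tensorflow import keras

latent_dim, seq_len, feat_dim = 8, 16, 2
bce = keras.losses.BinaryCrossentropy(from_logits=True)

class ToyGAN(keras.Model):
    def __init__(self, discriminator, generator, latent_dim):
        super().__init__()
        self.discriminator, self.generator, self.latent_dim = discriminator, generator, latent_dim
        self.d_opt, self.g_opt = keras.optimizers.Adam(), keras.optimizers.Adam()

    def train_step(self, data):
        batch_size = tf.shape(data)[0]
        z = tf.random.normal((batch_size, self.latent_dim))
        with tf.GradientTape() as d_tape, tf.GradientTape() as g_tape:
            fake = self.generator(z)
            d_real = self.discriminator(data)
            d_fake = self.discriminator(fake)
            # Discriminator pushes real -> 1 and fake -> 0; the generator tries to fool it.
            d_loss = bce(tf.ones_like(d_real), d_real) + bce(tf.zeros_like(d_fake), d_fake)
            g_loss = bce(tf.ones_like(d_fake), d_fake)
        self.d_opt.apply_gradients(zip(d_tape.gradient(d_loss, self.discriminator.trainable_variables),
                                       self.discriminator.trainable_variables))
        self.g_opt.apply_gradients(zip(g_tape.gradient(g_loss, self.generator.trainable_variables),
                                       self.generator.trainable_variables))
        return {"d_loss": d_loss, "g_loss": g_loss}

generator = keras.Sequential([keras.layers.Dense(seq_len * feat_dim),
                              keras.layers.Reshape((seq_len, feat_dim))])
discriminator = keras.Sequential([keras.layers.Flatten(),
                                  keras.layers.Dense(1)])

gan = ToyGAN(discriminator, generator, latent_dim)
losses = gan.train_step(tf.random.normal((4, seq_len, feat_dim)))  # {"d_loss": ..., "g_loss": ...}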
tsgm/models/cvae.py (17 additions, 3 deletions)

@@ -46,8 +46,12 @@ def metrics(self) -> T.List:
     def call(self, X: tsgm.types.Tensor) -> tsgm.types.Tensor:
         """
         Encodes and decodes time series dataset X.
+        :param X: Time series dataset.
+        :type X: tsgm.types.Tensor
+        :returns: Generated samples
+        :rtype: tsgm.types.Tensor
         """
         z_mean, _, _ = self.encoder(X)
         x_decoded = self.decoder(z_mean)
@@ -64,10 +68,12 @@ def _get_reconstruction_loss(self, X: tsgm.types.Tensor, Xr: tsgm.types.Tensor)

     def train_step(self, data: tsgm.types.Tensor) -> T.Dict:
         """
         Performs a training step using a batch of data, stored in data.
+        :param data: A batch of data in a format batch_size x seq_len x feat_dim
+        :type data: tsgm.types.Tensor
-        :returns a dict with losses:
+        :returns: A dict with losses
+        :rtype: T.Dict
         """
         with tf.GradientTape() as tape:
             z_mean, z_log_var, z = self.encoder(data)
@@ -94,7 +100,8 @@ def generate(self, n: int) -> tsgm.types.Tensor:

         :param n: the number of samples to be generated.
         :type n: int
-        :returns: a tensor with generated samples.
+        :returns: A tensor with generated samples.
+        :rtype: tsgm.types.Tensor
         """
         z = tf.random.normal((n, self.latent_dim))
         return self.decoder(z)
@@ -135,6 +142,7 @@ def generate(self, labels: tsgm.types.Tensor) -> T.Tuple[tsgm.types.Tensor, tsgm

         :type labels: tsgm.types.Tensor
         :returns: a tuple of synthetically generated data and labels.
+        :rtype: T.Tuple[tsgm.types.Tensor, tsgm.types.Tensor]
         """
         batch_size = tf.shape(labels)[0]
         z = tf.random.normal((batch_size, self._seq_len, self.latent_dim), dtype=labels.dtype)
@@ -144,8 +152,12 @@ def call(self, data: tsgm.types.Tensor) -> tsgm.types.Tensor:

     def call(self, data: tsgm.types.Tensor) -> tsgm.types.Tensor:
         """
         Encodes and decodes time series dataset X.
+        :param data: A tuple of a time series dataset and its labels.
+        :type data: tsgm.types.Tensor
+        :returns: Generated samples
+        :rtype: tsgm.types.Tensor
         """
         X, labels = data
         encoder_input = self._get_encoder_input(X, labels)
@@ -180,10 +192,12 @@ def _get_decoder_input(self, z: tsgm.types.Tensor, labels: tsgm.types.Tensor) ->

     def train_step(self, data: tsgm.types.Tensor) -> T.Dict[str, float]:
         """
         Performs a training step using a batch of data, stored in data.
+        :param data: A batch of data in a format batch_size x seq_len x feat_dim
+        :type data: tsgm.types.Tensor
-        :returns a dict with losses:
+        :returns: A dict with losses
+        :rtype: T.Dict[str, float]
         """
         X, labels = data
         with tf.GradientTape() as tape:
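The generate(n) method above follows the usual VAE recipe: draw n latent codes from the standard normal prior and decode them. A minimal illustration with a stand-in decoder (not TSGM's):

import tensorflow as tf
from tensorflow import keras

latent_dim, seq_len, feat_dim = 8, 16, 2
decoder = keras.Sequential([keras.layers.Dense(seq_len * feat_dim),
                            keras.layers.Reshape((seq_len, feat_dim))])

n = 5
z = tf.random.normal((n, latent_dim))  # n samples from the N(0, I) prior
synthetic = decoder(z)                 # shape (n, seq_len, feat_dim)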
tsgm/optimization/abc.py (3 additions, 2 deletions)

@@ -56,9 +56,10 @@ def _calc_statistics(self, data: tsgm.dataset.Dataset) -> tsgm.types.Tensor:
     def sample_parameters(self, n_samples: int) -> T.List:
         """
         Samples parameters from the rejection sampler.
         :param n_samples: Number of samples
         :type n_samples: int
-        :return: A list of samples. Each sample is represent as dict.
+        :returns: A list of samples. Each sample is represented as a dict.
+        :rtype: T.List[T.Dict]
         """
         cur_sim = self._simulator.clone()
@@ -78,7 +79,7 @@ def sample_parameters(self, n_samples: int) -> T.List:
         return samples


-def prior_samples(priors: T.Dict, params: T.List) -> T.List:
+def prior_samples(priors: T.Dict, params: T.List) -> T.Dict:
     """
     Generate prior samples for the specified parameters.
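The corrected annotation says prior_samples returns a dict rather than a list. A hedged sketch of a function with that shape, assuming the prior objects expose a sample() method (an assumed interface, not TSGM's documented API):

import typing as T

def prior_samples(priors: T.Dict, params: T.List) -> T.Dict:
    # One draw per requested parameter name.
    return {param: priors[param].sample() for param in params if param in priors}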
tsgm/version.py (1 addition, 1 deletion; the two lines are textually identical, so the change is whitespace-only, likely a trailing newline)

@@ -1 +1 @@
-__version__ = "0.0.4"
+__version__ = "0.0.4"
