Unverified commit ba0e370d, authored by Kashif Rasul, committed by GitHub

[time series] updated expected values for integration test. (#21762)

* updated expected

* prediction_length fix

* prediction_length default value

* default prediction_length 24

* revert back prediction_length default

* move prediction_length test
parent 440f3975
@@ -44,7 +44,8 @@ class TimeSeriesTransformerConfig(PretrainedConfig):
     Args:
         prediction_length (`int`):
-            The prediction length for the decoder. In other words, the prediction horizon of the model.
+            The prediction length for the decoder. In other words, the prediction horizon of the model. This value is
+            typically dictated by the dataset, and we recommend setting it appropriately.
         context_length (`int`, *optional*, defaults to `prediction_length`):
             The context length for the encoder. If `None`, the context length will be the same as the
             `prediction_length`.
@@ -60,8 +61,8 @@ class TimeSeriesTransformerConfig(PretrainedConfig):
             Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
             scaler is set to "mean".
         lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
-            The lags of the input time series as covariates often dictated by the frequency. Default is `[1, 2, 3, 4,
-            5, 6, 7]`.
+            The lags of the input time series as covariates, often dictated by the frequency of the data. Defaults to
+            `[1, 2, 3, 4, 5, 6, 7]`, but we recommend changing it based on the dataset.
         num_time_features (`int`, *optional*, defaults to 0):
             The number of time features in the input time series.
         num_dynamic_real_features (`int`, *optional*, defaults to 0):
@@ -117,8 +118,8 @@ class TimeSeriesTransformerConfig(PretrainedConfig):
     ```python
     >>> from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel

-    >>> # Initializing a default Time Series Transformer configuration
-    >>> configuration = TimeSeriesTransformerConfig()
+    >>> # Initializing a Time Series Transformer configuration with 12 time steps for prediction
+    >>> configuration = TimeSeriesTransformerConfig(prediction_length=12)

     >>> # Randomly initializing a model (with random weights) from the configuration
     >>> model = TimeSeriesTransformerModel(configuration)
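To make the docstring's recommendation concrete, here is a minimal sketch of configuring the model for a hypothetical hourly dataset. The specific horizon, context window, and lag values below are illustrative assumptions, not library defaults:

```python
from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel

# Hypothetical hourly dataset: predict the next 24 steps (one day) and pick
# lags that reflect the data's frequency. These values are assumptions for
# illustration, not defaults.
configuration = TimeSeriesTransformerConfig(
    prediction_length=24,  # horizon dictated by the dataset
    context_length=48,  # encoder window; falls back to prediction_length if unset
    lags_sequence=[1, 2, 3, 24, 48, 168],  # recent steps, one day, two days, one week
)

model = TimeSeriesTransformerModel(configuration)
```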
...
@@ -1176,6 +1176,8 @@ class TimeSeriesTransformerEncoder(TimeSeriesTransformerPreTrainedModel):
         self.dropout = config.dropout
         self.layerdrop = config.encoder_layerdrop
+        if config.prediction_length is None:
+            raise ValueError("The `prediction_length` config needs to be specified.")
         self.value_embedding = ValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
         self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding(
@@ -1311,6 +1313,8 @@ class TimeSeriesTransformerDecoder(TimeSeriesTransformerPreTrainedModel):
         super().__init__(config)
         self.dropout = config.dropout
         self.layerdrop = config.decoder_layerdrop
+        if config.prediction_length is None:
+            raise ValueError("The `prediction_length` config needs to be specified.")
         self.value_embedding = ValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
         self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding(
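The new guard in both the encoder and decoder turns a missing `prediction_length` into an immediate, explicit failure at construction time. A minimal sketch of the resulting behavior, assuming the config leaves `prediction_length` as `None` when it is not passed:

```python
from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel

# prediction_length left unset; assumed to default to None.
configuration = TimeSeriesTransformerConfig()

try:
    model = TimeSeriesTransformerModel(configuration)
except ValueError as err:
    # Fails fast with a clear message instead of building
    # mis-sized embeddings and erroring later.
    print(err)  # The `prediction_length` config needs to be specified.
```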
...
@@ -401,7 +401,7 @@ class TimeSeriesTransformerModelIntegrationTests(unittest.TestCase):
         self.assertEqual(output.shape, expected_shape)
         expected_slice = torch.tensor(
-            [[-0.6322, -1.5771, -0.9340], [-0.1011, -1.0263, -0.7208], [0.4979, -0.6487, -0.7189]], device=torch_device
+            [[0.8196, -1.5131, 1.4620], [1.1268, -1.3238, 1.5997], [1.5098, -1.0715, 1.7359]], device=torch_device
         )
         self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
@@ -423,7 +423,7 @@ class TimeSeriesTransformerModelIntegrationTests(unittest.TestCase):
         self.assertEqual(output.shape, expected_shape)
         expected_slice = torch.tensor(
-            [[0.8177, -1.7989, -0.3127], [1.6964, -1.0607, -0.1749], [1.8395, 0.1110, 0.0263]], device=torch_device
+            [[-1.2957, -1.0280, -0.6045], [-0.7017, -0.8193, -0.3717], [-1.0449, -0.8149, 0.1405]], device=torch_device
         )
         self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
@@ -444,6 +444,6 @@ class TimeSeriesTransformerModelIntegrationTests(unittest.TestCase):
         expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
         self.assertEqual(outputs.sequences.shape, expected_shape)
-        expected_slice = torch.tensor([3883.5037, 4630.2251, 7562.1338], device=torch_device)
+        expected_slice = torch.tensor([2825.2749, 3584.9207, 6763.9951], device=torch_device)
         mean_prediction = outputs.sequences.mean(dim=1)
         self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
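The updated numbers follow the usual integration-test idiom: compare a small slice of the model output against hard-coded values within a tight absolute tolerance, and compare sampled predictions only loosely. A self-contained sketch of that idiom, where the tensors are stand-ins for real model outputs and the `TOLERANCE` value is an assumption:

```python
import torch

# Stand-in for a slice of a deterministic forward-pass output; in the real
# test this comes from running the model on a fixed batch.
output_slice = torch.tensor([[0.8196, -1.5131, 1.4620]])
expected_slice = torch.tensor([[0.8196, -1.5131, 1.4620]])

TOLERANCE = 1e-4  # assumed value; element-wise absolute tolerance
assert torch.allclose(output_slice, expected_slice, atol=TOLERANCE)

# Sampled predictions are averaged over parallel samples and checked with a
# loose relative tolerance, since generation is stochastic even when seeded.
mean_prediction = torch.tensor([2825.2749, 3584.9207, 6763.9951])
expected = torch.tensor([2825.2749, 3584.9207, 6763.9951])
assert torch.allclose(mean_prediction, expected, rtol=1e-1)
```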