Datasets

Modalities: Tabular, Text
Formats: parquet
Languages: English
Size: < 1K
Libraries: Datasets, pandas
marcotet committed · verified
Commit 8ac49a8 · 1 Parent(s): e2d40ed

Upload compute_rcrps_with_hf_dataset.py

Files changed (1):
  compute_rcrps_with_hf_dataset.py  +533 -0
compute_rcrps_with_hf_dataset.py ADDED
@@ -0,0 +1,533 @@
"""
Copyright 2025 ServiceNow
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

# This code is an adaptation of
# https://github.com/ServiceNow/context-is-key-forecasting/blob/main/cik_benchmark/metrics/roi_metric.py
# to make it convenient to use with the Hugging Face version of the Context-is-Key benchmark.
# Please see the __main__ section for an example of how to use it.

import numpy as np
import pandas as pd
from io import StringIO
from datasets import Dataset
from fractions import Fraction
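
# Assumed runtime dependencies (not pinned anywhere in this file):
#     pip install numpy pandas datasets
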

def crps(
    target: np.ndarray,
    samples: np.ndarray,
) -> np.ndarray:
    """
    Compute the CRPS using the probability weighted moment form.
    See Eq ePWM from "Estimation of the Continuous Ranked Probability Score with
    Limited Information and Applications to Ensemble Weather Forecasts"
    https://link.springer.com/article/10.1007/s11004-017-9709-7

    This is an exact O(n log n) per-variable implementation, without estimation bias.

    Parameters:
    -----------
    target: np.ndarray
        The target values. (variable dimensions)
    samples: np.ndarray
        The forecast values. (n_samples, variable dimensions)

    Returns:
    --------
    crps: np.ndarray
        The CRPS for each of the (variable dimensions)
    """
    assert (
        target.shape == samples.shape[1:]
    ), f"shapes mismatch between: {target.shape} and {samples.shape}"

    num_samples = samples.shape[0]
    num_dims = samples.ndim
    sorted_samples = np.sort(samples, axis=0)

    abs_diff = (
        np.abs(np.expand_dims(target, axis=0) - sorted_samples).sum(axis=0)
        / num_samples
    )

    beta0 = sorted_samples.sum(axis=0) / num_samples

    # An array from 0 to num_samples - 1, expanded to allow broadcasting over the variable dimensions
    i_array = np.expand_dims(np.arange(num_samples), axis=tuple(range(1, num_dims)))
    beta1 = (i_array * sorted_samples).sum(axis=0) / (num_samples * (num_samples - 1))

    return abs_diff + beta0 - 2 * beta1

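
# Hypothetical sanity check, added for illustration (helpers named "_demo_*"
# are not part of the benchmark code): crps() should agree with the pairwise
# formula quoted later in weighted_sum_crps_variance(),
#     CRPS(X, y) ~ (1/n) sum_i |x_i - y| - 1/(2n(n-1)) sum_{i,i'} |x_i - x_i'|.
def _demo_crps_matches_pairwise_formula() -> None:
    rng = np.random.default_rng(0)
    y = np.array([0.3])
    x = rng.normal(size=(50, 1))
    n = x.shape[0]
    pairwise = np.abs(x[:, 0] - y[0]).mean() - np.abs(
        x[:, 0][:, None] - x[:, 0][None, :]
    ).sum() / (2 * n * (n - 1))
    assert np.isclose(crps(y, x)[0], pairwise)
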

def _crps_ea_Xy_eb_Xy(Xa, ya, Xb, yb):
    """
    Unbiased estimate of:
    E|Xa - ya| * E|Xb' - yb|
    """
    N = len(Xa)
    product = np.abs(Xa[:, None] - ya) * np.abs(Xb[None, :] - yb)  # i, j
    i, j = np.diag_indices(N)
    product[i, j] = 0
    result = product.sum()
    return result / (N * (N - 1))


def _crps_ea_XX_eb_XX(Xa, ya, Xb, yb):
    """
    Unbiased estimate of:
    E|Xa - Xa'| * E|Xb'' - Xb'''|
    """
    N = len(Xa)

    # We want to compute:
    # sum_i≠j≠k≠l |Xa_i - Xa_j| |Xb_k - Xb_l|
    # Instead of doing a sum over i, j, k, l all different,
    # we take the sum over all i, j, k, l (which is the product between a sum over i, j and a sum over k, l),
    # then subtract the collisions, ignoring those between i and j and those between k and l, since those
    # automatically give zero.

    sum_ea_XX = np.abs(Xa[:, None] - Xa[None, :]).sum()
    sum_eb_XX = np.abs(Xb[:, None] - Xb[None, :]).sum()

    # Single conflicts: either i=k, i=l, j=k, or j=l
    # By symmetry, we are left with: 4 sum_i≠j≠k |Xa_i - Xa_j| |Xb_i - Xb_k|
    left = np.abs(Xa[:, None, None] - Xa[None, :, None])  # i, j, k
    right = np.abs(Xb[:, None, None] - Xb[None, None, :])  # i, j, k
    product = left * right
    j, k = np.diag_indices(N)
    product[:, j, k] = 0
    sum_single_conflict = product.sum()

    # Double conflicts: either i=k and j=l, or i=l and j=k
    # By symmetry, we are left with: 2 sum_i≠j |Xa_i - Xa_j| |Xb_i - Xb_j|
    left = np.abs(Xa[:, None] - Xa[None, :])  # i, j
    right = np.abs(Xb[:, None] - Xb[None, :])  # i, j
    product = left * right
    sum_double_conflict = product.sum()

    result = sum_ea_XX * sum_eb_XX - 4 * sum_single_conflict - 2 * sum_double_conflict
    return result / (N * (N - 1) * (N - 2) * (N - 3))

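
# Hypothetical brute-force reference, added for illustration only: a direct
# O(N^4) sum over all distinct index quadruples. Useful to validate the
# collision-subtraction logic above on small arrays (it should match
# _crps_ea_XX_eb_XX up to floating-point error).
def _demo_ea_XX_eb_XX_bruteforce(Xa: np.ndarray, Xb: np.ndarray) -> float:
    N = len(Xa)
    total = 0.0
    for i in range(N):
        for j in range(N):
            for k in range(N):
                for l in range(N):
                    if len({i, j, k, l}) == 4:  # all four indices distinct
                        total += abs(Xa[i] - Xa[j]) * abs(Xb[k] - Xb[l])
    return total / (N * (N - 1) * (N - 2) * (N - 3))
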

def _crps_ea_Xy_eb_XX(Xa, ya, Xb, yb):
    """
    Unbiased estimate of:
    E|Xa - ya| * E|Xb' - Xb''|
    """
    N = len(Xa)

    left = np.abs(Xa[:, None, None] - ya)  # i, j, k
    right = np.abs(Xb[None, :, None] - Xb[None, None, :])  # i, j, k
    product = left * right
    i, j = np.diag_indices(N)
    product[i, j, :] = 0
    i, k = np.diag_indices(N)
    product[i, :, k] = 0
    result = product.sum()
    return result / (N * (N - 1) * (N - 2))


def _crps_f_Xy(Xa, ya, Xb, yb):
    """
    Unbiased estimate of:
    E(|Xa - ya| * |Xb - yb|)
    """
    N = len(Xa)
    product = np.abs(Xa - ya) * np.abs(Xb - yb)  # i
    result = product.sum()
    return result / N


def _crps_f_XXXy(Xa, ya, Xb, yb):
    """
    Unbiased estimate of:
    E(|Xa - Xa'| * |Xb - yb|)
    """
    N = len(Xa)
    left = np.abs(Xa[:, None] - Xa[None, :])  # i, j
    right = np.abs(Xb[:, None] - yb)  # i, j
    product = left * right
    result = product.sum()
    return result / (N * (N - 1))


def _crps_f_XX(Xa, ya, Xb, yb):
    """
    Unbiased estimate of:
    E(|Xa - Xa'| * |Xb - Xb'|)
    """
    N = len(Xa)
    left = np.abs(Xa[:, None] - Xa[None, :])  # i, j
    right = np.abs(Xb[:, None] - Xb[None, :])  # i, j
    product = left * right
    result = product.sum()
    return result / (N * (N - 1))


def _crps_f_XXXX(Xa, ya, Xb, yb):
    """
    Unbiased estimate of:
    E(|Xa - Xa'| * |Xb - Xb''|)
    """
    N = len(Xa)
    left = np.abs(Xa[:, None, None] - Xa[None, :, None])  # i, j, k
    right = np.abs(Xb[:, None, None] - Xb[None, None, :])  # i, j, k
    product = left * right
    j, k = np.diag_indices(N)
    product[:, j, k] = 0
    result = product.sum()
    return result / (N * (N - 1) * (N - 2))


def crps_covariance(
    Xa: np.ndarray,
    ya: float,
    Xb: np.ndarray,
    yb: float,
) -> float:
    """
    Unbiased estimate of the covariance between the CRPS of two correlated random variables.
    If Xa == Xb and ya == yb, returns the variance of the CRPS instead.

    Parameters:
    -----------
    Xa: np.ndarray
        Samples from a forecast for the first variable. (n_samples)
    ya: float
        The ground-truth value for the first variable.
    Xb: np.ndarray
        Samples from a forecast for the second variable. (n_samples)
    yb: float
        The ground-truth value for the second variable.

    Returns:
    --------
    covariance: float
        The covariance between the CRPS estimators.
    """
    N = len(Xa)

    ea_Xy_eb_Xy = _crps_ea_Xy_eb_Xy(Xa, ya, Xb, yb)
    ea_Xy_eb_XX = _crps_ea_Xy_eb_XX(Xa, ya, Xb, yb)
    ea_XX_eb_Xy = _crps_ea_Xy_eb_XX(Xb, yb, Xa, ya)
    ea_XX_eb_XX = _crps_ea_XX_eb_XX(Xa, ya, Xb, yb)

    f_Xy = _crps_f_Xy(Xa, ya, Xb, yb)
    f_XXXy = _crps_f_XXXy(Xa, ya, Xb, yb)
    f_XyXX = _crps_f_XXXy(Xb, yb, Xa, ya)
    f_XX = _crps_f_XX(Xa, ya, Xb, yb)
    f_XXXX = _crps_f_XXXX(Xa, ya, Xb, yb)

    return (
        -(1 / N) * ea_Xy_eb_Xy
        + (1 / N) * ea_Xy_eb_XX
        + (1 / N) * ea_XX_eb_Xy
        - ((2 * N - 3) / (2 * N * (N - 1))) * ea_XX_eb_XX
        + (1 / N) * f_Xy
        - (1 / N) * f_XXXy
        - (1 / N) * f_XyXX
        + (1 / (2 * N * (N - 1))) * f_XX
        + ((N - 2) / (N * (N - 1))) * f_XXXX
    )

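
# Hypothetical Monte-Carlo check, added for illustration only: with Xa == Xb
# and ya == yb, crps_covariance() estimates the sampling variance of the
# crps() estimator itself, which can be approximated empirically by
# re-estimating the CRPS over many independent forecast draws.
def _demo_crps_variance(n_samples: int = 40, n_repeats: int = 2000) -> None:
    rng = np.random.default_rng(0)
    y = 0.25
    estimates = [
        crps(np.array([y]), rng.normal(size=(n_samples, 1)))[0]
        for _ in range(n_repeats)
    ]
    x = rng.normal(size=n_samples)  # one draw, fed to the unbiased estimator
    print("empirical variance :", np.var(estimates))
    print("estimated variance :", crps_covariance(x, y, x, y))
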

def weighted_sum_crps_variance(
    target: np.ndarray,
    samples: np.ndarray,
    weights: np.ndarray,
) -> float:
    """
    Unbiased estimator of the variance of the numerical estimate of the
    given weighted sum of CRPS values.

    This implementation assumes that the univariate CRPS is estimated using:
    CRPS(X, y) ~ (1 / n) * sum_i |x_i - y| - 1 / (2 * n * (n-1)) * sum_i,i' |x_i - x_i'|.
    This formula gives the same result as the one used in the crps() implementation above.

    Note that this is a heavy computation, being O(k^2 n^3) with k variables and n samples.
    Also, while it is unbiased, it is not guaranteed to be >= 0.

    Parameters:
    -----------
    target: np.ndarray
        The target values: y in the above formula. (k variables)
    samples: np.ndarray
        The forecast values: X in the above formula. (n samples, k variables)
    weights: np.ndarray
        The weight given to the CRPS of each variable. (k variables)

    Returns:
    --------
    variance: float
        The variance of the weighted sum of the CRPS estimators.
    """
    assert len(target.shape) == 1
    assert len(samples.shape) == 2
    assert len(weights.shape) == 1
    assert target.shape[0] == samples.shape[1] == weights.shape[0]

    s = 0.0

    for i in range(target.shape[0]):
        for j in range(i, target.shape[0]):
            Xa = samples[:, i]
            Xb = samples[:, j]
            ya = target[i]
            yb = target[j]

            if i == j:
                s += weights[i] * weights[j] * crps_covariance(Xa, ya, Xb, yb)
            else:
                # Multiply by 2 since we would get the same result by switching i and j
                s += 2 * weights[i] * weights[j] * crps_covariance(Xa, ya, Xb, yb)

    return s

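
# Hypothetical usage sketch, added for illustration only: the variance of an
# equally weighted mean of per-timestep CRPS values (k variables, n samples).
def _demo_weighted_sum_crps_variance() -> None:
    rng = np.random.default_rng(0)
    k, n = 3, 30
    target = rng.normal(size=k)
    samples = rng.normal(size=(n, k))
    weights = np.full(k, 1.0 / k)  # plain average over the k variables
    print(weighted_sum_crps_variance(target, samples, weights))
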

def mean_crps(target, samples):
    """
    The mean of the CRPS over all variables.
    """
    if target.size > 0:
        return crps(target, samples).mean()
    else:
        raise RuntimeError(
            f"CRPS received an empty target. Shapes = {target.shape} and {samples.shape}"
        )


def compute_constraint_violation(
    entry: dict,
    samples: np.ndarray,
    scaling: float,
) -> np.ndarray:
    """
    Compute the scaled constraint violation of each sample, averaged over
    timesteps. Returns one value per sample. (n_samples)
    """
    violation = 0.0
    scaled_samples = scaling * samples

    # Min constraint
    scaled_threshold = scaling * entry["constraint_min"]
    violation += (scaled_threshold - scaled_samples).clip(min=0).mean(axis=1)

    # Max constraint
    scaled_threshold = scaling * entry["constraint_max"]
    violation += (scaled_samples - scaled_threshold).clip(min=0).mean(axis=1)

    # Variable max constraint
    if len(entry["constraint_variable_max_index"]) > 0:
        indexed_samples = scaled_samples[:, entry["constraint_variable_max_index"]]
        scaled_thresholds = scaling * np.array(entry["constraint_variable_max_values"])
        violation += (
            (indexed_samples - scaled_thresholds[None, :]).clip(min=0).mean(axis=1)
        )

    return violation

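
# Hypothetical example, added for illustration only: the constraint fields
# read above, on a single 3-timestep sample. The min constraint is violated
# at the first timestep and the max constraint at the last one, so the
# violation is (1 + 0 + 0)/3 + (0 + 0 + 1)/3 = 2/3.
def _demo_constraint_violation() -> None:
    entry = {
        "constraint_min": 0.0,
        "constraint_max": 10.0,
        "constraint_variable_max_index": [],
        "constraint_variable_max_values": [],
    }
    samples = np.array([[-1.0, 5.0, 11.0]])  # (n_samples=1, n_timesteps=3)
    print(compute_constraint_violation(entry, samples, scaling=1.0))  # [0.666...]
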

def roi_crps(
    entry: dict,
    forecast: np.ndarray,
) -> dict[str, float]:
    """
    Compute the Region-of-Interest CRPS for a single entry of the Context-is-Key
    Hugging Face dataset, for the given forecast.

    Parameters:
    -----------
    entry: dict
        A dictionary containing a single entry of the Context-is-Key Hugging Face dataset.
    forecast: np.ndarray
        The forecast values. (n_samples, n_timesteps)

    Returns:
    --------
    result: dict[str, float]
        A dictionary containing the following entries:
        "metric": the final metric.
        "raw_metric": the metric before any final transformation (identical to "metric" here).
        "scaling": the scaling factor applied to the CRPS and the violations.
        "crps": the weighted CRPS.
        "roi_crps": the CRPS only for the region of interest.
        "non_roi_crps": the CRPS only for the forecast not in the region of interest.
        "standard_crps": the unweighted CRPS over all timesteps.
        "num_roi_timesteps": the number of timesteps in the region of interest.
        "num_non_roi_timesteps": the number of timesteps outside the region of interest.
        "violation_mean": the average constraint violation over the samples.
        "violation_crps": the CRPS of the constraint violation.
        "variance": an unbiased estimate of the variance of the metric.
    """
    future_time = pd.read_json(StringIO(entry["future_time"]))
    target = future_time[future_time.columns[-1]].to_numpy()

    assert (
        future_time.shape[0] == forecast.shape[1]
    ), "Incorrect number of timesteps in forecast"

    variance_target = target.to_numpy() if isinstance(target, pd.Series) else target
    variance_forecast = forecast

    if entry["region_of_interest"]:
        roi_mask = np.zeros(forecast.shape[1], dtype=bool)
        for i in entry["region_of_interest"]:
            roi_mask[i] = True

        roi_crps = mean_crps(target=target[roi_mask], samples=forecast[:, roi_mask])
        non_roi_crps = mean_crps(
            target=target[~roi_mask], samples=forecast[:, ~roi_mask]
        )
        crps_value = 0.5 * roi_crps + 0.5 * non_roi_crps
        standard_crps = mean_crps(target=target, samples=forecast)
        num_roi_timesteps = roi_mask.sum()
        num_non_roi_timesteps = (~roi_mask).sum()
        variance_weights = entry["metric_scaling"] * (
            0.5 * roi_mask / num_roi_timesteps
            + (1 - 0.5) * ~roi_mask / num_non_roi_timesteps
        )
    else:
        crps_value = mean_crps(target=target, samples=forecast)
        # These are only used in the reporting
        roi_crps = crps_value
        non_roi_crps = crps_value
        standard_crps = crps_value
        num_roi_timesteps = len(target)
        num_non_roi_timesteps = 0
        variance_weights = np.full(
            target.shape, fill_value=entry["metric_scaling"] / len(target)
        )

    violation_amount = compute_constraint_violation(
        entry, samples=forecast, scaling=entry["metric_scaling"]
    )
    violation_func = 10.0 * violation_amount

    # The target is set to zero, since we make sure that the ground truth always satisfies the constraints.
    # The crps code assumes multivariate input, so add a dummy dimension.
    violation_crps = crps(target=np.zeros(1), samples=violation_func[:, None])[0]

    variance_target = np.concatenate((variance_target, np.zeros(1)), axis=0)
    variance_forecast = np.concatenate(
        (variance_forecast, violation_func[:, None]), axis=1
    )
    variance_weights = np.concatenate((variance_weights, np.ones(1)), axis=0)

    raw_metric = entry["metric_scaling"] * crps_value + violation_crps
    metric = raw_metric

    # Computing the variance of the RCRPS is much more expensive,
    # especially when the number of samples is large,
    # so this step can be commented out if not desired.
    variance = weighted_sum_crps_variance(
        target=variance_target,
        samples=variance_forecast,
        weights=variance_weights,
    )

    return {
        "metric": metric,
        "raw_metric": raw_metric,
        "scaling": entry["metric_scaling"],
        "crps": entry["metric_scaling"] * crps_value,
        "roi_crps": entry["metric_scaling"] * roi_crps,
        "non_roi_crps": entry["metric_scaling"] * non_roi_crps,
        "standard_crps": entry["metric_scaling"] * standard_crps,
        "num_roi_timesteps": num_roi_timesteps,
        "num_non_roi_timesteps": num_non_roi_timesteps,
        "violation_mean": violation_amount.mean(),
        "violation_crps": violation_crps,
        "variance": variance,
    }

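
# Hypothetical end-to-end sketch, added for illustration only: a hand-built
# entry with the minimal fields roi_crps() reads. Real entries come from the
# Hugging Face dataset; the values below are invented.
def _demo_roi_crps() -> None:
    future = pd.DataFrame({"value": [1.0, 2.0, 3.0, 4.0]})
    entry = {
        "future_time": future.to_json(),
        "region_of_interest": [2, 3],  # these timesteps get half the total weight
        "metric_scaling": 1.0,
        "constraint_min": 0.0,
        "constraint_max": 100.0,
        "constraint_variable_max_index": [],
        "constraint_variable_max_values": [],
    }
    forecast = np.random.default_rng(0).normal(
        loc=future["value"].to_numpy(), scale=0.5, size=(25, 4)
    )
    print(roi_crps(entry, forecast)["metric"])
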

def compute_all_rcprs(
    dataset: Dataset,
    forecasts: list[dict],
) -> tuple[float, float]:
    """
    Compute the Region-of-Interest CRPS for all instances in the Context-is-Key dataset.

    Parameters:
    -----------
    dataset: Dataset
        The Context-is-Key dataset.
    forecasts: list[dict]
        A list of dictionaries, each containing the following keys:
        - "name": the name of the task for which the forecast is made.
        - "seed": the seed of the instance for which the forecast is made.
        - "forecast": the forecast values. (n_samples, n_timesteps)

    Returns:
    --------
    mean_rcrps: float
        The aggregated RCRPS over all instances.
    std_rcrps: float
        An estimate of the standard error of the aggregated RCRPS.
    """
    weighted_sum_rcrps = 0.0
    weighted_sum_variance = 0.0
    total_weight = 0.0

    for entry, forecast in zip(dataset, forecasts):
        if entry["name"] != forecast["name"]:
            raise ValueError(
                f"Forecast name {forecast['name']} does not match dataset entry name {entry['name']}"
            )
        if entry["seed"] != forecast["seed"]:
            raise ValueError(
                f"Forecast seed {forecast['seed']} does not match dataset entry seed {entry['seed']}"
            )
        metric_output = roi_crps(
            entry=entry,
            forecast=forecast["forecast"],
        )

        weight = Fraction(entry["weight"])

        # Apply the cap of RCRPS = 5 to the metric
        if metric_output["metric"] >= 5.0:
            metric_output["metric"] = 5.0
            metric_output["variance"] = 0.0

        weighted_sum_rcrps += weight * metric_output["metric"]
        weighted_sum_variance += weight * weight * metric_output["variance"]
        total_weight += weight

    mean_rcrps = weighted_sum_rcrps / total_weight
    std_rcrps = np.sqrt(weighted_sum_variance) / total_weight

    return mean_rcrps, std_rcrps


if __name__ == "__main__":
    # An example of how to use this code,
    # using a naive forecaster that draws random values from the past as its forecast.

    from datasets import load_dataset

    dataset = load_dataset("ServiceNow/context-is-key", split="test")

    # Create a random forecast for each instance in the dataset
    forecasts = []
    for entry in dataset:
        past_time = pd.read_json(StringIO(entry["past_time"]))
        future_time = pd.read_json(StringIO(entry["future_time"]))
        forecast = {
            "name": entry["name"],
            "seed": entry["seed"],
            "forecast": np.random.choice(
                past_time.to_numpy()[:, -1],
                size=(25, len(future_time)),
                replace=True,
            ),
        }
        forecasts.append(forecast)

    mean_rcrps, std_rcrps = compute_all_rcprs(dataset, forecasts)
    print(f"Mean RCRPS: {mean_rcrps}")
    print(f"Standard error of RCRPS: {std_rcrps}")