gluonts.dataset.arrow.enc 模块#
- class gluonts.dataset.arrow.enc.ArrowEncoder(columns: List[str], ndarray_columns: Set[str] = <factory>, flatten_arrays: bool = True)[source]#
- 基类: object
 - columns: List[str]#
 - flatten_arrays: bool = True#
 - ndarray_columns: Set[str]#
 
- class gluonts.dataset.arrow.enc.ArrowWriter(stream: bool = False, suffix: str = '.feather', compression: Union[typing_extensions.Literal['lz4'], typing_extensions.Literal['zstd'], NoneType] = None, flatten_arrays: bool = True, metadata: Union[dict, NoneType] = None)[source]#
- 基类: gluonts.dataset.DatasetWriter
 - compression: Optional[Union[typing_extensions.Literal['lz4'], typing_extensions.Literal['zstd']]] = None#
 - flatten_arrays: bool = True#
 - metadata: Optional[dict] = None#
 - stream: bool = False#
 - suffix: str = '.feather'#
 - write_to_file(dataset: gluonts.dataset.Dataset, path: pathlib.Path) None#
 - write_to_folder(dataset: gluonts.dataset.Dataset, folder: pathlib.Path, name: Optional[str] = None) None#
 
- class gluonts.dataset.arrow.enc.ParquetWriter(suffix: str = '.parquet', flatten_arrays: bool = True, metadata: Union[dict, NoneType] = None)[source]#
- 基类: gluonts.dataset.DatasetWriter
 - flatten_arrays: bool = True#
 - metadata: Optional[dict] = None#
 - suffix: str = '.parquet'#
 - write_to_file(dataset: gluonts.dataset.Dataset, path: pathlib.Path) None#
 - write_to_folder(dataset: gluonts.dataset.Dataset, folder: pathlib.Path, name: Optional[str] = None) None#
 
- gluonts.dataset.arrow.enc.into_arrow_batches(dataset, batch_size=1024, flatten_arrays=True)#
- gluonts.dataset.arrow.enc.write_dataset(Writer, dataset, path, metadata=None, batch_size=1024, flatten_arrays=True)#