Add data checkpointing capability #222
Merged
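This PR adds a `Checkpointer` class that buffers generated datasets and periodically writes them to uniquely named JSONL files in a checkpoint directory. On a later run, `Checkpointer.load()` reads any existing checkpoints back and filters already-generated rows out of the seed dataset, so generation resumes where it left off instead of starting over.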
`src/instructlab/sdg/checkpointing.py` (new file):

```python
# Standard
from typing import Optional, Tuple
import logging
import uuid

# Third Party
from datasets import Dataset, concatenate_datasets, load_dataset
from datasets.data_files import EmptyDatasetError

# First Party
from instructlab.sdg.utils import pandas

logger = logging.getLogger(__name__)


class Checkpointer:
    def __init__(self, checkpoint_dir=None, save_freq=1):
        self._checkpoint_dir = checkpoint_dir

        self._save_freq = save_freq
        self._cache = []

    def checkpoint(self, dataset):
        # Buffer this dataset; only write to disk once save_freq
        # datasets have accumulated
        self._cache.append(dataset)
        if len(self._cache) < self._save_freq:
            return
        self.save()
        self._cache.clear()

    def done(self):
        # Flush any datasets still buffered
        if self._cache:
            self.save()
            self._cache.clear()

    def save(self):
        if self._checkpoint_dir is None:
            return
        checkpoint_id = uuid.uuid4().hex
        checkpoint_file = (
            f"{self._checkpoint_dir}/data_checkpoint_{checkpoint_id}.jsonl"
        )
        logger.info(f"Saving checkpoint to {checkpoint_file}")
        # Saves all the current records to a new file in the checkpoint dir
        concatenate_datasets(self._cache).to_json(
            checkpoint_file, orient="records", lines=True
        )

    def load(self, dataset: Dataset) -> Tuple[Dataset, Optional[Dataset]]:
        if self._checkpoint_dir is None:
            return dataset, None

        try:
            pre_generated_data = load_dataset(
                "json", data_dir=self._checkpoint_dir, split="train"
            )
        except EmptyDatasetError:
            logger.info(
                f"No existing checkpoints found in {self._checkpoint_dir}, generating from scratch"
            )
            return dataset, None

        logger.info(
            f"Loading existing checkpoints from {self._checkpoint_dir}, with {pre_generated_data.num_rows} rows"
        )
        seed_data = self._get_missing_data(dataset, pre_generated_data)
        logger.info(f"Found {seed_data.num_rows} missing rows in the dataset")
        return seed_data, pre_generated_data

    def _get_missing_data(self, seed_data, generated_data):
        # Get the common columns between the two datasets
        common_columns = list(
            set(seed_data.column_names) & set(generated_data.column_names)
        )

        # Extract the relevant data based on common columns
        seed_data_common = seed_data.select_columns(common_columns)
        generated_data_common = generated_data.select_columns(common_columns)

        # Convert to Pandas DataFrames for easier comparison
        seed_df = seed_data_common.to_pandas()
        generated_df = generated_data_common.to_pandas()

        # Identify seed rows whose common-column values do not appear
        # in any previously generated row
        missing_rows = ~seed_df.apply(tuple, 1).isin(generated_df.apply(tuple, 1))

        missing_df = seed_data.to_pandas()[missing_rows]
        return pandas.dataset_from_pandas_dataframe(missing_df)
```
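As a review aid, here is a minimal sketch of how a generation loop might drive this class. The `run_with_checkpoints` and `pipeline_fn` names and the batch size are illustrative assumptions, not part of this PR:

```python
from datasets import Dataset, concatenate_datasets

from instructlab.sdg.checkpointing import Checkpointer


def run_with_checkpoints(seed_data: Dataset, pipeline_fn, checkpoint_dir: str) -> Dataset:
    # Hypothetical driver loop: pipeline_fn stands in for whatever map-style
    # generation step adds new columns to each batch.
    checkpointer = Checkpointer(checkpoint_dir=checkpoint_dir, save_freq=1)

    # Drop rows that earlier runs already generated output for
    seed_data, pre_generated = checkpointer.load(seed_data)

    batch_size = 10  # illustrative; real callers pick their own split size
    generated = []
    for start in range(0, len(seed_data), batch_size):
        batch = seed_data.select(range(start, min(start + batch_size, len(seed_data))))
        out = batch.map(pipeline_fn)
        checkpointer.checkpoint(out)  # buffered; flushed every save_freq calls
        generated.append(out)
    checkpointer.done()  # flush whatever is still buffered

    if pre_generated is not None:
        generated.append(pre_generated)
    return concatenate_datasets(generated)
```

Because `save()` names each checkpoint file with a fresh `uuid4` hex string, repeated runs append new files rather than overwriting earlier checkpoints.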
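The accompanying pytest suite (117 new lines) exercises the full round trip: it pre-populates checkpoint files on disk, asserts that `load()` returns only the rows still missing generated output (matched on the columns common to the seed and generated datasets), and verifies that `save_freq` controls how many buffered splits land in each checkpoint file.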
```python
# Standard
import json
import os

# Third Party
from datasets import Dataset
import pytest

# First Party
from instructlab.sdg.checkpointing import Checkpointer


def _add_bar(sample, add_value=100):
    sample["bar"] = sample["foo"] + add_value
    return sample


def _populate_checkpoints(tmpdir, dataset, checkpoints_count, remove_column):
    for i in range(0, checkpoints_count):
        checkpoint_dataset = dataset.select(range(i * 10, (i + 1) * 10))
        checkpoint_dataset = checkpoint_dataset.map(
            lambda x: _add_bar(x, add_value=100)
        )
        if remove_column:
            checkpoint_dataset = checkpoint_dataset.remove_columns("foo")
        checkpoint_dataset.to_json(
            os.path.join(tmpdir, f"data_checkpoint_abcde{i}.jsonl"),
            orient="records",
            lines=True,
        )


def _validate_checkpoints(tmpdir, expected_files_count, expected_length, remove_column):
    saved_files = os.listdir(tmpdir)
    assert len(saved_files) == expected_files_count
    assert all(f.startswith("data_checkpoint_") for f in saved_files)
    assert all(f.endswith(".jsonl") for f in saved_files)

    for filename in saved_files:
        with open(os.path.join(tmpdir, filename), "r") as fh:
            lines = list(fh)
        if isinstance(expected_length, list):
            expected_length.remove(len(lines))
        else:
            assert len(lines) == expected_length
        for s in lines:
            data = json.loads(s)
            if remove_column:
                assert "foo" not in data and "bar" in data
            else:
                assert "foo" in data and "bar" in data


@pytest.mark.parametrize(
    "save_freq, remove_column, dataset_size, init_checkpoints, splits, final_checkpoints, checkpoint_length",
    [
        (1, False, 10, 0, 0, 1, 10),
        (1, True, 10, 0, 0, 1, 10),
        (1, False, 100, 1, 9, 10, 10),
        (1, True, 100, 1, 9, 10, 10),
        (1, False, 100, 2, 8, 10, 10),
        (3, False, 100, 2, 8, 5, [10, 10, 30, 30, 20]),
    ],
)
def test_checkpointing(
    tmpdir,
    save_freq,
    remove_column,
    dataset_size,
    init_checkpoints,
    splits,
    final_checkpoints,
    checkpoint_length,
):
    # Our initial dataset
    dataset = Dataset.from_list([{"idx": i, "foo": i} for i in range(dataset_size)])

    # Generate and save some checkpoints to disk
    _populate_checkpoints(tmpdir, dataset, init_checkpoints, remove_column)

    # Load checkpoints, giving us the remaining dataset to process and
    # the generated data loaded from the checkpoints
    checkpointer = Checkpointer(checkpoint_dir=tmpdir, save_freq=save_freq)
    dataset, pre_generated_data = checkpointer.load(dataset)

    # Should be present, even if removed from the checkpoint (remove_column=True)
    assert "foo" in dataset.features

    # When testing save_freq, we will have checkpoints of different lengths
    if isinstance(checkpoint_length, list):
        checkpoints_total = sum(checkpoint_length[:init_checkpoints])
    else:
        checkpoints_total = checkpoint_length * init_checkpoints

    # Validate pre-generated data loaded from the checkpoints
    assert len(dataset) == (dataset_size - checkpoints_total)
    if init_checkpoints > 0:
        assert len(pre_generated_data) == checkpoints_total

    # Apply pipeline to the remaining dataset and save checkpoints
    if splits:
        for i in range(0, splits):
            split = dataset.select(range(i * 10, (i + 1) * 10))
            split = split.map(lambda x: _add_bar(x, add_value=100))
            if remove_column:
                split = split.remove_columns("foo")
            checkpointer.checkpoint(split)
    else:
        dataset = dataset.map(lambda x: _add_bar(x, add_value=10))
        if remove_column:
            dataset = dataset.remove_columns("foo")
        checkpointer.checkpoint(dataset)

    checkpointer.done()

    # Validate that all checkpoints are now saved to disk
    _validate_checkpoints(tmpdir, final_checkpoints, checkpoint_length, remove_column)
```
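The final parametrized case shows the `save_freq=3` behavior: after two pre-populated 10-row checkpoints, the eight remaining 10-row splits are flushed in groups of three (30 rows, then 30), and `done()` writes the last partial group of 20, giving five checkpoint files in total, which matches `checkpoint_length=[10, 10, 30, 30, 20]`.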