diff --git a/samples/snippets/rapid/storage_create_and_write_appendable_object.py b/samples/snippets/rapid/storage_create_and_write_appendable_object.py
new file mode 100644
index 000000000..98fb7534b
--- /dev/null
+++ b/samples/snippets/rapid/storage_create_and_write_appendable_object.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+
+from google.cloud.storage._experimental.asyncio.async_appendable_object_writer import (
+    AsyncAppendableObjectWriter,
+)
+from google.cloud.storage._experimental.asyncio.async_grpc_client import AsyncGrpcClient
+
+
+# [START storage_create_and_write_appendable_object]
+
+
+async def storage_create_and_write_appendable_object(bucket_name, object_name):
+    """Uploads an appendable object to a zonal bucket."""
+
+    grpc_client = AsyncGrpcClient().grpc_client
+    writer = AsyncAppendableObjectWriter(
+        client=grpc_client,
+        bucket_name=bucket_name,
+        object_name=object_name,
+    )
+    # Creates a new appendable object of size 0.
+    await writer.open()
+
+    # Appends data to the object.
+    # You can call `.append` multiple times as needed; data will be appended
+    # to the end of the object.
+    await writer.append(b"Some data")
+
+    # Once all appends are done, close the gRPC bidirectional stream.
+    await writer.close()
+
+    print(f"Appended object {object_name} created of size {writer.persisted_size} bytes.")
+
+
+# [END storage_create_and_write_appendable_object]
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
+    parser.add_argument("--object_name", help="Your Cloud Storage object name.")
+
+    args = parser.parse_args()
+
+    asyncio.run(
+        storage_create_and_write_appendable_object(
+            bucket_name=args.bucket_name,
+            object_name=args.object_name,
+        )
+    )
diff --git a/samples/snippets/rapid/storage_finalize_appendable_object_upload.py b/samples/snippets/rapid/storage_finalize_appendable_object_upload.py
new file mode 100644
index 000000000..c7855e7af
--- /dev/null
+++ b/samples/snippets/rapid/storage_finalize_appendable_object_upload.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+
+from google.cloud.storage._experimental.asyncio.async_appendable_object_writer import (
+    AsyncAppendableObjectWriter,
+)
+from google.cloud.storage._experimental.asyncio.async_grpc_client import AsyncGrpcClient
+
+
+# [START storage_finalize_appendable_object_upload]
+async def storage_finalize_appendable_object_upload(bucket_name, object_name):
+    """Creates, writes to, and finalizes an appendable object."""
+
+    grpc_client = AsyncGrpcClient().grpc_client
+    writer = AsyncAppendableObjectWriter(
+        client=grpc_client,
+        bucket_name=bucket_name,
+        object_name=object_name,
+    )
+    # Creates a new appendable object of size 0.
+    await writer.open()
+
+    # Appends data to the object.
+    await writer.append(b"Some data")
+
+    # Finalize the appendable object; once finalized, no more appends
+    # can be done to the object.
+    object_resource = await writer.finalize()
+
+    print(f"Object resource: {object_resource}")
+
+    print(
+        f"Appendable object {object_name} created and finalized with size {writer.persisted_size} bytes."
+    )
+
+
+# [END storage_finalize_appendable_object_upload]
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
+    parser.add_argument("--object_name", help="Your Cloud Storage object name.")
+
+    args = parser.parse_args()
+
+    asyncio.run(
+        storage_finalize_appendable_object_upload(
+            bucket_name=args.bucket_name,
+            object_name=args.object_name,
+        )
+    )
diff --git a/samples/snippets/rapid/storage_open_multiple_objects_ranged_read.py b/samples/snippets/rapid/storage_open_multiple_objects_ranged_read.py
new file mode 100644
index 000000000..c55bf2fa4
--- /dev/null
+++ b/samples/snippets/rapid/storage_open_multiple_objects_ranged_read.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Downloads a range of bytes from multiple objects concurrently."""
+import argparse
+import asyncio
+from io import BytesIO
+
+from google.cloud.storage._experimental.asyncio.async_grpc_client import (
+    AsyncGrpcClient,
+)
+from google.cloud.storage._experimental.asyncio.async_multi_range_downloader import (
+    AsyncMultiRangeDownloader,
+)
+
+
+# [START storage_open_multiple_objects_ranged_read]
+async def storage_open_multiple_objects_ranged_read(bucket_name, object_names):
+    """Downloads a range of bytes from multiple objects concurrently."""
+    client = AsyncGrpcClient().grpc_client
+
+    async def _download_range(object_name):
+        """Helper coroutine to download a range from a single object."""
+        mrd = AsyncMultiRangeDownloader(client, bucket_name, object_name)
+
+        # Open the object in read mode.
+        await mrd.open()
+
+        # Each object downloads the first 100 bytes.
+        start_byte = 0
+        size = 100
+
+        # The requested range will be downloaded into this buffer; the user may
+        # provide their own buffer or file-like object.
+        output_buffer = BytesIO()
+        await mrd.download_ranges([(start_byte, size, output_buffer)])
+
+        await mrd.close()
+
+        # Downloaded size can differ from the requested size if the object is
+        # smaller; mrd downloads at most up to the end of the object.
+        downloaded_size = output_buffer.getbuffer().nbytes
+        print(f"Downloaded {downloaded_size} bytes from {object_name}")
+
+    download_tasks = [_download_range(name) for name in object_names]
+    await asyncio.gather(*download_tasks)
+
+
+# [END storage_open_multiple_objects_ranged_read]
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
+    parser.add_argument(
+        "--object_names", nargs="+", help="Your Cloud Storage object name(s)."
+    )
+
+    args = parser.parse_args()
+
+    asyncio.run(
+        storage_open_multiple_objects_ranged_read(args.bucket_name, args.object_names)
+    )
diff --git a/samples/snippets/rapid/storage_open_object_multiple_ranged_read.py b/samples/snippets/rapid/storage_open_object_multiple_ranged_read.py
new file mode 100644
index 000000000..75ff6daf6
--- /dev/null
+++ b/samples/snippets/rapid/storage_open_object_multiple_ranged_read.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+from io import BytesIO
+
+from google.cloud.storage._experimental.asyncio.async_grpc_client import AsyncGrpcClient
+from google.cloud.storage._experimental.asyncio.async_multi_range_downloader import (
+    AsyncMultiRangeDownloader,
+)
+
+# [START storage_open_object_multiple_ranged_read]
+async def storage_open_object_multiple_ranged_read(
+    bucket_name, object_name
+):
+    """Downloads multiple ranges of bytes from a single object into different buffers."""
+    client = AsyncGrpcClient().grpc_client
+
+    mrd = AsyncMultiRangeDownloader(client, bucket_name, object_name)
+
+    # Open the object in read mode.
+    await mrd.open()
+
+    # Define four different buffers to download ranges into.
+    buffers = [BytesIO(), BytesIO(), BytesIO(), BytesIO()]
+
+    # Define the ranges to download. Each range is a tuple of (start_byte, size, buffer).
+    # All ranges will download 10 bytes from different starting positions.
+    # The start bytes are arbitrary for this example; the object should be large enough.
+    # A user can choose any start byte between 0 and mrd.persisted_size.
+    ranges = [
+        (0, 10, buffers[0]),
+        (20, 10, buffers[1]),
+        (40, 10, buffers[2]),
+        (60, 10, buffers[3]),
+    ]
+
+    await mrd.download_ranges(ranges)
+
+    await mrd.close()
+
+    # Print the downloaded content from each buffer.
+    for i, output_buffer in enumerate(buffers):
+        downloaded_size = output_buffer.getbuffer().nbytes
+        print(
+            f"Downloaded {downloaded_size} bytes into buffer {i + 1} from start byte {ranges[i][0]}: {output_buffer.getvalue()}"
+        )
+
+# [END storage_open_object_multiple_ranged_read]
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
+    parser.add_argument("--object_name", help="Your Cloud Storage object name.")
+
+    args = parser.parse_args()
+
+    asyncio.run(
+        storage_open_object_multiple_ranged_read(
+            args.bucket_name, args.object_name
+        )
+    )
diff --git a/samples/snippets/rapid/storage_open_object_read_full_object.py b/samples/snippets/rapid/storage_open_object_read_full_object.py
new file mode 100644
index 000000000..2c99d3367
--- /dev/null
+++ b/samples/snippets/rapid/storage_open_object_read_full_object.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+from io import BytesIO
+
+from google.cloud.storage._experimental.asyncio.async_grpc_client import AsyncGrpcClient
+from google.cloud.storage._experimental.asyncio.async_multi_range_downloader import (
+    AsyncMultiRangeDownloader,
+)
+
+# [START storage_open_object_read_full_object]
+async def storage_open_object_read_full_object(
+    bucket_name, object_name
+):
+    """Downloads the entire content of an object."""
+    client = AsyncGrpcClient().grpc_client
+
+    mrd = AsyncMultiRangeDownloader(client, bucket_name, object_name)
+
+    # Open the object in read mode.
+    await mrd.open()
+
+    output_buffer = BytesIO()
+    # A download range of (0, 0) means to read from the beginning to the end.
+    await mrd.download_ranges([(0, 0, output_buffer)])
+
+    await mrd.close()
+
+    downloaded_bytes = output_buffer.getvalue()
+    print(
+        f"Downloaded all {len(downloaded_bytes)} bytes from object {object_name} in bucket {bucket_name}."
+    )
+    # You can now access the full content via downloaded_bytes, for example:
+    # print(downloaded_bytes)
+
+# [END storage_open_object_read_full_object]
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
+    parser.add_argument("--object_name", help="Your Cloud Storage object name.")
+
+    args = parser.parse_args()
+
+    asyncio.run(
+        storage_open_object_read_full_object(
+            args.bucket_name, args.object_name
+        )
+    )
diff --git a/samples/snippets/rapid/storage_open_object_single_ranged_read.py b/samples/snippets/rapid/storage_open_object_single_ranged_read.py
new file mode 100644
index 000000000..7c07f7825
--- /dev/null
+++ b/samples/snippets/rapid/storage_open_object_single_ranged_read.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+from io import BytesIO
+
+from google.cloud.storage._experimental.asyncio.async_grpc_client import AsyncGrpcClient
+from google.cloud.storage._experimental.asyncio.async_multi_range_downloader import (
+    AsyncMultiRangeDownloader,
+)
+
+# [START storage_open_object_single_ranged_read]
+async def storage_open_object_single_ranged_read(
+    bucket_name, object_name, start_byte, size
+):
+    """Downloads a range of bytes from an object."""
+    client = AsyncGrpcClient().grpc_client
+
+    mrd = AsyncMultiRangeDownloader(client, bucket_name, object_name)
+
+    # Open the object in read mode.
+    await mrd.open()
+
+    # The requested range will be downloaded into this buffer; the user may
+    # provide their own buffer or file-like object.
+    output_buffer = BytesIO()
+    await mrd.download_ranges([(start_byte, size, output_buffer)])
+
+    await mrd.close()
+
+    # Downloaded size can differ from the requested size if the object is
+    # smaller; mrd downloads at most up to the end of the object.
+    downloaded_size = output_buffer.getbuffer().nbytes
+    print(
+        f"Downloaded {downloaded_size} bytes from {object_name}"
+    )
+
+# [END storage_open_object_single_ranged_read]
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
+    parser.add_argument("--object_name", help="Your Cloud Storage object name.")
+    parser.add_argument("--start_byte", type=int, help="The starting byte of the range.")
+    parser.add_argument("--size", type=int, help="The number of bytes to download.")
+
+    args = parser.parse_args()
+
+    asyncio.run(
+        storage_open_object_single_ranged_read(
+            args.bucket_name, args.object_name, args.start_byte, args.size
+        )
+    )
diff --git a/samples/snippets/rapid/storage_pause_and_resume_appendable_upload.py b/samples/snippets/rapid/storage_pause_and_resume_appendable_upload.py
new file mode 100644
index 000000000..a399522f7
--- /dev/null
+++ b/samples/snippets/rapid/storage_pause_and_resume_appendable_upload.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+
+from google.cloud.storage._experimental.asyncio.async_appendable_object_writer import (
+    AsyncAppendableObjectWriter,
+)
+from google.cloud.storage._experimental.asyncio.async_grpc_client import AsyncGrpcClient
+
+
+# [START storage_pause_and_resume_appendable_upload]
+async def storage_pause_and_resume_appendable_upload(bucket_name, object_name):
+    """Demonstrates pausing and resuming an appendable object upload."""
+
+    grpc_client = AsyncGrpcClient().grpc_client
+
+    # 1. After appending some data:
+    writer1 = AsyncAppendableObjectWriter(
+        client=grpc_client,
+        bucket_name=bucket_name,
+        object_name=object_name,
+    )
+    await writer1.open()
+    await writer1.append(b"First part of the data. ")
+    print(f"Appended {writer1.persisted_size} bytes with the first writer.")
+
+    # 2. Close the writer to "pause" the upload.
+    await writer1.close()
+    print("First writer closed. Upload is 'paused'.")
+
+    # 3. Create a new writer, passing the generation number from the previous
+    # writer. This is a precondition to ensure that the object hasn't been
+    # modified since we last accessed it.
+    generation_to_resume = writer1.generation
+    print(f"Generation to resume from is: {generation_to_resume}")
+
+    writer2 = AsyncAppendableObjectWriter(
+        client=grpc_client,
+        bucket_name=bucket_name,
+        object_name=object_name,
+        generation=generation_to_resume,
+    )
+
+    # 4. Open the new writer.
+    await writer2.open()
+
+    # 5. Append some more data using the new writer.
+    await writer2.append(b"Second part of the data.")
+    print(
+        f"Appended more data. Total size is now {writer2.persisted_size} bytes."
+    )
+
+    # 6. Finally, close the new writer.
+    await writer2.close()
+    print("Second writer closed. Full object uploaded.")
+
+
+# [END storage_pause_and_resume_appendable_upload]
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
+    parser.add_argument("--object_name", help="Your Cloud Storage object name.")
+
+    args = parser.parse_args()
+
+    asyncio.run(
+        storage_pause_and_resume_appendable_upload(
+            bucket_name=args.bucket_name,
+            object_name=args.object_name,
+        )
+    )
diff --git a/samples/snippets/rapid/storage_read_appendable_object_tail.py b/samples/snippets/rapid/storage_read_appendable_object_tail.py
new file mode 100644
index 000000000..3cab33930
--- /dev/null
+++ b/samples/snippets/rapid/storage_read_appendable_object_tail.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+
+# Copyright 2026 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+import os
+import time
+from datetime import datetime
+from io import BytesIO
+
+from google.cloud.storage._experimental.asyncio.async_appendable_object_writer import (
+    AsyncAppendableObjectWriter,
+)
+from google.cloud.storage._experimental.asyncio.async_grpc_client import AsyncGrpcClient
+from google.cloud.storage._experimental.asyncio.async_multi_range_downloader import (
+    AsyncMultiRangeDownloader,
+)
+
+# [START storage_read_appendable_object_tail]
+async def appender(writer: AsyncAppendableObjectWriter, duration: int):
+    """Appends 10 random bytes to the object every second for a given duration."""
+    print("Appender started.")
+    for _ in range(duration):
+        await writer.append(os.urandom(10))  # Append 10 random bytes.
+        await asyncio.sleep(1)
+    print("Appender finished.")
+
+
+async def tailer(bucket_name: str, object_name: str, duration: int):
+    """Tails the object by reading new data as it is appended."""
+    print("Tailer started.")
+    start_byte = 0
+    client = AsyncGrpcClient().grpc_client
+    start_time = time.monotonic()
+    mrd = AsyncMultiRangeDownloader(client, bucket_name, object_name)
+    await mrd.open()
+    # Run the tailer for the specified duration.
+    while time.monotonic() - start_time < duration:
+        output_buffer = BytesIO()
+        # A download range of (start, 0) means to read from 'start' to the end.
+        await mrd.download_ranges([(start_byte, 0, output_buffer)])
+
+        bytes_downloaded = output_buffer.getbuffer().nbytes
+        if bytes_downloaded > 0:
+            now = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
+            print(
+                f"[{now}] Tailer read {bytes_downloaded} new bytes: {output_buffer.getvalue()}"
+            )
+            start_byte += bytes_downloaded
+
+        await asyncio.sleep(0.1)  # Poll for new data every 0.1 seconds.
+    print("Tailer finished.")
+
+
+async def main_async(bucket_name: str, object_name: str, duration: int):
+    """Main function to create an appendable object and run tasks."""
+    grpc_client = AsyncGrpcClient().grpc_client
+    writer = AsyncAppendableObjectWriter(
+        client=grpc_client,
+        bucket_name=bucket_name,
+        object_name=object_name,
+    )
+    # 1. Create an empty appendable object.
+    await writer.open()
+    print(f"Created empty appendable object: {object_name}")
+
+    # 2. Create the appender and tailer coroutines.
+    appender_task = asyncio.create_task(appender(writer, duration))
+    tailer_task = asyncio.create_task(tailer(bucket_name, object_name, duration))
+
+    # 3. Execute the coroutines concurrently.
+    await asyncio.gather(appender_task, tailer_task)
+
+    await writer.close()
+    print("Writer closed.")
+
+
+# [END storage_read_appendable_object_tail]
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="Demonstrates tailing an appendable GCS object.",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+    )
+    parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
+    parser.add_argument(
+        "--object_name", help="Your Cloud Storage object name to be created."
+    )
+    parser.add_argument(
+        "--duration",
+        type=int,
+        default=60,
+        help="Duration in seconds to run the demo.",
+    )
+
+    args = parser.parse_args()
+
+    asyncio.run(main_async(args.bucket_name, args.object_name, args.duration))