Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
#!/usr/bin/env python

# Copyright 2026 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import asyncio

from google.cloud.storage._experimental.asyncio.async_appendable_object_writer import (
AsyncAppendableObjectWriter,
)
from google.cloud.storage._experimental.asyncio.async_grpc_client import AsyncGrpcClient


# [START storage_create_and_write_appendable_object]


async def storage_create_and_write_appendable_object(bucket_name, object_name):
    """Uploads an appendable object to a zonal bucket.

    Args:
        bucket_name: Name of the zonal Cloud Storage bucket.
        object_name: Name of the appendable object to create.
    """
    grpc_client = AsyncGrpcClient().grpc_client
    writer = AsyncAppendableObjectWriter(
        client=grpc_client,
        bucket_name=bucket_name,
        object_name=object_name,
    )
    # Creates a new appendable object of size 0.
    await writer.open()
    try:
        # Appends data to the object. `.append` may be called multiple times
        # as needed; data is always appended to the end of the object.
        await writer.append(b"Some data")
    finally:
        # Once all appends are done (or on error), close the gRPC
        # bidirectional stream so the connection is not leaked.
        await writer.close()

    print(f'Appended object {object_name} created of size {writer.persisted_size} bytes.')





# [END storage_create_and_write_appendable_object]

if __name__ == "__main__":
    # Command-line entry point: parse the bucket/object names and run the
    # async sample to completion.
    arg_parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    arg_parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    arg_parser.add_argument("--object_name", help="Your Cloud Storage object name.")
    cli_args = arg_parser.parse_args()

    coro = storage_create_and_write_appendable_object(
        bucket_name=cli_args.bucket_name,
        object_name=cli_args.object_name,
    )
    asyncio.run(coro)
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
#!/usr/bin/env python

# Copyright 2026 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import asyncio

from google.cloud.storage._experimental.asyncio.async_appendable_object_writer import (
AsyncAppendableObjectWriter,
)
from google.cloud.storage._experimental.asyncio.async_grpc_client import AsyncGrpcClient


# [START storage_finalize_appendable_object_upload]
async def storage_finalize_appendable_object_upload(bucket_name, object_name):
    """Creates, writes to, and finalizes an appendable object.

    Args:
        bucket_name: Name of the zonal Cloud Storage bucket.
        object_name: Name of the appendable object to create and finalize.
    """
    grpc_client = AsyncGrpcClient().grpc_client
    writer = AsyncAppendableObjectWriter(
        client=grpc_client,
        bucket_name=bucket_name,
        object_name=object_name,
    )
    # Creates a new appendable object of size 0.
    await writer.open()

    # Appends data to the object.
    await writer.append(b"Some data")

    # Finalize the appendable object.
    # Once finalized, no more appends can be done to the object.
    object_resource = await writer.finalize()

    print("Object resource: -- ")
    print(object_resource)

    print(
        f"Appendable object {object_name} created and finalized with size {writer.persisted_size} bytes."
    )


# [END storage_finalize_appendable_object_upload]

if __name__ == "__main__":
    # Command-line entry point: parse the bucket/object names and run the
    # async sample to completion.
    cli = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    cli.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    cli.add_argument("--object_name", help="Your Cloud Storage object name.")
    parsed = cli.parse_args()

    asyncio.run(
        storage_finalize_appendable_object_upload(
            bucket_name=parsed.bucket_name,
            object_name=parsed.object_name,
        )
    )
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
#!/usr/bin/env python

# Copyright 2026 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Downloads a range of bytes from multiple objects concurrently."""
import argparse
import asyncio
from io import BytesIO

from google.cloud.storage._experimental.asyncio.async_grpc_client import (
AsyncGrpcClient,
)
from google.cloud.storage._experimental.asyncio.async_multi_range_downloader import (
AsyncMultiRangeDownloader,
)


# [START storage_open_multiple_objects_ranged_read]
async def storage_open_multiple_objects_ranged_read(bucket_name, object_names):
    """Downloads a range of bytes from multiple objects concurrently.

    Args:
        bucket_name: Name of the Cloud Storage bucket.
        object_names: Iterable of object names to read from concurrently.
    """
    client = AsyncGrpcClient().grpc_client

    async def _download_range(object_name):
        """Helper coroutine to download a range from a single object."""
        mrd = AsyncMultiRangeDownloader(client, bucket_name, object_name)

        # Open the object in read mode.
        await mrd.open()
        try:
            # Each object downloads the first 100 bytes.
            start_byte = 0
            size = 100

            # The requested range is downloaded into this buffer; users may
            # provide their own buffer or file-like object instead.
            output_buffer = BytesIO()
            await mrd.download_ranges([(start_byte, size, output_buffer)])
        finally:
            # Always close the stream, even if the download raised, so the
            # gRPC bidirectional stream is not leaked.
            await mrd.close()

        # Downloaded size can differ from requested size if object is smaller.
        # mrd will download at most up to the end of the object.
        downloaded_size = output_buffer.getbuffer().nbytes
        print(f"Downloaded {downloaded_size} bytes from {object_name}")

    download_tasks = [_download_range(name) for name in object_names]
    await asyncio.gather(*download_tasks)


# [END storage_open_multiple_objects_ranged_read]


if __name__ == "__main__":
    # Command-line entry point: parse the bucket and one or more object
    # names, then run the async sample to completion.
    arg_parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    arg_parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    arg_parser.add_argument(
        "--object_names", nargs="+", help="Your Cloud Storage object name(s)."
    )
    cli_args = arg_parser.parse_args()

    asyncio.run(
        storage_open_multiple_objects_ranged_read(
            cli_args.bucket_name, cli_args.object_names
        )
    )
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
#!/usr/bin/env python

# Copyright 2026 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import asyncio
from io import BytesIO

from google.cloud.storage._experimental.asyncio.async_grpc_client import AsyncGrpcClient
from google.cloud.storage._experimental.asyncio.async_multi_range_downloader import (
AsyncMultiRangeDownloader,
)

# [START storage_open_object_multiple_ranged_read]
async def storage_open_object_multiple_ranged_read(bucket_name, object_name):
    """Downloads multiple ranges of bytes from a single object into different buffers.

    Args:
        bucket_name: Name of the Cloud Storage bucket.
        object_name: Name of the object to read ranges from.
    """
    client = AsyncGrpcClient().grpc_client
    mrd = AsyncMultiRangeDownloader(client, bucket_name, object_name)

    # Define four different buffers to download ranges into.
    buffers = [BytesIO(), BytesIO(), BytesIO(), BytesIO()]

    # Define the ranges to download. Each range is a tuple of
    # (start_byte, size, buffer). All ranges download 10 bytes from different
    # starting positions. The start bytes are arbitrary for this example; the
    # object should be large enough. A user can choose any start byte between
    # 0 and mrd.persisted_size.
    ranges = [
        (0, 10, buffers[0]),
        (20, 10, buffers[1]),
        (40, 10, buffers[2]),
        (60, 10, buffers[3]),
    ]

    # Open the object in read mode.
    await mrd.open()
    try:
        await mrd.download_ranges(ranges)
    finally:
        # Always close the stream, even if the download raised, so the
        # gRPC bidirectional stream is not leaked.
        await mrd.close()

    # Print the downloaded content from each buffer.
    for i, output_buffer in enumerate(buffers):
        downloaded_size = output_buffer.getbuffer().nbytes
        print(
            f"Downloaded {downloaded_size} bytes into buffer {i+1} from start byte {ranges[i][0]}: {output_buffer.getvalue()}"
        )

# [END storage_open_object_multiple_ranged_read]

if __name__ == "__main__":
    # Command-line entry point: parse the bucket/object names and run the
    # async sample to completion.
    cli = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    cli.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    cli.add_argument("--object_name", help="Your Cloud Storage object name.")
    parsed = cli.parse_args()

    asyncio.run(
        storage_open_object_multiple_ranged_read(parsed.bucket_name, parsed.object_name)
    )
66 changes: 66 additions & 0 deletions samples/snippets/rapid/storage_open_object_read_full_object.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
#!/usr/bin/env python

# Copyright 2026 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import asyncio
from io import BytesIO

from google.cloud.storage._experimental.asyncio.async_grpc_client import AsyncGrpcClient
from google.cloud.storage._experimental.asyncio.async_multi_range_downloader import (
AsyncMultiRangeDownloader,
)

# [START storage_open_object_read_full_object]
async def storage_open_object_read_full_object(bucket_name, object_name):
    """Downloads the entire content of an object.

    Args:
        bucket_name: Name of the Cloud Storage bucket.
        object_name: Name of the object to download in full.
    """
    client = AsyncGrpcClient().grpc_client
    mrd = AsyncMultiRangeDownloader(client, bucket_name, object_name)

    output_buffer = BytesIO()

    # Open the object in read mode.
    await mrd.open()
    try:
        # A download range of (0, 0) means to read from the beginning to the end.
        await mrd.download_ranges([(0, 0, output_buffer)])
    finally:
        # Always close the stream, even if the download raised, so the
        # gRPC bidirectional stream is not leaked.
        await mrd.close()

    downloaded_bytes = output_buffer.getvalue()
    print(
        f"Downloaded all {len(downloaded_bytes)} bytes from object {object_name} in bucket {bucket_name}."
    )
    # You can now access the full content via downloaded_bytes, for example:
    # print(downloaded_bytes)

# [END storage_open_object_read_full_object]

if __name__ == "__main__":
    # Command-line entry point: parse the bucket/object names and run the
    # async sample to completion.
    arg_parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    arg_parser.add_argument("--bucket_name", help="Your Cloud Storage bucket name.")
    arg_parser.add_argument("--object_name", help="Your Cloud Storage object name.")
    cli_args = arg_parser.parse_args()

    asyncio.run(
        storage_open_object_read_full_object(cli_args.bucket_name, cli_args.object_name)
    )
Loading