diff --git a/config.js b/config.js index 1b90cf5a5c..68a3709de8 100644 --- a/config.js +++ b/config.js @@ -197,12 +197,12 @@ config.STS_CORS_EXPOSE_HEADERS = 'ETag'; config.DENY_UPLOAD_TO_STORAGE_CLASS_STANDARD = false; /** - * NSFS_GLACIER_FORCE_STORAGE_CLASS when set to true - * will force `GLACIER` storage class if no storage class - * is provided and if `STANDARD` storage class is provided - * @type {boolean} + * NSFS_GLACIER_FORCE_STORAGE_CLASS controls which storage + * class is used if no storage class or `STANDARD` storage + * class is provided. + * @type {nb.StorageClass} */ -config.NSFS_GLACIER_FORCE_STORAGE_CLASS = false; +config.NSFS_GLACIER_FORCE_STORAGE_CLASS = undefined; // S3_RESTORE_MAX_DAYS controls that for how many maximum number // of days an object can be restored using `restore-object` call. diff --git a/src/endpoint/s3/ops/s3_get_object.js b/src/endpoint/s3/ops/s3_get_object.js index 69c2391817..53a5c39548 100644 --- a/src/endpoint/s3/ops/s3_get_object.js +++ b/src/endpoint/s3/ops/s3_get_object.js @@ -41,7 +41,7 @@ async function get_object(req, res) { s3_utils.set_response_object_md(res, object_md); s3_utils.set_encryption_response_headers(req, res, object_md.encryption); - if (object_md.storage_class === s3_utils.STORAGE_CLASS_GLACIER) { + if (s3_utils.GLACIER_STORAGE_CLASSES.includes(object_md.storage_class)) { if (object_md.restore_status?.ongoing || !object_md.restore_status?.expiry_time) { // Don't try to read the object if it's not restored yet dbg.warn('Object is not restored yet', req.path, object_md.restore_status); diff --git a/src/endpoint/s3/s3_rest.js b/src/endpoint/s3/s3_rest.js index b3f93cc21a..f4c2fa1159 100755 --- a/src/endpoint/s3/s3_rest.js +++ b/src/endpoint/s3/s3_rest.js @@ -107,9 +107,6 @@ async function handle_request(req, res) { } http_utils.check_headers(req, headers_options); - // Will override the storage class if configured - s3_utils.override_storage_class(req); - const redirect = await 
populate_request_additional_info_or_redirect(req); if (redirect) { res.setHeader('Location', redirect); diff --git a/src/endpoint/s3/s3_utils.js b/src/endpoint/s3/s3_utils.js index 16a1df028c..829922667e 100644 --- a/src/endpoint/s3/s3_utils.js +++ b/src/endpoint/s3/s3_utils.js @@ -21,7 +21,7 @@ const STORAGE_CLASS_GLACIER = 'GLACIER'; // "S3 Glacier Flexible Retrieval" /** @type {nb.StorageClass} */ const STORAGE_CLASS_GLACIER_IR = 'GLACIER_IR'; // "S3 Glacier Instant Retrieval" /** @type {nb.StorageClass} */ -const STORAGE_CLASS_GLACIER_DA = 'GLACIER_DA'; // "DBS3 specific Storage Class" +const STORAGE_CLASS_DEEP_ARCHIVE = 'DEEP_ARCHIVE'; // "S3 Deep Archive Storage Class" const DEFAULT_S3_USER = Object.freeze({ ID: '123', @@ -44,6 +44,17 @@ const X_NOOBAA_AVAILABLE_STORAGE_CLASSES = 'x-noobaa-available-storage-classes'; const OBJECT_ATTRIBUTES = Object.freeze(['ETag', 'Checksum', 'ObjectParts', 'StorageClass', 'ObjectSize']); const OBJECT_ATTRIBUTES_UNSUPPORTED = Object.freeze(['Checksum', 'ObjectParts']); +/** + * Set of storage classes which support RestoreObject S3 API + * + * GLACIER_IR is omitted as it doesn't require a restore. 
+ * @type {nb.StorageClass[]} + */ +const GLACIER_STORAGE_CLASSES = [ + STORAGE_CLASS_GLACIER, + STORAGE_CLASS_DEEP_ARCHIVE, +]; + /** * get_default_object_owner returns bucket_owner info if exists * else it'll return the default owner @@ -381,10 +392,12 @@ function parse_storage_class_header(req) { * @returns {nb.StorageClass} */ function parse_storage_class(storage_class) { - if (!storage_class) return STORAGE_CLASS_STANDARD; - if (storage_class === STORAGE_CLASS_STANDARD) return STORAGE_CLASS_STANDARD; + if (config.NSFS_GLACIER_FORCE_STORAGE_CLASS) { + storage_class = config.NSFS_GLACIER_FORCE_STORAGE_CLASS; + } + if (!storage_class || storage_class === STORAGE_CLASS_STANDARD) return STORAGE_CLASS_STANDARD; if (storage_class === STORAGE_CLASS_GLACIER) return STORAGE_CLASS_GLACIER; - if (storage_class === STORAGE_CLASS_GLACIER_DA) return STORAGE_CLASS_GLACIER_DA; + if (storage_class === STORAGE_CLASS_DEEP_ARCHIVE) return STORAGE_CLASS_DEEP_ARCHIVE; if (storage_class === STORAGE_CLASS_GLACIER_IR) return STORAGE_CLASS_GLACIER_IR; throw new Error(`No such s3 storage class ${storage_class}`); } @@ -822,19 +835,11 @@ function parse_body_public_access_block(req) { return parsed; } -function override_storage_class(req) { - if ( - config.NSFS_GLACIER_FORCE_STORAGE_CLASS && - parse_storage_class_header(req) === STORAGE_CLASS_STANDARD - ) { - req.headers['x-amz-storage-class'] = STORAGE_CLASS_GLACIER; - } -} exports.STORAGE_CLASS_STANDARD = STORAGE_CLASS_STANDARD; exports.STORAGE_CLASS_GLACIER = STORAGE_CLASS_GLACIER; exports.STORAGE_CLASS_GLACIER_IR = STORAGE_CLASS_GLACIER_IR; -exports.STORAGE_CLASS_GLACIER_DA = STORAGE_CLASS_GLACIER_DA; +exports.STORAGE_CLASS_DEEP_ARCHIVE = STORAGE_CLASS_DEEP_ARCHIVE; exports.DEFAULT_S3_USER = DEFAULT_S3_USER; exports.DEFAULT_OBJECT_ACL = DEFAULT_OBJECT_ACL; exports.decode_chunked_upload = decode_chunked_upload; @@ -876,6 +881,6 @@ exports.key_marker_to_cont_tok = key_marker_to_cont_tok; exports.parse_sse_c = parse_sse_c; 
exports.verify_string_byte_length = verify_string_byte_length; exports.parse_body_public_access_block = parse_body_public_access_block; -exports.override_storage_class = override_storage_class; exports.OBJECT_ATTRIBUTES = OBJECT_ATTRIBUTES; exports.OBJECT_ATTRIBUTES_UNSUPPORTED = OBJECT_ATTRIBUTES_UNSUPPORTED; +exports.GLACIER_STORAGE_CLASSES = GLACIER_STORAGE_CLASSES; diff --git a/src/sdk/glacier.js b/src/sdk/glacier.js index d44647d00c..e42fb54894 100644 --- a/src/sdk/glacier.js +++ b/src/sdk/glacier.js @@ -339,7 +339,7 @@ class Glacier { */ static get_restore_status(xattr, now, file_path) { const storage_class = Glacier.storage_class_from_xattr(xattr); - if (storage_class !== s3_utils.STORAGE_CLASS_GLACIER) { + if (!s3_utils.GLACIER_STORAGE_CLASSES.includes(storage_class)) { return; } diff --git a/src/sdk/namespace_fs.js b/src/sdk/namespace_fs.js index 16cc5d7c42..8673b5eca3 100644 --- a/src/sdk/namespace_fs.js +++ b/src/sdk/namespace_fs.js @@ -1077,7 +1077,7 @@ class NamespaceFS { // Disallow read if the object is in Glacier storage class and isn't restored const obj_storage_class = Glacier.storage_class_from_xattr(stat.xattr); const obj_restore_status = Glacier.get_restore_status(stat.xattr, new Date(), file_path); - if (obj_storage_class === s3_utils.STORAGE_CLASS_GLACIER) { + if (s3_utils.GLACIER_STORAGE_CLASSES.includes(obj_storage_class)) { if (obj_restore_status?.ongoing || !obj_restore_status?.expiry_time) { dbg.warn('read_object_stream: object is not restored yet', obj_restore_status); throw new S3Error(S3Error.InvalidObjectState); @@ -1307,7 +1307,7 @@ class NamespaceFS { const src_storage_class = Glacier.storage_class_from_xattr(stat.xattr); const src_restore_status = Glacier.get_restore_status(stat.xattr, new Date(), src_file_path); - if (src_storage_class === s3_utils.STORAGE_CLASS_GLACIER) { + if (s3_utils.GLACIER_STORAGE_CLASSES.includes(src_storage_class)) { if (src_restore_status?.ongoing || !src_restore_status?.expiry_time) { 
dbg.warn('_validate_upload: object is not restored yet', src_restore_status); throw new S3Error(S3Error.InvalidObjectState); @@ -1317,7 +1317,7 @@ class NamespaceFS { } } - return params.copy_source && params.storage_class === s3_utils.STORAGE_CLASS_GLACIER; + return params.copy_source && s3_utils.GLACIER_STORAGE_CLASSES.includes(params.storage_class); } // on put part - file path is equal to upload path @@ -1363,7 +1363,7 @@ class NamespaceFS { [Glacier.STORAGE_CLASS_XATTR]: params.storage_class }); - if (params.storage_class === s3_utils.STORAGE_CLASS_GLACIER) { + if (s3_utils.GLACIER_STORAGE_CLASSES.includes(params.storage_class)) { await this.append_to_migrate_wal(file_path); } } @@ -2297,7 +2297,7 @@ class NamespaceFS { return { accepted: false, expires_on, - storage_class: s3_utils.STORAGE_CLASS_GLACIER + storage_class: Glacier.storage_class_from_xattr(stat.xattr) }; } } catch (error) { @@ -3539,15 +3539,9 @@ class NamespaceFS { } async _is_storage_class_supported(storage_class) { - const glacier_storage_classes = [ - s3_utils.STORAGE_CLASS_GLACIER, - s3_utils.STORAGE_CLASS_GLACIER_DA, - s3_utils.STORAGE_CLASS_GLACIER_IR, - ]; - if (!storage_class || storage_class === s3_utils.STORAGE_CLASS_STANDARD) return true; - if (glacier_storage_classes.includes(storage_class)) { + if (s3_utils.GLACIER_STORAGE_CLASSES.includes(storage_class)) { return config.NSFS_GLACIER_ENABLED || false; } @@ -3619,7 +3613,7 @@ class NamespaceFS { if (!config.NSFS_GLACIER_FORCE_EXPIRE_ON_GET) return; const storage_class = s3_utils.parse_storage_class(stat.xattr[Glacier.STORAGE_CLASS_XATTR]); - if (storage_class !== s3_utils.STORAGE_CLASS_GLACIER) return; + if (!s3_utils.GLACIER_STORAGE_CLASSES.includes(storage_class)) return; // Remove all the restore related xattrs await file.replacexattr(fs_context, { diff --git a/src/sdk/nb.d.ts b/src/sdk/nb.d.ts index c1d14a7227..14b4a94c92 100644 --- a/src/sdk/nb.d.ts +++ b/src/sdk/nb.d.ts @@ -17,7 +17,7 @@ type DigestType = 'sha1' | 'sha256' | 
'sha384' | 'sha512'; type CompressType = 'snappy' | 'zlib'; type CipherType = 'aes-256-gcm'; type ParityType = 'isa-c1' | 'isa-rs' | 'cm256'; -type StorageClass = 'STANDARD' | 'GLACIER' | 'GLACIER_IR' | 'GLACIER_DA'; +type StorageClass = 'STANDARD' | 'GLACIER' | 'GLACIER_IR' | 'DEEP_ARCHIVE'; type ResourceType = 'HOSTS' | 'CLOUD' | 'INTERNAL'; type NodeType = 'BLOCK_STORE_S3' |