Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions config.js
Original file line number Diff line number Diff line change
Expand Up @@ -197,12 +197,12 @@ config.STS_CORS_EXPOSE_HEADERS = 'ETag';
config.DENY_UPLOAD_TO_STORAGE_CLASS_STANDARD = false;

/**
 * NSFS_GLACIER_FORCE_STORAGE_CLASS controls which storage class
 * is used when no storage class or the `STANDARD` storage class
 * is provided on upload.
 *
 * Left `undefined` by default — no forcing takes place and the
 * requested (or default `STANDARD`) class is honored.
 * @type {nb.StorageClass}
 */
config.NSFS_GLACIER_FORCE_STORAGE_CLASS = undefined;

// S3_RESTORE_MAX_DAYS controls that for how many maximum number
// of days an object can be restored using `restore-object` call.
Expand Down
2 changes: 1 addition & 1 deletion src/endpoint/s3/ops/s3_get_object.js
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ async function get_object(req, res) {

s3_utils.set_response_object_md(res, object_md);
s3_utils.set_encryption_response_headers(req, res, object_md.encryption);
if (object_md.storage_class === s3_utils.STORAGE_CLASS_GLACIER) {
if (s3_utils.GLACIER_STORAGE_CLASSES.includes(object_md.storage_class)) {
if (object_md.restore_status?.ongoing || !object_md.restore_status?.expiry_time) {
// Don't try to read the object if it's not restored yet
dbg.warn('Object is not restored yet', req.path, object_md.restore_status);
Expand Down
3 changes: 0 additions & 3 deletions src/endpoint/s3/s3_rest.js
Original file line number Diff line number Diff line change
Expand Up @@ -107,9 +107,6 @@ async function handle_request(req, res) {
}
http_utils.check_headers(req, headers_options);

// Will override the storage class if configured
s3_utils.override_storage_class(req);

const redirect = await populate_request_additional_info_or_redirect(req);
if (redirect) {
res.setHeader('Location', redirect);
Expand Down
33 changes: 19 additions & 14 deletions src/endpoint/s3/s3_utils.js
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ const STORAGE_CLASS_GLACIER = 'GLACIER'; // "S3 Glacier Flexible Retrieval"
/** @type {nb.StorageClass} */
const STORAGE_CLASS_GLACIER_IR = 'GLACIER_IR'; // "S3 Glacier Instant Retrieval"
/** @type {nb.StorageClass} */
const STORAGE_CLASS_GLACIER_DA = 'GLACIER_DA'; // "DBS3 specific Storage Class"
const STORAGE_CLASS_DEEP_ARCHIVE = 'DEEP_ARCHIVE'; // "S3 Deep Archive Storage Class"

const DEFAULT_S3_USER = Object.freeze({
ID: '123',
Expand All @@ -44,6 +44,17 @@ const X_NOOBAA_AVAILABLE_STORAGE_CLASSES = 'x-noobaa-available-storage-classes';
const OBJECT_ATTRIBUTES = Object.freeze(['ETag', 'Checksum', 'ObjectParts', 'StorageClass', 'ObjectSize']);
const OBJECT_ATTRIBUTES_UNSUPPORTED = Object.freeze(['Checksum', 'ObjectParts']);

/**
 * Set of storage classes which require the RestoreObject S3 API
 * before an object can be read.
 *
 * GLACIER_IR is intentionally omitted as it supports instant
 * retrieval and doesn't require a restore.
 * @type {nb.StorageClass[]}
 */
const GLACIER_STORAGE_CLASSES = [
    STORAGE_CLASS_GLACIER,
    STORAGE_CLASS_DEEP_ARCHIVE,
];

/**
* get_default_object_owner returns bucket_owner info if exists
* else it'll return the default owner
Expand Down Expand Up @@ -381,10 +392,12 @@ function parse_storage_class_header(req) {
* @returns {nb.StorageClass}
*/
function parse_storage_class(storage_class) {
    // Apply the forced storage class only when the caller supplied no
    // class or `STANDARD` — per NSFS_GLACIER_FORCE_STORAGE_CLASS's
    // documented contract. An explicitly requested non-STANDARD class
    // (e.g. GLACIER_IR) must not be silently remapped.
    if (config.NSFS_GLACIER_FORCE_STORAGE_CLASS &&
        (!storage_class || storage_class === STORAGE_CLASS_STANDARD)) {
        storage_class = config.NSFS_GLACIER_FORCE_STORAGE_CLASS;
    }
    if (!storage_class || storage_class === STORAGE_CLASS_STANDARD) return STORAGE_CLASS_STANDARD;
    if (storage_class === STORAGE_CLASS_GLACIER) return STORAGE_CLASS_GLACIER;
    if (storage_class === STORAGE_CLASS_DEEP_ARCHIVE) return STORAGE_CLASS_DEEP_ARCHIVE;
    if (storage_class === STORAGE_CLASS_GLACIER_IR) return STORAGE_CLASS_GLACIER_IR;
    throw new Error(`No such s3 storage class ${storage_class}`);
}
Expand Down Expand Up @@ -822,19 +835,11 @@ function parse_body_public_access_block(req) {
return parsed;
}

/**
 * Rewrites the request's storage-class header to GLACIER when the
 * deployment is configured to force a Glacier storage class and the
 * request asked for (or defaulted to) STANDARD.
 * @param {Object} req - incoming S3 request (headers are mutated in place)
 */
function override_storage_class(req) {
    if (!config.NSFS_GLACIER_FORCE_STORAGE_CLASS) return;
    if (parse_storage_class_header(req) !== STORAGE_CLASS_STANDARD) return;
    req.headers['x-amz-storage-class'] = STORAGE_CLASS_GLACIER;
}

exports.STORAGE_CLASS_STANDARD = STORAGE_CLASS_STANDARD;
exports.STORAGE_CLASS_GLACIER = STORAGE_CLASS_GLACIER;
exports.STORAGE_CLASS_GLACIER_IR = STORAGE_CLASS_GLACIER_IR;
exports.STORAGE_CLASS_GLACIER_DA = STORAGE_CLASS_GLACIER_DA;
exports.STORAGE_CLASS_DEEP_ARCHIVE = STORAGE_CLASS_DEEP_ARCHIVE;
exports.DEFAULT_S3_USER = DEFAULT_S3_USER;
exports.DEFAULT_OBJECT_ACL = DEFAULT_OBJECT_ACL;
exports.decode_chunked_upload = decode_chunked_upload;
Expand Down Expand Up @@ -876,6 +881,6 @@ exports.key_marker_to_cont_tok = key_marker_to_cont_tok;
exports.parse_sse_c = parse_sse_c;
exports.verify_string_byte_length = verify_string_byte_length;
exports.parse_body_public_access_block = parse_body_public_access_block;
exports.override_storage_class = override_storage_class;
exports.OBJECT_ATTRIBUTES = OBJECT_ATTRIBUTES;
exports.OBJECT_ATTRIBUTES_UNSUPPORTED = OBJECT_ATTRIBUTES_UNSUPPORTED;
exports.GLACIER_STORAGE_CLASSES = GLACIER_STORAGE_CLASSES;
2 changes: 1 addition & 1 deletion src/sdk/glacier.js
Original file line number Diff line number Diff line change
Expand Up @@ -339,7 +339,7 @@ class Glacier {
*/
static get_restore_status(xattr, now, file_path) {
const storage_class = Glacier.storage_class_from_xattr(xattr);
if (storage_class !== s3_utils.STORAGE_CLASS_GLACIER) {
if (!s3_utils.GLACIER_STORAGE_CLASSES.includes(storage_class)) {
return;
}

Expand Down
20 changes: 7 additions & 13 deletions src/sdk/namespace_fs.js
Original file line number Diff line number Diff line change
Expand Up @@ -1077,7 +1077,7 @@ class NamespaceFS {
// Disallow read if the object is in Glacier storage class and isn't restored
const obj_storage_class = Glacier.storage_class_from_xattr(stat.xattr);
const obj_restore_status = Glacier.get_restore_status(stat.xattr, new Date(), file_path);
if (obj_storage_class === s3_utils.STORAGE_CLASS_GLACIER) {
if (s3_utils.GLACIER_STORAGE_CLASSES.includes(obj_storage_class)) {
if (obj_restore_status?.ongoing || !obj_restore_status?.expiry_time) {
dbg.warn('read_object_stream: object is not restored yet', obj_restore_status);
throw new S3Error(S3Error.InvalidObjectState);
Expand Down Expand Up @@ -1307,7 +1307,7 @@ class NamespaceFS {
const src_storage_class = Glacier.storage_class_from_xattr(stat.xattr);
const src_restore_status = Glacier.get_restore_status(stat.xattr, new Date(), src_file_path);

if (src_storage_class === s3_utils.STORAGE_CLASS_GLACIER) {
if (s3_utils.GLACIER_STORAGE_CLASSES.includes(src_storage_class)) {
if (src_restore_status?.ongoing || !src_restore_status?.expiry_time) {
dbg.warn('_validate_upload: object is not restored yet', src_restore_status);
throw new S3Error(S3Error.InvalidObjectState);
Expand All @@ -1317,7 +1317,7 @@ class NamespaceFS {
}
}

return params.copy_source && params.storage_class === s3_utils.STORAGE_CLASS_GLACIER;
return params.copy_source && s3_utils.GLACIER_STORAGE_CLASSES.includes(params.storage_class);
}

// on put part - file path is equal to upload path
Expand Down Expand Up @@ -1363,7 +1363,7 @@ class NamespaceFS {
[Glacier.STORAGE_CLASS_XATTR]: params.storage_class
});

if (params.storage_class === s3_utils.STORAGE_CLASS_GLACIER) {
if (s3_utils.GLACIER_STORAGE_CLASSES.includes(params.storage_class)) {
await this.append_to_migrate_wal(file_path);
}
}
Expand Down Expand Up @@ -2297,7 +2297,7 @@ class NamespaceFS {
return {
accepted: false,
expires_on,
storage_class: s3_utils.STORAGE_CLASS_GLACIER
storage_class: Glacier.storage_class_from_xattr(stat.xattr)
};
}
} catch (error) {
Expand Down Expand Up @@ -3539,15 +3539,9 @@ class NamespaceFS {
}

async _is_storage_class_supported(storage_class) {
const glacier_storage_classes = [
s3_utils.STORAGE_CLASS_GLACIER,
s3_utils.STORAGE_CLASS_GLACIER_DA,
s3_utils.STORAGE_CLASS_GLACIER_IR,
];

if (!storage_class || storage_class === s3_utils.STORAGE_CLASS_STANDARD) return true;

if (glacier_storage_classes.includes(storage_class)) {
if (s3_utils.GLACIER_STORAGE_CLASSES.includes(storage_class)) {
return config.NSFS_GLACIER_ENABLED || false;
}

Expand Down Expand Up @@ -3619,7 +3613,7 @@ class NamespaceFS {
if (!config.NSFS_GLACIER_FORCE_EXPIRE_ON_GET) return;

const storage_class = s3_utils.parse_storage_class(stat.xattr[Glacier.STORAGE_CLASS_XATTR]);
if (storage_class !== s3_utils.STORAGE_CLASS_GLACIER) return;
if (!s3_utils.GLACIER_STORAGE_CLASSES.includes(storage_class)) return;

// Remove all the restore related xattrs
await file.replacexattr(fs_context, {
Expand Down
2 changes: 1 addition & 1 deletion src/sdk/nb.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ type DigestType = 'sha1' | 'sha256' | 'sha384' | 'sha512';
type CompressType = 'snappy' | 'zlib';
type CipherType = 'aes-256-gcm';
type ParityType = 'isa-c1' | 'isa-rs' | 'cm256';
type StorageClass = 'STANDARD' | 'GLACIER' | 'GLACIER_IR' | 'GLACIER_DA';
type StorageClass = 'STANDARD' | 'GLACIER' | 'GLACIER_IR' | 'DEEP_ARCHIVE';
type ResourceType = 'HOSTS' | 'CLOUD' | 'INTERNAL';
type NodeType =
'BLOCK_STORE_S3' |
Expand Down