Commit ba8654a2 authored by Stan Hu's avatar Stan Hu

Add support for specifying AWS S3 Server Side Encryption (AWS-KMS)

Prior to this change, uploads to AWS S3 were only encrypted on the
server if a default encryption was specified on the bucket.

With this change, admins can now configure the encryption and the AWS
Key Management Service (KMS) key ID in GitLab Rails, and the
configuration will be used in uploads. Bucket policies to enforce
encryption can now be used since Workhorse sends the required headers
(`x-amz-server-side-encryption` and
`x-amz-server-side-encryption-aws-kms-key-id`).

This also refactors the object storage config parsing.

This requires
https://gitlab.com/gitlab-org/gitlab-workhorse/-/merge_requests/537 to
work.

Relates to https://gitlab.com/gitlab-org/gitlab/-/issues/22200
parent 932d5efc
...@@ -169,10 +169,6 @@ module ObjectStorage ...@@ -169,10 +169,6 @@ module ObjectStorage
object_store_options.connection.to_hash.deep_symbolize_keys object_store_options.connection.to_hash.deep_symbolize_keys
end end
def consolidated_settings?
object_store_options.fetch('consolidated_settings', false)
end
def remote_store_path def remote_store_path
object_store_options.remote_directory object_store_options.remote_directory
end end
...@@ -193,14 +189,18 @@ module ObjectStorage ...@@ -193,14 +189,18 @@ module ObjectStorage
File.join(self.root, TMP_UPLOAD_PATH) File.join(self.root, TMP_UPLOAD_PATH)
end end
def object_store_config
ObjectStorage::Config.new(object_store_options)
end
def workhorse_remote_upload_options(has_length:, maximum_size: nil) def workhorse_remote_upload_options(has_length:, maximum_size: nil)
return unless self.object_store_enabled? return unless self.object_store_enabled?
return unless self.direct_upload_enabled? return unless self.direct_upload_enabled?
id = [CarrierWave.generate_cache_id, SecureRandom.hex].join('-') id = [CarrierWave.generate_cache_id, SecureRandom.hex].join('-')
upload_path = File.join(TMP_UPLOAD_PATH, id) upload_path = File.join(TMP_UPLOAD_PATH, id)
direct_upload = ObjectStorage::DirectUpload.new(self.object_store_credentials, remote_store_path, upload_path, direct_upload = ObjectStorage::DirectUpload.new(self.object_store_config, upload_path,
has_length: has_length, maximum_size: maximum_size, consolidated_settings: consolidated_settings?) has_length: has_length, maximum_size: maximum_size)
direct_upload.to_hash.merge(ID: id) direct_upload.to_hash.merge(ID: id)
end end
...@@ -283,6 +283,10 @@ module ObjectStorage ...@@ -283,6 +283,10 @@ module ObjectStorage
self.class.object_store_credentials self.class.object_store_credentials
end end
def fog_attributes
@fog_attributes ||= self.class.object_store_config.fog_attributes
end
# Set ACL of uploaded objects to not-public (fog-aws)[1] or no ACL at all # Set ACL of uploaded objects to not-public (fog-aws)[1] or no ACL at all
# (fog-google). Value is ignored by other supported backends (fog-aliyun, # (fog-google). Value is ignored by other supported backends (fog-aliyun,
# fog-openstack, fog-rackspace) # fog-openstack, fog-rackspace)
......
---
title: Add support for specifying AWS S3 Server Side Encryption (AWS-KMS)
merge_request: 38240
author:
type: added
...@@ -218,6 +218,9 @@ production: &base ...@@ -218,6 +218,9 @@ production: &base
# region: us-east-1 # region: us-east-1
# aws_signature_version: 4 # For creation of signed URLs. Set to 2 if provider does not support v4. # aws_signature_version: 4 # For creation of signed URLs. Set to 2 if provider does not support v4.
# endpoint: 'https://s3.amazonaws.com' # default: nil - Useful for S3 compliant services such as DigitalOcean Spaces # endpoint: 'https://s3.amazonaws.com' # default: nil - Useful for S3 compliant services such as DigitalOcean Spaces
# storage_options:
# server_side_encryption: AES256 # AES256, aws:kms
# server_side_encryption_kms_key_id: # Amazon Resource Name. See https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
# objects: # objects:
# artifacts: # artifacts:
# bucket: artifacts # bucket: artifacts
......
# frozen_string_literal: true

require "carrierwave/storage/fog"

# This pulls in https://github.com/carrierwaveuploader/carrierwave/pull/2504 to support
# sending AWS S3 encryption headers when copying objects.
module CarrierWave
  module Storage
    class Fog < Abstract
      class File
        # Server-side copy of the stored object to +new_path+ within the same
        # fog directory, forwarding the copy headers so providers such as S3
        # keep encryption settings on the copied object. Returns a new File
        # handle pointing at the copy.
        def copy_to(new_path)
          directory = @uploader.fog_directory
          connection.copy_object(directory, file.key, directory, new_path, copy_to_options)
          CarrierWave::Storage::Fog::File.new(@uploader, @base, new_path)
        end

        # Headers for the copy request: the ACL header merged with the
        # uploader's fog attributes (uploader attributes take precedence,
        # e.g. the x-amz-server-side-encryption headers).
        def copy_to_options
          acl_header.merge(@uploader.fog_attributes)
        end
      end
    end
  end
end
...@@ -13,6 +13,7 @@ class ObjectStoreSettings ...@@ -13,6 +13,7 @@ class ObjectStoreSettings
object_store['direct_upload'] = false if object_store['direct_upload'].nil? object_store['direct_upload'] = false if object_store['direct_upload'].nil?
object_store['background_upload'] = true if object_store['background_upload'].nil? object_store['background_upload'] = true if object_store['background_upload'].nil?
object_store['proxy_download'] = false if object_store['proxy_download'].nil? object_store['proxy_download'] = false if object_store['proxy_download'].nil?
object_store['storage_options'] ||= {}
# Convert upload connection settings to use string keys, to make Fog happy # Convert upload connection settings to use string keys, to make Fog happy
object_store['connection']&.deep_stringify_keys! object_store['connection']&.deep_stringify_keys!
...@@ -37,6 +38,8 @@ class ObjectStoreSettings ...@@ -37,6 +38,8 @@ class ObjectStoreSettings
# region: gdk # region: gdk
# endpoint: 'http://127.0.0.1:9000' # endpoint: 'http://127.0.0.1:9000'
# path_style: true # path_style: true
# storage_options:
# server_side_encryption: AES256
# proxy_download: true # proxy_download: true
# objects: # objects:
# artifacts: # artifacts:
...@@ -49,7 +52,7 @@ class ObjectStoreSettings ...@@ -49,7 +52,7 @@ class ObjectStoreSettings
# #
# Settings.artifacts['object_store'] = { # Settings.artifacts['object_store'] = {
# "enabled" => true, # "enabled" => true,
# "connection"=> { # "connection" => {
# "provider" => "AWS", # "provider" => "AWS",
# "aws_access_key_id" => "minio", # "aws_access_key_id" => "minio",
# "aws_secret_access_key" => "gdk-minio", # "aws_secret_access_key" => "gdk-minio",
...@@ -57,6 +60,9 @@ class ObjectStoreSettings ...@@ -57,6 +60,9 @@ class ObjectStoreSettings
# "endpoint" => "http://127.0.0.1:9000", # "endpoint" => "http://127.0.0.1:9000",
# "path_style" => true # "path_style" => true
# }, # },
# "storage_options" => {
# "server_side_encryption" => "AES256"
# },
# "direct_upload" => true, # "direct_upload" => true,
# "background_upload" => false, # "background_upload" => false,
# "proxy_download" => false, # "proxy_download" => false,
...@@ -73,6 +79,9 @@ class ObjectStoreSettings ...@@ -73,6 +79,9 @@ class ObjectStoreSettings
# "endpoint" => "http://127.0.0.1:9000", # "endpoint" => "http://127.0.0.1:9000",
# "path_style" => true # "path_style" => true
# }, # },
# "storage_options" => {
# "server_side_encryption" => "AES256"
# },
# "direct_upload" => true, # "direct_upload" => true,
# "background_upload" => false, # "background_upload" => false,
# "proxy_download" => true, # "proxy_download" => true,
...@@ -91,12 +100,13 @@ class ObjectStoreSettings ...@@ -91,12 +100,13 @@ class ObjectStoreSettings
return unless use_consolidated_settings? return unless use_consolidated_settings?
main_config = settings['object_store'] main_config = settings['object_store']
common_config = main_config.slice('enabled', 'connection', 'proxy_download') common_config = main_config.slice('enabled', 'connection', 'proxy_download', 'storage_options')
# Convert connection settings to use string keys, to make Fog happy # Convert connection settings to use string keys, to make Fog happy
common_config['connection']&.deep_stringify_keys! common_config['connection']&.deep_stringify_keys!
# These are no longer configurable if common config is used # These are no longer configurable if common config is used
common_config['direct_upload'] = true common_config['direct_upload'] = true
common_config['background_upload'] = false common_config['background_upload'] = false
common_config['storage_options'] ||= {}
SUPPORTED_TYPES.each do |store_type| SUPPORTED_TYPES.each do |store_type|
overrides = main_config.dig('objects', store_type) || {} overrides = main_config.dig('objects', store_type) || {}
......
# frozen_string_literal: true

module ObjectStorage
  # Wraps the parsed object storage settings for an uploader: the fog
  # connection credentials, the bucket, and provider-specific storage
  # options such as AWS server-side encryption.
  class Config
    attr_reader :options

    # options - a Hash-like settings object (e.g. Settingslogic). Keys are
    # symbolized so string- and symbol-keyed sources behave identically.
    def initialize(options)
      @options = options.to_hash.deep_symbolize_keys
    end

    # Fog connection credentials (provider, keys, region, ...). Defaults to
    # an empty Hash so callers can safely #fetch from it.
    def credentials
      @credentials ||= options[:connection] || {}
    end

    # Provider-specific storage settings (e.g. server_side_encryption).
    def storage_options
      @storage_options ||= options[:storage_options] || {}
    end

    def enabled?
      options[:enabled]
    end

    def bucket
      options[:remote_directory]
    end

    # True when the consolidated (single-bucket) object storage
    # configuration form is in use.
    def consolidated_settings?
      options.fetch(:consolidated_settings, false)
    end

    # AWS-specific options
    def aws?
      provider == 'AWS'
    end

    def use_iam_profile?
      credentials.fetch(:use_iam_profile, false)
    end

    def use_path_style?
      credentials.fetch(:path_style, false)
    end

    def server_side_encryption
      storage_options[:server_side_encryption]
    end

    def server_side_encryption_kms_key_id
      storage_options[:server_side_encryption_kms_key_id]
    end

    def provider
      credentials[:provider].to_s
    end
    # End AWS-specific options

    def google?
      provider == 'Google'
    end

    # Attributes attached to uploaded objects: the S3 server-side encryption
    # headers when enabled on AWS, otherwise an empty Hash.
    #
    # Fix: the previous implementation used `return {}` inside the memoized
    # `begin` block, which exited the method before the `||=` assignment ran,
    # so the empty-Hash outcome was recomputed on every call. This form
    # memoizes every outcome.
    def fog_attributes
      @fog_attributes ||= aws_encryption_enabled? ? aws_server_side_encryption_headers.compact : {}
    end

    private

    # Encryption headers should only be sent when object storage is enabled,
    # the provider is AWS, and an encryption mode is configured.
    def aws_encryption_enabled?
      enabled? && aws? && server_side_encryption.present?
    end

    # SSE headers understood by S3; the KMS key ID entry is removed by the
    # caller's #compact when no key is configured.
    def aws_server_side_encryption_headers
      {
        'x-amz-server-side-encryption' => server_side_encryption,
        'x-amz-server-side-encryption-aws-kms-key-id' => server_side_encryption_kms_key_id
      }
    end
  end
end
...@@ -22,20 +22,20 @@ module ObjectStorage ...@@ -22,20 +22,20 @@ module ObjectStorage
MAXIMUM_MULTIPART_PARTS = 100 MAXIMUM_MULTIPART_PARTS = 100
MINIMUM_MULTIPART_SIZE = 5.megabytes MINIMUM_MULTIPART_SIZE = 5.megabytes
attr_reader :credentials, :bucket_name, :object_name attr_reader :config, :credentials, :bucket_name, :object_name
attr_reader :has_length, :maximum_size, :consolidated_settings attr_reader :has_length, :maximum_size
def initialize(credentials, bucket_name, object_name, has_length:, maximum_size: nil, consolidated_settings: false) def initialize(config, object_name, has_length:, maximum_size: nil)
unless has_length unless has_length
raise ArgumentError, 'maximum_size has to be specified if length is unknown' unless maximum_size raise ArgumentError, 'maximum_size has to be specified if length is unknown' unless maximum_size
end end
@credentials = credentials @config = config
@bucket_name = bucket_name @credentials = config.credentials
@bucket_name = config.bucket
@object_name = object_name @object_name = object_name
@has_length = has_length @has_length = has_length
@maximum_size = maximum_size @maximum_size = maximum_size
@consolidated_settings = consolidated_settings
end end
def to_hash def to_hash
...@@ -62,7 +62,7 @@ module ObjectStorage ...@@ -62,7 +62,7 @@ module ObjectStorage
end end
def workhorse_client_hash def workhorse_client_hash
return {} unless aws? return {} unless config.aws?
{ {
UseWorkhorseClient: use_workhorse_s3_client?, UseWorkhorseClient: use_workhorse_s3_client?,
...@@ -73,16 +73,18 @@ module ObjectStorage ...@@ -73,16 +73,18 @@ module ObjectStorage
Bucket: bucket_name, Bucket: bucket_name,
Region: credentials[:region], Region: credentials[:region],
Endpoint: credentials[:endpoint], Endpoint: credentials[:endpoint],
PathStyle: credentials.fetch(:path_style, false), PathStyle: config.use_path_style?,
UseIamProfile: credentials.fetch(:use_iam_profile, false) UseIamProfile: config.use_iam_profile?,
} ServerSideEncryption: config.server_side_encryption,
SSEKMSKeyID: config.server_side_encryption_kms_key_id
}.compact
} }
} }
end end
def use_workhorse_s3_client? def use_workhorse_s3_client?
return false unless Feature.enabled?(:use_workhorse_s3_client, default_enabled: true) return false unless Feature.enabled?(:use_workhorse_s3_client, default_enabled: true)
return false unless credentials.fetch(:use_iam_profile, false) || consolidated_settings return false unless config.use_iam_profile? || config.consolidated_settings?
# The Golang AWS SDK does not support V2 signatures # The Golang AWS SDK does not support V2 signatures
return false unless credentials.fetch(:aws_signature_version, 4).to_i >= 4 return false unless credentials.fetch(:aws_signature_version, 4).to_i >= 4
...@@ -95,7 +97,7 @@ module ObjectStorage ...@@ -95,7 +97,7 @@ module ObjectStorage
# Implements https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html # Implements https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
def get_url def get_url
if google? if config.google?
connection.get_object_https_url(bucket_name, object_name, expire_at) connection.get_object_https_url(bucket_name, object_name, expire_at)
else else
connection.get_object_url(bucket_name, object_name, expire_at) connection.get_object_url(bucket_name, object_name, expire_at)
...@@ -169,23 +171,15 @@ module ObjectStorage ...@@ -169,23 +171,15 @@ module ObjectStorage
].min ].min
end end
def aws?
provider == 'AWS'
end
def google?
provider == 'Google'
end
def requires_multipart_upload? def requires_multipart_upload?
aws? && !has_length config.aws? && !has_length
end end
def upload_id def upload_id
return unless requires_multipart_upload? return unless requires_multipart_upload?
strong_memoize(:upload_id) do strong_memoize(:upload_id) do
new_upload = connection.initiate_multipart_upload(bucket_name, object_name) new_upload = connection.initiate_multipart_upload(bucket_name, object_name, config.fog_attributes)
new_upload.body["UploadId"] new_upload.body["UploadId"]
end end
end end
......
# frozen_string_literal: true

require 'fast_spec_helper'

RSpec.describe ObjectStorage::Config do
  let(:region) { 'us-east-1' }
  let(:bucket_name) { 'test-bucket' }
  let(:path_style) { false }
  let(:use_iam_profile) { false }

  let(:credentials) do
    {
      provider: 'AWS',
      aws_access_key_id: 'AWS_ACCESS_KEY_ID',
      aws_secret_access_key: 'AWS_SECRET_ACCESS_KEY',
      region: region,
      path_style: path_style,
      use_iam_profile: use_iam_profile
    }
  end

  let(:storage_options) do
    {
      server_side_encryption: 'AES256',
      server_side_encryption_kms_key_id: 'arn:aws:12345'
    }
  end

  let(:raw_config) do
    {
      enabled: true,
      connection: credentials,
      remote_directory: bucket_name,
      storage_options: storage_options
    }
  end

  subject { described_class.new(raw_config.as_json) }

  describe '#credentials' do
    it { expect(subject.credentials).to eq(credentials) }
  end

  describe '#storage_options' do
    it { expect(subject.storage_options).to eq(storage_options) }
  end

  describe '#enabled?' do
    it { expect(subject.enabled?).to eq(true) }
  end

  describe '#bucket' do
    it { expect(subject.bucket).to eq(bucket_name) }
  end

  context 'with unconsolidated settings' do
    describe 'consolidated_settings? returns false' do
      it { expect(subject.consolidated_settings?).to be false }
    end
  end

  context 'with consolidated settings' do
    before do
      raw_config[:consolidated_settings] = true
    end

    describe 'consolidated_settings? returns true' do
      it { expect(subject.consolidated_settings?).to be true }
    end
  end

  context 'with IAM profile in use' do
    let(:use_iam_profile) { true }

    it '#use_iam_profile? returns true' do
      expect(subject.use_iam_profile?).to be true
    end
  end

  context 'with IAM profile not in use' do
    it '#use_iam_profile? returns false' do
      expect(subject.use_iam_profile?).to be false
    end
  end

  context 'with path style' do
    let(:path_style) { true }

    it '#use_path_style? returns true' do
      expect(subject.use_path_style?).to be true
    end
  end

  context 'with hostname style access' do
    it '#use_path_style? returns false' do
      expect(subject.use_path_style?).to be false
    end
  end

  context 'with AWS credentials' do
    it { expect(subject.provider).to eq('AWS') }
    it { expect(subject.aws?).to be true }
    it { expect(subject.google?).to be false }
  end

  context 'with Google credentials' do
    let(:credentials) do
      {
        provider: 'Google',
        google_client_email: 'foo@gcp-project.example.com',
        google_json_key_location: '/path/to/gcp.json'
      }
    end

    it { expect(subject.provider).to eq('Google') }
    it { expect(subject.aws?).to be false }
    it { expect(subject.google?).to be true }
    it { expect(subject.fog_attributes).to eq({}) }
  end

  context 'with SSE-KMS enabled' do
    it { expect(subject.server_side_encryption).to eq('AES256') }
    it { expect(subject.server_side_encryption_kms_key_id).to eq('arn:aws:12345') }
    it { expect(subject.fog_attributes.keys).to match_array(%w(x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id)) }
  end

  context 'with only server side encryption enabled' do
    let(:storage_options) { { server_side_encryption: 'AES256' } }

    it { expect(subject.server_side_encryption).to eq('AES256') }
    it { expect(subject.server_side_encryption_kms_key_id).to be_nil }
    it { expect(subject.fog_attributes).to eq({ 'x-amz-server-side-encryption' => 'AES256' }) }
  end

  context 'without encryption enabled' do
    let(:storage_options) { {} }

    it { expect(subject.server_side_encryption).to be_nil }
    it { expect(subject.server_side_encryption_kms_key_id).to be_nil }
    it { expect(subject.fog_attributes).to eq({}) }
  end

  context 'with object storage disabled' do
    before do
      # Fix: use a symbol key like every other override in this spec. The
      # previous string key ('enabled') relied on Hash#as_json stringifying
      # keys in insertion order to shadow the symbol entry — fragile and
      # inconsistent with raw_config[:consolidated_settings] above.
      raw_config[:enabled] = false
    end

    it { expect(subject.enabled?).to be false }
    it { expect(subject.fog_attributes).to eq({}) }
  end
end
...@@ -18,13 +18,25 @@ RSpec.describe ObjectStorage::DirectUpload do ...@@ -18,13 +18,25 @@ RSpec.describe ObjectStorage::DirectUpload do
} }
end end
let(:storage_options) { {} }
let(:raw_config) do
{
enabled: true,
connection: credentials,
remote_directory: bucket_name,
storage_options: storage_options,
consolidated_settings: consolidated_settings
}
end
let(:config) { ObjectStorage::Config.new(raw_config) }
let(:storage_url) { 'https://uploads.s3.amazonaws.com/' } let(:storage_url) { 'https://uploads.s3.amazonaws.com/' }
let(:bucket_name) { 'uploads' } let(:bucket_name) { 'uploads' }
let(:object_name) { 'tmp/uploads/my-file' } let(:object_name) { 'tmp/uploads/my-file' }
let(:maximum_size) { 1.gigabyte } let(:maximum_size) { 1.gigabyte }
let(:direct_upload) { described_class.new(credentials, bucket_name, object_name, has_length: has_length, maximum_size: maximum_size, consolidated_settings: consolidated_settings) } let(:direct_upload) { described_class.new(config, object_name, has_length: has_length, maximum_size: maximum_size) }
before do before do
Fog.unmock! Fog.unmock!
...@@ -62,7 +74,7 @@ RSpec.describe ObjectStorage::DirectUpload do ...@@ -62,7 +74,7 @@ RSpec.describe ObjectStorage::DirectUpload do
end end
describe '#get_url' do describe '#get_url' do
subject { described_class.new(credentials, bucket_name, object_name, has_length: true) } subject { described_class.new(config, object_name, has_length: true) }
context 'when AWS is used' do context 'when AWS is used' do
it 'calls the proper method' do it 'calls the proper method' do
...@@ -111,6 +123,7 @@ RSpec.describe ObjectStorage::DirectUpload do ...@@ -111,6 +123,7 @@ RSpec.describe ObjectStorage::DirectUpload do
expect(s3_config[:Region]).to eq(region) expect(s3_config[:Region]).to eq(region)
expect(s3_config[:PathStyle]).to eq(path_style) expect(s3_config[:PathStyle]).to eq(path_style)
expect(s3_config[:UseIamProfile]).to eq(use_iam_profile) expect(s3_config[:UseIamProfile]).to eq(use_iam_profile)
expect(s3_config.keys).not_to include(%i(ServerSideEncryption SSEKMSKeyID))
end end
context 'when feature flag is disabled' do context 'when feature flag is disabled' do
...@@ -150,6 +163,33 @@ RSpec.describe ObjectStorage::DirectUpload do ...@@ -150,6 +163,33 @@ RSpec.describe ObjectStorage::DirectUpload do
expect(subject[:UseWorkhorseClient]).to be true expect(subject[:UseWorkhorseClient]).to be true
end end
end end
context 'when only server side encryption is used' do
let(:storage_options) { { server_side_encryption: 'AES256' } }
it 'sends server side encryption settings' do
s3_config = subject[:ObjectStorage][:S3Config]
expect(s3_config[:ServerSideEncryption]).to eq('AES256')
expect(s3_config.keys).not_to include(:SSEKMSKeyID)
end
end
context 'when SSE-KMS is used' do
let(:storage_options) do
{
server_side_encryption: 'AES256',
server_side_encryption_kms_key_id: 'arn:aws:12345'
}
end
it 'sends server side encryption settings' do
s3_config = subject[:ObjectStorage][:S3Config]
expect(s3_config[:ServerSideEncryption]).to eq('AES256')
expect(s3_config[:SSEKMSKeyID]).to eq('arn:aws:12345')
end
end
end end
shared_examples 'a valid Google upload' do shared_examples 'a valid Google upload' do
......
...@@ -382,6 +382,32 @@ RSpec.describe ObjectStorage do ...@@ -382,6 +382,32 @@ RSpec.describe ObjectStorage do
it { is_expected.to eq(nil) } it { is_expected.to eq(nil) }
end end
describe '#fog_attributes' do
subject { uploader.fog_attributes }
it { is_expected.to eq({}) }
context 'with encryption configured' do
let(:raw_options) do
{
"enabled" => true,
"connection" => { "provider" => 'AWS' },
"storage_options" => { "server_side_encryption" => "AES256" }
}
end
let(:options) { Settingslogic.new(raw_options) }
before do
allow(uploader_class).to receive(:options) do
double(object_store: options)
end
end
it { is_expected.to eq({ "x-amz-server-side-encryption" => "AES256" }) }
end
end
describe '.workhorse_authorize' do describe '.workhorse_authorize' do
let(:has_length) { true } let(:has_length) { true }
let(:maximum_size) { nil } let(:maximum_size) { nil }
...@@ -459,13 +485,18 @@ RSpec.describe ObjectStorage do ...@@ -459,13 +485,18 @@ RSpec.describe ObjectStorage do
context 'uses AWS' do context 'uses AWS' do
let(:storage_url) { "https://uploads.s3-eu-central-1.amazonaws.com/" } let(:storage_url) { "https://uploads.s3-eu-central-1.amazonaws.com/" }
let(:credentials) do
before do {
expect(uploader_class).to receive(:object_store_credentials) do provider: "AWS",
{ provider: "AWS",
aws_access_key_id: "AWS_ACCESS_KEY_ID", aws_access_key_id: "AWS_ACCESS_KEY_ID",
aws_secret_access_key: "AWS_SECRET_ACCESS_KEY", aws_secret_access_key: "AWS_SECRET_ACCESS_KEY",
region: "eu-central-1" } region: "eu-central-1"
}
end
before do
expect_next_instance_of(ObjectStorage::Config) do |instance|
allow(instance).to receive(:credentials).and_return(credentials)
end end
end end
...@@ -502,12 +533,17 @@ RSpec.describe ObjectStorage do ...@@ -502,12 +533,17 @@ RSpec.describe ObjectStorage do
context 'uses Google' do context 'uses Google' do
let(:storage_url) { "https://storage.googleapis.com/uploads/" } let(:storage_url) { "https://storage.googleapis.com/uploads/" }
let(:credentials) do
{
provider: "Google",
google_storage_access_key_id: 'ACCESS_KEY_ID',
google_storage_secret_access_key: 'SECRET_ACCESS_KEY'
}
end
before do before do
expect(uploader_class).to receive(:object_store_credentials) do expect_next_instance_of(ObjectStorage::Config) do |instance|
{ provider: "Google", allow(instance).to receive(:credentials).and_return(credentials)
google_storage_access_key_id: 'ACCESS_KEY_ID',
google_storage_secret_access_key: 'SECRET_ACCESS_KEY' }
end end
end end
...@@ -537,9 +573,7 @@ RSpec.describe ObjectStorage do ...@@ -537,9 +573,7 @@ RSpec.describe ObjectStorage do
context 'uses GDK/minio' do context 'uses GDK/minio' do
let(:storage_url) { "http://minio:9000/uploads/" } let(:storage_url) { "http://minio:9000/uploads/" }
let(:credentials) do
before do
expect(uploader_class).to receive(:object_store_credentials) do
{ provider: "AWS", { provider: "AWS",
aws_access_key_id: "AWS_ACCESS_KEY_ID", aws_access_key_id: "AWS_ACCESS_KEY_ID",
aws_secret_access_key: "AWS_SECRET_ACCESS_KEY", aws_secret_access_key: "AWS_SECRET_ACCESS_KEY",
...@@ -547,6 +581,11 @@ RSpec.describe ObjectStorage do ...@@ -547,6 +581,11 @@ RSpec.describe ObjectStorage do
path_style: true, path_style: true,
region: "gdk" } region: "gdk" }
end end
before do
expect_next_instance_of(ObjectStorage::Config) do |instance|
allow(instance).to receive(:credentials).and_return(credentials)
end
end end
context 'for known length' do context 'for known length' do
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment