Commit 939b05db authored by Dylan Griffith

Merge branch 'sh-s3-encryption' into 'master'

Add support for specifying AWS Server Side Encryption (AWS-KMS)

See merge request gitlab-org/gitlab!38240
parents c71dc6cd ba8654a2
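
In brief: object storage settings gain an optional storage_options section, and a new ObjectStorage::Config class turns it into the S3 encryption headers passed to Fog and to Workhorse. A minimal sketch of the setting in its Ruby form (values are illustrative; the authoritative examples are in the gitlab.yml.example and ObjectStoreSettings changes below):

    # Illustrative only -- mirrors the commented example in the ObjectStoreSettings change below
    Settings.artifacts['object_store'] = {
      "enabled" => true,
      "connection" => { "provider" => "AWS", "region" => "us-east-1" },
      "remote_directory" => "artifacts",
      "storage_options" => {
        "server_side_encryption" => "aws:kms",
        "server_side_encryption_kms_key_id" => "arn:aws:kms:us-east-1:000000000000:key/EXAMPLE" # hypothetical ARN
      }
    }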
......@@ -169,10 +169,6 @@ module ObjectStorage
object_store_options.connection.to_hash.deep_symbolize_keys
end
def consolidated_settings?
object_store_options.fetch('consolidated_settings', false)
end
def remote_store_path
object_store_options.remote_directory
end
......@@ -193,14 +189,18 @@ module ObjectStorage
File.join(self.root, TMP_UPLOAD_PATH)
end
def object_store_config
ObjectStorage::Config.new(object_store_options)
end
def workhorse_remote_upload_options(has_length:, maximum_size: nil)
return unless self.object_store_enabled?
return unless self.direct_upload_enabled?
id = [CarrierWave.generate_cache_id, SecureRandom.hex].join('-')
upload_path = File.join(TMP_UPLOAD_PATH, id)
direct_upload = ObjectStorage::DirectUpload.new(self.object_store_credentials, remote_store_path, upload_path,
has_length: has_length, maximum_size: maximum_size, consolidated_settings: consolidated_settings?)
direct_upload = ObjectStorage::DirectUpload.new(self.object_store_config, upload_path,
has_length: has_length, maximum_size: maximum_size)
direct_upload.to_hash.merge(ID: id)
end
......@@ -283,6 +283,10 @@ module ObjectStorage
self.class.object_store_credentials
end
def fog_attributes
@fog_attributes ||= self.class.object_store_config.fog_attributes
end
# Set ACL of uploaded objects to not-public (fog-aws)[1] or no ACL at all
# (fog-google). Value is ignored by other supported backends (fog-aliyun,
# fog-openstack, fog-rackspace)
......
---
title: Add support for specifying AWS S3 Server Side Encryption (AWS-KMS)
merge_request: 38240
author:
type: added
......@@ -218,6 +218,9 @@ production: &base
# region: us-east-1
# aws_signature_version: 4 # For creation of signed URLs. Set to 2 if provider does not support v4.
# endpoint: 'https://s3.amazonaws.com' # default: nil - Useful for S3 compliant services such as DigitalOcean Spaces
# storage_options:
# server_side_encryption: AES256 # AES256, aws:kms
# server_side_encryption_kms_key_id: # Amazon Resource Name. See https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
# objects:
# artifacts:
# bucket: artifacts
......
# frozen_string_literal: true
require "carrierwave/storage/fog"
# This pulls in https://github.com/carrierwaveuploader/carrierwave/pull/2504 to support
# sending AWS S3 encryption headers when copying objects.
module CarrierWave
module Storage
class Fog < Abstract
class File
def copy_to(new_path)
connection.copy_object(@uploader.fog_directory, file.key, @uploader.fog_directory, new_path, copy_to_options)
CarrierWave::Storage::Fog::File.new(@uploader, @base, new_path)
end
def copy_to_options
acl_header.merge(@uploader.fog_attributes)
end
end
end
end
end
......@@ -13,6 +13,7 @@ class ObjectStoreSettings
object_store['direct_upload'] = false if object_store['direct_upload'].nil?
object_store['background_upload'] = true if object_store['background_upload'].nil?
object_store['proxy_download'] = false if object_store['proxy_download'].nil?
object_store['storage_options'] ||= {}
# Convert upload connection settings to use string keys, to make Fog happy
object_store['connection']&.deep_stringify_keys!
......@@ -37,6 +38,8 @@ class ObjectStoreSettings
# region: gdk
# endpoint: 'http://127.0.0.1:9000'
# path_style: true
# storage_options:
# server_side_encryption: AES256
# proxy_download: true
# objects:
# artifacts:
......@@ -49,7 +52,7 @@ class ObjectStoreSettings
#
# Settings.artifacts['object_store'] = {
# "enabled" => true,
# "connection"=> {
# "connection" => {
# "provider" => "AWS",
# "aws_access_key_id" => "minio",
# "aws_secret_access_key" => "gdk-minio",
......@@ -57,6 +60,9 @@ class ObjectStoreSettings
# "endpoint" => "http://127.0.0.1:9000",
# "path_style" => true
# },
# "storage_options" => {
# "server_side_encryption" => "AES256"
# },
# "direct_upload" => true,
# "background_upload" => false,
# "proxy_download" => false,
......@@ -73,6 +79,9 @@ class ObjectStoreSettings
# "endpoint" => "http://127.0.0.1:9000",
# "path_style" => true
# },
# "storage_options" => {
# "server_side_encryption" => "AES256"
# },
# "direct_upload" => true,
# "background_upload" => false,
# "proxy_download" => true,
......@@ -91,12 +100,13 @@ class ObjectStoreSettings
return unless use_consolidated_settings?
main_config = settings['object_store']
common_config = main_config.slice('enabled', 'connection', 'proxy_download')
common_config = main_config.slice('enabled', 'connection', 'proxy_download', 'storage_options')
# Convert connection settings to use string keys, to make Fog happy
common_config['connection']&.deep_stringify_keys!
# These are no longer configurable if common config is used
common_config['direct_upload'] = true
common_config['background_upload'] = false
common_config['storage_options'] ||= {}
SUPPORTED_TYPES.each do |store_type|
overrides = main_config.dig('objects', store_type) || {}
......
# frozen_string_literal: true
module ObjectStorage
class Config
attr_reader :options
def initialize(options)
@options = options.to_hash.deep_symbolize_keys
end
def credentials
@credentials ||= options[:connection] || {}
end
def storage_options
@storage_options ||= options[:storage_options] || {}
end
def enabled?
options[:enabled]
end
def bucket
options[:remote_directory]
end
def consolidated_settings?
options.fetch(:consolidated_settings, false)
end
# AWS-specific options
def aws?
provider == 'AWS'
end
def use_iam_profile?
credentials.fetch(:use_iam_profile, false)
end
def use_path_style?
credentials.fetch(:path_style, false)
end
def server_side_encryption
storage_options[:server_side_encryption]
end
def server_side_encryption_kms_key_id
storage_options[:server_side_encryption_kms_key_id]
end
def provider
credentials[:provider].to_s
end
# End AWS-specific options
def google?
provider == 'Google'
end
def fog_attributes
@fog_attributes ||= begin
return {} unless enabled? && aws?
return {} unless server_side_encryption.present?
aws_server_side_encryption_headers.compact
end
end
private
def aws_server_side_encryption_headers
{
'x-amz-server-side-encryption' => server_side_encryption,
'x-amz-server-side-encryption-aws-kms-key-id' => server_side_encryption_kms_key_id
}
end
end
end
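
For context, a minimal usage sketch of the new ObjectStorage::Config class (the settings hash below is illustrative and mirrors the spec further down, not a real configuration):

    settings = {
      enabled: true,
      remote_directory: 'uploads',
      connection: { provider: 'AWS', region: 'us-east-1' },
      storage_options: {
        server_side_encryption: 'aws:kms',
        server_side_encryption_kms_key_id: 'arn:aws:kms:us-east-1:000000000000:key/EXAMPLE' # hypothetical ARN
      }
    }

    config = ObjectStorage::Config.new(settings)
    config.aws?                    # => true
    config.server_side_encryption  # => "aws:kms"
    config.fog_attributes
    # => { "x-amz-server-side-encryption" => "aws:kms",
    #      "x-amz-server-side-encryption-aws-kms-key-id" => "arn:aws:kms:us-east-1:000000000000:key/EXAMPLE" }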
......@@ -22,20 +22,20 @@ module ObjectStorage
MAXIMUM_MULTIPART_PARTS = 100
MINIMUM_MULTIPART_SIZE = 5.megabytes
attr_reader :credentials, :bucket_name, :object_name
attr_reader :has_length, :maximum_size, :consolidated_settings
attr_reader :config, :credentials, :bucket_name, :object_name
attr_reader :has_length, :maximum_size
def initialize(credentials, bucket_name, object_name, has_length:, maximum_size: nil, consolidated_settings: false)
def initialize(config, object_name, has_length:, maximum_size: nil)
unless has_length
raise ArgumentError, 'maximum_size has to be specified if length is unknown' unless maximum_size
end
@credentials = credentials
@bucket_name = bucket_name
@config = config
@credentials = config.credentials
@bucket_name = config.bucket
@object_name = object_name
@has_length = has_length
@maximum_size = maximum_size
@consolidated_settings = consolidated_settings
end
def to_hash
......@@ -62,7 +62,7 @@ module ObjectStorage
end
def workhorse_client_hash
return {} unless aws?
return {} unless config.aws?
{
UseWorkhorseClient: use_workhorse_s3_client?,
......@@ -73,16 +73,18 @@ module ObjectStorage
Bucket: bucket_name,
Region: credentials[:region],
Endpoint: credentials[:endpoint],
PathStyle: credentials.fetch(:path_style, false),
UseIamProfile: credentials.fetch(:use_iam_profile, false)
}
PathStyle: config.use_path_style?,
UseIamProfile: config.use_iam_profile?,
ServerSideEncryption: config.server_side_encryption,
SSEKMSKeyID: config.server_side_encryption_kms_key_id
}.compact
}
}
end
def use_workhorse_s3_client?
return false unless Feature.enabled?(:use_workhorse_s3_client, default_enabled: true)
return false unless credentials.fetch(:use_iam_profile, false) || consolidated_settings
return false unless config.use_iam_profile? || config.consolidated_settings?
# The Golang AWS SDK does not support V2 signatures
return false unless credentials.fetch(:aws_signature_version, 4).to_i >= 4
......@@ -95,7 +97,7 @@ module ObjectStorage
# Implements https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
def get_url
if google?
if config.google?
connection.get_object_https_url(bucket_name, object_name, expire_at)
else
connection.get_object_url(bucket_name, object_name, expire_at)
......@@ -169,23 +171,15 @@ module ObjectStorage
].min
end
def aws?
provider == 'AWS'
end
def google?
provider == 'Google'
end
def requires_multipart_upload?
aws? && !has_length
config.aws? && !has_length
end
def upload_id
return unless requires_multipart_upload?
strong_memoize(:upload_id) do
new_upload = connection.initiate_multipart_upload(bucket_name, object_name)
new_upload = connection.initiate_multipart_upload(bucket_name, object_name, config.fog_attributes)
new_upload.body["UploadId"]
end
end
......
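
With the DirectUpload change above, the S3Config hash handed to Workhorse now carries the encryption settings when they are configured. A sketch of its shape (keys taken from the diff above, values illustrative; nil entries such as an unset Endpoint are dropped by the .compact call):

    # Roughly what subject[:ObjectStorage][:S3Config] contains in the specs below
    {
      Bucket: 'uploads',
      Region: 'us-east-1',
      PathStyle: false,
      UseIamProfile: true,
      ServerSideEncryption: 'aws:kms',
      SSEKMSKeyID: 'arn:aws:kms:us-east-1:000000000000:key/EXAMPLE' # hypothetical ARN
    }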
# frozen_string_literal: true
require 'fast_spec_helper'
RSpec.describe ObjectStorage::Config do
let(:region) { 'us-east-1' }
let(:bucket_name) { 'test-bucket' }
let(:path_style) { false }
let(:use_iam_profile) { false }
let(:credentials) do
{
provider: 'AWS',
aws_access_key_id: 'AWS_ACCESS_KEY_ID',
aws_secret_access_key: 'AWS_SECRET_ACCESS_KEY',
region: region,
path_style: path_style,
use_iam_profile: use_iam_profile
}
end
let(:storage_options) do
{
server_side_encryption: 'AES256',
server_side_encryption_kms_key_id: 'arn:aws:12345'
}
end
let(:raw_config) do
{
enabled: true,
connection: credentials,
remote_directory: bucket_name,
storage_options: storage_options
}
end
subject { described_class.new(raw_config.as_json) }
describe '#credentials' do
it { expect(subject.credentials).to eq(credentials) }
end
describe '#storage_options' do
it { expect(subject.storage_options).to eq(storage_options) }
end
describe '#enabled?' do
it { expect(subject.enabled?).to eq(true) }
end
describe '#bucket' do
it { expect(subject.bucket).to eq(bucket_name) }
end
context 'with unconsolidated settings' do
describe 'consolidated_settings? returns false' do
it { expect(subject.consolidated_settings?).to be false }
end
end
context 'with consolidated settings' do
before do
raw_config[:consolidated_settings] = true
end
describe 'consolidated_settings? returns true' do
it { expect(subject.consolidated_settings?).to be true }
end
end
context 'with IAM profile in use' do
let(:use_iam_profile) { true }
it '#use_iam_profile? returns true' do
expect(subject.use_iam_profile?).to be true
end
end
context 'with IAM profile not in use' do
it '#use_iam_profile? returns false' do
expect(subject.use_iam_profile?).to be false
end
end
context 'with path style' do
let(:path_style) { true }
it '#use_path_style? returns true' do
expect(subject.use_path_style?).to be true
end
end
context 'with hostname style access' do
it '#use_path_style? returns false' do
expect(subject.use_path_style?).to be false
end
end
context 'with AWS credentials' do
it { expect(subject.provider).to eq('AWS') }
it { expect(subject.aws?).to be true }
it { expect(subject.google?).to be false }
end
context 'with Google credentials' do
let(:credentials) do
{
provider: 'Google',
google_client_email: 'foo@gcp-project.example.com',
google_json_key_location: '/path/to/gcp.json'
}
end
it { expect(subject.provider).to eq('Google') }
it { expect(subject.aws?).to be false }
it { expect(subject.google?).to be true }
it { expect(subject.fog_attributes).to eq({}) }
end
context 'with SSE-KMS enabled' do
it { expect(subject.server_side_encryption).to eq('AES256') }
it { expect(subject.server_side_encryption_kms_key_id).to eq('arn:aws:12345') }
it { expect(subject.fog_attributes.keys).to match_array(%w(x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id)) }
end
context 'with only server side encryption enabled' do
let(:storage_options) { { server_side_encryption: 'AES256' } }
it { expect(subject.server_side_encryption).to eq('AES256') }
it { expect(subject.server_side_encryption_kms_key_id).to be_nil }
it { expect(subject.fog_attributes).to eq({ 'x-amz-server-side-encryption' => 'AES256' }) }
end
context 'without encryption enabled' do
let(:storage_options) { {} }
it { expect(subject.server_side_encryption).to be_nil }
it { expect(subject.server_side_encryption_kms_key_id).to be_nil }
it { expect(subject.fog_attributes).to eq({}) }
end
context 'with object storage disabled' do
before do
raw_config['enabled'] = false
end
it { expect(subject.enabled?).to be false }
it { expect(subject.fog_attributes).to eq({}) }
end
end
......@@ -18,13 +18,25 @@ RSpec.describe ObjectStorage::DirectUpload do
}
end
let(:storage_options) { {} }
let(:raw_config) do
{
enabled: true,
connection: credentials,
remote_directory: bucket_name,
storage_options: storage_options,
consolidated_settings: consolidated_settings
}
end
let(:config) { ObjectStorage::Config.new(raw_config) }
let(:storage_url) { 'https://uploads.s3.amazonaws.com/' }
let(:bucket_name) { 'uploads' }
let(:object_name) { 'tmp/uploads/my-file' }
let(:maximum_size) { 1.gigabyte }
let(:direct_upload) { described_class.new(credentials, bucket_name, object_name, has_length: has_length, maximum_size: maximum_size, consolidated_settings: consolidated_settings) }
let(:direct_upload) { described_class.new(config, object_name, has_length: has_length, maximum_size: maximum_size) }
before do
Fog.unmock!
......@@ -62,7 +74,7 @@ RSpec.describe ObjectStorage::DirectUpload do
end
describe '#get_url' do
subject { described_class.new(credentials, bucket_name, object_name, has_length: true) }
subject { described_class.new(config, object_name, has_length: true) }
context 'when AWS is used' do
it 'calls the proper method' do
......@@ -111,6 +123,7 @@ RSpec.describe ObjectStorage::DirectUpload do
expect(s3_config[:Region]).to eq(region)
expect(s3_config[:PathStyle]).to eq(path_style)
expect(s3_config[:UseIamProfile]).to eq(use_iam_profile)
expect(s3_config.keys).not_to include(:ServerSideEncryption, :SSEKMSKeyID)
end
context 'when feature flag is disabled' do
......@@ -150,6 +163,33 @@ RSpec.describe ObjectStorage::DirectUpload do
expect(subject[:UseWorkhorseClient]).to be true
end
end
context 'when only server side encryption is used' do
let(:storage_options) { { server_side_encryption: 'AES256' } }
it 'sends server side encryption settings' do
s3_config = subject[:ObjectStorage][:S3Config]
expect(s3_config[:ServerSideEncryption]).to eq('AES256')
expect(s3_config.keys).not_to include(:SSEKMSKeyID)
end
end
context 'when SSE-KMS is used' do
let(:storage_options) do
{
server_side_encryption: 'AES256',
server_side_encryption_kms_key_id: 'arn:aws:12345'
}
end
it 'sends server side encryption settings' do
s3_config = subject[:ObjectStorage][:S3Config]
expect(s3_config[:ServerSideEncryption]).to eq('AES256')
expect(s3_config[:SSEKMSKeyID]).to eq('arn:aws:12345')
end
end
end
shared_examples 'a valid Google upload' do
......
......@@ -382,6 +382,32 @@ RSpec.describe ObjectStorage do
it { is_expected.to eq(nil) }
end
describe '#fog_attributes' do
subject { uploader.fog_attributes }
it { is_expected.to eq({}) }
context 'with encryption configured' do
let(:raw_options) do
{
"enabled" => true,
"connection" => { "provider" => 'AWS' },
"storage_options" => { "server_side_encryption" => "AES256" }
}
end
let(:options) { Settingslogic.new(raw_options) }
before do
allow(uploader_class).to receive(:options) do
double(object_store: options)
end
end
it { is_expected.to eq({ "x-amz-server-side-encryption" => "AES256" }) }
end
end
describe '.workhorse_authorize' do
let(:has_length) { true }
let(:maximum_size) { nil }
......@@ -459,13 +485,18 @@ RSpec.describe ObjectStorage do
context 'uses AWS' do
let(:storage_url) { "https://uploads.s3-eu-central-1.amazonaws.com/" }
let(:credentials) do
{
provider: "AWS",
aws_access_key_id: "AWS_ACCESS_KEY_ID",
aws_secret_access_key: "AWS_SECRET_ACCESS_KEY",
region: "eu-central-1"
}
end
before do
expect(uploader_class).to receive(:object_store_credentials) do
{ provider: "AWS",
aws_access_key_id: "AWS_ACCESS_KEY_ID",
aws_secret_access_key: "AWS_SECRET_ACCESS_KEY",
region: "eu-central-1" }
expect_next_instance_of(ObjectStorage::Config) do |instance|
allow(instance).to receive(:credentials).and_return(credentials)
end
end
......@@ -502,12 +533,17 @@ RSpec.describe ObjectStorage do
context 'uses Google' do
let(:storage_url) { "https://storage.googleapis.com/uploads/" }
let(:credentials) do
{
provider: "Google",
google_storage_access_key_id: 'ACCESS_KEY_ID',
google_storage_secret_access_key: 'SECRET_ACCESS_KEY'
}
end
before do
expect(uploader_class).to receive(:object_store_credentials) do
{ provider: "Google",
google_storage_access_key_id: 'ACCESS_KEY_ID',
google_storage_secret_access_key: 'SECRET_ACCESS_KEY' }
expect_next_instance_of(ObjectStorage::Config) do |instance|
allow(instance).to receive(:credentials).and_return(credentials)
end
end
......@@ -537,15 +573,18 @@ RSpec.describe ObjectStorage do
context 'uses GDK/minio' do
let(:storage_url) { "http://minio:9000/uploads/" }
let(:credentials) do
{ provider: "AWS",
aws_access_key_id: "AWS_ACCESS_KEY_ID",
aws_secret_access_key: "AWS_SECRET_ACCESS_KEY",
endpoint: 'http://minio:9000',
path_style: true,
region: "gdk" }
end
before do
expect(uploader_class).to receive(:object_store_credentials) do
{ provider: "AWS",
aws_access_key_id: "AWS_ACCESS_KEY_ID",
aws_secret_access_key: "AWS_SECRET_ACCESS_KEY",
endpoint: 'http://minio:9000',
path_style: true,
region: "gdk" }
expect_next_instance_of(ObjectStorage::Config) do |instance|
allow(instance).to receive(:credentials).and_return(credentials)
end
end
......