Commit b9a3438b authored by GitLab Bot

Automatic merge of gitlab-org/gitlab-ce master

parents 5d24b055 74fa088b
......@@ -378,8 +378,6 @@ module Ci
end
def any_unmet_prerequisites?
- return false unless Feature.enabled?(:ci_preparing_state, default_enabled: true)
prerequisites.present?
end
......
......@@ -21,11 +21,7 @@ module Clusters
private_class_method :projects_with_missing_kubernetes_namespaces_for_cluster
def self.clusters_with_missing_kubernetes_namespaces_for_project(project)
- if Feature.enabled?(:ci_preparing_state, default_enabled: true)
- project.clusters.managed.missing_kubernetes_namespace(project.kubernetes_namespaces)
- else
- project.all_clusters.managed.missing_kubernetes_namespace(project.kubernetes_namespaces)
- end
+ project.clusters.managed.missing_kubernetes_namespace(project.kubernetes_namespaces)
end
private_class_method :clusters_with_missing_kubernetes_namespaces_for_project
......
......@@ -100,8 +100,6 @@ module Projects
current_user.invalidate_personal_projects_count
create_readme if @initialize_with_readme
- configure_group_clusters_for_project
end
# Refresh the current user's authorizations inline (so they can access the
......@@ -127,10 +125,6 @@ module Projects
Files::CreateService.new(@project, current_user, commit_attrs).execute
end
- def configure_group_clusters_for_project
- ClusterProjectConfigureWorker.perform_async(@project.id)
- end
def skip_wiki?
!@project.feature_available?(:wiki, current_user) || @skip_wiki
end
......
......@@ -54,7 +54,6 @@ module Projects
end
attempt_transfer_transaction
- configure_group_clusters_for_project
end
# rubocop: enable CodeReuse/ActiveRecord
......@@ -164,10 +163,6 @@ module Projects
@new_namespace.full_path
)
end
- def configure_group_clusters_for_project
- ClusterProjectConfigureWorker.perform_async(project.id)
- end
end
end
......
......@@ -6,7 +6,7 @@ class ClusterConfigureWorker
def perform(cluster_id)
Clusters::Cluster.managed.find_by_id(cluster_id).try do |cluster|
- if cluster.project_type? || Feature.disabled?(:ci_preparing_state, default_enabled: true)
+ if cluster.project_type?
Clusters::RefreshService.create_or_update_namespaces_for_cluster(cluster)
end
end
......
---
title: Remove ability for group clusters to be automatically configured on creation
merge_request: 27245
author:
type: removed
......@@ -3,10 +3,11 @@
# ### Specify the Docker image
#
- # Instead of installing .NET Core SDK manually, a docker image is used
- # with already pre-installed .NET Core SDK.
- # The 'latest' tag targets the latest available version of .NET Core SDK image.
- # If preferred, you can explicitly specify version of .NET Core e.g. using '2.2-sdk' tag.
+ # Instead of installing .NET Core SDK manually, a docker image is used
+ # with already pre-installed .NET Core SDK.
+ #
+ # The 'latest' tag targets the latest available version of .NET Core SDK image.
+ # If preferred, you can explicitly specify version of .NET Core (e.g. using '2.2-sdk' tag).
#
# See other available tags for .NET Core: https://hub.docker.com/r/microsoft/dotnet
# Learn more about Docker tags: https://docs.docker.com/glossary/?term=tag
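As a usage note (illustration only, not part of this diff): pinning the SDK version instead of tracking 'latest' would use the tag mentioned in the comments above, for example:

# Hypothetical pinned-version variant of the image line below.
image: microsoft/dotnet:2.2-sdk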
......@@ -17,16 +18,16 @@ image: microsoft/dotnet:latest
#
variables:
# 1) Name of directory where restore and build objects are stored.
OBJECTS_DIRECTORY: 'obj'
# 2) Name of directory used for keeping restored dependencies.
NUGET_PACKAGES_DIRECTORY: '.nuget'
# 3) A relative path to the source code from project repository root.
# NOTE: Please edit this path so it matches the structure of your project!
SOURCE_CODE_PATH: '*/*/'
# ### Define stage list
#
# In this example there are only two stages.
# Initially, the project will be built and then tested.
stages:
- build
......@@ -34,47 +35,55 @@ stages:
# ### Define global cache rule
#
- # Before building the project, all dependencies (e.g. third-party NuGet packages)
- # must be restored. Jobs on GitLab.com's Shared Runners are executed on autoscaled machines.
- # Each machine is used only once (for security reasons) and after that it is removed.
- # What that means is that before every job a dependency restore must be performed
+ # Before building the project, all dependencies (e.g. third-party NuGet packages)
+ # must be restored. Jobs on GitLab.com's Shared Runners are executed on autoscaled machines.
+ #
+ # Each machine is used only once (for security reasons) and after that it is removed.
+ # This means that, before every job, a dependency restore must be performed
# because restored dependencies are removed along with machines. Fortunately,
# GitLab provides cache mechanism with the aim of keeping restored dependencies
- # for other jobs. This example shows how to configure cache to pass over restored
+ # for other jobs.
+ #
+ # This example shows how to configure cache to pass over restored
# dependencies for re-use.
#
# With global cache rule, cached dependencies will be downloaded before every job
# and then unpacked to the paths as specified below.
cache:
# Per-stage and per-branch caching.
key: "$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG"
paths:
# Specify three paths that should be cached:
#
# 1) Main JSON file holding information about the package dependency tree, package versions,
# frameworks etc. It also holds information about where the dependencies were restored.
- '$SOURCE_CODE_PATH$OBJECTS_DIRECTORY/project.assets.json'
# 2) Other NuGet and MSBuild related files. Also needed.
- '$SOURCE_CODE_PATH$OBJECTS_DIRECTORY/*.csproj.nuget.*'
# 3) Path to the directory where restored dependencies are kept.
- - '$NUGET_PACKAGES_DIRECTORY'
- # 'pull-push' policy means that latest cache will be downloaded (if exists)
- # before executing the job, and a newer version will be uploaded afterwards.
- # Such setting saves time when there are no changes in referenced third-party
- # packages. For example if you run a pipeline with changes in your code,
- # but with no changes within third-party packages which your project is using,
- # then project restore will happen in next to no time as all required dependencies
- # will already be there — unzipped from cache. 'pull-push' policy is a default
- # cache policy, you do not have to specify it explicitly.
- policy: pull-push
+ - '$NUGET_PACKAGES_DIRECTORY'
+ #
+ # 'pull-push' policy means that latest cache will be downloaded (if it exists)
+ # before executing the job, and a newer version will be uploaded afterwards.
+ # Such a setting saves time when there are no changes in referenced third-party
+ # packages.
+ #
+ # For example, if you run a pipeline with changes in your code,
+ # but with no changes within third-party packages which your project is using,
+ # then project restore will happen quickly as all required dependencies
+ # will already be there — unzipped from cache.
+ # 'pull-push' policy is the default cache policy, you do not have to specify it explicitly.
+ policy: pull-push
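A hedged aside (not part of this template's diff): a job that only reads the cache can override the global 'pull-push' rule with a 'pull' policy, which skips the upload step. The job name below is hypothetical.

# Hypothetical job-level cache override: download the cache, never upload it.
consume_only_job:
  stage: test
  cache:
    key: "$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG"
    paths:
      - '$NUGET_PACKAGES_DIRECTORY'
    policy: pull
  script:
    - 'dotnet test --no-restore'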
# ### Restore project dependencies
#
- # NuGet packages by default are restored to '.nuget/packages' directory
- # in the user's home directory. That directory is out of scope of GitLab caching.
- # To get around this a custom path can be specified using '--packages <PATH>' option
- # for 'dotnet restore' command. In this example a temporary directory is created
- # in the root of project repository, so it's content can be cached.
+ # NuGet packages by default are restored to '.nuget/packages' directory
+ # in the user's home directory. That directory is out of scope of GitLab caching.
+ #
+ # To get around this, a custom path can be specified using the '--packages <PATH>' option
+ # for 'dotnet restore' command. In this example, a temporary directory is created
+ # in the root of project repository, so its content can be cached.
#
# Learn more about GitLab cache: https://docs.gitlab.com/ee/ci/caching/index.html
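The before_script body itself is elided by the hunk below; a minimal sketch of the restore step the comments describe could look like this (assumed commands, not necessarily the template's exact contents):

before_script:
  # Restore NuGet packages into a directory inside the repository so it can be cached.
  - 'dotnet restore --packages $NUGET_PACKAGES_DIRECTORY'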
before_script:
......@@ -82,26 +91,26 @@ before_script:
build:
stage: build
# ### Build all projects discovered from solution file.
#
- # Note: this will fail if you have any projects in your solution that are not
- # .NET Core based projects e.g. WCF service, which is based on .NET Framework,
- # not .NET Core. In such scenario you will need to build every .NET Core based
- # project by explicitly specifying a relative path to the directory
- # where it is located e.g. 'dotnet build ./src/ConsoleApp'.
+ # Note: this will fail if you have any projects in your solution that are not
+ # .NET Core-based projects (e.g. a WCF service, which is based on .NET Framework,
+ # not .NET Core). In this scenario, you will need to build every .NET Core-based
+ # project by explicitly specifying a relative path to the directory
+ # where it is located (e.g. 'dotnet build ./src/ConsoleApp').
# Only one project path can be passed as a parameter to 'dotnet build' command.
script:
- 'dotnet build --no-restore'
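For a mixed solution, the per-project build described in the note above could be sketched like this (the './src/ConsoleApp' path comes from the example in the comment; treat it as a placeholder):

# Hypothetical variant: build a single .NET Core project instead of the whole solution.
script:
  - 'dotnet build ./src/ConsoleApp --no-restore'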
tests:
stage: test
# ### Run the tests
#
# You can either run tests for all test projects that are defined in your solution
# with 'dotnet test' or run tests only for specific project by specifying
- # a relative path to the directory where it is located e.g. 'dotnet test ./test/UnitTests'.
+ # a relative path to the directory where it is located (e.g. 'dotnet test ./test/UnitTests').
#
# You may want to define separate testing jobs for different types of testing
- # e.g. integration tests, unit tests etc.
+ # (e.g. integration tests, unit tests, etc.).
script:
- 'dotnet test --no-restore'
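The separate testing jobs suggested in the comment above might be sketched as follows (the job names and the './test/IntegrationTests' path are assumptions, not part of this template):

unit_tests:
  stage: test
  script:
    - 'dotnet test ./test/UnitTests --no-restore'

integration_tests:
  stage: test
  script:
    - 'dotnet test ./test/IntegrationTests --no-restore'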
......@@ -2925,26 +2925,18 @@ describe Ci::Build do
subject { build.any_unmet_prerequisites? }
+ before do
+ allow(build).to receive(:prerequisites).and_return(prerequisites)
+ end
context 'build has prerequisites' do
- before do
- allow(build).to receive(:prerequisites).and_return([double])
- end
+ let(:prerequisites) { [double] }
it { is_expected.to be_truthy }
- context 'and the ci_preparing_state feature is disabled' do
- before do
- stub_feature_flags(ci_preparing_state: false)
- end
- it { is_expected.to be_falsey }
- end
end
context 'build does not have prerequisites' do
- before do
- allow(build).to receive(:prerequisites).and_return([])
- end
+ let(:prerequisites) { [] }
it { is_expected.to be_falsey }
end
......
......@@ -93,32 +93,14 @@ describe Clusters::RefreshService do
let(:group) { cluster.group }
let(:project) { create(:project, group: group) }
- context 'when ci_preparing_state feature flag is enabled' do
- include_examples 'does not create a kubernetes namespace'
- context 'when project already has kubernetes namespace' do
- before do
- create(:cluster_kubernetes_namespace, project: project, cluster: cluster)
- end
- include_examples 'does not create a kubernetes namespace'
- end
- end
+ include_examples 'does not create a kubernetes namespace'
- context 'when ci_preparing_state feature flag is disabled' do
+ context 'when project already has kubernetes namespace' do
before do
- stub_feature_flags(ci_preparing_state: false)
+ create(:cluster_kubernetes_namespace, project: project, cluster: cluster)
end
- include_examples 'creates a kubernetes namespace'
- context 'when project already has kubernetes namespace' do
- before do
- create(:cluster_kubernetes_namespace, project: project, cluster: cluster)
- end
- include_examples 'does not create a kubernetes namespace'
- end
+ include_examples 'does not create a kubernetes namespace'
end
end
......
......@@ -268,33 +268,6 @@ describe Projects::CreateService, '#execute' do
end
end
- context 'when group has kubernetes cluster' do
- let(:group_cluster) { create(:cluster, :group, :provided_by_gcp) }
- let(:group) { group_cluster.group }
- let(:token) { 'aaaa' }
- let(:service_account_creator) { double(Clusters::Gcp::Kubernetes::CreateOrUpdateServiceAccountService, execute: true) }
- let(:secrets_fetcher) { double(Clusters::Gcp::Kubernetes::FetchKubernetesTokenService, execute: token) }
- before do
- group.add_owner(user)
- stub_feature_flags(ci_preparing_state: false)
- expect(Clusters::Gcp::Kubernetes::CreateOrUpdateServiceAccountService).to receive(:namespace_creator).and_return(service_account_creator)
- expect(Clusters::Gcp::Kubernetes::FetchKubernetesTokenService).to receive(:new).and_return(secrets_fetcher)
- end
- it 'creates kubernetes namespace for the project' do
- project = create_project(user, opts.merge!(namespace_id: group.id))
- expect(project).to be_valid
- kubernetes_namespace = group_cluster.kubernetes_namespaces.first
- expect(kubernetes_namespace).to be_present
- expect(kubernetes_namespace.project).to eq(project)
- end
- end
context 'when there is an active service template' do
before do
create(:service, project: nil, template: true, active: true)
......
......@@ -73,33 +73,6 @@ describe Projects::TransferService do
shard_name: project.repository_storage
)
end
- context 'new group has a kubernetes cluster' do
- let(:group_cluster) { create(:cluster, :group, :provided_by_gcp) }
- let(:group) { group_cluster.group }
- let(:token) { 'aaaa' }
- let(:service_account_creator) { double(Clusters::Gcp::Kubernetes::CreateOrUpdateServiceAccountService, execute: true) }
- let(:secrets_fetcher) { double(Clusters::Gcp::Kubernetes::FetchKubernetesTokenService, execute: token) }
- subject { transfer_project(project, user, group) }
- before do
- stub_feature_flags(ci_preparing_state: false)
- expect(Clusters::Gcp::Kubernetes::CreateOrUpdateServiceAccountService).to receive(:namespace_creator).and_return(service_account_creator)
- expect(Clusters::Gcp::Kubernetes::FetchKubernetesTokenService).to receive(:new).and_return(secrets_fetcher)
- end
- it 'creates kubernetes namespace for the project' do
- subject
- expect(project.kubernetes_namespaces.count).to eq(1)
- kubernetes_namespace = group_cluster.kubernetes_namespaces.first
- expect(kubernetes_namespace).to be_present
- expect(kubernetes_namespace.project).to eq(project)
- end
- end
end
context 'when transfer fails' do
......
......@@ -4,11 +4,6 @@ require 'spec_helper'
describe ClusterConfigureWorker, '#perform' do
let(:worker) { described_class.new }
- let(:ci_preparing_state_enabled) { false }
- before do
- stub_feature_flags(ci_preparing_state: ci_preparing_state_enabled)
- end
shared_examples 'configured cluster' do
it 'creates a namespace' do
......@@ -33,26 +28,14 @@ describe ClusterConfigureWorker, '#perform' do
context 'when group has a project' do
let!(:project) { create(:project, group: group) }
- it_behaves_like 'configured cluster'
- context 'ci_preparing_state feature is enabled' do
- let(:ci_preparing_state_enabled) { true }
- it_behaves_like 'unconfigured cluster'
- end
+ it_behaves_like 'unconfigured cluster'
end
context 'when group has project in a sub-group' do
let!(:subgroup) { create(:group, parent: group) }
let!(:project) { create(:project, group: subgroup) }
- it_behaves_like 'configured cluster'
- context 'ci_preparing_state feature is enabled' do
- let(:ci_preparing_state_enabled) { true }
- it_behaves_like 'unconfigured cluster'
- end
+ it_behaves_like 'unconfigured cluster'
end
end
......