Commit 52686413 authored by Zamir Martins Filho, committed by Miguel Rincon

Add E2E test for cilium network policies as part

parent cc04f5d9
@@ -67,7 +67,7 @@ export default {
</script>
<template>
<section>
<section data-qa-selector="threat_monitoring_container">
<header class="my-3">
<h2 class="h3 mb-1 gl-display-flex gl-align-items-center">
{{ s__('ThreatMonitoring|Threat Monitoring') }}
@@ -94,7 +94,11 @@ export default {
>
<alerts />
</gl-tab>
<gl-tab ref="policyTab" :title="s__('ThreatMonitoring|Policies')">
<gl-tab
ref="policyTab"
:title="s__('ThreatMonitoring|Policies')"
data-qa-selector="policies_tab"
>
<no-environment-empty-state v-if="!isSetUpMaybe" />
<policy-list
v-else
......
@@ -18,6 +18,7 @@ exports[`ThreatMonitoringApp component given there is a default environment with
exports[`ThreatMonitoringApp component given there is a default environment with data renders the network policy tab 1`] = `
<gl-tab-stub
data-qa-selector="policies_tab"
title="Policies"
titlelinkclass=""
>
......
@@ -592,6 +592,7 @@ module QA
autoload :Minikube, 'qa/service/cluster_provider/minikube'
autoload :K3d, 'qa/service/cluster_provider/k3d'
autoload :K3s, 'qa/service/cluster_provider/k3s'
autoload :K3sCilium, 'qa/service/cluster_provider/k3s_cilium'
end
module DockerRun
......
@@ -183,6 +183,7 @@ module QA
end
module ThreatMonitoring
autoload :Index, 'qa/ee/page/project/threat_monitoring/index'
autoload :AlertsList, 'qa/ee/page/project/threat_monitoring/alerts_list'
end
end
......
# frozen_string_literal: true
module QA
module EE
module Page
module Project
module ThreatMonitoring
class Index < QA::Page::Base
TAB_INDEX = {
alerts: 1,
policies: 2,
statistics: 3 # the statistics tab has not been added yet
}.freeze
view 'ee/app/assets/javascripts/threat_monitoring/components/app.vue' do
element :alerts_tab
element :policies_tab
element :threat_monitoring_container
end
def has_alerts_tab?
has_element?(:alerts_tab)
end
def has_policies_tab?
has_element?(:policies_tab)
end
def click_policies_tab
within_element(:threat_monitoring_container) do
find(tab_element_for(:policies)).click
end
end
private
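# GlTabs renders each tab link with an aria-posinset attribute, so the
# requested tab is located by its 1-based position in the tab list.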
def tab_element_for(tab_name)
"a[aria-posinset='#{TAB_INDEX[tab_name]}']"
end
end
end
end
end
end
end
@@ -13,8 +13,8 @@ module QA
Resource::Project.fabricate!
end
attribute :ingress_ip do
Page::Project::Infrastructure::Kubernetes::Show.perform(&:ingress_ip)
def ingress_ip
@ingress_ip ||= @cluster.fetch_external_ip_for_ingress
end
def fabricate!
@@ -42,19 +42,6 @@ module QA
# We must wait a few seconds for permissions to be set up correctly for new cluster
sleep 25
# TODO: These steps do not work anymore, see https://gitlab.com/gitlab-org/gitlab/-/issues/333818
# Open applications tab
show.open_applications
show.install!(:ingress) if @install_ingress
show.install!(:prometheus) if @install_prometheus
show.install!(:runner) if @install_runner
show.await_installed(:ingress) if @install_ingress
show.await_installed(:prometheus) if @install_prometheus
show.await_installed(:runner) if @install_runner
if @install_ingress
populate(:ingress_ip)
......
# frozen_string_literal: true
module QA
module Service
module ClusterProvider
class K3sCilium < K3s
def setup
@k3s = Service::DockerRun::K3s.new.tap do |k3s|
k3s.remove!
k3s.cni_enabled = true
k3s.register!
shell "kubectl config set-cluster k3s --server https://#{k3s.host_name}:6443 --insecure-skip-tls-verify"
shell 'kubectl config set-credentials default --username=node --password=some-secret'
shell 'kubectl config set-context k3s --cluster=k3s --user=default'
shell 'kubectl config use-context k3s'
wait_for_server(k3s.host_name) do
shell 'kubectl version'
# install local storage
shell 'kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml'
# patch local storage
shell %(kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}')
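# Install Cilium as the cluster CNI plugin using the upstream v1.8 quick-install manifest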
shell 'kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.8/install/kubernetes/quick-install.yaml'
wait_for_namespaces do
wait_for_cilium
wait_for_coredns do
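# With DNS available, deploy the NGINX ingress controller so the cluster exposes an external ingress IP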
shell 'kubectl create -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-0.31.0/deploy/static/provider/cloud/deploy.yaml'
wait_for_ingress
end
end
end
end
end
private
def wait_for_cilium
QA::Runtime::Logger.info 'Waiting for Cilium pod to be initialized'
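# Poll up to 60 times, one second apart, until the Cilium pod reports 1/1 containers ready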
60.times do
if service_available?('kubectl get pods --all-namespaces -l k8s-app=cilium --no-headers=true | grep -o "cilium-.*1/1"')
return yield if block_given?
return true
end
sleep 1
QA::Runtime::Logger.info '.'
end
raise 'Cilium pod has not initialized correctly'
end
def wait_for_coredns
QA::Runtime::Logger.info 'Waiting for CoreDNS pod to be initialized'
60.times do
if service_available?('kubectl get pods --all-namespaces --no-headers=true | grep -o "coredns.*1/1"')
return yield if block_given?
return true
end
sleep 1
QA::Runtime::Logger.info '.'
end
raise 'CoreDNS pod has not been initialized correctly'
end
def wait_for_ingress
QA::Runtime::Logger.info 'Waiting for Ingress controller pod to be initialized'
60.times do
if service_available?('kubectl get pods --all-namespaces -l app.kubernetes.io/component=controller | grep -o "ingress-nginx-controller.*1/1"')
return yield if block_given?
return true
end
sleep 1
QA::Runtime::Logger.info '.'
end
raise 'Ingress pod has not been initialized correctly'
end
end
end
end
end
@@ -4,15 +4,20 @@ module QA
module Service
module DockerRun
class K3s < Base
attr_accessor :cni_enabled
def initialize
@image = 'registry.gitlab.com/gitlab-org/cluster-integration/test-utils/k3s-gitlab-ci/releases/v0.6.1'
@image = 'registry.gitlab.com/gitlab-org/cluster-integration/test-utils/k3s-gitlab-ci/releases/v0.9.1'
@name = 'k3s'
@cni_enabled = false
super
end
def register!
pull
start_k3s
# Mount the Berkeley Packet Filter (BPF) filesystem if a container network interface (CNI) plugin is enabled
mount_bpf if @cni_enabled
end
def host_name
@@ -36,12 +41,20 @@
#{@image} server
--cluster-secret some-secret
--no-deploy traefik
#{@cni_enabled ? '--no-flannel' : ''}
CMD
command.gsub!("--network #{network} --hostname #{host_name}", '') unless QA::Runtime::Env.running_in_ci?
shell command
end
private
def mount_bpf
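# Cilium requires the BPF filesystem (bpffs) to be mounted, and shared, inside the k3s container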
shell "docker exec --privileged k3s mount bpffs -t bpf /sys/fs/bpf"
shell "docker exec --privileged k3s mount --make-shared bpffs -t bpf /sys/fs/bpf"
end
end
end
end
......
@@ -51,6 +51,30 @@ module QA
shell('kubectl apply -f -', stdin_data: manifest)
end
def add_sample_policy(project, policy_name: 'sample-policy')
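# Auto DevOps deploys the application into the "<project-name>-<project-id>-<environment>" namespace,
# so the sample policy is applied to the project's production namespace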
namespace = "#{project.name}-#{project.id}-production"
network_policy = <<~YAML
apiVersion: "cilium.io/v2"
kind: CiliumNetworkPolicy
metadata:
name: #{policy_name}
namespace: #{namespace}
spec:
endpointSelector:
matchLabels:
role: backend
ingress:
- fromEndpoints:
- matchLabels:
role: frontend
YAML
shell('kubectl apply -f -', stdin_data: network_policy)
end
def fetch_external_ip_for_ingress
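# Returns the LoadBalancer IP of the ingress-nginx service, skipping entries that have no external IP yet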
`kubectl get svc --all-namespaces --no-headers=true -l app.kubernetes.io/name=ingress-nginx -o custom-columns=:'status.loadBalancer.ingress[0].ip' | grep -v 'none'`
end
private
def fetch_api_url
......
@@ -3,18 +3,23 @@
module QA
RSpec.describe 'Protect' do
describe 'Threat Monitoring Policy List page' do
let(:project) do
let!(:project) do
Resource::Project.fabricate_via_api! do |project|
project.name = Runtime::Env.auto_devops_project_name || 'project-with-protect'
project.description = 'Project with Protect'
project.auto_devops_enabled = false
project.auto_devops_enabled = true
project.initialize_with_readme = true
project.template_name = 'express'
end
end
after do
project.remove_via_api!
end
context 'without k8s cluster' do
before do
Flow::Login.sign_in
project.visit!
end
@@ -22,10 +27,76 @@ module QA
Page::Project::Menu.perform(&:click_on_threat_monitoring)
EE::Page::Project::ThreatMonitoring::AlertsList.perform do |alerts_list|
aggregate_failures do
expect(alerts_list).to have_alerts_tab
expect(alerts_list).to have_alerts_list
end
end
end
end
context 'with k8s cluster', :require_admin, :kubernetes, :orchestrated, :runner do
let(:policy_name) { 'l3-rule' }
let!(:cluster) { Service::KubernetesCluster.new(provider_class: Service::ClusterProvider::K3sCilium).create! }
let!(:runner) do
Resource::Runner.fabricate_via_api! do |resource|
resource.project = project
resource.executor = :docker
end
end
let(:optional_jobs) do
%w[
LICENSE_MANAGEMENT_DISABLED
SAST_DISABLED
DAST_DISABLED
DEPENDENCY_SCANNING_DISABLED
CONTAINER_SCANNING_DISABLED
CODE_QUALITY_DISABLED
]
end
before do
Flow::Login.sign_in_as_admin
end
after do
runner.remove_via_api!
cluster.remove!
end
it 'loads a sample network policy under policies tab on the Threat Monitoring page', testcase: 'https://gitlab.com/gitlab-org/quality/testcases/-/issues/1855' do
Resource::KubernetesCluster::ProjectCluster.fabricate_via_browser_ui! do |k8s_cluster|
k8s_cluster.project = project
k8s_cluster.cluster = cluster
k8s_cluster.install_ingress = true
end.project.visit!
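# Run an Auto DevOps pipeline with the optional scanning jobs disabled; the deploy job
# creates the production environment and namespace that the sample policy targets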
Resource::Pipeline.fabricate_via_api! do |pipeline|
pipeline.project = project
pipeline.variables =
optional_jobs.map do |job|
{ key: job, value: '1', variable_type: 'env_var' }
end
end
Page::Project::Menu.perform(&:click_ci_cd_pipelines)
Page::Project::Pipeline::Index.perform do |index|
index.wait_for_latest_pipeline_completed
end
cluster.add_sample_policy(project, policy_name: policy_name)
Page::Project::Menu.perform(&:click_on_threat_monitoring)
EE::Page::Project::ThreatMonitoring::Index.perform do |index|
index.click_policies_tab
aggregate_failures do
expect(index).to have_policies_tab
expect(index.has_content?(policy_name)).to be true
end
end
end
end
end
end
end