Commit 7ab014c6 authored by GitLab Bot

Automatic merge of gitlab-org/gitlab-ce master

parents c05432e6 e4af69a7
......@@ -3,7 +3,8 @@
module Clusters
module Applications
class CertManager < ApplicationRecord
VERSION = 'v0.5.2'.freeze
VERSION = 'v0.9.1'
CRD_VERSION = '0.9'
self.table_name = 'clusters_applications_cert_managers'
......@@ -21,16 +22,22 @@ module Clusters
validates :email, presence: true
def chart
'stable/cert-manager'
'certmanager/cert-manager'
end
def repository
'https://charts.jetstack.io'
end
def install_command
Gitlab::Kubernetes::Helm::InstallCommand.new(
name: 'certmanager',
repository: repository,
version: VERSION,
rbac: cluster.platform_kubernetes_rbac?,
chart: chart,
files: files.merge(cluster_issuer_file),
preinstall: pre_install_script,
postinstall: post_install_script
)
end
......@@ -46,16 +53,30 @@ module Clusters
private
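# Applies the cert-manager CRD manifest for the pinned CRD_VERSION and labels the
# GitLab managed apps namespace to disable cert-manager resource validation
# before the Helm install runs.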
def pre_install_script
[
apply_file("https://raw.githubusercontent.com/jetstack/cert-manager/release-#{CRD_VERSION}/deploy/manifests/00-crds.yaml"),
"kubectl label --overwrite namespace #{Gitlab::Kubernetes::Helm::NAMESPACE} certmanager.k8s.io/disable-validation=true"
]
end
def post_install_script
["kubectl create -f /data/helm/certmanager/config/cluster_issuer.yaml"]
[retry_command(apply_file('/data/helm/certmanager/config/cluster_issuer.yaml'))]
end
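# Wraps a shell command in a bounded retry loop: up to 30 attempts, one second
# apart, stopping at the first success. With the default cluster_issuer file the
# post-install command therefore expands to:
#   for i in $(seq 1 30); do kubectl apply -f /data/helm/certmanager/config/cluster_issuer.yaml && break; sleep 1s; echo "Retrying ($i)..."; done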
def retry_command(command)
"for i in $(seq 1 30); do #{command} && break; sleep 1s; echo \"Retrying ($i)...\"; done"
end
def post_delete_script
[
delete_private_key,
delete_crd('certificates.certmanager.k8s.io'),
delete_crd('certificaterequests.certmanager.k8s.io'),
delete_crd('challenges.certmanager.k8s.io'),
delete_crd('clusterissuers.certmanager.k8s.io'),
delete_crd('issuers.certmanager.k8s.io')
delete_crd('issuers.certmanager.k8s.io'),
delete_crd('orders.certmanager.k8s.io')
].compact
end
......@@ -75,6 +96,10 @@ module Clusters
Gitlab::Kubernetes::KubectlCmd.delete("crd", definition, "--ignore-not-found")
end
def apply_file(filename)
Gitlab::Kubernetes::KubectlCmd.apply_file(filename)
end
def cluster_issuer_file
{
'cluster_issuer.yaml': cluster_issuer_yaml_content
......
---
title: Install cert-manager v0.9.1
merge_request: 32243
author:
type: changed
......@@ -128,8 +128,10 @@ total are being tracked in [epic &153](https://gitlab.com/groups/gitlab-org/-/ep
## Enabling Elasticsearch
In order to enable Elasticsearch, you need to have admin access. Go to
**Admin > Settings > Integrations** and find the "Elasticsearch" section.
In order to enable Elasticsearch, you need to have admin access. Navigate to
**Admin Area** (wrench icon), then **Settings > Integrations** and expand the **Elasticsearch** section.
Click **Save changes** for the changes to take effect.
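If you prefer to script this step, the same settings can typically be changed through the
Application Settings API. This is a minimal sketch; the attribute names
(`elasticsearch_indexing`, `elasticsearch_search`, `elasticsearch_url`) and their availability
should be verified against your GitLab version:
```sh
# Assumed attribute names; adjust the URL and token for your instance
curl --request PUT --header "PRIVATE-TOKEN: <your_access_token>" \
  "https://gitlab.example.com/api/v4/application/settings?elasticsearch_indexing=true&elasticsearch_search=true&elasticsearch_url=http://elasticsearch.example.com:9200"
```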
The following Elasticsearch settings are available:
......@@ -171,171 +173,222 @@ from the Elasticsearch index as expected.
To disable the Elasticsearch integration:
1. Navigate to the **Admin > Settings > Integrations**
1. Find the 'Elasticsearch' section and uncheck 'Search with Elasticsearch enabled'
and 'Elasticsearch indexing'
1. Click **Save** for the changes to take effect
1. (Optional) Delete the existing index by running the command `sudo gitlab-rake gitlab:elastic:delete_index`
1. Navigate to the **Admin Area** (wrench icon), then **Settings > Integrations**.
1. Expand the **Elasticsearch** section and uncheck **Elasticsearch indexing**
and **Search with Elasticsearch enabled**.
1. Click **Save changes** for the changes to take effect.
1. (Optional) Delete the existing index by running one of these commands:
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:delete_index
# Installations from source
bundle exec rake gitlab:elastic:delete_index RAILS_ENV=production
```
## Adding GitLab's data to the Elasticsearch index
### Indexing small instances (database size less than 500 MiB, size of repos less than 5 GiB)
While Elasticsearch indexing is enabled, new changes in your GitLab instance will be automatically indexed as they happen.
To backfill existing data, you can use one of the methods below to index it in background jobs.
Configure Elasticsearch's host and port in **Admin > Settings**. Then index the data using one of the following commands:
### Indexing through the administration UI
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index
> [Introduced](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/15390) in [GitLab Starter](https://about.gitlab.com/pricing/) 12.3.
# Installations from source
bundle exec rake gitlab:elastic:index RAILS_ENV=production
```
To index via the admin area:
1. Navigate to the **Admin Area** (wrench icon), then **Settings > Integrations** and expand the **Elasticsearch** section.
1. [Enable **Elasticsearch indexing** and configure your host and port](#enabling-elasticsearch).
1. Create empty indexes using one of the following commands:
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:create_empty_index
# Installations from source
bundle exec rake gitlab:elastic:create_empty_index RAILS_ENV=production
```
1. Click **Index all projects**.
1. Click **Check progress** in the confirmation message to see the status of the background jobs.
1. Personal snippets need to be indexed manually by running one of these commands:
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index_snippets
# Installations from source
bundle exec rake gitlab:elastic:index_snippets RAILS_ENV=production
```
1. After the indexing has completed, enable [**Search with Elasticsearch**](#enabling-elasticsearch).
### Indexing through Rake tasks
#### Indexing small instances
CAUTION: **Warning**:
This will delete your existing indexes.
If the database size is less than 500 MiB, and the size of all hosted repos is less than 5 GiB:
1. [Enable **Elasticsearch indexing** and configure your host and port](#enabling-elasticsearch).
1. Index your data using one of the following commands:
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index
# Installations from source
bundle exec rake gitlab:elastic:index RAILS_ENV=production
```
After it completes the indexing process, [enable Elasticsearch searching](elasticsearch.md#enabling-elasticsearch).
1. After the indexing has completed, enable [**Search with Elasticsearch**](#enabling-elasticsearch).
### Indexing large instances
#### Indexing large instances
WARNING: **Warning**:
Performing asynchronous indexing, as this will describe, will generate a lot of sidekiq jobs.
CAUTION: **Warning**:
Performing asynchronous indexing will generate a lot of Sidekiq jobs.
Make sure to prepare for this task by either [Horizontally Scaling](../administration/high_availability/README.md#basic-scaling)
or creating [extra sidekiq processes](../administration/operations/extra_sidekiq_processes.md)
or creating [extra Sidekiq processes](../administration/operations/extra_sidekiq_processes.md).
Configure Elasticsearch's host and port in **Admin > Settings > Integrations**. Then create empty indexes using one of the following commands:
1. [Enable **Elasticsearch indexing** and configure your host and port](#enabling-elasticsearch).
1. Create empty indexes using one of the following commands:
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:create_empty_index
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:create_empty_index
# Installations from source
bundle exec rake gitlab:elastic:create_empty_index RAILS_ENV=production
```
# Installations from source
bundle exec rake gitlab:elastic:create_empty_index RAILS_ENV=production
```
Indexing large Git repositories can take a while. To speed up the process, you
can temporarily disable auto-refreshing and replicating. In our experience, you can expect a 20%
decrease in indexing time. We'll enable them when indexing is done. This step is optional!
1. Indexing large Git repositories can take a while. To speed up the process, you
can temporarily disable auto-refreshing and replicating. In our experience, you can expect a 20%
decrease in indexing time. We'll enable them when indexing is done. This step is optional!
```bash
curl --request PUT localhost:9200/gitlab-production/_settings --data '{
"index" : {
"refresh_interval" : "-1",
"number_of_replicas" : 0
} }'
```
```bash
curl --request PUT localhost:9200/gitlab-production/_settings --data '{
"index" : {
"refresh_interval" : "-1",
"number_of_replicas" : 0
} }'
```
Then enable Elasticsearch indexing and run project indexing tasks:
1. Index projects and their associated data:
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index_projects
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index_projects
# Installations from source
bundle exec rake gitlab:elastic:index_projects RAILS_ENV=production
```
# Installations from source
bundle exec rake gitlab:elastic:index_projects RAILS_ENV=production
```
This enqueues a Sidekiq job for each project that needs to be indexed.
You can view the jobs in the admin panel (they are placed in the `elastic_indexer`
queue), or you can query indexing status using a rake task:
This enqueues a Sidekiq job for each project that needs to be indexed.
You can view the jobs in **Admin Area > Monitoring > Background Jobs > Queues Tab**
and click `elastic_indexer`, or you can query indexing status using a rake task:
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index_projects_status
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index_projects_status
# Installations from source
bundle exec rake gitlab:elastic:index_projects_status RAILS_ENV=production
# Installations from source
bundle exec rake gitlab:elastic:index_projects_status RAILS_ENV=production
Indexing is 65.55% complete (6555/10000 projects)
```
Indexing is 65.55% complete (6555/10000 projects)
```
If you want to limit the index to a range of projects you can provide the
`ID_FROM` and `ID_TO` parameters:
If you want to limit the index to a range of projects you can provide the
`ID_FROM` and `ID_TO` parameters:
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index_projects ID_FROM=1001 ID_TO=2000
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index_projects ID_FROM=1001 ID_TO=2000
# Installations from source
bundle exec rake gitlab:elastic:index_projects ID_FROM=1001 ID_TO=2000 RAILS_ENV=production
```
# Installations from source
bundle exec rake gitlab:elastic:index_projects ID_FROM=1001 ID_TO=2000 RAILS_ENV=production
```
Where `ID_FROM` and `ID_TO` are project IDs. Both parameters are optional.
The above examples will index all projects starting with ID `1001` up to (and including) ID `2000`.
Where `ID_FROM` and `ID_TO` are project IDs. Both parameters are optional.
The above example will index all projects from ID `1001` up to (and including) ID `2000`.
TIP: **Troubleshooting:**
Sometimes the project indexing jobs queued by `gitlab:elastic:index_projects`
can get interrupted. This may happen for many reasons, but it's always safe
to run the indexing task again - it will skip those repositories that have
already been indexed.
TIP: **Troubleshooting:**
Sometimes the project indexing jobs queued by `gitlab:elastic:index_projects`
can get interrupted. This may happen for many reasons, but it's always safe
to run the indexing task again. It will skip repositories that have
already been indexed.
As the indexer stores the last commit SHA of every indexed repository in the
database, you can run the indexer with the special parameter `UPDATE_INDEX` and
it will check every project repository again to make sure that every commit in
that repository is indexed, it can be useful in case if your index is outdated:
As the indexer stores the last commit SHA of every indexed repository in the
database, you can run the indexer with the special parameter `UPDATE_INDEX` and
it will check every project repository again to make sure that every commit in
a repository is indexed, which can be useful if your index is outdated:
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index_projects UPDATE_INDEX=true ID_TO=1000
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index_projects UPDATE_INDEX=true ID_TO=1000
# Installations from source
bundle exec rake gitlab:elastic:index_projects UPDATE_INDEX=true ID_TO=1000 RAILS_ENV=production
```
# Installations from source
bundle exec rake gitlab:elastic:index_projects UPDATE_INDEX=true ID_TO=1000 RAILS_ENV=production
```
You can also use the `gitlab:elastic:clear_index_status` Rake task to force the
indexer to "forget" all progress, so retrying the indexing process from the
start.
You can also use the `gitlab:elastic:clear_index_status` Rake task to force the
indexer to "forget" all progress, so it will retry the indexing process from the
start.
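For example, following the same pattern as the other Rake tasks:
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:clear_index_status
# Installations from source
bundle exec rake gitlab:elastic:clear_index_status RAILS_ENV=production
```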
The `index_projects` command enqueues jobs to index all project and wiki
repositories, and most database content. However, snippets still need to be
indexed separately. To do so, run one of these commands:
1. Personal snippets are not associated with a project and need to be indexed separately
by running one of these commands:
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index_snippets
```sh
# Omnibus installations
sudo gitlab-rake gitlab:elastic:index_snippets
# Installations from source
bundle exec rake gitlab:elastic:index_snippets RAILS_ENV=production
```
# Installations from source
bundle exec rake gitlab:elastic:index_snippets RAILS_ENV=production
```
Enable replication and refreshing again after indexing (only if you previously disabled it):
1. Enable replication and refreshing again after indexing (only if you previously disabled it):
```bash
curl --request PUT localhost:9200/gitlab-production/_settings --data '{
"index" : {
"number_of_replicas" : 1,
"refresh_interval" : "1s"
} }'
```
```bash
curl --request PUT localhost:9200/gitlab-production/_settings --data '{
"index" : {
"number_of_replicas" : 1,
"refresh_interval" : "1s"
} }'
```
A force merge should be called after enabling the refreshing above.
A force merge should be called after enabling the refreshing above.
For Elasticsearch 6.x, before proceeding with the force merge, the index should be in read-only mode:
For Elasticsearch 6.x, the index should be in read-only mode before proceeding with the force merge:
```bash
curl --request PUT localhost:9200/gitlab-production/_settings --data '{
"settings": {
"index.blocks.write": true
} }'
```
```bash
curl --request PUT localhost:9200/gitlab-production/_settings --data '{
"settings": {
"index.blocks.write": true
} }'
```
Then, initiate the force merge:
Then, initiate the force merge:
```bash
curl --request POST 'http://localhost:9200/gitlab-production/_forcemerge?max_num_segments=5'
```
```bash
curl --request POST 'http://localhost:9200/gitlab-production/_forcemerge?max_num_segments=5'
```
After this, if your index is in read-only, switch back to read-write:
After this, if your index is in read-only mode, switch back to read-write:
```bash
curl --request PUT localhost:9200/gitlab-production/_settings --data '{
"settings": {
"index.blocks.write": false
} }'
```
```bash
curl --request PUT localhost:9200/gitlab-production/_settings --data '{
"settings": {
"index.blocks.write": false
} }'
```
Enable Elasticsearch search in **Admin > Settings > Integrations**. That's it. Enjoy it!
1. After the indexing has completed, enable [**Search with Elasticsearch**](#enabling-elasticsearch).
### Index limit
### Indexing limitations
Currently for repository and snippet files, GitLab would only index up to 1 MB of content, in order to avoid indexing timeout.
For repository and snippet files, GitLab will only index up to 1 MiB of content, in order to avoid indexing timeouts.
## GitLab Elasticsearch Rake Tasks
......@@ -352,7 +405,7 @@ There are several rake tasks available to you via the command line:
- [`sudo gitlab-rake gitlab:elastic:index_projects_status`](https://gitlab.com/gitlab-org/gitlab-ee/blob/master/ee/lib/tasks/gitlab/elastic.rake)
- This determines the overall status of the indexing. It is done by counting the total number of indexed projects, dividing by a count of the total number of projects, then multiplying by 100.
- [`sudo gitlab-rake gitlab:elastic:create_empty_index`](https://gitlab.com/gitlab-org/gitlab-ee/blob/master/ee/lib/tasks/gitlab/elastic.rake)
- This generates an empty index on the Elasticsearch side.
- This generates an empty index on the Elasticsearch side, deleting the existing one if present.
- [`sudo gitlab-rake gitlab:elastic:clear_index_status`](https://gitlab.com/gitlab-org/gitlab-ee/blob/master/ee/lib/tasks/gitlab/elastic.rake)
- This deletes all instances of IndexStatus for all projects.
- [`sudo gitlab-rake gitlab:elastic:delete_index`](https://gitlab.com/gitlab-org/gitlab-ee/blob/master/ee/lib/tasks/gitlab/elastic.rake)
......@@ -468,7 +521,7 @@ Here are some common pitfalls and how to overcome them:
pp s.search_objects.to_a
```
See [Elasticsearch Index Scopes](elasticsearch.md#elasticsearch-index-scopes) for more information on searching for specific types of data.
See [Elasticsearch Index Scopes](#elasticsearch-index-scopes) for more information on searching for specific types of data.
- **I indexed all the repositories but then switched Elasticsearch servers and now I can't find anything**
......
......@@ -65,16 +65,11 @@ Once you're on the dashboard, at the top you should see a series of filters for:
NOTE: **Note:**
The dashboard only shows projects with [security reports](#supported-reports) enabled in a group.
![dashboard with action buttons and metrics](img/group_security_dashboard.png)
![dashboard with action buttons and metrics](img/group_security_dashboard_v12_3.png)
Selecting one or more filters will filter the results on this page.
The first section is an overview of all the vulnerabilities, grouped by severity.
Underneath this overview is a timeline chart that shows how many open
vulnerabilities your projects had at various points in time. You can filter among 30, 60, and
90 days, with the default being 90. Hover over the chart to get more details about
the open vulnerabilities at a specific time.
Finally, there is a list of all the vulnerabilities in the group, sorted by severity.
The main section is a list of all the vulnerabilities in the group, sorted by severity.
In that list, you can see the severity of the vulnerability, its name, its
confidence (the likelihood that the vulnerability is a true positive), and the project
it's from.
......@@ -85,6 +80,11 @@ If you hover over a row, there will appear some actions you can take:
- "Create issue"
- "Dismiss vulnerability"
Next to the list is a timeline chart that shows how many open
vulnerabilities your projects had at various points in time. You can filter among 30, 60, and
90 days, with the default being 90. Hover over the chart to get more details about
the open vulnerabilities at a specific time.
Read more on how to [interact with the vulnerabilities](../index.md#interacting-with-the-vulnerabilities).
## Keeping the dashboards up to date
......
......@@ -393,6 +393,27 @@ The following requirements must be met for the metric to unfurl:
![Embedded Metrics](img/embed_metrics.png)
### Embedding live Grafana charts
It is also possible to embed live [Grafana](https://docs.gitlab.com/omnibus/settings/grafana.html) charts within issues, as a [Direct Linked Rendered Image](https://grafana.com/docs/reference/sharing/#direct-link-rendered-image).
The sharing dialog within Grafana provides the link, as highlighted below.
![Grafana Direct Linked Rendered Image](img/grafana_live_embed.png)
NOTE: **Note:**
For this embed to display correctly, the Grafana instance must be available to the target user, either as a public dashboard or on the same network.
Copy the link and add an image tag as [inline HTML](../../markdown.md#inline-html) in your markdown. You may tweak the query parameters as required. For instance, removing the `&from=` and `&to=` parameters will give you a live chart. Here is example markup for a live chart from GitLab's public dashboard:
```html
<img src="https://dashboards.gitlab.com/render/d-solo/RZmbBr7mk/gitlab-triage?orgId=1&refresh=30s&var-env=gprd&var-environment=gprd&var-prometheus=prometheus-01-inf-gprd&var-prometheus_app=prometheus-app-01-inf-gprd&var-backend=All&var-type=All&var-stage=main&panelId=1247&width=1000&height=300"/>
```
This will render like so:
<img src="https://dashboards.gitlab.com/render/d-solo/RZmbBr7mk/gitlab-triage?orgId=1&refresh=30s&var-env=gprd&var-environment=gprd&var-prometheus=prometheus-01-inf-gprd&var-prometheus_app=prometheus-app-01-inf-gprd&var-backend=All&var-type=All&var-stage=main&panelId=1247&width=1000&height=300"/>
## Troubleshooting
If the "No data found" screen continues to appear, it could be due to:
......
......@@ -44,11 +44,18 @@ describe Clusters::Applications::CertManager do
it 'is initialized with cert_manager arguments' do
expect(subject.name).to eq('certmanager')
expect(subject.chart).to eq('stable/cert-manager')
expect(subject.version).to eq('v0.5.2')
expect(subject.chart).to eq('certmanager/cert-manager')
expect(subject.repository).to eq('https://charts.jetstack.io')
expect(subject.version).to eq('v0.9.1')
expect(subject).to be_rbac
expect(subject.files).to eq(cert_manager.files.merge(cluster_issuer_file))
expect(subject.postinstall).to eq(['kubectl create -f /data/helm/certmanager/config/cluster_issuer.yaml'])
expect(subject.preinstall).to eq([
'kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.9/deploy/manifests/00-crds.yaml',
'kubectl label --overwrite namespace gitlab-managed-apps certmanager.k8s.io/disable-validation=true'
])
expect(subject.postinstall).to eq([
'for i in $(seq 1 30); do kubectl apply -f /data/helm/certmanager/config/cluster_issuer.yaml && break; sleep 1s; echo "Retrying ($i)..."; done'
])
end
context 'for a specific user' do
......@@ -75,7 +82,7 @@ describe Clusters::Applications::CertManager do
let(:cert_manager) { create(:clusters_applications_cert_manager, :errored, version: '0.0.1') }
it 'is initialized with the locked version' do
expect(subject.version).to eq('v0.5.2')
expect(subject.version).to eq('v0.9.1')
end
end
end
......@@ -93,10 +100,13 @@ describe Clusters::Applications::CertManager do
it 'specifies a post delete command to remove custom resource definitions' do
expect(subject.postdelete).to eq([
"kubectl delete secret -n gitlab-managed-apps letsencrypt-prod --ignore-not-found",
'kubectl delete secret -n gitlab-managed-apps letsencrypt-prod --ignore-not-found',
'kubectl delete crd certificates.certmanager.k8s.io --ignore-not-found',
'kubectl delete crd certificaterequests.certmanager.k8s.io --ignore-not-found',
'kubectl delete crd challenges.certmanager.k8s.io --ignore-not-found',
'kubectl delete crd clusterissuers.certmanager.k8s.io --ignore-not-found',
'kubectl delete crd issuers.certmanager.k8s.io --ignore-not-found'
'kubectl delete crd issuers.certmanager.k8s.io --ignore-not-found',
'kubectl delete crd orders.certmanager.k8s.io --ignore-not-found'
])
end
......@@ -111,8 +121,11 @@ describe Clusters::Applications::CertManager do
it 'does not try and delete the secret' do
expect(subject.postdelete).to eq([
'kubectl delete crd certificates.certmanager.k8s.io --ignore-not-found',
'kubectl delete crd certificaterequests.certmanager.k8s.io --ignore-not-found',
'kubectl delete crd challenges.certmanager.k8s.io --ignore-not-found',
'kubectl delete crd clusterissuers.certmanager.k8s.io --ignore-not-found',
'kubectl delete crd issuers.certmanager.k8s.io --ignore-not-found'
'kubectl delete crd issuers.certmanager.k8s.io --ignore-not-found',
'kubectl delete crd orders.certmanager.k8s.io --ignore-not-found'
])
end
end
......