Commit 05292794 authored by Dmitriy Zaporozhets

Merge remote-tracking branch 'ce-com/master' into ce-to-ee

Signed-off-by: Dmitriy Zaporozhets <dmitriy.zaporozhets@gmail.com>
parents 438d2ddc 6291bc8c
......@@ -2,7 +2,6 @@ source 'https://rubygems.org'
gem 'rails', '4.2.8'
gem 'rails-deprecated_sanitizer', '~> 1.0.3'
gem 'bootsnap', '~> 1.1'
# Responders respond_to and respond_with
gem 'responders', '~> 2.0'
......@@ -355,7 +354,7 @@ group :development, :test do
gem 'benchmark-ips', '~> 2.3.0', require: false
gem 'license_finder', '~> 2.1.0', require: false
gem 'knapsack', '~> 1.11.0'
gem 'knapsack', '~> 1.14.0'
gem 'activerecord_sane_schema_dumper', '0.2'
......
......@@ -91,8 +91,6 @@ GEM
bindata (2.3.5)
binding_of_caller (0.7.2)
debug_inspector (>= 0.0.1)
bootsnap (1.1.1)
msgpack (~> 1.0)
bootstrap-sass (3.3.6)
autoprefixer-rails (>= 5.2.1)
sass (>= 3.3.4)
......@@ -452,9 +450,8 @@ GEM
actionpack (>= 3.0.0)
activesupport (>= 3.0.0)
kgio (2.10.0)
knapsack (1.11.0)
knapsack (1.14.0)
rake
timecop (>= 0.1.0)
kubeclient (2.2.0)
http (= 0.9.8)
recursive-open-struct (= 1.0.0)
......@@ -493,7 +490,6 @@ GEM
minitest (5.7.0)
mmap2 (2.2.7)
mousetrap-rails (1.4.6)
msgpack (1.1.0)
multi_json (1.12.1)
multi_xml (0.6.0)
multipart-post (2.0.0)
......@@ -960,7 +956,6 @@ DEPENDENCIES
benchmark-ips (~> 2.3.0)
better_errors (~> 2.1.0)
binding_of_caller (~> 0.7.2)
bootsnap (~> 1.1)
bootstrap-sass (~> 3.3.0)
bootstrap_form (~> 2.7.0)
brakeman (~> 3.6.0)
......@@ -1042,7 +1037,7 @@ DEPENDENCIES
json-schema (~> 2.6.2)
jwt (~> 1.5.6)
kaminari (~> 0.17.0)
knapsack (~> 1.11.0)
knapsack (~> 1.14.0)
kubeclient (~> 2.2.0)
letter_opener_web (~> 1.3.0)
license_finder (~> 2.1.0)
......
......@@ -46,6 +46,8 @@ export default {
},
methods: {
changePage(e) {
if (e.target.parentElement.classList.contains('disabled')) return;
const text = e.target.innerText;
const { totalPages, nextPage, previousPage } = this.pageInfo;
......@@ -82,7 +84,9 @@ export default {
const page = this.pageInfo.page;
const items = [];
if (page > 1) items.push({ title: FIRST });
if (page > 1) {
items.push({ title: FIRST, first: true });
}
if (page > 1) {
items.push({ title: PREV, prev: true });
......@@ -110,7 +114,9 @@ export default {
items.push({ title: NEXT, next: true });
}
if (total - page >= 1) items.push({ title: LAST, last: true });
if (total - page >= 1) {
items.push({ title: LAST, last: true });
}
return items;
},
......@@ -124,13 +130,15 @@ export default {
v-for="item in getItems"
:class="{
page: item.page,
prev: item.prev,
next: item.next,
'js-previous-button': item.prev,
'js-next-button': item.next,
'js-last-button': item.last,
'js-first-button': item.first,
separator: item.separator,
active: item.active,
disabled: item.disabled
}">
<a @click="changePage($event)">{{item.title}}</a>
<a @click.prevent="changePage($event)">{{item.title}}</a>
</li>
</ul>
</div>
......
......@@ -125,7 +125,7 @@
.dropdown-menu {
margin-top: 11px;
z-index: 200;
z-index: 300;
}
.ci-action-icon-wrapper {
......
module IssuableCollections
extend ActiveSupport::Concern
include SortingHelper
include Gitlab::IssuableMetadata
included do
helper_method :issues_finder
......@@ -9,39 +10,6 @@ module IssuableCollections
private
def issuable_meta_data(issuable_collection, collection_type)
# map has to be used here since using pluck or select will
# throw an error when ordering issuables by priority which inserts
# a new order into the collection.
# We cannot use reorder to not mess up the paginated collection.
issuable_ids = issuable_collection.map(&:id)
return {} if issuable_ids.empty?
issuable_note_count = Note.count_for_collection(issuable_ids, @collection_type)
issuable_votes_count = AwardEmoji.votes_for_collection(issuable_ids, @collection_type)
issuable_merge_requests_count =
if collection_type == 'Issue'
MergeRequestsClosingIssues.count_for_collection(issuable_ids)
else
[]
end
issuable_ids.each_with_object({}) do |id, issuable_meta|
downvotes = issuable_votes_count.find { |votes| votes.awardable_id == id && votes.downvote? }
upvotes = issuable_votes_count.find { |votes| votes.awardable_id == id && votes.upvote? }
notes = issuable_note_count.find { |notes| notes.noteable_id == id }
merge_requests = issuable_merge_requests_count.find { |mr| mr.first == id }
issuable_meta[id] = Issuable::IssuableMeta.new(
upvotes.try(:count).to_i,
downvotes.try(:count).to_i,
notes.try(:count).to_i,
merge_requests.try(:last).to_i
)
end
end
def issues_collection
issues_finder.execute.preload(:project, :author, :assignees, :labels, :milestone, project: :namespace)
end
......
module RequiresHealthToken
module RequiresWhitelistedMonitoringClient
extend ActiveSupport::Concern
included do
before_action :validate_health_check_access!
before_action :validate_ip_whitelisted_or_valid_token!
end
private
def validate_health_check_access!
render_404 unless token_valid?
def validate_ip_whitelisted_or_valid_token!
render_404 unless client_ip_whitelisted? || valid_token?
end
def token_valid?
def client_ip_whitelisted?
ip_whitelist.any? { |e| e.include?(Gitlab::RequestContext.client_ip) }
end
def ip_whitelist
@ip_whitelist ||= Settings.monitoring.ip_whitelist.map(&IPAddr.method(:new))
end
def valid_token?
token = params[:token].presence || request.headers['TOKEN']
token.present? &&
ActiveSupport::SecurityUtils.variable_size_secure_compare(
......
class HealthCheckController < HealthCheck::HealthCheckController
include RequiresHealthToken
include RequiresWhitelistedMonitoringClient
end
class HealthController < ActionController::Base
protect_from_forgery with: :exception
include RequiresHealthToken
include RequiresWhitelistedMonitoringClient
CHECKS = [
Gitlab::HealthChecks::DbCheck,
......
class MetricsController < ActionController::Base
include RequiresHealthToken
include RequiresWhitelistedMonitoringClient
protect_from_forgery with: :exception
before_action :validate_prometheus_metrics
def index
render text: metrics_service.metrics_text, content_type: 'text/plain; verssion=0.0.4'
render text: metrics_service.metrics_text, content_type: 'text/plain; version=0.0.4'
end
private
......
......@@ -197,6 +197,9 @@ class ApplicationSetting < ActiveRecord::Base
Rails.cache.fetch(CACHE_KEY) do
ApplicationSetting.last
end
rescue
# Fall back to an uncached value if there are any problems (e.g. redis down)
ApplicationSetting.last
end
def self.expire
......
......@@ -4,7 +4,7 @@ module Ci
prepend EE::Ci::Runner
RUNNER_QUEUE_EXPIRY_TIME = 60.minutes
LAST_CONTACT_TIME = 1.hour.ago
ONLINE_CONTACT_TIMEOUT = 1.hour
AVAILABLE_SCOPES = %w[specific shared active paused online].freeze
FORM_EDITABLE = %i[description tag_list active run_untagged locked].freeze
......@@ -20,7 +20,7 @@ module Ci
scope :shared, ->() { where(is_shared: true) }
scope :active, ->() { where(active: true) }
scope :paused, ->() { where(active: false) }
scope :online, ->() { where('contacted_at > ?', LAST_CONTACT_TIME) }
scope :online, ->() { where('contacted_at > ?', contact_time_deadline) }
scope :ordered, ->() { order(id: :desc) }
scope :owned_or_shared, ->(project_id) do
......@@ -60,6 +60,10 @@ module Ci
where(t[:token].matches(pattern).or(t[:description].matches(pattern)))
end
def self.contact_time_deadline
ONLINE_CONTACT_TIMEOUT.ago
end
def set_default_values
self.token = SecureRandom.hex(15) if self.token.blank?
end
......@@ -81,7 +85,7 @@ module Ci
end
def online?
contacted_at && contacted_at > LAST_CONTACT_TIME
contacted_at && contacted_at > self.class.contact_time_deadline
end
def status
......
---
title: Deprecate Healthcheck Access Token in favor of IP whitelist
merge_request:
author:
---
title: Prevent bad data being added to application settings when Redis is unavailable
merge_request: 12750
author:
---
title: Prevent disabled pagination button from being clicked
merge_request:
author:
---
title: Fix offline runner detection
merge_request: 11751
author: Alessio Caiazza
---
title: Remove remaining N+1 queries in merge requests API with emojis and labels
merge_request:
author:
---
title: Bump bootsnap to 1.1.1
merge_request: 12425
author: @blackst0ne
......@@ -19,7 +19,11 @@ an ERB file and then loads the resulting YML as its configuration.
This file is called `resque.yml` for historical reasons. We are **NOT**
using Resque at the moment. It is used to specify Redis configuration
<<<<<<< HEAD
values instead when a single database instance of Redis is desired.
=======
values when a single database instance of Redis is desired.
>>>>>>> ce-com/master
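As a rough sketch of the load path described above (the file path, environment lookup, and fallback URL are illustrative, not the actual GitLab implementation), the file is rendered with ERB and the result parsed as YAML:

```ruby
require 'erb'
require 'yaml'

# Render the ERB template first, then parse the result as YAML.
env      = ENV['RAILS_ENV'] || 'development'
rendered = ERB.new(File.read('config/resque.yml')).result
config   = YAML.safe_load(rendered) || {}

# Assuming each environment key maps to a single Redis connection string.
redis_url = config.fetch(env, 'redis://localhost:6379')
```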
# Advanced Redis configuration files
......
......@@ -5,12 +5,6 @@ ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', __FILE__)
require 'bundler/setup' if File.exist?(ENV['BUNDLE_GEMFILE'])
begin
require 'bootsnap/setup'
rescue SystemCallError => exception
$stderr.puts "WARNING: Bootsnap failed to setup: #{exception.message}"
end
# set default directory for multiprocess metrics gathering
if ENV['RAILS_ENV'] == 'development' || ENV['RAILS_ENV'] == 'test'
ENV['prometheus_multiproc_dir'] ||= 'tmp/prometheus_multiproc_dir'
......
......@@ -634,10 +634,15 @@ production: &base
# enabled: true
# host: localhost
# port: 3808
prometheus:
## Monitoring
# Built-in monitoring settings
monitoring:
# Time between sampling of unicorn socket metrics, in seconds
# unicorn_sampler_interval: 10
# IP whitelist to access monitoring endpoints
ip_whitelist:
- 127.0.0.0/8
## GitLab Geo settings (EE-only)
geo_primary_role:
......
......@@ -590,10 +590,11 @@ Settings.webpack.dev_server['host'] ||= 'localhost'
Settings.webpack.dev_server['port'] ||= 3808
#
# Prometheus metrics settings
# Monitoring settings
#
Settings['prometheus'] ||= Settingslogic.new({})
Settings.prometheus['unicorn_sampler_interval'] ||= 10
Settings['monitoring'] ||= Settingslogic.new({})
Settings.monitoring['ip_whitelist'] ||= ['127.0.0.1/8']
Settings.monitoring['unicorn_sampler_interval'] ||= 10
#
# Testing settings
......
......@@ -141,7 +141,7 @@ def instrument_classes(instrumentation)
end
# rubocop:enable Metrics/AbcSize
Gitlab::Metrics::UnicornSampler.initialize_instance(Settings.prometheus.unicorn_sampler_interval).start
Gitlab::Metrics::UnicornSampler.initialize_instance(Settings.monitoring.unicorn_sampler_interval).start
Gitlab::Application.configure do |config|
# 0 should be Sentry to catch errors in this middleware
......
......@@ -3,4 +3,6 @@ require 'flipper/middleware/memoizer'
unless Rails.env.test?
Rails.application.config.middleware.use Flipper::Middleware::Memoizer,
lambda { Feature.flipper }
Feature.register_feature_groups
end
......@@ -78,5 +78,9 @@ begin
end
end
end
<<<<<<< HEAD
rescue ::Redis::BaseError, SocketError, Errno::ENOENT, Errno::EAFNOSUPPORT, Errno::ECONNRESET, Errno::ECONNREFUSED
=======
rescue Redis::BaseError, SocketError, Errno::ENOENT, Errno::EADDRNOTAVAIL, Errno::EAFNOSUPPORT, Errno::ECONNRESET, Errno::ECONNREFUSED
>>>>>>> ce-com/master
end
......@@ -420,6 +420,12 @@ GitLab Shell is an SSH access and repository management software developed speci
**Note:** Make sure your hostname can be resolved on the machine itself by either a proper DNS record or an additional line in /etc/hosts ("127.0.0.1 hostname"). This might be necessary for example if you set up GitLab behind a reverse proxy. If the hostname cannot be resolved, the final installation check will fail with "Check GitLab API access: FAILED. code: 401" and pushing commits will be rejected with "[remote rejected] master -> master (hook declined)".
**Note:** GitLab Shell application startup time can be greatly reduced by disabling RubyGems. This can be done in several ways (a short illustration follows this list):
* Export the `RUBYOPT=--disable-gems` environment variable for the processes
* Compile Ruby with `configure --disable-rubygems` to disable RubyGems by default. Not recommended for system-wide Ruby.
* Omnibus GitLab [replaces the *shebang* line of the `gitlab-shell/bin/*` scripts](https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests/1707)
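For illustration only — the interpreter path is an assumption and the exact shebang Omnibus writes may differ — a script whose shebang disables RubyGems starts like this:

```ruby
#!/usr/bin/ruby --disable-gems
# With the flag in the shebang, this script never loads RubyGems,
# so it skips the RubyGems setup cost on every invocation.
puts 'running without RubyGems'
```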
### Install gitlab-workhorse
GitLab-Workhorse uses [GNU Make](https://www.gnu.org/software/make/). The
......
......@@ -5,6 +5,8 @@
- The `health_check` endpoint was [introduced][ce-3888] in GitLab 8.8 and will
be deprecated in GitLab 9.1. Read more in the [old behavior](#old-behavior)
section.
- [Access token](#access-token) has been deprecated in GitLab 9.4
in favor of [IP Whitelist](#ip-whitelist).
GitLab provides liveness and readiness probes to indicate service health and
reachability to required services. These probes report on the status of the
......@@ -12,7 +14,19 @@ database connection, Redis connection, and access to the filesystem. These
endpoints [can be provided to schedulers like Kubernetes][kubernetes] to hold
traffic until the system is ready or restart the container as needed.
## Access Token
## IP Whitelist
To access monitoring resources, the client IP needs to be included in the whitelist.
To add or remove hosts or IP ranges from the list, edit `gitlab.rb` or `gitlab.yml`.
Example whitelist configuration:
```yaml
monitoring:
ip_whitelist:
- 127.0.0.0/8 # by default only local IPs are allowed to access monitoring resources
```
## Access Token (Deprecated)
An access token needs to be provided while accessing the probe endpoints. The current
accepted token can be found under the **Admin area ➔ Monitoring ➔ Health check**
......@@ -47,10 +61,10 @@ which will then provide a report of system health in JSON format:
## Using the Endpoint
Once you have the access token, the probes can be accessed:
With default whitelist settings, the probes can be accessed from localhost:
- `https://gitlab.example.com/-/readiness?token=ACCESS_TOKEN`
- `https://gitlab.example.com/-/liveness?token=ACCESS_TOKEN`
- `http://localhost/-/readiness`
- `http://localhost/-/liveness`
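As a minimal sketch (the host and scheme are assumptions for a local install), the readiness probe can be queried from a whitelisted client and its JSON payload inspected:

```ruby
require 'net/http'
require 'json'

# Query the readiness endpoint from a whitelisted client (localhost by default).
response = Net::HTTP.get_response(URI('http://localhost/-/readiness'))
checks   = JSON.parse(response.body)

# Each check reports its own status, e.g. {"status"=>"ok"}.
puts checks['db_check']
```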
## Status
......@@ -71,8 +85,8 @@ the database connection, the state of the database migrations, and the ability t
and access the cache. This endpoint can be provided to uptime monitoring services like
[Pingdom][pingdom], [Nagios][nagios-health], and [NewRelic][newrelic-health].
Once you have the [access token](#access-token), health information can be
retrieved as plain text, JSON, or XML using the `health_check` endpoint:
Once you have the [access token](#access-token) or your client IP is [whitelisted](#ip-whitelist),
health information can be retrieved as plain text, JSON, or XML using the `health_check` endpoint:
- `https://gitlab.example.com/health_check?token=ACCESS_TOKEN`
- `https://gitlab.example.com/health_check.json?token=ACCESS_TOKEN`
......
......@@ -355,10 +355,26 @@ module API
class MergeRequestBasic < ProjectEntity
expose :target_branch, :source_branch
expose :upvotes, :downvotes
expose :upvotes do |merge_request, options|
if options[:issuable_metadata]
options[:issuable_metadata][merge_request.id].upvotes
else
merge_request.upvotes
end
end
expose :downvotes do |merge_request, options|
if options[:issuable_metadata]
options[:issuable_metadata][merge_request.id].downvotes
else
merge_request.downvotes
end
end
expose :author, :assignee, using: Entities::UserBasic
expose :source_project_id, :target_project_id
expose :label_names, as: :labels
expose :labels do |merge_request, options|
# Avoids an N+1 query since labels are preloaded
merge_request.labels.map(&:title).sort
end
expose :work_in_progress?, as: :work_in_progress
expose :milestone, using: Entities::Milestone
expose :merge_when_pipeline_succeeds
......
......@@ -10,6 +10,8 @@ module API
resource :projects, requirements: { id: %r{[^/]+} } do
include TimeTrackingEndpoints
helpers ::Gitlab::IssuableMetadata
helpers do
def handle_merge_request_errors!(errors)
if errors[:project_access].any?
......@@ -48,10 +50,9 @@ module API
args[:label_name] = args.delete(:labels)
merge_requests = MergeRequestsFinder.new(current_user, args).execute
.inc_notes_with_associations
.preload(:target_project, :author, :assignee, :milestone, :merge_request_diff)
merge_requests.reorder(args[:order_by] => args[:sort])
merge_requests = merge_requests.reorder(args[:order_by] => args[:sort])
paginate(merge_requests)
.preload(:notes, :target_project, :author, :assignee, :milestone, :merge_request_diff, :labels)
end
params :optional_params_ce do
......@@ -94,8 +95,9 @@ module API
authorize! :read_merge_request, user_project
merge_requests = find_merge_requests(project_id: user_project.id)
issuable_metadata = issuable_meta_data(merge_requests, 'MergeRequest')
present paginate(merge_requests), with: Entities::MergeRequestBasic, current_user: current_user, project: user_project
present merge_requests, with: Entities::MergeRequestBasic, current_user: current_user, project: user_project, issuable_metadata: issuable_metadata
end
desc 'Create a merge request' do
......
......@@ -57,5 +57,11 @@ class Feature
Flipper.new(adapter)
end
end
# This method is called from config/initializers/flipper.rb and can be used
# to register Flipper groups.
# See https://docs.gitlab.com/ee/development/feature_flags.html#feature-groups
def register_feature_groups
end
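# For illustration only (not part of this change): an override could register
# a Flipper group here, e.g. a hypothetical `team_members` group keyed on a
# `user.team_member?` predicate:
#
#   def register_feature_groups
#     Flipper.register(:team_members) do |actor|
#       actor.respond_to?(:user) && actor.user.team_member?
#     end
#   end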
end
end
......@@ -25,7 +25,7 @@ module Gitlab
def cached_application_settings
begin
::ApplicationSetting.cached
rescue ::Redis::BaseError, ::Errno::ENOENT
rescue ::Redis::BaseError, ::Errno::ENOENT, ::Errno::EADDRNOTAVAIL
# In case Redis isn't running or the Redis UNIX socket file is not available
end
end
......@@ -33,12 +33,7 @@ module Gitlab
def uncached_application_settings
return fake_application_settings unless connect_to_db?
# This loads from the database into the cache, so handle Redis errors
begin
db_settings = ::ApplicationSetting.current
rescue ::Redis::BaseError, ::Errno::ENOENT
# In case Redis isn't running or the Redis UNIX socket file is not available
end
# If there are pending migrations, it's possible there are columns that
# need to be added to the application settings. To prevent Rake tasks
......
......@@ -337,7 +337,7 @@ module Gitlab
# In the EE repo
$ git push origin #{ee_branch_prefix}
⚠️ Also, don't forget to create a new merge request on gitlab-ce and
⚠️ Also, don't forget to create a new merge request on gitlab-ee and
cross-link it with the CE merge request.
Once this is done, you can retry this failed build, and it should pass.
......
module Gitlab
module IssuableMetadata
def issuable_meta_data(issuable_collection, collection_type)
# map has to be used here since using pluck or select will
# throw an error when ordering issuables by priority which inserts
# a new order into the collection.
# We cannot use reorder to not mess up the paginated collection.
issuable_ids = issuable_collection.map(&:id)
return {} if issuable_ids.empty?
issuable_note_count = ::Note.count_for_collection(issuable_ids, collection_type)
issuable_votes_count = ::AwardEmoji.votes_for_collection(issuable_ids, collection_type)
issuable_merge_requests_count =
if collection_type == 'Issue'
::MergeRequestsClosingIssues.count_for_collection(issuable_ids)
else
[]
end
issuable_ids.each_with_object({}) do |id, issuable_meta|
downvotes = issuable_votes_count.find { |votes| votes.awardable_id == id && votes.downvote? }
upvotes = issuable_votes_count.find { |votes| votes.awardable_id == id && votes.upvote? }
notes = issuable_note_count.find { |notes| notes.noteable_id == id }
merge_requests = issuable_merge_requests_count.find { |mr| mr.first == id }
issuable_meta[id] = ::Issuable::IssuableMeta.new(
upvotes.try(:count).to_i,
downvotes.try(:count).to_i,
notes.try(:count).to_i,
merge_requests.try(:last).to_i
)
end
end
end
end
......@@ -20,6 +20,7 @@ module Gitlab
def token
Gitlab::Redis::SharedState.with do |redis|
token = redis.get(redis_shared_state_key)
<<<<<<< HEAD
if token
redis.expire(redis_shared_state_key, EXPIRY_TIME)
......@@ -27,6 +28,10 @@ module Gitlab
token = Devise.friendly_token(TOKEN_LENGTH)
redis.set(redis_shared_state_key, token, ex: EXPIRY_TIME)
end
=======
token ||= Devise.friendly_token(TOKEN_LENGTH)
redis.set(redis_shared_state_key, token, ex: EXPIRY_TIME)
>>>>>>> ce-com/master
token
end
......
......@@ -49,7 +49,6 @@ module QA
autoload :Entry, 'qa/page/main/entry'
autoload :Menu, 'qa/page/main/menu'
autoload :Groups, 'qa/page/main/groups'
autoload :Projects, 'qa/page/main/projects'
end
module Project
......
......@@ -14,6 +14,13 @@ module QA
within_user_menu { click_link 'Admin area' }
end
def go_to_new_project
within_user_menu do
find('.header-new-dropdown-toggle').click
click_link('New project')
end
end
def sign_out
within_user_menu do
find('.header-user-dropdown-toggle').click
......
module QA
module Page
module Main
class Projects < Page::Base
def go_to_new_project
##
# There are 'New Project' and 'New project' buttons on the projects
# page, so we can't use `click_on`.
#
button = find('a', text: /^new project$/i)
button.click
end
end
end
end
end
......@@ -14,8 +14,7 @@ module QA
def perform
Page::Main::Menu.act { go_to_groups }
Page::Main::Groups.act { prepare_test_namespace }
Page::Main::Menu.act { go_to_projects }
Page::Main::Projects.act { go_to_new_project }
Page::Main::Menu.act { go_to_new_project }
Page::Project::New.perform do |page|
page.choose_test_namespace
......
......@@ -25,27 +25,15 @@ module QA
def configure_rspec!
RSpec.configure do |config|
config.expect_with :rspec do |expectations|
# This option will default to `true` in RSpec 4. It makes the `description`
# and `failure_message` of custom matchers include text for helper methods
# defined using `chain`.
expectations.include_chain_clauses_in_custom_matcher_descriptions = true
end
config.mock_with :rspec do |mocks|
# Prevents you from mocking or stubbing a method that does not exist on
# a real object. This is generally recommended, and will default to
# `true` in RSpec 4.
mocks.verify_partial_doubles = true
end
# Run specs in random order to surface order dependencies.
config.order = :random
Kernel.srand config.seed
# config.before(:all) do
# page.current_window.resize_to(1200, 1800)
# end
config.formatter = :documentation
config.color = true
end
......@@ -56,7 +44,7 @@ module QA
capabilities = Selenium::WebDriver::Remote::Capabilities.chrome(
'chromeOptions' => {
'binary' => '/usr/bin/google-chrome-stable',
'args' => %w[headless no-sandbox disable-gpu]
'args' => %w[headless no-sandbox disable-gpu window-size=1280,1024]
}
)
......@@ -64,6 +52,10 @@ module QA
.new(app, browser: :chrome, desired_capabilities: capabilities)
end
Capybara::Screenshot.register_driver(:chrome) do |driver, path|
driver.browser.save_screenshot(path)
end
Capybara.configure do |config|
config.app_host = @address
config.default_driver = :chrome
......
......@@ -8,9 +8,14 @@ if [ "$USE_BUNDLE_INSTALL" != "false" ]; then
bundle install --clean $BUNDLE_INSTALL_FLAGS && bundle check
fi
# Only install knapsack after bundle install! Otherwise oddly some native
# Only install fog-aws/mime-types after bundle install! Otherwise oddly some native
# gems could not be found under some circumstance. No idea why, hours wasted.
<<<<<<< HEAD
retry gem install knapsack fog-aws mime-types
=======
# TODO: remove workaround on !10156
retry gem install fog-aws mime-types
>>>>>>> ce-com/master
cp config/gitlab.yml.example config/gitlab.yml
......@@ -45,6 +50,12 @@ else # Assume it's mysql
sed -i 's/# host:.*/host: mysql/g' config/database_geo.yml
fi
<<<<<<< HEAD
=======
cp config/resque.yml.example config/resque.yml
sed -i 's/localhost/redis/g' config/resque.yml
>>>>>>> ce-com/master
cp config/redis.cache.yml.example config/redis.cache.yml
sed -i 's/localhost/redis/g' config/redis.cache.yml
......
......@@ -3,52 +3,79 @@ require 'spec_helper'
describe HealthCheckController do
include StubENV
let(:token) { current_application_settings.health_check_access_token }
let(:json_response) { JSON.parse(response.body) }
let(:xml_response) { Hash.from_xml(response.body)['hash'] }
let(:token) { current_application_settings.health_check_access_token }
let(:whitelisted_ip) { '127.0.0.1' }
let(:not_whitelisted_ip) { '127.0.0.2' }
before do
allow(Settings.monitoring).to receive(:ip_whitelist).and_return([whitelisted_ip])
stub_env('IN_MEMORY_APPLICATION_SETTINGS', 'false')
end
describe 'GET #index' do
context 'when services are up but NO access token' do
context 'when services are up but accessed from outside whitelisted ips' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(not_whitelisted_ip)
end
it 'returns a not found page' do
get :index
expect(response).to be_not_found
end
end
context 'when services are up and an access token is provided' do
context 'when services are accessed with token' do
it 'supports passing the token in the header' do
request.headers['TOKEN'] = token
get :index
expect(response).to be_success
expect(response.content_type).to eq 'text/plain'
end
it 'supports successful plaintest response' do
it 'supports passing the token in query params' do
get :index, token: token
expect(response).to be_success
expect(response.content_type).to eq 'text/plain'
end
end
end
context 'when services are up and accessed from whitelisted ips' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(whitelisted_ip)
end
it 'supports successful plaintext response' do
get :index
expect(response).to be_success
expect(response.content_type).to eq 'text/plain'
end
it 'supports successful json response' do
get :index, token: token, format: :json
get :index, format: :json
expect(response).to be_success
expect(response.content_type).to eq 'application/json'
expect(json_response['healthy']).to be true
end
it 'supports successful xml response' do
get :index, token: token, format: :xml
get :index, format: :xml
expect(response).to be_success
expect(response.content_type).to eq 'application/xml'
expect(xml_response['healthy']).to be true
end
it 'supports successful responses for specific checks' do
get :index, token: token, checks: 'email', format: :json
get :index, checks: 'email', format: :json
expect(response).to be_success
expect(response.content_type).to eq 'application/json'
expect(json_response['healthy']).to be true
......@@ -58,33 +85,29 @@ describe HealthCheckController do
context 'when a service is down but NO access token' do
it 'returns a not found page' do
get :index
expect(response).to be_not_found
end
end
context 'when a service is down and an access token is provided' do
context 'when a service is down and an endpoint is accessed from whitelisted ip' do
before do
allow(HealthCheck::Utils).to receive(:process_checks).with(['standard']).and_return('The server is on fire')
allow(HealthCheck::Utils).to receive(:process_checks).with(['email']).and_return('Email is on fire')
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(whitelisted_ip)
end
it 'supports passing the token in the header' do
request.headers['TOKEN'] = token
it 'supports failure plaintext response' do
get :index
expect(response).to have_http_status(500)
expect(response.content_type).to eq 'text/plain'
expect(response.body).to include('The server is on fire')
end
it 'supports failure plaintest response' do
get :index, token: token
expect(response).to have_http_status(500)
expect(response.content_type).to eq 'text/plain'
expect(response.body).to include('The server is on fire')
end
it 'supports failure json response' do
get :index, token: token, format: :json
get :index, format: :json
expect(response).to have_http_status(500)
expect(response.content_type).to eq 'application/json'
expect(json_response['healthy']).to be false
......@@ -92,7 +115,8 @@ describe HealthCheckController do
end
it 'supports failure xml response' do
get :index, token: token, format: :xml
get :index, format: :xml
expect(response).to have_http_status(500)
expect(response.content_type).to eq 'application/xml'
expect(xml_response['healthy']).to be false
......@@ -100,7 +124,8 @@ describe HealthCheckController do
end
it 'supports failure responses for specific checks' do
get :index, token: token, checks: 'email', format: :json
get :index, checks: 'email', format: :json
expect(response).to have_http_status(500)
expect(response.content_type).to eq 'application/json'
expect(json_response['healthy']).to be false
......
......@@ -3,21 +3,25 @@ require 'spec_helper'
describe HealthController do
include StubENV
let(:token) { current_application_settings.health_check_access_token }
let(:json_response) { JSON.parse(response.body) }
let(:token) { current_application_settings.health_check_access_token }
let(:whitelisted_ip) { '127.0.0.1' }
let(:not_whitelisted_ip) { '127.0.0.2' }
before do
allow(Settings.monitoring).to receive(:ip_whitelist).and_return([whitelisted_ip])
stub_env('IN_MEMORY_APPLICATION_SETTINGS', 'false')
end
describe '#readiness' do
context 'authorization token provided' do
before do
request.headers['TOKEN'] = token
end
shared_context 'endpoint responding with readiness data' do
let(:request_params) { {} }
subject { get :readiness, request_params }
it 'responds with readiness checks data' do
subject
it 'returns proper response' do
get :readiness
expect(json_response['db_check']['status']).to eq('ok')
expect(json_response['cache_check']['status']).to eq('ok')
expect(json_response['queues_check']['status']).to eq('ok')
......@@ -27,22 +31,50 @@ describe HealthController do
end
end
context 'without authorization token' do
it 'returns proper response' do
get :readiness
expect(response.status).to eq(404)
context 'accessed from whitelisted ip' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(whitelisted_ip)
end
it_behaves_like 'endpoint responding with readiness data'
end
context 'accessed from not whitelisted ip' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(not_whitelisted_ip)
end
it 'responds with resource not found' do
get :readiness
expect(response.status).to eq(404)
end
describe '#liveness' do
context 'authorization token provided' do
context 'accessed with valid token' do
context 'token passed in request header' do
before do
request.headers['TOKEN'] = token
end
it 'returns proper response' do
get :liveness
it_behaves_like 'endpoint responding with readiness data'
end
end
context 'token passed as URL param' do
it_behaves_like 'endpoint responding with readiness data' do
let(:request_params) { { token: token } }
end
end
end
end
describe '#liveness' do
shared_context 'endpoint responding with liveness data' do
subject { get :liveness }
it 'responds with liveness checks data' do
subject
expect(json_response['db_check']['status']).to eq('ok')
expect(json_response['cache_check']['status']).to eq('ok')
expect(json_response['queues_check']['status']).to eq('ok')
......@@ -51,11 +83,40 @@ describe HealthController do
end
end
context 'without authorization token' do
it 'returns proper response' do
context 'accessed from whitelisted ip' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(whitelisted_ip)
end
it_behaves_like 'endpoint responding with liveness data'
end
context 'accessed from not whitelisted ip' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(not_whitelisted_ip)
end
it 'responds with resource not found' do
get :liveness
expect(response.status).to eq(404)
end
context 'accessed with valid token' do
context 'token passed in request header' do
before do
request.headers['TOKEN'] = token
end
it_behaves_like 'endpoint responding with liveness data'
end
context 'token passed as URL param' do
it_behaves_like 'endpoint responding with liveness data' do
subject { get :liveness, token: token }
end
end
end
end
end
end
......@@ -3,22 +3,22 @@ require 'spec_helper'
describe MetricsController do
include StubENV
let(:token) { current_application_settings.health_check_access_token }
let(:json_response) { JSON.parse(response.body) }
let(:metrics_multiproc_dir) { Dir.mktmpdir }
let(:whitelisted_ip) { '127.0.0.1' }
let(:whitelisted_ip_range) { '10.0.0.0/24' }
let(:ip_in_whitelisted_range) { '10.0.0.1' }
let(:not_whitelisted_ip) { '10.0.1.1' }
before do
stub_env('IN_MEMORY_APPLICATION_SETTINGS', 'false')
stub_env('prometheus_multiproc_dir', metrics_multiproc_dir)
allow(Gitlab::Metrics).to receive(:prometheus_metrics_enabled?).and_return(true)
allow(Settings.monitoring).to receive(:ip_whitelist).and_return([whitelisted_ip, whitelisted_ip_range])
end
describe '#index' do
context 'authorization token provided' do
before do
request.headers['TOKEN'] = token
end
shared_examples_for 'endpoint providing metrics' do
it 'returns DB ping metrics' do
get :index
......@@ -83,7 +83,27 @@ describe MetricsController do
end
end
context 'without authorization token' do
context 'accessed from whitelisted ip' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(whitelisted_ip)
end
it_behaves_like 'endpoint providing metrics'
end
context 'accessed from ip in whitelisted range' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(ip_in_whitelisted_range)
end
it_behaves_like 'endpoint providing metrics'
end
context 'accessed from not whitelisted ip' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(not_whitelisted_ip)
end
it 'returns proper response' do
get :index
......
......@@ -143,6 +143,7 @@ import '~/lib/utils/common_utils';
it('should return valid parameter', () => {
const value = gl.utils.getParameterByName('scope');
expect(gl.utils.getParameterByName('p')).toEqual('2');
expect(value).toBe('all');
});
......
import Vue from 'vue';
import paginationComp from '~/vue_shared/components/table_pagination.vue';
import '~/lib/utils/common_utils';
describe('Pagination component', () => {
let component;
let PaginationComponent;
const changeChanges = {
one: '',
};
const change = (one) => {
changeChanges.one = one;
};
let spy;
let mountComponent;
beforeEach(() => {
spy = jasmine.createSpy('spy');
PaginationComponent = Vue.extend(paginationComp);
mountComponent = function (props) {
return new PaginationComponent({
propsData: props,
}).$mount();
};
});
it('should render and start at page 1', () => {
component = new PaginationComponent({
propsData: {
describe('render', () => {
describe('prev button', () => {
it('should be disabled and non clickable', () => {
component = mountComponent({
pageInfo: {
totalPages: 10,
nextPage: 2,
previousPage: '',
page: 1,
perPage: 20,
previousPage: NaN,
total: 84,
totalPages: 5,
},
change,
},
}).$mount();
change: spy,
});
expect(component.$el.classList).toContain('gl-pagination');
expect(
component.$el.querySelector('.js-previous-button').classList.contains('disabled'),
).toEqual(true);
component.changePage({ target: { innerText: '1' } });
component.$el.querySelector('.js-previous-button a').click();
expect(changeChanges.one).toEqual(1);
expect(spy).not.toHaveBeenCalled();
});
it('should go to the previous page', () => {
component = new PaginationComponent({
propsData: {
it('should be enabled and clickable', () => {
component = mountComponent({
pageInfo: {
totalPages: 10,
nextPage: 3,
page: 2,
perPage: 20,
previousPage: 1,
total: 84,
totalPages: 5,
},
change,
},
}).$mount();
change: spy,
});
component.changePage({ target: { innerText: 'Prev' } });
component.$el.querySelector('.js-previous-button a').click();
expect(changeChanges.one).toEqual(1);
expect(spy).toHaveBeenCalledWith(1);
});
});
it('should go to the next page', () => {
component = new PaginationComponent({
propsData: {
describe('first button', () => {
it('should call the change callback with the first page', () => {
component = mountComponent({
pageInfo: {
totalPages: 10,
nextPage: 5,
previousPage: 3,
},
change,
nextPage: 3,
page: 2,
perPage: 20,
previousPage: 1,
total: 84,
totalPages: 5,
},
}).$mount();
change: spy,
});
component.changePage({ target: { innerText: 'Next' } });
const button = component.$el.querySelector('.js-first-button a');
expect(changeChanges.one).toEqual(5);
expect(button.textContent.trim()).toEqual('« First');
button.click();
expect(spy).toHaveBeenCalledWith(1);
});
});
it('should go to the last page', () => {
component = new PaginationComponent({
propsData: {
describe('last button', () => {
it('should call the change callback with the last page', () => {
component = mountComponent({
pageInfo: {
totalPages: 10,
nextPage: 5,
previousPage: 3,
},
change,
nextPage: 3,
page: 2,
perPage: 20,
previousPage: 1,
total: 84,
totalPages: 5,
},
}).$mount();
change: spy,
});
const button = component.$el.querySelector('.js-last-button a');
component.changePage({ target: { innerText: 'Last »' } });
expect(button.textContent.trim()).toEqual('Last »');
expect(changeChanges.one).toEqual(10);
button.click();
expect(spy).toHaveBeenCalledWith(5);
});
});
it('should go to the first page', () => {
component = new PaginationComponent({
propsData: {
describe('next button', () => {
it('should be disabled and non clickable', () => {
component = mountComponent({
pageInfo: {
totalPages: 10,
nextPage: 5,
previousPage: 3,
},
change,
page: 5,
perPage: 20,
previousPage: 1,
total: 84,
totalPages: 5,
},
}).$mount();
change: spy,
});
expect(
component.$el.querySelector('.js-next-button').textContent.trim(),
).toEqual('Next');
component.changePage({ target: { innerText: '« First' } });
component.$el.querySelector('.js-next-button a').click();
expect(changeChanges.one).toEqual(1);
expect(spy).not.toHaveBeenCalled();
});
it('should do nothing', () => {
component = new PaginationComponent({
propsData: {
it('should be enabled and clickable', () => {
component = mountComponent({
pageInfo: {
totalPages: 10,
nextPage: 2,
previousPage: '',
},
change,
nextPage: 4,
page: 3,
perPage: 20,
previousPage: 2,
total: 84,
totalPages: 5,
},
}).$mount();
change: spy,
});
component.changePage({ target: { innerText: '...' } });
component.$el.querySelector('.js-next-button a').click();
expect(changeChanges.one).toEqual(1);
expect(spy).toHaveBeenCalledWith(4);
});
});
describe('paramHelper', () => {
afterEach(() => {
window.history.pushState({}, null, '');
});
it('can parse url parameters correctly', () => {
window.history.pushState({}, null, '?scope=all&p=2');
const scope = gl.utils.getParameterByName('scope');
const p = gl.utils.getParameterByName('p');
expect(scope).toEqual('all');
expect(p).toEqual('2');
describe('numbered buttons', () => {
it('should render 5 pages', () => {
component = mountComponent({
pageInfo: {
nextPage: 4,
page: 3,
perPage: 20,
previousPage: 2,
total: 84,
totalPages: 5,
},
change: spy,
});
it('returns null if param not in url', () => {
window.history.pushState({}, null, '?p=2');
expect(component.$el.querySelectorAll('.page').length).toEqual(5);
});
});
const scope = gl.utils.getParameterByName('scope');
const p = gl.utils.getParameterByName('p');
it('should render the spread operator', () => {
component = mountComponent({
pageInfo: {
nextPage: 4,
page: 3,
perPage: 20,
previousPage: 2,
total: 84,
totalPages: 10,
},
change: spy,
});
expect(scope).toEqual(null);
expect(p).toEqual('2');
expect(component.$el.querySelector('.separator').textContent.trim()).toEqual('...');
});
});
});
......@@ -26,11 +26,28 @@ describe Gitlab::CurrentSettings do
expect(current_application_settings).to be_a(ApplicationSetting)
end
<<<<<<< HEAD
it 'falls back to DB if Caching fails' do
=======
it 'falls back to DB if Redis fails' do
db_settings = ApplicationSetting.create!(ApplicationSetting.defaults)
>>>>>>> ce-com/master
expect(ApplicationSetting).to receive(:cached).and_raise(::Redis::BaseError)
expect(ApplicationSetting).to receive(:last).and_call_original
expect(Rails.cache).to receive(:fetch).with(ApplicationSetting::CACHE_KEY).and_raise(Redis::BaseError)
expect(current_application_settings).to be_a(ApplicationSetting)
expect(current_application_settings).to eq(db_settings)
end
it 'creates default ApplicationSettings if none are present' do
expect(ApplicationSetting).to receive(:cached).and_raise(::Redis::BaseError)
expect(Rails.cache).to receive(:fetch).with(ApplicationSetting::CACHE_KEY).and_raise(Redis::BaseError)
settings = current_application_settings
expect(settings).to be_a(ApplicationSetting)
expect(settings).to be_persisted
expect(settings).to have_attributes(ApplicationSetting.defaults)
end
context 'with migrations pending' do
......
require 'spec_helper'
describe Gitlab::IssuableMetadata, lib: true do
let(:user) { create(:user) }
let!(:project) { create(:project, :public, :repository, creator: user, namespace: user.namespace) }
subject { Class.new { include Gitlab::IssuableMetadata }.new }
it 'returns an empty Hash if an empty collection is provided' do
expect(subject.issuable_meta_data(Issue.none, 'Issue')).to eq({})
end
context 'issues' do
let!(:issue) { create(:issue, author: user, project: project) }
let!(:closed_issue) { create(:issue, state: :closed, author: user, project: project) }
let!(:downvote) { create(:award_emoji, :downvote, awardable: closed_issue) }
let!(:upvote) { create(:award_emoji, :upvote, awardable: issue) }
let!(:merge_request) { create(:merge_request, :simple, author: user, assignee: user, source_project: project, target_project: project, title: "Test") }
let!(:closing_issues) { create(:merge_requests_closing_issues, issue: issue, merge_request: merge_request) }
it 'aggregates stats on issues' do
data = subject.issuable_meta_data(Issue.all, 'Issue')
expect(data.count).to eq(2)
expect(data[issue.id].upvotes).to eq(1)
expect(data[issue.id].downvotes).to eq(0)
expect(data[issue.id].notes_count).to eq(0)
expect(data[issue.id].merge_requests_count).to eq(1)
expect(data[closed_issue.id].upvotes).to eq(0)
expect(data[closed_issue.id].downvotes).to eq(1)
expect(data[closed_issue.id].notes_count).to eq(0)
expect(data[closed_issue.id].merge_requests_count).to eq(0)
end
end
context 'merge requests' do
let!(:merge_request) { create(:merge_request, :simple, author: user, assignee: user, source_project: project, target_project: project, title: "Test") }
let!(:merge_request_closed) { create(:merge_request, state: "closed", source_project: project, target_project: project, title: "Closed Test") }
let!(:downvote) { create(:award_emoji, :downvote, awardable: merge_request) }
let!(:upvote) { create(:award_emoji, :upvote, awardable: merge_request) }
let!(:note) { create(:note_on_merge_request, author: user, project: project, noteable: merge_request, note: "a comment on a MR") }
it 'aggregates stats on merge requests' do
data = subject.issuable_meta_data(MergeRequest.all, 'MergeRequest')
expect(data.count).to eq(2)
expect(data[merge_request.id].upvotes).to eq(1)
expect(data[merge_request.id].downvotes).to eq(1)
expect(data[merge_request.id].notes_count).to eq(1)
expect(data[merge_request.id].merge_requests_count).to eq(0)
expect(data[merge_request_closed.id].upvotes).to eq(0)
expect(data[merge_request_closed.id].downvotes).to eq(0)
expect(data[merge_request_closed.id].notes_count).to eq(0)
expect(data[merge_request_closed.id].merge_requests_count).to eq(0)
end
end
end
......@@ -7,7 +7,7 @@ describe Gitlab::PerformanceBar do
described_class.enabled?(user)
end
it 'caches the allowed user IDs in cache', :caching do
it 'caches the allowed user IDs in cache', :use_clean_rails_memory_store_caching do
expect do
expect(described_class.enabled?(user)).to be_truthy
end.not_to exceed_query_limit(0)
......
......@@ -174,6 +174,18 @@ describe ApplicationSetting, models: true do
end
end
describe '.current' do
context 'redis unavailable' do
it 'returns an ApplicationSetting' do
allow(Rails.cache).to receive(:fetch).and_call_original
allow(ApplicationSetting).to receive(:last).and_return(:last)
expect(Rails.cache).to receive(:fetch).with(ApplicationSetting::CACHE_KEY).and_raise(ArgumentError)
expect(ApplicationSetting.current).to eq(:last)
end
end
end
context 'restricted signup domains' do
it 'sets single domain' do
setting.domain_whitelist_raw = 'example.com'
......
......@@ -168,6 +168,7 @@ describe API::Internal do
end
end
<<<<<<< HEAD
describe "GET /internal/authorized_keys" do
context "unsing an existing key's fingerprint" do
it "finds the key" do
......@@ -216,6 +217,8 @@ describe API::Internal do
end
end
=======
>>>>>>> ce-com/master
describe "POST /internal/allowed", :clean_gitlab_redis_shared_state do
context "access granted" do
before do
......
......@@ -16,7 +16,11 @@ describe API::MergeRequests do
let!(:label) do
create(:label, title: 'label', color: '#FFAABB', project: project)
end
let!(:label2) { create(:label, title: 'a-test', color: '#FFFFFF', project: project) }
let!(:label_link) { create(:label_link, label: label, target: merge_request) }
let!(:label_link2) { create(:label_link, label: label2, target: merge_request) }
let!(:downvote) { create(:award_emoji, :downvote, awardable: merge_request) }
let!(:upvote) { create(:award_emoji, :upvote, awardable: merge_request) }
before do
project.team << [user, :reporter]
......@@ -32,6 +36,18 @@ describe API::MergeRequests do
end
context "when authenticated" do
it 'avoids N+1 queries' do
control_count = ActiveRecord::QueryRecorder.new do
get api("/projects/#{project.id}/merge_requests", user)
end.count
create(:merge_request, state: 'closed', milestone: milestone1, author: user, assignee: user, source_project: project, target_project: project, title: "Test", created_at: base_time)
expect do
get api("/projects/#{project.id}/merge_requests", user)
end.not_to exceed_query_limit(control_count)
end
it "returns an array of all merge_requests" do
get api("/projects/#{project.id}/merge_requests", user)
......@@ -44,6 +60,9 @@ describe API::MergeRequests do
expect(json_response.last['sha']).to eq(merge_request.diff_head_sha)
expect(json_response.last['merge_commit_sha']).to be_nil
expect(json_response.last['merge_commit_sha']).to eq(merge_request.merge_commit_sha)
expect(json_response.last['downvotes']).to eq(1)
expect(json_response.last['upvotes']).to eq(1)
expect(json_response.last['labels']).to eq([label2.title, label.title])
expect(json_response.first['title']).to eq(merge_request_merged.title)
expect(json_response.first['sha']).to eq(merge_request_merged.diff_head_sha)
expect(json_response.first['merge_commit_sha']).not_to be_nil
......@@ -146,7 +165,7 @@ describe API::MergeRequests do
expect(response).to have_http_status(200)
expect(json_response).to be_an Array
expect(json_response.length).to eq(1)
expect(json_response.first['labels']).to eq([label.title])
expect(json_response.first['labels']).to eq([label2.title, label.title])
end
it 'returns an array of labeled merge requests where all labels match' do
......@@ -237,8 +256,8 @@ describe API::MergeRequests do
expect(json_response['author']).to be_a Hash
expect(json_response['target_branch']).to eq(merge_request.target_branch)
expect(json_response['source_branch']).to eq(merge_request.source_branch)
expect(json_response['upvotes']).to eq(0)
expect(json_response['downvotes']).to eq(0)
expect(json_response['upvotes']).to eq(1)
expect(json_response['downvotes']).to eq(1)
expect(json_response['source_project_id']).to eq(merge_request.source_project.id)
expect(json_response['target_project_id']).to eq(merge_request.target_project.id)
expect(json_response['work_in_progress']).to be_falsy
......
......@@ -208,6 +208,7 @@ module TestEnv
# Otherwise they'd be created by the first test, often timing out and
# causing a transient test failure
def eager_load_driver_server
return unless ENV['CI']
return unless defined?(Capybara)
puts "Starting the Capybara driver server..."
......