Commit d87b19bb authored by Miguel Rincon's avatar Miguel Rincon

Fix error handling by using a backoff timeout and a flash notice

When a metric could not be loaded, the frontend would give up after
3 retries. This change removes the retry count and replaces it with a
long backoff timeout.

This is done by grouping all dashboard metric requests in a
Promise.all and prompting the user with a flash notice when any of
them fails.

The other metrics can still load, so specs have been updated to
emulate the condition where a single metric fails while the rest
succeed.

Add changelog file, remove unused gitlab.pot string
parent 48e99af0
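
For context, a minimal sketch of the retry contract this change relies on. This is an
assumption about how `backOff` from `~/lib/utils/common_utils` behaves (the callback
receives `next`/`stop` and the whole operation rejects once the timeout elapses), not a
copy of its implementation:

    // Assumed shape of backOff(fn, timeoutMs): keep calling fn(next, stop) with
    // growing delays until stop() settles the promise or timeoutMs has passed.
    const backOffSketch = (fn, timeoutMs = 60000) =>
      new Promise((resolve, reject) => {
        const startTime = Date.now();
        let delay = 1000;
        const stop = arg => (arg instanceof Error ? reject(arg) : resolve(arg));
        const next = () => {
          if (Date.now() - startTime > timeoutMs) {
            reject(new Error('BACKOFF_TIMEOUT')); // illustrative error, not the real constant
          } else {
            delay *= 2;
            setTimeout(() => fn(next, stop), delay);
          }
        };
        fn(next, stop);
      });

Under that contract, swapping the MAX_REQUESTS counter for a TWO_MINUTES timeout means a
metric endpoint that keeps answering 204 No Content is retried with growing delays for up
to two minutes instead of failing after exactly three attempts.
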
@@ -4,28 +4,22 @@ import createFlash from '~/flash';
 import trackDashboardLoad from '../monitoring_tracking_helper';
 import statusCodes from '../../lib/utils/http_status';
 import { backOff } from '../../lib/utils/common_utils';
-import { s__, __ } from '../../locale';
+import { s__ } from '../../locale';
 
-const MAX_REQUESTS = 3;
+const TWO_MINUTES = 120000;
 
-export function backOffRequest(makeRequestCallback) {
-  let requestCounter = 0;
+function backOffRequest(makeRequestCallback) {
   return backOff((next, stop) => {
     makeRequestCallback()
       .then(resp => {
         if (resp.status === statusCodes.NO_CONTENT) {
-          requestCounter += 1;
-          if (requestCounter < MAX_REQUESTS) {
-            next();
-          } else {
-            stop(new Error(__('Failed to connect to the prometheus server')));
-          }
+          next();
         } else {
           stop(resp);
         }
       })
       .catch(stop);
-  });
+  }, TWO_MINUTES);
 }
 
 export const setGettingStartedEmptyState = ({ commit }) => {
@@ -52,11 +46,6 @@ export const receiveMetricsDashboardFailure = ({ commit }, error) => {
   commit(types.RECEIVE_METRICS_DATA_FAILURE, error);
 };
 
-export const requestMetricsData = ({ commit }) => commit(types.REQUEST_METRICS_DATA);
-export const receiveMetricsDataSuccess = ({ commit }, data) =>
-  commit(types.RECEIVE_METRICS_DATA_SUCCESS, data);
-export const receiveMetricsDataFailure = ({ commit }, error) =>
-  commit(types.RECEIVE_METRICS_DATA_FAILURE, error);
 export const receiveDeploymentsDataSuccess = ({ commit }, data) =>
   commit(types.RECEIVE_DEPLOYMENTS_DATA_SUCCESS, data);
 export const receiveDeploymentsDataFailure = ({ commit }) =>
@@ -149,10 +138,14 @@ export const fetchPrometheusMetrics = ({ state, commit, dispatch }, params) => {
       });
     });
 
-  return Promise.all(promises).then(() => {
+  return Promise.all(promises)
+    .then(() => {
       if (state.metricsWithData.length === 0) {
         commit(types.SET_NO_DATA_EMPTY_STATE);
       }
+    })
+    .catch(() => {
+      createFlash(s__(`Metrics|There was an error while retrieving metrics`), 'warning');
     });
 };
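
The design here: each metric request settles its own promise, Promise.all rejects as soon
as any of them fails, and the single catch shows one warning flash while the panels whose
requests succeeded keep the data they already committed. A small illustration of that
behaviour (the metric names below are made up):

    // Illustration only: one rejected request produces exactly one flash,
    // and the resolved requests are unaffected.
    const promises = [
      Promise.resolve('cpu'), // metric that loaded
      Promise.reject(new Error('boom')), // metric that failed
      Promise.resolve('memory'), // metric that loaded
    ];

    Promise.all(promises)
      .then(() => {
        // reached only when every metric resolved
      })
      .catch(() => {
        // reached once, however many metrics failed
        createFlash(s__(`Metrics|There was an error while retrieving metrics`), 'warning');
      });
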
---
title: Improve the way the metrics dashboard waits for data
merge_request: 20687
author:
type: fixed
@@ -7156,9 +7156,6 @@ msgstr ""
 msgid "Failed to check related branches."
 msgstr ""
 
-msgid "Failed to connect to the prometheus server"
-msgstr ""
-
 msgid "Failed to create Merge Request. Please try again."
 msgstr ""
import MockAdapter from 'axios-mock-adapter';
import Tracking from '~/tracking';
import { TEST_HOST } from 'helpers/test_constants';
import testAction from 'helpers/vuex_action_helper';
import axios from '~/lib/utils/axios_utils';
import statusCodes from '~/lib/utils/http_status';
import { backOff } from '~/lib/utils/common_utils';
import createFlash from '~/flash';
import store from '~/monitoring/stores';
import * as types from '~/monitoring/stores/mutation_types';
import {
  backOffRequest,
  fetchDashboard,
  receiveMetricsDashboardSuccess,
  receiveMetricsDashboardFailure,
@@ -17,7 +16,6 @@ import {
   fetchEnvironmentsData,
   fetchPrometheusMetrics,
   fetchPrometheusMetric,
-  requestMetricsData,
   setEndpoints,
   setGettingStartedEmptyState,
 } from '~/monitoring/stores/actions';
@@ -31,6 +29,7 @@ import {
 } from '../mock_data';
 
 jest.mock('~/lib/utils/common_utils');
+jest.mock('~/flash');
 
 const resetStore = str => {
   str.replaceState({
@@ -40,71 +39,36 @@ const resetStore = str => {
   });
 };
 
-const MAX_REQUESTS = 3;
-
-describe('Monitoring store helpers', () => {
+describe('Monitoring store actions', () => {
   let mock;
 
-  // Mock underlying `backOff` function to remove in-built delay.
-  backOff.mockImplementation(
-    callback =>
-      new Promise((resolve, reject) => {
-        const stop = arg => (arg instanceof Error ? reject(arg) : resolve(arg));
-        const next = () => callback(next, stop);
-        callback(next, stop);
-      }),
-  );
-
   beforeEach(() => {
     mock = new MockAdapter(axios);
-  });
 
-  afterEach(() => {
-    mock.restore();
-  });
-
-  describe('backOffRequest', () => {
-    it('returns immediately when recieving a 200 status code', () => {
-      mock.onGet(TEST_HOST).reply(200);
-
-      return backOffRequest(() => axios.get(TEST_HOST)).then(() => {
-        expect(mock.history.get.length).toBe(1);
-      });
-    });
-
-    it(`repeats the network call ${MAX_REQUESTS} times when receiving a 204 response`, done => {
-      mock.onGet(TEST_HOST).reply(statusCodes.NO_CONTENT, {});
-
-      backOffRequest(() => axios.get(TEST_HOST))
-        .then(done.fail)
-        .catch(() => {
-          expect(mock.history.get.length).toBe(MAX_REQUESTS);
-          done();
-        });
-    });
-  });
-});
-
-describe('Monitoring store actions', () => {
-  let mock;
-
-  beforeEach(() => {
-    mock = new MockAdapter(axios);
-  });
+    // Mock `backOff` function to remove exponential algorithm delay.
+    jest.useFakeTimers();
+
+    backOff.mockImplementation(callback => {
+      const q = new Promise((resolve, reject) => {
+        const stop = arg => (arg instanceof Error ? reject(arg) : resolve(arg));
+        const next = () => callback(next, stop);
+        // Define a timeout based on a mock timer
+        setTimeout(() => {
+          callback(next, stop);
+        });
+      });
+      // Run all resolved promises in chain
+      jest.runOnlyPendingTimers();
+      return q;
+    });
+  });
 
   afterEach(() => {
     resetStore(store);
-    mock.restore();
-  });
-
-  describe('requestMetricsData', () => {
-    it('sets emptyState to loading', () => {
-      const commit = jest.fn();
-      const { state } = store;
-
-      requestMetricsData({
-        state,
-        commit,
-      });
-
-      expect(commit).toHaveBeenCalledWith(types.REQUEST_METRICS_DATA);
-    });
+    mock.reset();
+
+    backOff.mockReset();
+    createFlash.mockReset();
   });
 
   describe('fetchDeploymentsData', () => {
     it('commits RECEIVE_DEPLOYMENTS_DATA_SUCCESS on error', done => {
       const dispatch = jest.fn();
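
A note on the backOff mock above: the spec relies on Jest fake timers, so the zero-delay
setTimeout inside the mocked callback never waits for real time; jest.runOnlyPendingTimers()
flushes it immediately, and later retries go through the synchronous `next` closure. A
standalone sketch of that mechanism (the test name and values are illustrative):

    // Sketch: with fake timers, a zero-delay setTimeout runs only when pending
    // timers are flushed, which keeps the mocked retry loop instantaneous.
    it('resolves a fake-timer delay without real waiting (illustrative)', () => {
      jest.useFakeTimers();

      const delayed = new Promise(resolve => {
        setTimeout(() => resolve('ran'), 0); // queued on the fake timer
      });

      jest.runOnlyPendingTimers(); // executes the queued callback right away

      return delayed.then(value => {
        expect(value).toBe('ran'); // settles without any real delay
      });
    });
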
@@ -362,17 +326,11 @@ describe('Monitoring store actions', () => {
     it('commits empty state when state.groups is empty', done => {
       const state = storeState();
       const params = {};
-      fetchPrometheusMetrics(
-        {
-          state,
-          commit,
-          dispatch,
-        },
-        params,
-      )
+      fetchPrometheusMetrics({ state, commit, dispatch }, params)
         .then(() => {
           expect(commit).toHaveBeenCalledWith(types.SET_NO_DATA_EMPTY_STATE);
           expect(dispatch).not.toHaveBeenCalled();
+          expect(createFlash).not.toHaveBeenCalled();
           done();
         })
         .catch(done.fail);
@@ -382,20 +340,42 @@ describe('Monitoring store actions', () => {
       const state = storeState();
       state.dashboard.panel_groups = metricsDashboardResponse.dashboard.panel_groups;
       const metric = state.dashboard.panel_groups[0].panels[0].metrics[0];
-      fetchPrometheusMetrics(
-        {
-          state,
-          commit,
-          dispatch,
-        },
-        params,
-      )
+      fetchPrometheusMetrics({ state, commit, dispatch }, params)
         .then(() => {
           expect(dispatch).toHaveBeenCalledTimes(3);
           expect(dispatch).toHaveBeenCalledWith('fetchPrometheusMetric', {
             metric,
             params,
           });
+          expect(createFlash).not.toHaveBeenCalled();
           done();
         })
         .catch(done.fail);
-      done();
     });
+
+    it('dispatches fetchPrometheusMetric for each panel query, handles an error', done => {
+      const params = {};
+      const state = storeState();
+      state.dashboard.panel_groups = metricsDashboardResponse.dashboard.panel_groups;
+      const metric = state.dashboard.panel_groups[0].panels[0].metrics[0];
+
+      // Mock having one out of three metrics failing
+      dispatch.mockRejectedValueOnce(new Error('Error fetching this metric'));
+      dispatch.mockResolvedValue();
+
+      fetchPrometheusMetrics({ state, commit, dispatch }, params)
+        .then(() => {
+          expect(dispatch).toHaveBeenCalledTimes(3);
+          expect(dispatch).toHaveBeenCalledWith('fetchPrometheusMetric', {
+            metric,
+            params,
+          });
+          expect(createFlash).toHaveBeenCalledTimes(1);
+          done();
+        })
+        .catch(done.fail);
@@ -403,28 +383,75 @@
     });
   });
 
   describe('fetchPrometheusMetric', () => {
-    it('commits prometheus query result', done => {
-      const commit = jest.fn();
     const params = {
       start: '2019-08-06T12:40:02.184Z',
       end: '2019-08-06T20:40:02.184Z',
     };
-      const metric = metricsDashboardResponse.dashboard.panel_groups[0].panels[0].metrics[0];
-      const state = storeState();
-
-      const data = metricsGroupsAPIResponse[0].panels[0].metrics[0];
-      const response = {
-        data,
-      };
-
-      mock.onGet('http://test').reply(200, response);
+    let commit;
+    let metric;
+    let state;
+    let data;
+
+    beforeEach(() => {
+      commit = jest.fn();
+      state = storeState();
+      [metric] = metricsDashboardResponse.dashboard.panel_groups[0].panels[0].metrics;
+      [data] = metricsGroupsAPIResponse[0].panels[0].metrics;
+    });
+
+    it('commits result', done => {
+      mock.onGet('http://test').reply(200, { data }); // One attempt
 
       fetchPrometheusMetric({ state, commit }, { metric, params })
         .then(() => {
           expect(commit).toHaveBeenCalledWith(types.SET_QUERY_RESULT, {
             metricId: metric.metric_id,
             result: data.result,
           });
+          expect(mock.history.get).toHaveLength(1);
           done();
         })
         .catch(done.fail);
     });
+
+    it('commits result, when waiting for results', done => {
+      // Mock multiple attempts while the cache is filling up
+      mock.onGet('http://test').replyOnce(statusCodes.NO_CONTENT);
+      mock.onGet('http://test').replyOnce(statusCodes.NO_CONTENT);
+      mock.onGet('http://test').replyOnce(statusCodes.NO_CONTENT);
+      mock.onGet('http://test').reply(200, { data }); // 4th attempt
+
+      const fetch = fetchPrometheusMetric({ state, commit }, { metric, params });
+
+      fetch
+        .then(() => {
+          expect(commit).toHaveBeenCalledWith(types.SET_QUERY_RESULT, {
+            metricId: metric.metric_id,
+            result: data.result,
+          });
+          expect(mock.history.get).toHaveLength(4);
+          done();
+        })
+        .catch(done.fail);
+    });
+
+    it('commits failure, when waiting for results and getting a server error', done => {
+      // Mock multiple attempts while the cache is filling up and fails
+      mock.onGet('http://test').replyOnce(statusCodes.NO_CONTENT);
+      mock.onGet('http://test').replyOnce(statusCodes.NO_CONTENT);
+      mock.onGet('http://test').replyOnce(statusCodes.NO_CONTENT);
+      mock.onGet('http://test').reply(500); // 4th attempt
+
+      fetchPrometheusMetric({ state, commit }, { metric, params })
+        .then(() => {
+          done.fail();
+        })
+        .catch(() => {
+          expect(commit).not.toHaveBeenCalled();
+          expect(mock.history.get).toHaveLength(4);
+          done();
+        });
+    });
   });
 });