Commit 99a2c061 authored by Jan Lindström

MDEV-7754: innodb assert "array->n_elems < array->max_elems" on a huge blob update

The problem was that a static, fixed-capacity array was used for storing the
per-thread mutex sync levels, and a huge blob update could overflow it.
Fixed by using std::vector instead.

Does not contain a test case, to avoid excessive memory/disk space usage
on buildbot VMs.
parent 3d485015
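
To make the shape of the fix easier to see before reading the diffs: the
per-thread latch list used to be a fixed-capacity slot array (with an
intrusive free list) allocated in a single calloc block, and the assertion
ut_a(array->n_elems < array->max_elems) fired once a thread held more than
SYNC_THREAD_N_LEVELS (10000) latches at once. The sketch below is a
simplified, hypothetical reconstruction of the before/after data structures;
the names sync_arr_fixed_t, sync_arr_vec_t and add() are invented for
illustration and do not appear in the tree.

	#include <cassert>
	#include <cstddef>
	#include <vector>

	struct sync_level_t {
		const void*	latch;	/* latch being acquired */
		std::size_t	level;	/* its latching-order level */
	};

	/* Before: a fixed-capacity array sized once per thread.
	   Exceeding the cap fired the assertion from the bug report. */
	struct sync_arr_fixed_t {
		std::size_t	n_elems;
		std::size_t	max_elems;	/* hard cap, 10000 in the old code */
		sync_level_t*	elems;

		void add(const void* latch, std::size_t level) {
			assert(n_elems < max_elems);	/* the MDEV-7754 assert */
			elems[n_elems].latch = latch;
			elems[n_elems].level = level;
			n_elems++;
		}
	};

	/* After: std::vector grows on demand, so there is no cap to assert. */
	struct sync_arr_vec_t {
		std::vector<sync_level_t> elems;

		void add(const void* latch, std::size_t level) {
			sync_level_t sync_level;
			sync_level.latch = latch;
			sync_level.level = level;
			elems.push_back(sync_level);	/* reallocates as needed */
		}
	};

	int main() {
		sync_arr_vec_t arr;
		for (int i = 0; i < 20000; i++) {	/* exceeds the old cap: fine */
			arr.add(&arr, 1);
		}
		return 0;
	}

With the vector, push_back() reallocates as needed, so both the hard cap and
the free-list bookkeeping disappear.
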
@@ -47,6 +47,8 @@ Created 9/5/1995 Heikki Tuuri
 #include "ha_prototypes.h"
 #include "my_cpu.h"

+#include <vector>
+
 /*
 	REASONS FOR IMPLEMENTING THE SPIN LOCK MUTEX
 	============================================
@@ -225,12 +227,9 @@ static const ulint SYNC_THREAD_N_LEVELS = 10000;
 /** Array for tracking sync levels per thread. */
 struct sync_arr_t {
-	ulint		in_use;		/*!< Number of active cells */
 	ulint		n_elems;	/*!< Number of elements in the array */
-	ulint		max_elems;	/*!< Maximum elements */
-	ulint		next_free;	/*!< ULINT_UNDEFINED or index of next
-					free slot */
-	sync_level_t*	elems;		/*!< Array elements */
+
+	std::vector<sync_level_t> elems;	/*!< Vector of elements */
 };

 /** Mutexes or rw-locks held by a thread */
@@ -1069,10 +1068,9 @@ sync_thread_add_level(
 			SYNC_LEVEL_VARYING, nothing is done */
 	ibool	relock)	/*!< in: TRUE if re-entering an x-lock */
 {
-	ulint		i;
-	sync_level_t*	slot;
 	sync_arr_t*	array;
 	sync_thread_t*	thread_slot;
+	sync_level_t	sync_level;

 	if (!sync_order_checks_on) {
@@ -1097,21 +1095,11 @@ sync_thread_add_level(
 	thread_slot = sync_thread_level_arrays_find_slot();

 	if (thread_slot == NULL) {
-		ulint	sz;
-
-		sz = sizeof(*array)
-			+ (sizeof(*array->elems) * SYNC_THREAD_N_LEVELS);
-
 		/* We have to allocate the level array for a new thread */
-		array = static_cast<sync_arr_t*>(calloc(sz, sizeof(char)));
+		array = static_cast<sync_arr_t*>(calloc(1, sizeof(sync_arr_t)));
 		ut_a(array != NULL);

-		array->next_free = ULINT_UNDEFINED;
-		array->max_elems = SYNC_THREAD_N_LEVELS;
-		array->elems = (sync_level_t*) &array[1];
-
 		thread_slot = sync_thread_level_arrays_find_free();

 		thread_slot->levels = array;
 		thread_slot->id = os_thread_get_curr_id();
 	}
@@ -1321,26 +1309,11 @@ sync_thread_add_level(
 	}

 levels_ok:
-	if (array->next_free == ULINT_UNDEFINED) {
-		ut_a(array->n_elems < array->max_elems);
-
-		i = array->n_elems++;
-	} else {
-		i = array->next_free;
-		array->next_free = array->elems[i].level;
-	}
-
-	ut_a(i < array->n_elems);
-	ut_a(i != ULINT_UNDEFINED);
-
-	++array->in_use;
-
-	slot = &array->elems[i];
-
-	ut_a(slot->latch == NULL);
-
-	slot->latch = latch;
-	slot->level = level;
+	array->n_elems++;
+	sync_level.latch = latch;
+	sync_level.level = level;
+	array->elems.push_back(sync_level);

 	mutex_exit(&sync_thread_mutex);
 }
@@ -1358,7 +1331,6 @@ sync_thread_reset_level(
 {
 	sync_arr_t*	array;
 	sync_thread_t*	thread_slot;
-	ulint		i;

 	if (!sync_order_checks_on) {
@@ -1387,36 +1359,16 @@ sync_thread_reset_level(
 	array = thread_slot->levels;

-	for (i = 0; i < array->n_elems; i++) {
-		sync_level_t*	slot;
-
-		slot = &array->elems[i];
+	for (std::vector<sync_level_t>::iterator it = array->elems.begin(); it != array->elems.end(); ++it) {
+		sync_level_t level = *it;

-		if (slot->latch != latch) {
+		if (level.latch != latch) {
 			continue;
 		}

-		slot->latch = NULL;
-
-		/* Update the free slot list. See comment in sync_level_t
-		for the level field. */
-		slot->level = array->next_free;
-		array->next_free = i;
-
-		ut_a(array->in_use >= 1);
-		--array->in_use;
-
-		/* If all cells are idle then reset the free
-		list. The assumption is that this will save
-		time when we need to scan up to n_elems. */
-		if (array->in_use == 0) {
-			array->n_elems = 0;
-			array->next_free = ULINT_UNDEFINED;
-		}
+		array->elems.erase(it);
+		array->n_elems--;

 		mutex_exit(&sync_thread_mutex);

 		return(TRUE);
 	}
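
The new removal loop erases inside iteration, which is safe here only because
the function returns immediately after the erase; continuing the loop with
the invalidated iterator would be undefined behaviour. A minimal sketch of
the pattern (simplified names, not the InnoDB code; remove_first is invented
for illustration):

	#include <vector>

	/* Remove the first element matching `latch` and stop -- mirroring
	   the erase-then-return shape used in sync_thread_reset_level()
	   above. Returning right away matters: `it` is invalidated by
	   erase(), so the loop must not advance it afterwards. */
	bool remove_first(std::vector<const void*>& elems, const void* latch)
	{
		for (std::vector<const void*>::iterator it = elems.begin();
		     it != elems.end(); ++it) {
			if (*it != latch) {
				continue;
			}
			elems.erase(it);	/* invalidates `it` ... */
			return true;		/* ... so leave the loop now */
		}
		return false;
	}

The remaining hunks repeat the identical change; the file paths are not
preserved in this view, but the shifted line offsets match the two copies of
sync0sync.cc that MariaDB carried at the time, one under storage/innobase
and one under storage/xtradb:
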
@@ -48,6 +48,8 @@ Created 9/5/1995 Heikki Tuuri
 #include "ha_prototypes.h"
 #include "my_cpu.h"

+#include <vector>
+
 /*
 	REASONS FOR IMPLEMENTING THE SPIN LOCK MUTEX
 	============================================
@@ -229,12 +231,9 @@ static const ulint SYNC_THREAD_N_LEVELS = 10000;
 /** Array for tracking sync levels per thread. */
 struct sync_arr_t {
-	ulint		in_use;		/*!< Number of active cells */
 	ulint		n_elems;	/*!< Number of elements in the array */
-	ulint		max_elems;	/*!< Maximum elements */
-	ulint		next_free;	/*!< ULINT_UNDEFINED or index of next
-					free slot */
-	sync_level_t*	elems;		/*!< Array elements */
+
+	std::vector<sync_level_t> elems;	/*!< Vector of elements */
 };

 /** Mutexes or rw-locks held by a thread */
@@ -1177,10 +1176,9 @@ sync_thread_add_level(
 			SYNC_LEVEL_VARYING, nothing is done */
 	ibool	relock)	/*!< in: TRUE if re-entering an x-lock */
 {
-	ulint		i;
-	sync_level_t*	slot;
 	sync_arr_t*	array;
 	sync_thread_t*	thread_slot;
+	sync_level_t	sync_level;

 	if (!sync_order_checks_on) {
@@ -1205,21 +1203,11 @@ sync_thread_add_level(
 	thread_slot = sync_thread_level_arrays_find_slot();

 	if (thread_slot == NULL) {
-		ulint	sz;
-
-		sz = sizeof(*array)
-			+ (sizeof(*array->elems) * SYNC_THREAD_N_LEVELS);
-
 		/* We have to allocate the level array for a new thread */
-		array = static_cast<sync_arr_t*>(calloc(sz, sizeof(char)));
+		array = static_cast<sync_arr_t*>(calloc(1, sizeof(sync_arr_t)));
 		ut_a(array != NULL);

-		array->next_free = ULINT_UNDEFINED;
-		array->max_elems = SYNC_THREAD_N_LEVELS;
-		array->elems = (sync_level_t*) &array[1];
-
 		thread_slot = sync_thread_level_arrays_find_free();

 		thread_slot->levels = array;
 		thread_slot->id = os_thread_get_curr_id();
 	}
@@ -1446,26 +1434,11 @@ sync_thread_add_level(
 	}

 levels_ok:
-	if (array->next_free == ULINT_UNDEFINED) {
-		ut_a(array->n_elems < array->max_elems);
-
-		i = array->n_elems++;
-	} else {
-		i = array->next_free;
-		array->next_free = array->elems[i].level;
-	}
-
-	ut_a(i < array->n_elems);
-	ut_a(i != ULINT_UNDEFINED);
-
-	++array->in_use;
-
-	slot = &array->elems[i];
-
-	ut_a(slot->latch == NULL);
-
-	slot->latch = latch;
-	slot->level = level;
+	array->n_elems++;
+	sync_level.latch = latch;
+	sync_level.level = level;
+	array->elems.push_back(sync_level);

 	mutex_exit(&sync_thread_mutex);
 }
@@ -1483,7 +1456,6 @@ sync_thread_reset_level(
 {
 	sync_arr_t*	array;
 	sync_thread_t*	thread_slot;
-	ulint		i;

 	if (!sync_order_checks_on) {
@@ -1512,36 +1484,16 @@ sync_thread_reset_level(
 	array = thread_slot->levels;

-	for (i = 0; i < array->n_elems; i++) {
-		sync_level_t*	slot;
-
-		slot = &array->elems[i];
+	for (std::vector<sync_level_t>::iterator it = array->elems.begin(); it != array->elems.end(); ++it) {
+		sync_level_t level = *it;

-		if (slot->latch != latch) {
+		if (level.latch != latch) {
 			continue;
 		}

-		slot->latch = NULL;
-
-		/* Update the free slot list. See comment in sync_level_t
-		for the level field. */
-		slot->level = array->next_free;
-		array->next_free = i;
-
-		ut_a(array->in_use >= 1);
-		--array->in_use;
-
-		/* If all cells are idle then reset the free
-		list. The assumption is that this will save
-		time when we need to scan up to n_elems. */
-		if (array->in_use == 0) {
-			array->n_elems = 0;
-			array->next_free = ULINT_UNDEFINED;
-		}
+		array->elems.erase(it);
+		array->n_elems--;

 		mutex_exit(&sync_thread_mutex);

 		return(TRUE);
 	}
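
One allocation detail worth flagging for readers of the new code:
calloc(1, sizeof(sync_arr_t)) zero-fills the struct but does not run the
std::vector member's constructor. A zero-filled vector happens to behave
like an empty one on common implementations, so this works in practice, but
formally the object is never constructed. A portable variant that keeps the
C-style allocation while constructing the object would use placement new;
the sketch below illustrates that alternative, it is not what the commit
does.

	#include <cstdlib>
	#include <new>
	#include <vector>

	struct sync_level_t {
		const void*	latch;
		unsigned long	level;
	};

	struct sync_arr_t {
		unsigned long	n_elems;
		std::vector<sync_level_t> elems;
	};

	int main() {
		/* Allocate raw zeroed storage, then construct the object
		   in place so the vector member starts in a valid state. */
		void* raw = calloc(1, sizeof(sync_arr_t));
		if (raw == NULL) return 1;
		sync_arr_t* array = new (raw) sync_arr_t();	/* placement new */

		sync_level_t sync_level;
		sync_level.latch = &sync_level;
		sync_level.level = 1;
		array->elems.push_back(sync_level);
		array->n_elems++;

		array->~sync_arr_t();	/* run the destructor before freeing */
		free(raw);
		return 0;
	}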