ndb - bug#21363

  Add extra sleeps (conditionally) if a user-owned Ndb object still exists during shutdown
  Note: This is not in any way optimal, but I don't really know in which order things should be shut down;
        it fixes the problem, though...
parent 4f7df01b
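
For context before the hunks: the core of the fix is a bounded poll at binlog-thread shutdown. The thread first counts the Ndb objects it still owns itself (g_ndb, injector_ndb, schema_ndb, plus the util thread's object), then sleeps in 10ms steps, at most 300 times, until g_ndb_cluster_connection->get_active_ndb_objects() drops to that count. Below is a minimal standalone sketch of that pattern, assuming nothing beyond the standard library; the std::atomic counter and the helper names (active_objects, wait_for_user_ndb_objects) are stand-ins of my own, not NDB API.

  #include <atomic>
  #include <chrono>
  #include <cstdio>
  #include <thread>

  static std::atomic<unsigned> g_active_objects{3};   // stand-in for the facade's object count

  static unsigned active_objects() { return g_active_objects.load(); }

  // Poll at most max_polls times, 10ms apart, until no more than
  // `expected` objects remain; return how many polls were slept.
  static int wait_for_user_ndb_objects(unsigned expected, int max_polls = 300)
  {
    int polls = 0;
    while (polls < max_polls && active_objects() > expected)
    {
      std::this_thread::sleep_for(std::chrono::milliseconds(10));
      polls++;
    }
    return polls;
  }

  int main()
  {
    // Simulate a user thread releasing its Ndb object after ~50ms.
    std::thread user([] {
      std::this_thread::sleep_for(std::chrono::milliseconds(50));
      g_active_objects--;
    });

    unsigned expected = 2;            // e.g. g_ndb + injector_ndb still counted here
    int polls = wait_for_user_ndb_objects(expected);
    std::printf("waited %dms, now %u(%u)\n", 10 * polls, active_objects(), expected);

    user.join();
    return 0;
  }
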
@@ -3858,13 +3858,38 @@ err:
close_thread_tables(thd);
pthread_mutex_lock(&injector_mutex);
/* don't mess with the injector_ndb anymore from other threads */
int ndb_obj_cnt= 1; // g_ndb
ndb_obj_cnt+= injector_ndb == 0 ? 0 : 1;
ndb_obj_cnt+= schema_ndb == 0 ? 0 : 1;
ndb_obj_cnt+= ndbcluster_util_inited ? 1 : 0;
injector_thd= 0;
injector_ndb= 0;
schema_ndb= 0;
pthread_mutex_unlock(&injector_mutex);
thd->db= 0; // as not to try to free memory
sql_print_information("Stopping Cluster Binlog");
if (!ndb_extra_logging)
sql_print_information("Stopping Cluster Binlog");
else
sql_print_information("Stopping Cluster Binlog: %u(%u)",
g_ndb_cluster_connection->get_active_ndb_objects(),
ndb_obj_cnt);
/**
* Add an extra wait loop to make the "user" Ndb objects go away...
* otherwise a user thread can have ongoing SUB_DATA
*/
int sleep_cnt= 0;
while (sleep_cnt < 300 && g_ndb_cluster_connection->get_active_ndb_objects() > ndb_obj_cnt)
{
my_sleep(10000); // 10ms
sleep_cnt++;
}
if (ndb_extra_logging)
sql_print_information("Stopping Cluster Binlog: waited %ums %u(%u)",
10*sleep_cnt, g_ndb_cluster_connection->get_active_ndb_objects(),
ndb_obj_cnt);
if (apply_status_share)
{
free_share(&apply_status_share);
......
@@ -114,6 +114,7 @@ public:
void init_get_next_node(Ndb_cluster_connection_node_iter &iter);
unsigned int get_next_node(Ndb_cluster_connection_node_iter &iter);
unsigned get_active_ndb_objects() const;
#endif
private:
......
@@ -742,9 +742,12 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
const Uint32 oid = sdata->senderData;
NdbEventOperationImpl *op= (NdbEventOperationImpl*)int2void(oid);
if (op->m_magic_number != NDB_EVENT_OP_MAGIC_NUMBER)
if (unlikely(op == 0 || op->m_magic_number != NDB_EVENT_OP_MAGIC_NUMBER))
{
g_eventLogger.error("dropped GSN_SUB_TABLE_DATA due to wrong magic "
"number");
return ;
}
// Accumulate DIC_TAB_INFO for TE_ALTER events
if (sdata->operation == NdbDictionary::Event::_TE_ALTER &&
......
@@ -1265,6 +1265,7 @@ TransporterFacade::get_an_alive_node()
}
TransporterFacade::ThreadData::ThreadData(Uint32 size){
m_use_cnt = 0;
m_firstFree = END_OF_LIST;
expand(size);
}
@@ -1302,6 +1303,7 @@ TransporterFacade::ThreadData::open(void* objRef,
nextFree = m_firstFree;
}
m_use_cnt++;
m_firstFree = m_statusNext[nextFree];
Object_Execute oe = { objRef , fun };
@@ -1318,6 +1320,8 @@ TransporterFacade::ThreadData::close(int number){
number= numberToIndex(number);
assert(getInUse(number));
m_statusNext[number] = m_firstFree;
assert(m_use_cnt);
m_use_cnt--;
m_firstFree = number;
Object_Execute oe = { 0, 0 };
m_objectExecute[number] = oe;
@@ -1325,6 +1329,12 @@ TransporterFacade::ThreadData::close(int number){
return 0;
}
Uint32
TransporterFacade::get_active_ndb_objects() const
{
return m_threads.m_use_cnt;
}
PollGuard::PollGuard(TransporterFacade *tp, NdbWaiter *aWaiter,
Uint32 block_no)
{
......
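
The TransporterFacade hunks above pair the existing free list of object slots with a use counter, so the new get_active_ndb_objects() can simply report how many slots are currently open. Below is a rough standalone sketch of that bookkeeping, assuming a fixed-size slot table with an END_OF_LIST sentinel as in the hunks; SlotTable and its method names are illustrative, not the real ThreadData class.

  #include <cassert>
  #include <cstdio>
  #include <vector>

  static const int END_OF_LIST = -1;

  // Sketch of a slot table with an intrusive free list and a use counter,
  // mirroring what ThreadData::open/close maintain so that the use count
  // reflects the number of live objects.
  class SlotTable {
    std::vector<int> m_next;     // per-slot link: next free slot index
    std::vector<void*> m_obj;    // registered object, null when the slot is free
    int m_firstFree;
    unsigned m_use_cnt;

  public:
    explicit SlotTable(unsigned size)
      : m_next(size, END_OF_LIST), m_obj(size, 0),
        m_firstFree(END_OF_LIST), m_use_cnt(0)
    {
      // Chain every slot onto the free list.
      for (unsigned i = 0; i < size; i++) {
        m_next[i] = m_firstFree;
        m_firstFree = (int)i;
      }
    }

    // open: pop a slot off the free list and count it as in use.
    int open(void* objRef) {
      if (m_firstFree == END_OF_LIST)
        return -1;                     // table full
      int idx = m_firstFree;
      m_firstFree = m_next[idx];       // advance the free list
      m_use_cnt++;
      m_obj[idx] = objRef;
      return idx;
    }

    // close: push the slot back onto the free list, drop the use count.
    void close(int idx) {
      assert(m_obj[idx] != 0);
      m_next[idx] = m_firstFree;
      m_firstFree = idx;
      assert(m_use_cnt);
      m_use_cnt--;
      m_obj[idx] = 0;
    }

    // What the patch exposes as get_active_ndb_objects().
    unsigned use_count() const { return m_use_cnt; }
  };

  int main() {
    SlotTable t(4);
    int a = t.open(&t);
    int b = t.open(&t);
    std::printf("in use: %u\n", t.use_count());   // 2
    t.close(a);
    t.close(b);
    std::printf("in use: %u\n", t.use_count());   // 0
    return 0;
  }
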
@@ -599,5 +599,10 @@ Ndb_cluster_connection::get_next_node(Ndb_cluster_connection_node_iter &iter)
return m_impl.get_next_node(iter);
}
unsigned
Ndb_cluster_connection::get_active_ndb_objects() const
{
return m_impl.m_transporter_facade->get_active_ndb_objects();
}
template class Vector<Ndb_cluster_connection_impl::Node>;