diff --git a/CHANGES.txt b/CHANGES.txt
index b25752d6d773c5dd704a84e9164d3d2191f850b6..3b1c4a43d1489a7e29cfc57f784a538ec9b45341 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,12 +1,62 @@
 Changes
 =======
 
-0.32 (Unreleased)
+0.39 (unreleased)
 -----------------
 
-  * No change yet.
+ * No changes yet.
 
-0.31 (2011-10-06)
+0.38 (2011-12-05)
+-----------------
+
+ * erp5: Switch to percona, as maatkit is obsolete. [Sebastien Robin]
+ * erp5: Improve haproxy configuration. [Sebastien Robin]
+ * erp5: Support sphinxd. [Kazuhiko Shiozaki]
+ * erp5: Improve and make logging more usual. [Sebastien Robin]
+ * erp5: Allow mysql connection from localhost. [Romain Courteaud]
+ * erp5: Allow to control Zope/Zeo cache [Arnaud Fontaine]
+ * erp5: Increase precision in logs [Julien Muchembled]
+ * erp5: Improve erp5 update [Arnaud Fontaine, Rafael Monnerat]
+
+0.37 (2011-11-24)
+-----------------
+
+ * KVM : allow access to several KVM instances without SSL certificate duplicate
+   problem. [Cedric de Saint Martin]
+
+0.36 (2011-11-16)
+-----------------
+
+ * erp5testnode : the testnode code is no longer in the slapos repository
+
+0.35 (2011-11-10)
+-----------------
+
+ * KVM : Promises are now working properly. [Łukasz Nowak]
+ * KVM : Use NoVNC with automatic login. [Cedric de Saint Martin]
+ * KVM : Use websockify egg and remove numpy hack. [Cedric de Saint Martin]
+
+0.34 (2011-11-08)
+-----------------
+
+  * Any LAMP software can specify its own php.ini [Alain Takoudjou]
+  * LAMP : Fix bug where buildout does not have sufficient rights to update
+    application parts. [Alain Takoudjou]
+  * LAMP : Update formatting when returning list of renamed files.
+    [Alain Takoudjou]
+
+0.33 (2011-10-31)
+-----------------
+
+  * erp5 : use percona toolkit instead of maatkit [Sebastien Robin]
+
+0.32 (2011-10-28)
+-----------------
+
+  * LAMP : Recipe can now call lampconfigure from slapos.toolbox which will
+   configure PHP application instance when needed. [Alain Takoudjou Kamdem]
+
+0.31 (2011-10-16)
 -----------------
 
  * Split big redundant recipes into small ones. In order to factorize the code
diff --git a/component/apache/buildout.cfg b/component/apache/buildout.cfg
index 335e87046b48c54b261a22ab808b6a07cf8f1c3a..42d1c7162ba8b1965e77a6ba9a1ba43bc045771f 100644
--- a/component/apache/buildout.cfg
+++ b/component/apache/buildout.cfg
@@ -13,61 +13,23 @@ extends =
   ../sqlite3/buildout.cfg
   ../zlib/buildout.cfg
 
-[apache-no-ssl]
-# inspired on http://old.aclark.net/team/aclark/blog/a-lamp-buildout-for-wordpress-and-other-php-apps/
-recipe = hexagonit.recipe.cmmi
-url = http://mir2.ovh.net/ftp.apache.org/dist//httpd/httpd-2.2.21.tar.bz2
-md5sum = 1696ae62cd879ab1d4dd9ff021a470f2
-configure-options = --enable-authn-alias
-                    --enable-bucketeer
-                    --enable-cache
-                    --enable-case-filter
-                    --enable-case-filter-in
-                    --enable-cgid
-                    --enable-charset-lite
-                    --enable-disk-cache
-                    --enable-echo
-                    --enable-exception-hook
-                    --enable-mods-shared=all
-                    --enable-optional-fn-export
-                    --enable-optional-fn-import
-                    --enable-optional-hook-export
-                    --enable-optional-hook-import
-                    --enable-proxy
-                    --enable-proxy-ajp
-                    --enable-proxy-balancer
-                    --enable-proxy-connect
-                    --enable-proxy-ftp
-                    --enable-proxy-http
-                    --enable-proxy-scgi
-                    --enable-so
-                    --enable-dav
-                    --enable-dav-fs
-                    --disable-ssl
-                    --with-included-apr
-                    --with-z=${zlib:location}
-                    --with-expat=${libexpat:location}
-                    --with-pcre=${pcre:location}
-                    --with-sqlite3=${sqlite3:location}
-                    --with-gdbm=${gdbm:location}
-                    --without-ssl
-                    --without-lber
-                    --without-ldap
-                    --without-ndbm
-                    --without-berkeley-db
-                    --without-pgsql
-                    --without-mysql
-                    --without-sqlite2
-                    --without-oracle
-                    --without-freedts
-                    --without-odbc
-                    --without-iconv
+[apache-CVE-2011-3368.patch]
+recipe = hexagonit.recipe.download
+md5sum = 1ad598213480ddfc239ce6359b7b2c0b
+url = http://www.apache.org/dist/httpd/patches/apply_to_2.2.21/CVE-2011-3368.patch
+filename = ${:_buildout_section_name_}
+download-only = true
 
 [apache]
 # inspired on http://old.aclark.net/team/aclark/blog/a-lamp-buildout-for-wordpress-and-other-php-apps/
 recipe = hexagonit.recipe.cmmi
+depends =
+  ${gdbm:version}
 url = http://mir2.ovh.net/ftp.apache.org/dist//httpd/httpd-2.2.21.tar.bz2
 md5sum = 1696ae62cd879ab1d4dd9ff021a470f2
+patches =
+  ${apache-CVE-2011-3368.patch:location}/${apache-CVE-2011-3368.patch:filename}
+patch-options = -p1
 configure-options = --disable-static
                     --enable-authn-alias
                     --enable-bucketeer
diff --git a/component/cmake/buildout.cfg b/component/cmake/buildout.cfg
index 387e7a2fff43a6498834d2f23a6eec0a670df0f6..51789dfc5dab350c13cd009fae5eb762e1f053ae 100644
--- a/component/cmake/buildout.cfg
+++ b/component/cmake/buildout.cfg
@@ -4,5 +4,5 @@ parts =
 
 [cmake]
 recipe = hexagonit.recipe.cmmi
-url = http://www.cmake.org/files/v2.8/cmake-2.8.3.tar.gz
-md5sum = a76a44b93acf5e3badda9de111385921
+url = http://www.cmake.org/files/v2.8/cmake-2.8.6.tar.gz
+md5sum = 2147da452fd9212bb9b4542a9eee9d5b
diff --git a/component/dropbear/buildout.cfg b/component/dropbear/buildout.cfg
index dbb0709203192325335ec072f569234d5c9b8484..6f18f1785dae68f5d741c9ef48f6f749fa71e2ed 100644
--- a/component/dropbear/buildout.cfg
+++ b/component/dropbear/buildout.cfg
@@ -28,7 +28,9 @@ download-only = true
 [dropbear]
 recipe = hexagonit.recipe.cmmi
 md5sum = 0284ea239083f04c8b874e08e1aca243
-url = http://matt.ucc.asn.au/dropbear/dropbear-0.53.1.tar.bz2
+# XXX: We still use an old version of dropbear instead of the last one
+#      in order to have all patches working.
+url = http://matt.ucc.asn.au/dropbear/releases/dropbear-0.53.1.tar.bz2
 
 configure-options =
   --prefix=${buildout:parts-directory}/${:_buildout_section_name_}
diff --git a/component/fonts/buildout.cfg b/component/fonts/buildout.cfg
index 0a8a59c527feb748bb15d7ffb0631bc4d4d4ef67..a148657775cfe81feb8ffce88d170944d60df17e 100644
--- a/component/fonts/buildout.cfg
+++ b/component/fonts/buildout.cfg
@@ -2,6 +2,7 @@
 parts =
   liberation-fonts
   ipaex-fonts
+  ipa-fonts
 
 [fonts]
 location = ${buildout:parts-directory}/${:_buildout_section_name_}
@@ -16,7 +17,7 @@ url = https://fedorahosted.org/releases/l/i/liberation-fonts/liberation-fonts-tt
 md5sum = 8150db1c6e27cacdfd524b563b85b69e
 destination = ${fonts:location}/${:_buildout_section_name_}
 
-# IPAex Font - Japanese fonts provided by IPA
+# IPAex and IPA Font - Japanese fonts provided by IPA
 # http://ossipedia.ipa.go.jp/ipafont/index.html
 [ipaex-fonts]
 recipe = hexagonit.recipe.download
@@ -24,3 +25,10 @@ strip-top-level-dir = true
 url = http://info.openlab.ipa.go.jp/ipafont/fontdata/IPAexfont00103.zip
 md5sum = ac67b2fc3aab7f683d89f0070df284e7
 destination = ${fonts:location}/${:_buildout_section_name_}
+
+[ipa-fonts]
+recipe = hexagonit.recipe.download
+strip-top-level-dir = true
+url = http://info.openlab.ipa.go.jp/ipafont/fontdata/IPAfont00303.zip
+md5sum = 39a828acf27790adbe4944dfb4d94bb1
+destination = ${fonts:location}/${:_buildout_section_name_}
diff --git a/component/freetype/buildout.cfg b/component/freetype/buildout.cfg
index 2ca184fa05b9b4145983a94c44f7e31210a82fb5..c358b3e996713b4d215af71d95eb238c3b9a318e 100644
--- a/component/freetype/buildout.cfg
+++ b/component/freetype/buildout.cfg
@@ -10,8 +10,8 @@ parts =
 
 [freetype]
 recipe = hexagonit.recipe.cmmi
-url = http://download.savannah.gnu.org/releases/freetype/freetype-2.4.6.tar.bz2
-md5sum = 5e6510613f612809d2d7862592b92ab7
+url = http://download.savannah.gnu.org/releases/freetype/freetype-2.4.8.tar.bz2
+md5sum = dbf2caca1d3afd410a29217a9809d397
 configure-options =
   --disable-static
 environment =
diff --git a/component/gdbm/buildout.cfg b/component/gdbm/buildout.cfg
index c97171d9b97a8444d0d9ebd7a034f7e2fbb03ee6..f44aabd4b693cfd7098bb15bb880b8283f2ab095 100644
--- a/component/gdbm/buildout.cfg
+++ b/component/gdbm/buildout.cfg
@@ -2,25 +2,18 @@
 parts =
   gdbm
 
-[gdbm-nochange-patch-download]
-recipe = hexagonit.recipe.download
-url = ${:_profile_base_location_}/${:filename}
-md5sum = fafa6cae0afbf2b5afb9ef3b8e3035a4
-download-only = true
-filename = gdbm-Makefile.in-nochange.patch
-
 [gdbm]
 recipe = hexagonit.recipe.cmmi
-url = ftp://ftp.gnu.org/gnu/gdbm/gdbm-1.8.3.tar.gz
-md5sum = 1d1b1d5c0245b1c00aff92da751e9aa1
-patches = ${gdbm-nochange-patch-download:location}/${gdbm-nochange-patch-download:filename}
+version = 1.9.1
+url = ftp://ftp.gnu.org/gnu/gdbm/gdbm-${:version}.tar.gz
+md5sum = 59f6e4c4193cb875964ffbe8aa384b58
 configure-options =
   --disable-static
 # install as parts/gdbm/include/gdbm/*.h etc. because some softwares
 # (eg. python's dbmmodule.c extension) assume the location like this.
 includedir = ${buildout:parts-directory}/${:_buildout_section_name_}/include
 make-targets =
-  install install-compat includedir=${:includedir}/gdbm && rm -f ${:includedir}/*.h && ln -sf gdbm/gdbm.h ${:includedir}/gdbm.h
+  install includedir=${:includedir}/gdbm && rm -f ${:includedir}/*.h && ln -sf gdbm/gdbm.h ${:includedir}/gdbm.h
 # it seems that parallel build sometimes fails for gdbm.
 make-options =
   -j1
diff --git a/component/ghostscript/buildout.cfg b/component/ghostscript/buildout.cfg
index 90f4b639a6e3810106d3221e81b10eb61b2eadf7..85ff413060e3c14faf7a6ee981e0e5de3577f03a 100644
--- a/component/ghostscript/buildout.cfg
+++ b/component/ghostscript/buildout.cfg
@@ -7,16 +7,8 @@ extends =
 
 parts = ghostscript
 
-[ghostscript-hooks-download]
-recipe = hexagonit.recipe.download
-url = ${:_profile_base_location_}/${:filename}
-filename = ghostscript-hooks.py
-md5sum = 731475648c91507bd1dfe2a61ee84552
-download-only = true
-
 [ghostscript-common]
 recipe = hexagonit.recipe.cmmi
-pre-configure-hook = ${ghostscript-hooks-download:location}/${ghostscript-hooks-download:filename}:pre_configure_hook
 configure-options =
   --disable-cups
   --disable-cairo
@@ -32,17 +24,9 @@ environment =
   LD_LIBRARY_PATH=${fontconfig:location}/lib
 
 [ghostscript]
-# we prefer ghostscript-8 for now, because ghostscript-9.00 seems to have a
-# problem with Japanese fonts if -dTextAlphaBits=4 is specified by
-# imagemagick.
-<= ghostscript-8
+<= ghostscript-9
 
 [ghostscript-9]
 <= ghostscript-common
-url = http://ghostscript.com/releases/ghostscript-9.00.tar.gz
-md5sum = a402462478b4cdda3e1816899227b845
-
-[ghostscript-8]
-<= ghostscript-common
-url = http://www.nexedi.org/static/tarballs/ghostscript/ghostscript-8.71-no-looping-symlink.tar.bz2
-md5sum = 34639af3ffe8594f2c5ea944dfbe1d78
+url = http://downloads.ghostscript.com/public/ghostscript-9.04.tar.bz2
+md5sum = 9f6899e821ab6d78ab2c856f10fa3023
diff --git a/component/ghostscript/ghostscript-hooks.py b/component/ghostscript/ghostscript-hooks.py
deleted file mode 100644
index c39936894def08a085a2928ca03995464899e351..0000000000000000000000000000000000000000
--- a/component/ghostscript/ghostscript-hooks.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import os
-def pre_configure_hook(oprtions, buildout):
-  # fixes python bug related to not creating symlink contained in tarfiles
-  for missing in 'configure.ac', 'Makefile.in':
-    if not os.path.exists(os.path.join(os.path.curdir, missing)):
-      os.symlink(os.path.join(os.path.curdir, 'base', missing),
-          os.path.join(os.path.curdir, missing))
diff --git a/component/gnutls/buildout.cfg b/component/gnutls/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..4bb35dd949bbca6932c0385928880cca3f4998e3
--- /dev/null
+++ b/component/gnutls/buildout.cfg
@@ -0,0 +1,34 @@
+[buildout]
+extends = 
+  ../readline/buildout.cfg
+  ../ncurses/buildout.cfg
+  ../zlib/buildout.cfg
+
+parts = gnutls
+
+[gpg-error]
+recipe = hexagonit.recipe.cmmi
+url = ftp://ftp.gnupg.org/gcrypt/libgpg-error/libgpg-error-1.10.tar.gz
+md5sum = 7c2710ef439f82ac429b88fec88e9a4c
+
+[gcrypt]
+recipe = hexagonit.recipe.cmmi
+url = ftp://ftp.gnupg.org/gcrypt/libgcrypt/libgcrypt-1.4.6.tar.gz
+md5sum = bfd45922eefb8a24d598af77366220d4
+configure-options =
+  --with-gpg-error-prefix=${gpg-error:location}
+environment =
+  CPPFLAGS=-I${gpg-error:location}/include
+  LDFLAGS=-Wl,-rpath -Wl,${gpg-error:location}/lib -Wl,${gpg-error:location}/lib/libgpg-error.so.0
+
+[gnutls]
+# XXX-Cedric : update to latest gnutls
+recipe = hexagonit.recipe.cmmi
+url = ftp://ftp.gnupg.org/gcrypt/gnutls/gnutls-2.8.6.tar.bz2
+md5sum = eb0a6d7d3cb9ac684d971c14f9f6d3ba
+configure-options =
+  --with-libgcrypt-prefix=${gcrypt:location}
+environment =
+  CPPFLAGS=-I${zlib:location}/include -I${readline:location}/include -I${ncurses:location}/include -I${ncurses:location}/include/ncursesw -I${gcrypt:location}/include -I${gpg-error:location}/include
+  LDFLAGS=-L${readline:location}/lib -L${ncurses:location}/lib -L${gcrypt:location}/lib -Wl,-rpath -Wl,${zlib:location}/lib -Wl,-rpath -Wl,${readline:location}/lib -Wl,-rpath -Wl,${ncurses:location}/lib -Wl,-rpath -Wl,${gcrypt:location}/lib -Wl,-rpath -Wl,${gpg-error:location}/lib -Wl,${gcrypt:location}/lib/libgcrypt.so.11
+  PKG_CONFIG=${zlib:location}/lib/pkgconfig
diff --git a/component/grep/buildout.cfg b/component/grep/buildout.cfg
index 589bb6cb490c4ba6dc5bf3742f91c4dc111b946b..231f7bca91a66dcc064f6b65318594a36bae8a2a 100644
--- a/component/grep/buildout.cfg
+++ b/component/grep/buildout.cfg
@@ -6,8 +6,8 @@ parts =
 
 [grep]
 recipe = hexagonit.recipe.cmmi
-url = http://ftp.gnu.org/gnu/grep/grep-2.8.tar.gz
-md5sum = cb2dfc502c5afc7a4a6e5f6cefd6850e
+url = http://ftp.gnu.org/gnu/grep/grep-2.9.tar.gz
+md5sum = 03e3451a38b0d615cb113cbeaf252dc0
 environment =
-  PKG_CONFIG_PATH=${pcre:location}/lib/pkgconfig
-  LDFLAGS =-Wl,--as-needed -Wl,-rpath=${pcre:location}/lib
+  CPPFLAGS=-I${pcre:location}/include
+  LDFLAGS=-L${pcre:location}/lib -Wl,-rpath=${pcre:location}/lib
diff --git a/component/groonga/buildout.cfg b/component/groonga/buildout.cfg
index 45608460ecf6affa1aa6219d7bc1be05539cf9a7..49b8b0f94cef7126a2ec67c637c1f7c147cc20ae 100644
--- a/component/groonga/buildout.cfg
+++ b/component/groonga/buildout.cfg
@@ -8,8 +8,8 @@ parts =
 
 [groonga]
 recipe = hexagonit.recipe.cmmi
-url = http://packages.groonga.org/source/groonga/groonga-1.2.5.tar.gz
-md5sum = 7e608406677b7a3f91e287acc0c718c0
+url = http://packages.groonga.org/source/groonga/groonga-1.2.8.tar.gz
+md5sum = a319b1f3a55cbf250ef5255f5c51ff46
 configure-options =
   --disable-static
   --disable-glibtest
diff --git a/component/handlersocket/HandlerSocket-Plugin-for-MySQL-1.0.6-mariadb.patch b/component/handlersocket/HandlerSocket-Plugin-for-MySQL-1.0.6-mariadb.patch
deleted file mode 100644
index a3d817294cb2d09d989283e34efaf000fcde6bf0..0000000000000000000000000000000000000000
--- a/component/handlersocket/HandlerSocket-Plugin-for-MySQL-1.0.6-mariadb.patch
+++ /dev/null
@@ -1,37 +0,0 @@
---- a/configure.ac
-+++ b/configure.ac
-@@ -28,7 +28,7 @@
-         MYSQL_INC="$MYSQL_INC -I$ac_mysql_source_dir/regex"
-         MYSQL_INC="$MYSQL_INC -I$ac_mysql_source_dir"
-         AC_SUBST(MYSQL_INC)
--        MYSQL_SOURCE_VERSION=`cat $ac_mysql_source_dir/configure.in | grep "\[[MySQL Server\]]" | sed -e "s|.*\([[0-9]]\+\.[[0-9]]\+\.[[0-9]]\+[[0-9a-zA-Z\_\-]]*\).*|\1|"`
-+        MYSQL_SOURCE_VERSION=`cat $ac_mysql_source_dir/configure.in | grep "\[[MariaDB Server\]]" | sed -e "s|.*\([[0-9]]\+\.[[0-9]]\+\.[[0-9]]\+[[0-9a-zA-Z\_\-]]*\).*|\1|"`
-         AC_MSG_RESULT([yes: Using $ac_mysql_source_dir, version $MYSQL_SOURCE_VERSION])
-       else
-         AC_MSG_ERROR([invalid MySQL source directory: $ac_mysql_source_dir])
---- a/handlersocket/database.cpp
-+++ b/handlersocket/database.cpp
-@@ -686,19 +686,19 @@
-   for (uint32_t i = 0; i < limit + skip; ++i) {
-     if (i == 0) {
-       const key_part_map kpm = (1U << args.kvalslen) - 1;
--      r = hnd->index_read_map(table->record[0], key_buf, kpm, find_flag);
-+      r = hnd->ha_index_read_map(table->record[0], key_buf, kpm, find_flag);
-     } else {
-       switch (find_flag) {
-       case HA_READ_BEFORE_KEY:
-       case HA_READ_KEY_OR_PREV:
--	r = hnd->index_prev(table->record[0]);
-+	r = hnd->ha_index_prev(table->record[0]);
- 	break;
-       case HA_READ_AFTER_KEY:
-       case HA_READ_KEY_OR_NEXT:
--	r = hnd->index_next(table->record[0]);
-+	r = hnd->ha_index_next(table->record[0]);
- 	break;
-       case HA_READ_KEY_EXACT:
--	r = hnd->index_next_same(table->record[0], key_buf, kplen_sum);
-+	r = hnd->ha_index_next_same(table->record[0], key_buf, kplen_sum);
- 	break;
-       default:
- 	r = HA_ERR_END_OF_FILE; /* to finish the loop */
diff --git a/component/handlersocket/buildout.cfg b/component/handlersocket/buildout.cfg
deleted file mode 100644
index 72a260caa78c202aad98ec3bc8e95fb6f018ad97..0000000000000000000000000000000000000000
--- a/component/handlersocket/buildout.cfg
+++ /dev/null
@@ -1,36 +0,0 @@
-# Handlersocket - a NoSQL plugin for MySQL.
-# http://github.com/ahiguti/HandlerSocket-Plugin-for-MySQL
-
-[buildout]
-extends =
-  ../autoconf/buildout.cfg
-  ../automake/buildout.cfg
-  ../libtool/buildout.cfg
-  ../mariadb/buildout.cfg
-
-parts =
-  handlersocket
-
-[handlersocket-mariadb-patch]
-recipe = hexagonit.recipe.download
-url = ${:_profile_base_location_}/${:filename}
-md5sum = 2654feea2e867c898b741ac0f0aa8e14
-filename = HandlerSocket-Plugin-for-MySQL-1.0.6-mariadb.patch
-download-only = true
-
-[handlersocket]
-recipe = hexagonit.recipe.cmmi
-url = http://github.com/ahiguti/HandlerSocket-Plugin-for-MySQL/tarball/1.0.6
-md5sum = 57f5c131e3d29701b01dd92c35ed25fd
-patch-options = -p1
-patches =
-  ${handlersocket-mariadb-patch:location}/${handlersocket-mariadb-patch:filename}
-configure-command =
-  ACLOCAL_ARGS=-I${libtool:location}/share/aclocal ./autogen.sh && ./configure
-configure-options =
-  --prefix=${buildout:parts-directory}/${:_buildout_section_name_}
-  --with-mysql-source=${mariadb:location}__compile__/mariadb-${mariadb:version}
-  --with-mysql-bindir=${mariadb:location}/bin
-  --with-mysql-plugindir=${mariadb:location}/lib/mysql/plugin
-environment =
-  PATH =${autoconf:location}/bin:${automake-1.11:location}/bin:${libtool:location}/bin:%(PATH)s
diff --git a/component/imagemagick/buildout.cfg b/component/imagemagick/buildout.cfg
index 216d2389dfdef7b923e854a9be3f5d5b0559e491..a3d90bae4b8013307c31d2a0b2fd5e1f33709e20 100644
--- a/component/imagemagick/buildout.cfg
+++ b/component/imagemagick/buildout.cfg
@@ -35,8 +35,8 @@ filename = imagemagick-6.6.7-4-without-lzma.patch
 
 [imagemagick]
 recipe = hexagonit.recipe.cmmi
-url = ftp://ftp.imagemagick.org/pub/ImageMagick/ImageMagick-6.7.2-10.tar.bz2
-md5sum = 073ec5d7f2a22db96a0e87e4322b75f9
+url = ftp://ftp.imagemagick.org/pub/ImageMagick/ImageMagick-6.7.3-1.tar.bz2
+md5sum = 89d378733d89bc61c04bc0fdc140a3a7
 configure-options =
   --disable-static
   --without-x
diff --git a/component/java/buildout.cfg b/component/java/buildout.cfg
index 42fe4a8c76b9e51a762417c9ebfaf84c741149c1..fdfd350db67e0bcf9a5f392e12a8ecb6e411850d 100644
--- a/component/java/buildout.cfg
+++ b/component/java/buildout.cfg
@@ -1,9 +1,25 @@
 [buildout]
 
-parts = 
+parts =
   java
-  
+
+[jdk-6u27-no-user-interaction-patch]
+recipe = hexagonit.recipe.download
+url = ${:_profile_base_location_}/${:filename}
+download-only = true
+filename = jdk-6u27-no-user-interaction.patch
+md5sum = 4c4303240647a114d07f3c411b2e6b5b
+
 [java]
+<= java-re
+
+[java-re]
+<= java-re-1.6.0
+
+[java-sdk]
+<= java-sdk-1.6.0
+
+[java-re-1.6.0]
 recipe = slapos.recipe.build
 slapos_promisee =
   directory:bin
@@ -28,3 +44,49 @@ script =
   self.cleanup_dir_list.append(extract_dir)
   workdir = guessworkdir(extract_dir)
   self.copyTree(os.path.join(workdir, "jre1.6.0_27"), "%(location)s")
+
+[java-sdk-1.6.0]
+recipe = slapos.recipe.build
+slapos_promisee =
+  directory:bin
+  directory:lib
+  directory:man
+  directory:plugin
+  directory:javaws
+  file:jre/lib/rt.jar
+  file:bin/java
+x86 = http://download.oracle.com/otn-pub/java/jdk/6u27-b07/jdk-6u27-linux-i586.bin bdb5f05bd20c6aa9a4729726191bf6fd
+x86-64 = http://download.oracle.com/otn-pub/java/jdk/6u27-b07/jdk-6u27-linux-x64.bin 94f93a3ff03f824a238ecd79ad90433e
+script =
+  if not self.options.get('url'): self.options['url'], self.options['md5sum'] = self.options[guessPlatform()].split(' ')
+  download_file = self.download(self.options['url'], self.options.get('md5sum'))
+  extract_dir = tempfile.mkdtemp(self.name)
+  os.chdir(extract_dir)
+  (download_dir, filename) = os.path.split(download_file)
+  auto_extract_bin = os.path.join(extract_dir, filename)
+  shutil.move(download_file, auto_extract_bin)
+  os.chmod(auto_extract_bin, 0777)
+  subprocess.call(["patch", auto_extract_bin, "-i", "${jdk-6u27-no-user-interaction-patch:location}/${jdk-6u27-no-user-interaction-patch:filename}"])
+  subprocess.call([auto_extract_bin])
+  self.cleanup_dir_list.append(extract_dir)
+  workdir = guessworkdir(extract_dir)
+  self.copyTree(os.path.join(workdir, "jdk1.6.0_27"), "%(location)s")
+
+[java-sdk-1.7.0]
+recipe = slapos.recipe.build
+slapos_promisee =
+  directory:bin
+  directory:lib
+  directory:man
+  directory:jre
+  file:jre/lib/rt.jar
+  file:bin/java
+  file:bin/javac
+x86 = http://download.oracle.com/otn-pub/java/jdk/7/jdk-7-linux-i586.tar.gz f97244a104f03731e5ff69f0dd5a9927
+x86-64 = http://download.oracle.com/otn-pub/java/jdk/7/jdk-7-linux-x64.tar.gz b3c1ef5faea7b180469c129a49762b64
+script =
+  if not self.options.get('url'): self.options['url'], self.options['md5sum'] = self.options[guessPlatform()].split(' ')
+  extract_dir = self.extract(self.download(self.options['url'], self.options.get('md5sum')))
+  workdir = guessworkdir(extract_dir)
+  self.copyTree(workdir, "%(location)s")
+
diff --git a/component/java/jdk-6u27-no-user-interaction.patch b/component/java/jdk-6u27-no-user-interaction.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b217852d4e9b4ea42bdf9992c58bb5dab4b0dd6c
--- /dev/null
+++ b/component/java/jdk-6u27-no-user-interaction.patch
@@ -0,0 +1,20 @@
+--- jdk-6u27-linux-x64.bin.orig	2011-09-27 11:02:14.000000000 +0200
++++ jdk-6u27-linux-x64.bin	2011-09-27 10:38:01.000000000 +0200
+@@ -81,7 +81,7 @@
+ trap 'rm -f $outname; exit 1' HUP INT QUIT TERM
+ echo "Unpacking..."
+ tail ${tail_args} +189 "$0" > $outname
+-if [ -x /usr/bin/sum ]; then
++if [ -x /usr/bin/null ]; then
+     echo "Checksumming..."
+ 
+     sum=`/usr/bin/sum $outname`
+@@ -169,7 +169,7 @@
+     fi
+ 
+     # Service Tag support and JDK product registration
+-    register_JDK "$javahome" "${BINARY_NAME}" "$1"
++    # register_JDK "$javahome" "${BINARY_NAME}" "$1"
+ 
+ else
+     if [ "$1" = "-x" ]; then
diff --git a/component/libaio/buildout.cfg b/component/libaio/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..4d46fe3c4b48486c708addcfa667a751c05f1c35
--- /dev/null
+++ b/component/libaio/buildout.cfg
@@ -0,0 +1,12 @@
+[buildout]
+parts =
+  libaio
+
+[libaio]
+recipe = hexagonit.recipe.cmmi
+# originally located at http://www.kernel.org/pub/linux/libs/aio/
+url = http://ftp.free.fr/mirrors/ftp.debian.org/pool/main/liba/libaio/libaio_0.3.109.orig.tar.gz
+md5sum = 435a5b16ca6198eaf01155263d855756
+location = ${buildout:parts-directory}/${:_buildout_section_name_}
+configure-command = echo
+make-options = prefix=${:location}
diff --git a/component/libpng/buildout.cfg b/component/libpng/buildout.cfg
index d7ead5bdf7c3e6d3baab4ddc6408ccabffbfa5c6..a78e8143ea8c5762c182fb003a8388bcbacbe591 100644
--- a/component/libpng/buildout.cfg
+++ b/component/libpng/buildout.cfg
@@ -15,5 +15,5 @@ environment =
 
 [libpng]
 <= libpng-common
-url = http://download.sourceforge.net/libpng/libpng-1.5.4.tar.bz2
-md5sum = b43afe39237b69859522455b215f9e85
+url = http://download.sourceforge.net/libpng/libpng-1.5.5.tar.bz2
+md5sum = 3270bf2990c3174ae939388398de751e
diff --git a/component/libreoffice-bin/buildout.cfg b/component/libreoffice-bin/buildout.cfg
index c29f2b76c8adfed4901c9d0b8de2b2d2df8b9cbb..5cf716eb14a95f390f308bf4c6451f0a4be10dca 100644
--- a/component/libreoffice-bin/buildout.cfg
+++ b/component/libreoffice-bin/buildout.cfg
@@ -12,11 +12,11 @@ find-links =
 [libreoffice-bin]
 recipe = slapos.recipe.build
 # here, two %s are used, first one is for directory name (eg. x86_64), and second one is for filename (eg. x86-64).
-url = http://download.documentfoundation.org/libreoffice/stable/3.4.3/rpm/%s/LibO_3.4.3_Linux_%s_install-rpm_en-US.tar.gz
+url = http://download.documentfoundation.org/libreoffice/stable/3.4.4/rpm/%s/LibO_3.4.4_Linux_%s_install-rpm_en-US.tar.gz
 
 # supported architectures md5sums
-md5sum_x86 = ae1b2b387dcef513c378cc95b255affc
-md5sum_x86-64 = b2d6a902182c1af82ca088fbb665d0e3
+md5sum_x86 = 529c60e161d0c23405723f4a3cd1e046
+md5sum_x86-64 = fc6cb85312d6e11a7ab6ddb1bc4e79cc
 
 # where office code can be found?
 officedir = libreoffice3.4
@@ -37,7 +37,7 @@ script =
   rpmsdir = os.path.join(workdir, [q for q in os.listdir(workdir) if q == 'RPMS'][0])
   rpmlist = [os.path.join(rpmsdir, q) for q in os.listdir(rpmsdir) if q.endswith('.rpm') and 'javafilter' not in q and 'xsltfilter' not in q]
   [self.pipeCommand([[sys.executable, '${:rpm2cpio}', rpm], ['${:cpio}', '-idum']], cwd=storagedir) for rpm in rpmlist]
-  self.copyTree(os.path.join(storagedir, 'opt', '${:officedir}'), location, ['basis3.4', 'basis3.3', 'ure'])
+  self.copyTree(os.path.join(storagedir, 'opt', '${:officedir}'), location, ['basis3.4', 'ure'])
 
 # helper binaries
 cpio = ${cpio:location}/bin/cpio
diff --git a/component/libtool/buildout.cfg b/component/libtool/buildout.cfg
index d50fa57038d95f16b36a514fde80e3e4d854ed37..8d6256665e45aac0682f028fa47575c4d3c22a1c 100644
--- a/component/libtool/buildout.cfg
+++ b/component/libtool/buildout.cfg
@@ -3,7 +3,7 @@ parts = libtool
 
 [libtool]
 recipe = hexagonit.recipe.cmmi
-md5sum = b32b04148ecdd7344abc6fe8bd1bb021
-url = http://ftp.gnu.org/gnu/libtool/libtool-2.4.tar.gz
+md5sum = d2f3b7d4627e69e13514a40e72a24d50
+url = http://ftp.gnu.org/gnu/libtool/libtool-2.4.2.tar.gz
 configure-options =
   --disable-static
diff --git a/component/maatkit/buildout.cfg b/component/maatkit/buildout.cfg
deleted file mode 100644
index 7f5d7d5efeecdb5fd6ded97ba218f40495b99549..0000000000000000000000000000000000000000
--- a/component/maatkit/buildout.cfg
+++ /dev/null
@@ -1,18 +0,0 @@
-[buildout]
-extends =
-  ../perl/buildout.cfg
-  ../perl-DBI/buildout.cfg
-  ../perl-DBD-MySQL/buildout.cfg
-parts =
-  maatkit
-
-[maatkit]
-recipe = hexagonit.recipe.cmmi
-depends =
-  ${perl:version}
-  ${perl-DBI:version}
-  ${perl-DBD-MySQL:version}
-url = http://maatkit.googlecode.com/files/maatkit-7540.tar.gz
-md5sum = 55457f98500b096a6bf549356d3445fe
-configure-command =
-  ${perl:location}/bin/perl Makefile.PL
diff --git a/component/mariadb/buildout.cfg b/component/mariadb/buildout.cfg
index 8c36bf7bfc67eb8251840536b0217e2dcc968500..919e032cb3c3c6b88262b729a8315b429025e533 100644
--- a/component/mariadb/buildout.cfg
+++ b/component/mariadb/buildout.cfg
@@ -5,6 +5,7 @@
 extends =
   ../zlib/buildout.cfg
   ../groonga/buildout.cfg
+  ../libevent/buildout.cfg
   ../ncurses/buildout.cfg
   ../pkgconfig/buildout.cfg
   ../readline/buildout.cfg
@@ -12,6 +13,13 @@ extends =
 parts =
   mariadb
 
+[mariadb-no_test-patch]
+recipe = hexagonit.recipe.download
+url = ${:_profile_base_location_}/${:filename}
+md5sum = d65f61829cfbcd5062f49db2b00bd6fe
+filename = mysql_create_system_tables__no_test.patch
+download-only = true
+
 [mariadb]
 recipe = hexagonit.recipe.cmmi
 version = 5.3.3-rc
@@ -31,19 +39,30 @@ configure-options =
   --with-pic
   --with-fast-mutexes
   --with-charset=utf8
+  --with-extra-charsets=complex
   --with-collation=utf8_unicode_ci
+  --with-big-tables
+  --with-embedded-server
+  --with-plugins=max-no-ndb
+  --with-aria-tmp-tables
+  --without-plugin-innodb_plugin
+  --without-plugin-oqgraph
   --without-readline
   --with-ssl
+  --with-libevent=${libevent:location}
   --with-zlib-dir=${zlib:location}
 
+patch-options = -p0
+patches =
+  ${mariadb-no_test-patch:location}/${mariadb-no_test-patch:filename}
 environment =
   CPPFLAGS=-I${ncurses:location}/include -I${readline5:location}/include
   LDFLAGS=-Wl,-rpath=${libevent:location}/lib -L${ncurses:location}/lib -Wl,-rpath=${ncurses:location}/lib -L${readline5:location}/lib -Wl,-rpath=${readline5:location}/lib -Wl,-rpath=${zlib:location}/lib
 
-[groonga-storage-engine-mariadb]
+[mroonga-mariadb]
 recipe = hexagonit.recipe.cmmi
-url = http://github.com/downloads/mroonga/mroonga/groonga-storage-engine-0.5.tar.gz
-md5sum = 52fed75d97a91f239750a1011ea9e468
+url = https://github.com/downloads/mroonga/mroonga/mroonga-1.10.tar.gz
+md5sum = 6a712b2b20eddc65d918dabd8fba590f
 configure-options =
   --with-mysql-source=${mariadb:location}__compile__/mariadb-${mariadb:version}
   --with-mysql-config=${mariadb:location}/bin/mysql_config
diff --git a/component/mariadb/groonga-storage-engine-1.0.1-mariadb.patch b/component/mariadb/groonga-storage-engine-1.0.1-mariadb.patch
new file mode 100644
index 0000000000000000000000000000000000000000..98d6188fc91abac6dd6530cdd7aa113ca8c72de1
--- /dev/null
+++ b/component/mariadb/groonga-storage-engine-1.0.1-mariadb.patch
@@ -0,0 +1,382 @@
+--- groonga-storage-engine-1.0.1/ha_mroonga.cc	2011-10-28 07:19:15.506715507 +0200
++++ groonga-storage-engine-1.0.1/ha_mroonga.cc	2011-11-02 11:37:03.095096227 +0100
+@@ -77,6 +77,9 @@
+ extern "C" {
+ #endif
+ 
++/* groonga's internal functions */
++const char *grn_obj_get_value_(grn_ctx *ctx, grn_obj *obj, grn_id id, uint32 *size);
++
+ /* global variables */
+ pthread_mutex_t mrn_db_mutex;
+ pthread_mutex_t mrn_log_mutex;
+@@ -109,7 +112,6 @@
+ static bool mrn_logfile_opened = false;
+ grn_log_level mrn_log_level_default = GRN_LOG_DEFAULT_LEVEL;
+ ulong mrn_log_level = (ulong) mrn_log_level_default;
+-char mrn_default_parser_name[MRN_MAX_KEY_SIZE];
+ char *mrn_default_parser;
+ 
+ static void mrn_logger_func(int level, const char *time, const char *title,
+@@ -228,13 +230,12 @@
+           "default parser changed from '%s' to '%s'",
+           old_value, new_value);
+   grn_ctx_fin(&ctx);
+-  strcpy(mrn_default_parser_name, new_value);
+-  mrn_default_parser = mrn_default_parser_name;
++  strncpy(mrn_default_parser, new_value, MRN_MAX_KEY_SIZE - 1);
+   DBUG_VOID_RETURN;
+ }
+ 
+ static MYSQL_SYSVAR_STR(default_parser, mrn_default_parser,
+-                        PLUGIN_VAR_RQCMDARG,
++                        PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC,
+                         "default fulltext parser",
+                         NULL,
+                         mrn_default_parser_update,
+@@ -908,6 +909,15 @@
+                    field->charset());
+       break;
+     }
++  case MYSQL_TYPE_BLOB:
++    {
++      GRN_VOID_INIT(&buf);
++      uint32 len;
++      const char *val = grn_obj_get_value_(ctx, col, id, &len);
++      Field_blob *blob = (Field_blob *)field;
++      blob->set_ptr((uchar *)&len, (uchar *)val);
++      break;
++    }
+   default: //strings etc..
+     {
+       GRN_TEXT_INIT(&buf,0);
+@@ -1010,6 +1020,9 @@
+     goto error_allocated_open_tables_hash_init;
+   }
+ 
++  mrn_default_parser = (char *)my_malloc(MRN_MAX_KEY_SIZE, MYF(MY_WME));
++  strncpy(mrn_default_parser, MRN_PARSER_DEFAULT, MRN_MAX_KEY_SIZE - 1);
++
+   return 0;
+ 
+ error_allocated_open_tables_hash_init:
+@@ -4422,7 +4435,7 @@
+   DBUG_RETURN(error);
+ }
+ 
+-int ha_mroonga::wrapper_index_read_map(uchar * buf, const uchar * key,
++int ha_mroonga::wrapper_index_read_map(uchar *buf, const uchar *key,
+                                        key_part_map keypart_map,
+                                        enum ha_rkey_function find_flag)
+ {
+@@ -4442,7 +4455,11 @@
+     MRN_SET_WRAP_TABLE_KEY(this, table);
+     if (fulltext_searching)
+       set_pk_bitmap();
++#ifdef MRN_HANDLER_HAVE_HA_INDEX_READ_MAP
++    error = wrap_handler->ha_index_read_map(buf, key, keypart_map, find_flag);
++#else
+     error = wrap_handler->index_read_map(buf, key, keypart_map, find_flag);
++#endif
+     MRN_SET_BASE_SHARE_KEY(share, table->s);
+     MRN_SET_BASE_TABLE_KEY(this, table);
+   }
+@@ -4557,7 +4574,7 @@
+   DBUG_RETURN(error);
+ }
+ 
+-int ha_mroonga::index_read_map(uchar * buf, const uchar * key,
++int ha_mroonga::index_read_map(uchar *buf, const uchar *key,
+                                key_part_map keypart_map,
+                                enum ha_rkey_function find_flag)
+ {
+@@ -4572,6 +4589,7 @@
+   DBUG_RETURN(error);
+ }
+ 
++#ifdef MRN_HANDLER_HAVE_INDEX_READ_LAST_MAP
+ int ha_mroonga::wrapper_index_read_last_map(uchar *buf, const uchar *key,
+                                             key_part_map keypart_map)
+ {
+@@ -4658,6 +4676,7 @@
+   }
+   DBUG_RETURN(error);
+ }
++#endif
+ 
+ int ha_mroonga::wrapper_index_next(uchar *buf)
+ {
+@@ -6226,7 +6245,11 @@
+ }
+ 
+ ha_rows ha_mroonga::wrapper_multi_range_read_info(uint keyno, uint n_ranges,
+-                                                  uint keys, uint *bufsz,
++                                                  uint keys,
++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
++                                                  uint key_parts,
++#endif
++                                                  uint *bufsz,
+                                                   uint *flags, COST_VECT *cost)
+ {
+   MRN_DBUG_ENTER_METHOD();
+@@ -6236,6 +6259,9 @@
+   if (fulltext_searching)
+     set_pk_bitmap();
+   rows = wrap_handler->multi_range_read_info(keyno, n_ranges, keys,
++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
++                                             key_parts,
++#endif
+                                              bufsz, flags, cost);
+   MRN_SET_BASE_SHARE_KEY(share, table->s);
+   MRN_SET_BASE_TABLE_KEY(this, table);
+@@ -6243,16 +6269,26 @@
+ }
+ 
+ ha_rows ha_mroonga::storage_multi_range_read_info(uint keyno, uint n_ranges,
+-                                                  uint keys, uint *bufsz,
++                                                  uint keys,
++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
++                                                  uint key_parts,
++#endif
++                                                  uint *bufsz,
+                                                   uint *flags, COST_VECT *cost)
+ {
+   MRN_DBUG_ENTER_METHOD();
+   ha_rows rows = handler::multi_range_read_info(keyno, n_ranges, keys,
++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
++                                                key_parts,
++#endif
+                                                 bufsz, flags, cost);
+   DBUG_RETURN(rows);
+ }
+ 
+ ha_rows ha_mroonga::multi_range_read_info(uint keyno, uint n_ranges, uint keys,
++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
++                                          uint key_parts,
++#endif
+                                           uint *bufsz, uint *flags,
+                                           COST_VECT *cost)
+ {
+@@ -6261,9 +6297,15 @@
+   if (share->wrapper_mode)
+   {
+     rows = wrapper_multi_range_read_info(keyno, n_ranges, keys,
++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
++                                         key_parts,
++#endif
+                                          bufsz, flags, cost);
+   } else {
+     rows = storage_multi_range_read_info(keyno, n_ranges, keys,
++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
++                                         key_parts,
++#endif
+                                          bufsz, flags, cost);
+   }
+   DBUG_RETURN(rows);
+@@ -6315,7 +6357,7 @@
+   DBUG_RETURN(error);
+ }
+ 
+-int ha_mroonga::wrapper_multi_range_read_next(char **range_info)
++int ha_mroonga::wrapper_multi_range_read_next(range_id_t *range_info)
+ {
+   MRN_DBUG_ENTER_METHOD();
+   int error = 0;
+@@ -6329,14 +6371,14 @@
+   DBUG_RETURN(error);
+ }
+ 
+-int ha_mroonga::storage_multi_range_read_next(char **range_info)
++int ha_mroonga::storage_multi_range_read_next(range_id_t *range_info)
+ {
+   MRN_DBUG_ENTER_METHOD();
+   int error = handler::multi_range_read_next(range_info);
+   DBUG_RETURN(error);
+ }
+ 
+-int ha_mroonga::multi_range_read_next(char **range_info)
++int ha_mroonga::multi_range_read_next(range_id_t *range_info)
+ {
+   MRN_DBUG_ENTER_METHOD();
+   int error = 0;
+--- groonga-storage-engine-1.0.1/ha_mroonga.h	2011-10-27 12:31:36.859277054 +0200
++++ groonga-storage-engine-1.0.1/ha_mroonga.h	2011-11-02 11:37:03.095096227 +0100
+@@ -47,18 +47,22 @@
+ #  define MRN_HANDLER_HAVE_ADD_INDEX 1
+ #endif
+ 
+-#if (MYSQL_VERSION_ID >= 50600) || \
+-    (defined(MRN_MARIADB_P) && MYSQL_VERSION_ID >= 50302)
+-#  define MRN_HANDLER_HAVE_HA_CLOSE 1
++#if (MYSQL_VERSION_ID >= 50603) || \
++    (defined(MRN_MARIADB_P) && MYSQL_VERSION_ID >= 50209)
+ #  define MRN_HANDLER_HAVE_HA_RND_NEXT 1
+ #  define MRN_HANDLER_HAVE_HA_RND_POS 1
++#  define MRN_HANDLER_HAVE_HA_INDEX_READ_MAP 1
+ #  define MRN_HANDLER_HAVE_HA_INDEX_READ_IDX_MAP 1
+ #  define MRN_HANDLER_HAVE_HA_INDEX_NEXT 1
+ #  define MRN_HANDLER_HAVE_HA_INDEX_PREV 1
+ #  define MRN_HANDLER_HAVE_HA_INDEX_FIRST 1
+ #  define MRN_HANDLER_HAVE_HA_INDEX_LAST 1
+ #  define MRN_HANDLER_HAVE_HA_INDEX_NEXT_SAME 1
++#endif
+ 
++#if (MYSQL_VERSION_ID >= 50603) || \
++    (defined(MRN_MARIADB_P) && MYSQL_VERSION_ID >= 50302)
++#  define MRN_HANDLER_HAVE_HA_CLOSE 1
+ #  define MRN_HANDLER_HAVE_MULTI_RANGE_READ 1
+ #endif
+ 
+@@ -66,6 +70,14 @@
+ #  define MRN_HANDLER_HAVE_HA_INPLACE_INDEX_CHANGE
+ #endif
+ 
++#ifndef MRN_MARIADB_P
++#  define MRN_HANDLER_HAVE_INDEX_READ_LAST_MAP
++#endif
++
++#if (defined(MRN_MARIADB_P) && MYSQL_VERSION_ID >= 50302)
++#  define MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
++#endif
++
+ #if MYSQL_VERSION_ID < 50600
+   typedef Item COND;
+ #endif
+@@ -74,6 +86,10 @@
+   typedef MYSQL_ERROR Sql_condition;
+ #endif
+ 
++#ifndef MRN_MARIADB_P
++  typedef char *range_id_t;
++#endif
++
+ class ha_mroonga;
+ 
+ /* structs */
+@@ -213,11 +229,15 @@
+   ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
+   int index_init(uint idx, bool sorted);
+   int index_end();
++#ifndef MRN_HANDLER_HAVE_HA_INDEX_READ_MAP
+   int index_read_map(uchar * buf, const uchar * key,
+                      key_part_map keypart_map,
+                      enum ha_rkey_function find_flag);
++#endif
++#ifdef MRN_HANDLER_HAVE_INDEX_READ_LAST_MAP
+   int index_read_last_map(uchar *buf, const uchar *key,
+                           key_part_map keypart_map);
++#endif
+ #ifndef MRN_HANDLER_HAVE_HA_INDEX_NEXT
+   int index_next(uchar *buf);
+ #endif
+@@ -261,11 +281,14 @@
+                                       uint n_ranges, uint *bufsz,
+                                       uint *flags, COST_VECT *cost);
+   ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
++                                uint key_parts,
++#endif
+                                 uint *bufsz, uint *flags, COST_VECT *cost);
+   int multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
+                             uint n_ranges, uint mode,
+                             HANDLER_BUFFER *buf);
+-  int multi_range_read_next(char **range_info);
++  int multi_range_read_next(range_id_t *range_info);
+ #else // MRN_HANDLER_HAVE_MULTI_RANGE_READ
+   int read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
+                              KEY_MULTI_RANGE *ranges,
+@@ -321,6 +344,11 @@
+ #ifdef MRN_HANDLER_HAVE_HA_RND_POS
+   int rnd_pos(uchar *buf, uchar *pos);
+ #endif
++#ifdef MRN_HANDLER_HAVE_HA_INDEX_READ_MAP
++  int index_read_map(uchar *buf, const uchar *key,
++                     key_part_map keypart_map,
++                     enum ha_rkey_function find_flag);
++#endif
+ #ifdef MRN_HANDLER_HAVE_HA_INDEX_NEXT
+   int index_next(uchar *buf);
+ #endif
+@@ -469,10 +497,12 @@
+   int storage_index_read_map(uchar *buf, const uchar *key,
+                              key_part_map keypart_map,
+                              enum ha_rkey_function find_flag);
++#ifdef MRN_HANDLER_HAVE_INDEX_READ_LAST_MAP
+   int wrapper_index_read_last_map(uchar *buf, const uchar *key,
+                                   key_part_map keypart_map);
+   int storage_index_read_last_map(uchar *buf, const uchar *key,
+                                   key_part_map keypart_map);
++#endif
+   int wrapper_index_next(uchar *buf);
+   int storage_index_next(uchar *buf);
+   int wrapper_index_prev(uchar *buf);
+@@ -533,9 +563,15 @@
+                                               uint *flags,
+                                               COST_VECT *cost);
+   ha_rows wrapper_multi_range_read_info(uint keyno, uint n_ranges, uint keys,
++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
++                                        uint key_parts,
++#endif
+                                         uint *bufsz, uint *flags,
+                                         COST_VECT *cost);
+   ha_rows storage_multi_range_read_info(uint keyno, uint n_ranges, uint keys,
++#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
++                                        uint key_parts,
++#endif
+                                         uint *bufsz, uint *flags,
+                                         COST_VECT *cost);
+   int wrapper_multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
+@@ -544,8 +580,8 @@
+   int storage_multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
+                                     uint n_ranges, uint mode,
+                                     HANDLER_BUFFER *buf);
+-  int wrapper_multi_range_read_next(char **range_info);
+-  int storage_multi_range_read_next(char **range_info);
++  int wrapper_multi_range_read_next(range_id_t *range_info);
++  int storage_multi_range_read_next(range_id_t *range_info);
+ #else // MRN_HANDLER_HAVE_MULTI_RANGE_READ
+   int wrapper_read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
+                                      KEY_MULTI_RANGE *ranges,
+--- groonga-storage-engine-1.0.1/test/run-sql-test.sh	2011-09-27 10:43:29.093290682 +0200
++++ groonga-storage-engine-1.0.1/test/run-sql-test.sh	2011-11-02 11:37:03.099096256 +0100
+@@ -24,12 +24,20 @@
+ source_test_suites_dir="${source_mysql_test_dir}/suite"
+ build_test_suites_dir="${build_mysql_test_dir}/suite"
+ case "${MYSQL_VERSION}" in
+-    5.1)
++    5.1.*)
+ 	plugins_dir="${MYSQL_BUILD}/lib/mysql/plugin"
+ 	if ! test -d "${build_test_suites_dir}"; then
+ 	    mkdir -p "${build_test_suites_dir}"
+ 	fi
+ 	;;
++    *-MariaDB*)
++	if ! test -d "${build_test_suites_dir}"; then
++	    ln -s "${source_test_suites_dir}" "${build_test_suites_dir}"
++	fi
++	if ! test -d "${MYSQL_BUILD}/plugin/mroonga"; then
++	    ln -s "${top_dir}" "${MYSQL_BUILD}/plugin/mroonga"
++	fi
++	;;
+     *)
+ 	if ! test -d "${build_test_suites_dir}"; then
+ 	    ln -s "${source_test_suites_dir}" "${build_test_suites_dir}"
+@@ -47,10 +55,14 @@
+     fi
+ done
+ 
+-make -C ${top_dir} \
+-    install-pluginLTLIBRARIES \
+-    plugindir=${plugins_dir} > /dev/null || \
+-    exit 1
++if test -n "${plugins_dir}"; then
++    make -C ${top_dir} \
++	install-pluginLTLIBRARIES \
++	plugindir=${plugins_dir} > /dev/null || \
++	exit 1
++else
++    make -C ${top_dir} > /dev/null || exit 1
++fi
+ 
+ (cd "$build_mysql_test_dir" && \
+     ./mysql-test-run.pl \
diff --git a/component/mariadb/mysql_create_system_tables__no_test.patch b/component/mariadb/mysql_create_system_tables__no_test.patch
new file mode 100644
index 0000000000000000000000000000000000000000..31f5d0ab12a9c4715a8998e2107a3c17cce5f14f
--- /dev/null
+++ b/component/mariadb/mysql_create_system_tables__no_test.patch
@@ -0,0 +1,26 @@
+# 33_scripts__mysql_create_system_tables__no_test.dpatch by  <ch@debian.org>
+
+A user with no password prevents a normal user from login under certain
+circumstances as it is checked first.
+See http://bugs.debian.org/301741
+and http://bugs.mysql.com/bug.php?id=6901
+
+--- scripts/mysql_system_tables_data.sql	2008-12-04 22:59:44.000000000 +0100
++++ scripts/mysql_system_tables_data.sql	2008-12-04 23:00:07.000000000 +0100
+@@ -11,8 +11,6 @@
+ -- Fill "db" table with default grants for anyone to
+ -- access database 'test' and 'test_%' if "db" table didn't exist
+ CREATE TEMPORARY TABLE tmp_db LIKE db;
+-INSERT INTO tmp_db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y');
+-INSERT INTO tmp_db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y');
+ INSERT INTO db SELECT * FROM tmp_db WHERE @had_db_table=0;
+ DROP TABLE tmp_db;
+ 
+@@ -24,7 +22,5 @@
+ INSERT INTO tmp_user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','');
+ REPLACE INTO tmp_user SELECT @current_hostname,'root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','' FROM dual WHERE LOWER( @current_hostname) != 'localhost';
+ REPLACE INTO tmp_user VALUES ('127.0.0.1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','');
+-INSERT INTO tmp_user (host,user) VALUES ('localhost','');
+-INSERT INTO tmp_user (host,user) SELECT @current_hostname,'' FROM dual WHERE LOWER(@current_hostname ) != 'localhost';
+ INSERT INTO user SELECT * FROM tmp_user WHERE @had_user_table=0;
+ DROP TABLE tmp_user;
diff --git a/component/memcached/buildout.cfg b/component/memcached/buildout.cfg
index 5c36165e4bfa3777421094c7d358b1be8aa00e3c..dbc2979c053fb0e367cd0f09b839f3e2e4d0e0ea 100644
--- a/component/memcached/buildout.cfg
+++ b/component/memcached/buildout.cfg
@@ -1,16 +1,9 @@
 [buildout]
 parts = memcached
-extends = ../libevent/buildout.cfg
-
-[memcached-strict-aliasing-patch]
-# on some platforms original memcached refuses to build:
-#  * http://code.google.com/p/memcached/issues/detail?id=60
-#  * http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=565033
-recipe = hexagonit.recipe.download
-url = ${:_profile_base_location_}/${:filename}
-md5sum = c03b3bfc237b77667b0e90442b0980e8
-download-only = true
-filename = memcached-fix-strict-aliasing.patch
+extends =
+  ../autoconf/buildout.cfg
+  ../automake/buildout.cfg
+  ../libevent/buildout.cfg
 
 [memcached-fix-array-subscript-is-above-array-bounds]
 recipe = hexagonit.recipe.download
@@ -19,45 +12,30 @@ filename = memcached-1.4-fix-array-subscript-is-above-array-bounds.patch
 download-only = true
 md5sum = 472508b9a4b6c0b9f5d6f2abce3444e3
 
-[memcached-gcc4.6.patch]
+[memcached-gcc-4.4.patch]
 recipe = hexagonit.recipe.download
 url = ${:_profile_base_location_}/${:filename}
-filename = memcached-gcc4.6.patch
+filename = memcached-gcc-4.4.patch
 download-only = true
-md5sum = 3418477f64500cd2a8dce046f5d72fec
+md5sum = fd98d0cbfc4d3a25ac9808472fbe62f8
 
 [memcached]
-<= memcached-1.4.6
-
-[memcached-1.4.6]
-<= memcached-common
-url = http://memcached.googlecode.com/files/memcached-1.4.6.tar.gz
-md5sum = 243e5d82de27e6e45caf0ebfd400e41a
-patches =
-  ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename}
-
-[memcached-1.4.5]
-<= memcached-common
-url = http://memcached.googlecode.com/files/memcached-1.4.5.tar.gz
-md5sum = 583441a25f937360624024f2881e5ea8
-patches =
-  ${memcached-strict-aliasing-patch:location}/${memcached-strict-aliasing-patch:filename}
-  ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename}
-  ${memcached-gcc4.6.patch:location}/${memcached-gcc4.6.patch:filename}
-
-[memcached-1.4.4]
-<= memcached-common
-url = http://memcached.googlecode.com/files/memcached-1.4.4.tar.gz
-md5sum = 5ca5b24de347e97ac1f48f3785b4178a
-patches =
-  ${memcached-strict-aliasing-patch:location}/${memcached-strict-aliasing-patch:filename}
-  ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename}
-  ${memcached-gcc4.6.patch:location}/${memcached-gcc4.6.patch:filename}
-
-[memcached-common]
 recipe = hexagonit.recipe.cmmi
+url = http://memcached.googlecode.com/files/memcached-1.4.8.tar.gz
+md5sum = b7104e269511621c2777367d6d6315fe
+patches =
+  ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename} ${memcached-gcc-4.4.patch:location}/${memcached-gcc-4.4.patch:filename}
+patch-options = -p1
+configure-command =
+  aclocal-1.11
+  autoheader
+  automake-1.11 --foreign
+  autoconf
+  ./configure
 configure-options =
+  --prefix=${buildout:parts-directory}/${:_buildout_section_name_}
   --with-libevent=${libevent:location}
-patch-options = -p1
+  --disable-docs
 environment =
-    LDFLAGS =-Wl,-rpath=${libevent:location}/lib
+  PATH=${autoconf:location}/bin:${automake-1.11:location}/bin:%(PATH)s
+  LDFLAGS =-Wl,-rpath=${libevent:location}/lib
diff --git a/component/memcached/memcached-fix-strict-aliasing.patch b/component/memcached/memcached-fix-strict-aliasing.patch
deleted file mode 100644
index ac8d4dde568472b007a8565ee68c398943a47857..0000000000000000000000000000000000000000
--- a/component/memcached/memcached-fix-strict-aliasing.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-diff -rdBu memcached-1.4.0-rc1/memcached.h memcached-1.4.0-my/memcached.h
---- memcached-1.4.0-rc1/memcached.h	2009-05-29 00:51:56.000000000 +0400
-+++ memcached-1.4.0-my/memcached.h	2009-06-07 22:32:52.000000000 +0400
-@@ -75,21 +75,21 @@
- 
- /* warning: don't use these macros with a function, as it evals its arg twice */
- #define ITEM_get_cas(i) ((uint64_t)(((i)->it_flags & ITEM_CAS) ? \
--                                    *(uint64_t*)&((i)->end[0]) : 0x0))
-+                                    *(uint64_t*)((char*)(i) + sizeof(*i)) : 0x0))
- #define ITEM_set_cas(i,v) { if ((i)->it_flags & ITEM_CAS) { \
--                          *(uint64_t*)&((i)->end[0]) = v; } }
-+                          *(uint64_t*)((char*)(i) + sizeof(*i)) = v; } }
- 
--#define ITEM_key(item) (((char*)&((item)->end[0])) \
-+#define ITEM_key(item) ((char*)(item) + sizeof(*item) \
-          + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0))
- 
--#define ITEM_suffix(item) ((char*) &((item)->end[0]) + (item)->nkey + 1 \
-+#define ITEM_suffix(item) ((char*)(item) + sizeof(*item) + (item)->nkey + 1 \
-          + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0))
- 
--#define ITEM_data(item) ((char*) &((item)->end[0]) + (item)->nkey + 1 \
-+#define ITEM_data(item) ((char*)(item) + sizeof(*item) + (item)->nkey + 1 \
-          + (item)->nsuffix \
-          + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0))
- 
--#define ITEM_ntotal(item) (sizeof(struct _stritem) + (item)->nkey + 1 \
-+#define ITEM_ntotal(item) (sizeof(*item) + (item)->nkey + 1 \
-          + (item)->nsuffix + (item)->nbytes \
-          + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0))
- 
-@@ -285,7 +285,6 @@
-     uint8_t         it_flags;   /* ITEM_* above */
-     uint8_t         slabs_clsid;/* which slab class we're in */
-     uint8_t         nkey;       /* key length, w/terminating null and padding */
--    void * end[];
-     /* if it_flags & ITEM_CAS we have 8 bytes CAS */
-     /* then null-terminated key */
-     /* then " flags length\r\n" (no terminating null) */
-
diff --git a/component/memcached/memcached-gcc-4.4.patch b/component/memcached/memcached-gcc-4.4.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f86324858ad92d1fdd860b27d7647252c16d99ea
--- /dev/null
+++ b/component/memcached/memcached-gcc-4.4.patch
@@ -0,0 +1,12 @@
+# In OpenSuse 11.2, 'gcc -dumpversion' returns '4.4', not '4.4.*'.
+--- memcached-1.4.8/configure.ac.orig
++++ memcached-1.4.8/configure.ac
+@@ -502,7 +502,7 @@
+   GCC_VERSION=`$CC -dumpversion`
+   CFLAGS="$CFLAGS -Wall -Werror -pedantic -Wmissing-prototypes -Wmissing-declarations -Wredundant-decls"
+   case $GCC_VERSION in
+-    4.4.*)
++    4.4 | 4.4.*)
+     CFLAGS="$CFLAGS -fno-strict-aliasing"
+     ;;
+   esac
diff --git a/component/memcached/memcached-gcc4.6.patch b/component/memcached/memcached-gcc4.6.patch
deleted file mode 100644
index 53647d90aed8753754d1652a267428b64bfe3381..0000000000000000000000000000000000000000
--- a/component/memcached/memcached-gcc4.6.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-diff --git a/items.c b/items.c
-index e7f01ea..9fc6704 100644
---- a/items.c
-+++ b/items.c
-@@ -450,9 +450,7 @@ void do_item_stats_sizes(ADD_STAT add_stats, void *c) {
-         for (i = 0; i < num_buckets; i++) {
-             if (histogram[i] != 0) {
-                 char key[8];
--                int klen = 0;
--                klen = snprintf(key, sizeof(key), "%d", i * 32);
--                assert(klen < sizeof(key));
-+                assert(snprintf(key, sizeof(key), "%d", i * 32) < sizeof(key));
-                 APPEND_STAT(key, "%u", histogram[i]);
-             }
-         }
-diff --git a/memcached.c b/memcached.c
-index 750c8b3..0913b77 100644
---- a/memcached.c
-+++ b/memcached.c
-@@ -4627,8 +4627,6 @@ int main (int argc, char **argv) {
- 
-     /* create the listening socket, bind it, and init */
-     if (settings.socketpath == NULL) {
--        int udp_port;
--
-         const char *portnumber_filename = getenv("MEMCACHED_PORT_FILENAME");
-         char temp_portnumber_filename[PATH_MAX];
-         FILE *portnumber_file = NULL;
-@@ -4658,7 +4656,6 @@ int main (int argc, char **argv) {
-          * then daemonise if needed, then init libevent (in some cases
-          * descriptors created by libevent wouldn't survive forking).
-          */
--        udp_port = settings.udpport ? settings.udpport : settings.port;
- 
-         /* create the UDP listening socket and bind it */
-         errno = 0;
diff --git a/component/mysql-5.1/buildout.cfg b/component/mysql-5.1/buildout.cfg
index 5c912b79d2a4ff1eb9049240ba60c4724a348140..1397b9511be4e38a927b4e8e2b370001c40ca75a 100644
--- a/component/mysql-5.1/buildout.cfg
+++ b/component/mysql-5.1/buildout.cfg
@@ -21,6 +21,13 @@ md5sum = eefcd08c400c58d3e89542ab482a8429
 filename = mysql-5.1-sphinx-2.0.1-beta.diff
 download-only = true
 
+[mysql-5.1-no_test-patch]
+recipe = hexagonit.recipe.download
+url = ${:_profile_base_location_}/${:filename}
+md5sum = 22b0ef8baec5efc182e10d233c6f14ca
+filename = mysql_create_system_tables__no_test.patch
+download-only = true
+
 [mysql-5.1]
 recipe = hexagonit.recipe.cmmi
 version = 5.1.58
@@ -60,6 +67,7 @@ make-options =
 patch-options = -p0
 patches =
   ${mysql-5.1-sphinx-patch:location}/${mysql-5.1-sphinx-patch:filename}
+  ${mysql-5.1-no_test-patch:location}/${mysql-5.1-no_test-patch:filename}
 environment =
   PATH =${autoconf:location}/bin:${automake-1.11:location}/bin:${libtool:location}/bin:${bison:location}/bin:${flex:location}/bin:%(PATH)s
   CPPFLAGS =-I${ncurses:location}/include -I${readline:location}/include
@@ -67,8 +75,8 @@ environment =
 
 [groonga-storage-engine-mysql-5.1]
 recipe = hexagonit.recipe.cmmi
-url = http://github.com/downloads/mroonga/mroonga/groonga-storage-engine-0.9.tar.gz
-md5sum = 78fe07122dc376796a5aede476f50cfd
+url = http://github.com/downloads/mroonga/mroonga/groonga-storage-engine-1.0.0.tar.gz
+md5sum = 289b8b7919e790599ea79b6fe9270e04
 configure-options =
   --with-mysql-source=${mysql-5.1:location}__compile__/mysql-${mysql-5.1:version}
   --with-mysql-config=${mysql-5.1:location}/bin/mysql_config
diff --git a/component/mysql-5.1/mysql_create_system_tables__no_test.patch b/component/mysql-5.1/mysql_create_system_tables__no_test.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0087e17588e5ea42d88682af0ea5ca9c910a45cc
--- /dev/null
+++ b/component/mysql-5.1/mysql_create_system_tables__no_test.patch
@@ -0,0 +1,26 @@
+# 33_scripts__mysql_create_system_tables__no_test.dpatch by  <ch@debian.org>
+
+A user with no password prevents a normal user from login under certain
+circumstances as it is checked first.
+See http://bugs.debian.org/301741
+and http://bugs.mysql.com/bug.php?id=6901
+
+--- scripts/mysql_system_tables_data.sql	2008-12-04 22:59:44.000000000 +0100
++++ scripts/mysql_system_tables_data.sql	2008-12-04 23:00:07.000000000 +0100
+@@ -11,8 +11,6 @@
+ -- Fill "db" table with default grants for anyone to
+ -- access database 'test' and 'test_%' if "db" table didn't exist
+ CREATE TEMPORARY TABLE tmp_db LIKE db;
+-INSERT INTO tmp_db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y');
+-INSERT INTO tmp_db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y');
+ INSERT INTO db SELECT * FROM tmp_db WHERE @had_db_table=0;
+ DROP TABLE tmp_db;
+ 
+@@ -24,7 +22,5 @@
+ INSERT INTO tmp_user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0);
+ REPLACE INTO tmp_user SELECT @current_hostname,'root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0 FROM dual WHERE LOWER( @current_hostname) != 'localhost';
+ REPLACE INTO tmp_user VALUES ('127.0.0.1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0);
+-INSERT INTO tmp_user (host,user) VALUES ('localhost','');
+-INSERT INTO tmp_user (host,user) SELECT @current_hostname,'' FROM dual WHERE LOWER(@current_hostname ) != 'localhost';
+ INSERT INTO user SELECT * FROM tmp_user WHERE @had_user_table=0;
+ DROP TABLE tmp_user;
diff --git a/component/mysql-5.5/buildout.cfg b/component/mysql-5.5/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..1c55e0ea182ec5195a79ebdb11b97d705148e2ab
--- /dev/null
+++ b/component/mysql-5.5/buildout.cfg
@@ -0,0 +1,76 @@
+[buildout]
+extends =
+  ../autoconf/buildout.cfg
+  ../automake/buildout.cfg
+  ../bison/buildout.cfg
+  ../cmake/buildout.cfg
+  ../groonga/buildout.cfg
+  ../libaio/buildout.cfg
+  ../libtool/buildout.cfg
+  ../ncurses/buildout.cfg
+  ../pkgconfig/buildout.cfg
+  ../zlib/buildout.cfg
+
+parts =
+    mysql-5.5
+
+[mysql-5.5-sphinx-patch]
+# this patch comes from sphinx-2.0.1-beta including changes for
+# MySQL-5.5 in
+# http://code.google.com/p/sphinxsearch/source/detail?r=2921
+recipe = hexagonit.recipe.download
+url = ${:_profile_base_location_}/${:filename}
+md5sum = 04549822ebfa91b5f84025ff49ef24c2
+filename = mysql-5.5-sphinx-2.0.1-beta.diff
+download-only = true
+
+[mysql-5.5-no_test-patch]
+recipe = hexagonit.recipe.download
+url = ${:_profile_base_location_}/${:filename}
+md5sum = bb7ee34b9a98da1f913566c63ffbc6f5
+filename = mysql_create_system_tables__no_test.patch
+download-only = true
+
+[mysql-5.5]
+recipe = hexagonit.recipe.cmmi
+version = 5.5.16
+url = http://mysql.he.net/Downloads/MySQL-5.5/mysql-${:version}.tar.gz
+md5sum = 462ab3752dd666ec06ca32f92673b59e
+# compile directory is required to build mysql plugins.
+keep-compile-dir = true
+patch-options = -p0
+patches =
+  ${mysql-5.5-sphinx-patch:location}/${mysql-5.5-sphinx-patch:filename}
+  ${mysql-5.5-no_test-patch:location}/${mysql-5.5-no_test-patch:filename}
+configure-command = ${cmake:location}/bin/cmake
+# we use embedded yassl instead of openssl to avoid compilation errors in the sphinx storage engine.
+configure-options =
+  -DCMAKE_INSTALL_PREFIX=${buildout:parts-directory}/${:_buildout_section_name_}
+  -DBUILD_CONFIG=mysql_release
+  -DDEFAULT_CHARSET=utf8
+  -DDEFAULT_COLLATION=utf8_unicode_ci
+  -DWITH_SSL=bundled
+  -DWITH_ZLIB=system
+  -DWITHOUT_EXAMPLE_STORAGE_ENGINE=1
+  -DWITHOUT_DAEMON_EXAMPLE=1
+  -DWITH_SPHINX_STORAGE_ENGINE=1
+  -DCMAKE_C_FLAGS="-I${libaio:location}/include -I${ncurses:location}/include -I${zlib:location}/include"
+  -DCMAKE_INSTALL_RPATH=${libaio:location}/lib:${ncurses:location}/lib:${zlib:location}/lib
+environment =
+  CMAKE_PROGRAM_PATH=${autoconf:location}/bin:${automake-1.11:location}/bin:${cmake:location}/bin:${libtool:location}/bin:${bison:location}/bin:${flex:location}/bin
+  CMAKE_INCLUDE_PATH=${libaio:location}/include:${ncurses:location}/include:${zlib:location}/include
+  CMAKE_LIBRARY_PATH=${libaio:location}/lib:${ncurses:location}/lib:${zlib:location}/lib
+  LDFLAGS=-L${libaio:location}/lib
+
+[groonga-storage-engine-mysql-5.5]
+recipe = hexagonit.recipe.cmmi
+url = http://github.com/downloads/mroonga/mroonga/groonga-storage-engine-1.0.0.tar.gz
+md5sum = 289b8b7919e790599ea79b6fe9270e04
+configure-options =
+  --with-mysql-source=${mysql-5.5:location}__compile__/mysql-${mysql-5.5:version}
+  --with-mysql-config=${mysql-5.5:location}/bin/mysql_config
+environment =
+  PATH=${groonga:location}/bin:${pkgconfig:location}/bin:%(PATH)s
+  CPPFLAGS=-I${groonga:location}/include/groonga
+  LDFLAGS=-L${groonga:location}/lib
+  PKG_CONFIG_PATH=${groonga:location}/lib/pkgconfig
diff --git a/component/mysql-5.5/mysql-5.5-sphinx-2.0.1-beta.diff b/component/mysql-5.5/mysql-5.5-sphinx-2.0.1-beta.diff
new file mode 100644
index 0000000000000000000000000000000000000000..ee9f88cfa0b42ccc170a08f017bc229e6e3383ba
--- /dev/null
+++ b/component/mysql-5.5/mysql-5.5-sphinx-2.0.1-beta.diff
@@ -0,0 +1,4721 @@
+diff -uNr storage/sphinx/CMakeLists.txt storage/sphinx/CMakeLists.txt
+--- storage/sphinx/CMakeLists.txt	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/CMakeLists.txt	2011-10-13 00:59:59.282957578 +0200
+@@ -0,0 +1,16 @@
++SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
++SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
++ADD_DEFINITIONS(-DMYSQL_SERVER)
++
++INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
++                    ${CMAKE_SOURCE_DIR}/sql
++                    ${CMAKE_SOURCE_DIR}/extra/yassl/include
++                    ${CMAKE_SOURCE_DIR}/regex)
++
++SET(SPHINX_SOURCES ha_sphinx.cc)
++IF(MYSQL_VERSION_ID LESS 50515) 
++ADD_LIBRARY(sphinx ha_sphinx.cc)
++ELSE()
++SET(SPHINX_PLUGIN_DYNAMIC "ha_sphinx")
++MYSQL_ADD_PLUGIN(sphinx ${SPHINX_SOURCES} STORAGE_ENGINE MODULE_ONLY LINK_LIBRARIES mysys)
++ENDIF()
+diff -uNr storage/sphinx/gen_data.php storage/sphinx/gen_data.php
+--- storage/sphinx/gen_data.php	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/gen_data.php	2006-06-07 09:28:43.000000000 +0200
+@@ -0,0 +1,37 @@
++<?php
++
++$file_name= $argv[1];
++
++//echo $file_name;
++
++$cont= file_get_contents($file_name);
++
++$words= explode(" ", $cont);
++
++//echo "words: ".(count($words))."\n";
++
++$cw = count($words);
++
++echo "REPLACE INTO test.documents ( id, group_id, date_added, title, content ) VALUES\n";
++
++
++for ($i=1; $i<=100000; $i++)
++{
++  $count_words= mt_rand(10,30);
++  $pred = "";
++  for ($j=0; $j<$count_words; $j++)
++  {
++    $pred .= chop($words[mt_rand(1, $cw-1)])." ";
++  }
++  $count_words= mt_rand(3,5);
++  $tit = "";
++  for ($j=0; $j<$count_words; $j++)
++  {
++    $tit .= chop($words[mt_rand(1, $cw-1)])." ";
++  }
++  echo "($i,".mt_rand(1,20).",NOW(),'".addslashes($tit)."','".addslashes($pred)."'),\n";
++}       
++  echo "(0,1,now(),'end','eND');\n";
++  
++
++?>
+diff -uNr storage/sphinx/ha_sphinx.cc storage/sphinx/ha_sphinx.cc
+--- storage/sphinx/ha_sphinx.cc	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/ha_sphinx.cc	2011-10-13 00:59:59.282957578 +0200
+@@ -0,0 +1,3547 @@
++//
++// $Id: ha_sphinx.cc 2752 2011-03-29 08:21:05Z tomat $
++//
++
++//
++// Copyright (c) 2001-2011, Andrew Aksyonoff
++// Copyright (c) 2008-2011, Sphinx Technologies Inc
++// All rights reserved
++//
++// This program is free software; you can redistribute it and/or modify
++// it under the terms of the GNU General Public License. You should have
++// received a copy of the GPL license along with this program; if you
++// did not, you can find it at http://www.gnu.org/
++//
++
++#ifdef USE_PRAGMA_IMPLEMENTATION
++#pragma implementation // gcc: Class implementation
++#endif
++
++#if _MSC_VER>=1400
++#define _CRT_SECURE_NO_DEPRECATE 1
++#define _CRT_NONSTDC_NO_DEPRECATE 1
++#endif
++
++#include <mysql_version.h>
++
++#if MYSQL_VERSION_ID>=50515
++#include "sql_class.h"
++#include "sql_array.h"
++#elif MYSQL_VERSION_ID>50100
++#include "mysql_priv.h"
++#include <mysql/plugin.h>
++#else
++#include "../mysql_priv.h"
++#endif
++
++#include <mysys_err.h>
++#include <my_sys.h>
++#include <mysql.h> // include client for INSERT table (sort of redoing federated..)
++
++#ifndef __WIN__
++	// UNIX-specific
++	#include <my_net.h>
++	#include <netdb.h>
++	#include <sys/un.h>
++
++	#define	RECV_FLAGS	MSG_WAITALL
++
++	#define sphSockClose(_sock)	::close(_sock)
++#else
++	// Windows-specific
++	#include <io.h>
++	#define strcasecmp	stricmp
++	#define snprintf	_snprintf
++
++	#define	RECV_FLAGS	0
++
++	#define sphSockClose(_sock)	::closesocket(_sock)
++#endif
++
++#include <ctype.h>
++#include "ha_sphinx.h"
++
++#ifndef MSG_WAITALL
++#define MSG_WAITALL 0
++#endif
++
++#if _MSC_VER>=1400
++#pragma warning(push,4)
++#endif
++
++/////////////////////////////////////////////////////////////////////////////
++
++/// there might be issues with min() on different platforms (eg. Gentoo, they say)
++#define Min(a,b) ((a)<(b)?(a):(b))
++
++/// unaligned RAM accesses are forbidden on SPARC
++#if defined(sparc) || defined(__sparc__)
++#define UNALIGNED_RAM_ACCESS 0
++#else
++#define UNALIGNED_RAM_ACCESS 1
++#endif
++
++
++#if UNALIGNED_RAM_ACCESS
++
++/// pass-through wrapper
++template < typename T > inline T sphUnalignedRead ( const T & tRef )
++{
++	return tRef;
++}
++
++/// pass-through wrapper
++template < typename T > void sphUnalignedWrite ( void * pPtr, const T & tVal )
++{
++	*(T*)pPtr = tVal;
++}
++
++#else
++
++/// unaligned read wrapper for some architectures (eg. SPARC)
++template < typename T >
++inline T sphUnalignedRead ( const T & tRef )
++{
++	T uTmp;
++	byte * pSrc = (byte *) &tRef;
++	byte * pDst = (byte *) &uTmp;
++	for ( int i=0; i<(int)sizeof(T); i++ )
++		*pDst++ = *pSrc++;
++	return uTmp;
++}
++
++/// unaligned write wrapper for some architectures (eg. SPARC)
++template < typename T >
++void sphUnalignedWrite ( void * pPtr, const T & tVal )
++{
++	byte * pDst = (byte *) pPtr;
++	byte * pSrc = (byte *) &tVal;
++	for ( int i=0; i<(int)sizeof(T); i++ )
++		*pDst++ = *pSrc++;
++}
++
++#endif
++
++#if MYSQL_VERSION_ID>=50515
++
++#define sphinx_hash_init my_hash_init
++#define sphinx_hash_free my_hash_free
++#define sphinx_hash_search my_hash_search
++#define sphinx_hash_delete my_hash_delete
++
++#else
++
++#define sphinx_hash_init hash_init
++#define sphinx_hash_free hash_free
++#define sphinx_hash_search hash_search
++#define sphinx_hash_delete hash_delete
++
++#endif
++
++/////////////////////////////////////////////////////////////////////////////
++
++// FIXME! make this all dynamic
++#define SPHINXSE_MAX_FILTERS		32
++
++#define SPHINXAPI_DEFAULT_HOST		"127.0.0.1"
++#define SPHINXAPI_DEFAULT_PORT		9312
++#define SPHINXAPI_DEFAULT_INDEX		"*"
++
++#define SPHINXQL_DEFAULT_PORT		9306
++
++#define SPHINXSE_SYSTEM_COLUMNS		3
++
++#define SPHINXSE_MAX_ALLOC			(16*1024*1024)
++#define SPHINXSE_MAX_KEYWORDSTATS	4096
++
++#define SPHINXSE_VERSION			"0.9.9 ($Revision: 2752 $)"
++
++// FIXME? the following is cut-n-paste from sphinx.h and searchd.cpp
++// cut-n-paste is somewhat simpler that adding dependencies however..
++
++enum
++{
++	SPHINX_SEARCHD_PROTO	= 1,
++	SEARCHD_COMMAND_SEARCH	= 0,
++	VER_COMMAND_SEARCH		= 0x116,
++};
++
++/// search query sorting orders
++enum ESphSortOrder
++{
++	SPH_SORT_RELEVANCE		= 0,	///< sort by document relevance desc, then by date
++	SPH_SORT_ATTR_DESC		= 1,	///< sort by document date desc, then by relevance desc
++	SPH_SORT_ATTR_ASC		= 2,	///< sort by document date asc, then by relevance desc
++	SPH_SORT_TIME_SEGMENTS	= 3,	///< sort by time segments (hour/day/week/etc) desc, then by relevance desc
++	SPH_SORT_EXTENDED		= 4,	///< sort by SQL-like expression (eg. "@relevance DESC, price ASC, @id DESC")
++	SPH_SORT_EXPR			= 5,	///< sort by expression
++
++	SPH_SORT_TOTAL
++};
++
++/// search query matching mode
++enum ESphMatchMode
++{
++	SPH_MATCH_ALL = 0,			///< match all query words
++	SPH_MATCH_ANY,				///< match any query word
++	SPH_MATCH_PHRASE,			///< match this exact phrase
++	SPH_MATCH_BOOLEAN,			///< match this boolean query
++	SPH_MATCH_EXTENDED,			///< match this extended query
++	SPH_MATCH_FULLSCAN,			///< match all document IDs w/o fulltext query, apply filters
++	SPH_MATCH_EXTENDED2,		///< extended engine V2
++
++	SPH_MATCH_TOTAL
++};
++
++/// search query relevance ranking mode
++enum ESphRankMode
++{
++	SPH_RANK_PROXIMITY_BM25		= 0,	///< default mode, phrase proximity major factor and BM25 minor one
++	SPH_RANK_BM25				= 1,	///< statistical mode, BM25 ranking only (faster but worse quality)
++	SPH_RANK_NONE				= 2,	///< no ranking, all matches get a weight of 1
++	SPH_RANK_WORDCOUNT			= 3,	///< simple word-count weighting, rank is a weighted sum of per-field keyword occurence counts
++	SPH_RANK_PROXIMITY			= 4,	///< phrase proximity
++	SPH_RANK_MATCHANY			= 5,	///< emulate old match-any weighting
++	SPH_RANK_FIELDMASK			= 6,	///< sets bits where there were matches
++	SPH_RANK_SPH04				= 7,	///< codename SPH04, phrase proximity + bm25 + head/exact boost
++
++	SPH_RANK_TOTAL,
++	SPH_RANK_DEFAULT			= SPH_RANK_PROXIMITY_BM25
++};
++
++/// search query grouping mode
++enum ESphGroupBy
++{
++	SPH_GROUPBY_DAY		= 0,	///< group by day
++	SPH_GROUPBY_WEEK	= 1,	///< group by week
++	SPH_GROUPBY_MONTH	= 2,	///< group by month
++	SPH_GROUPBY_YEAR	= 3,	///< group by year
++	SPH_GROUPBY_ATTR	= 4		///< group by attribute value
++};
++
++/// known attribute types
++enum
++{
++	SPH_ATTR_NONE		= 0,			///< not an attribute at all
++	SPH_ATTR_INTEGER	= 1,			///< this attr is just an integer
++	SPH_ATTR_TIMESTAMP	= 2,			///< this attr is a timestamp
++	SPH_ATTR_ORDINAL	= 3,			///< this attr is an ordinal string number (integer at search time, specially handled at indexing time)
++	SPH_ATTR_BOOL		= 4,			///< this attr is a boolean bit field
++	SPH_ATTR_FLOAT		= 5,
++	SPH_ATTR_BIGINT		= 6,
++
++	SPH_ATTR_MULTI		= 0x40000000UL	///< this attr has multiple values (0 or more)
++};
++
++/// known answers
++enum
++{
++	SEARCHD_OK		= 0,	///< general success, command-specific reply follows
++	SEARCHD_ERROR	= 1,	///< general failure, error message follows
++	SEARCHD_RETRY	= 2,	///< temporary failure, error message follows, client should retry later
++	SEARCHD_WARNING	= 3		///< general success, warning message and command-specific reply follow
++};
++
++//////////////////////////////////////////////////////////////////////////////
++
++#define SPHINX_DEBUG_OUTPUT		0
++#define SPHINX_DEBUG_CALLS		0
++
++#include <stdarg.h>
++
++#if SPHINX_DEBUG_OUTPUT
++inline void SPH_DEBUG ( const char * format, ... )
++{
++	va_list ap;
++	va_start ( ap, format );
++	fprintf ( stderr, "SphinxSE: " );
++	vfprintf ( stderr, format, ap );
++	fprintf ( stderr, "\n" );
++	va_end ( ap );
++}
++#else
++inline void SPH_DEBUG ( const char *, ... ) {}
++#endif
++
++#if SPHINX_DEBUG_CALLS
++
++#define SPH_ENTER_FUNC() { SPH_DEBUG ( "enter %s", __FUNCTION__ ); }
++#define SPH_ENTER_METHOD() { SPH_DEBUG ( "enter %s(this=%08x)", __FUNCTION__, this ); }
++#define SPH_RET(_arg) { SPH_DEBUG ( "leave %s", __FUNCTION__ ); return _arg; }
++#define SPH_VOID_RET() { SPH_DEBUG ( "leave %s", __FUNCTION__ ); return; }
++
++#else
++
++#define SPH_ENTER_FUNC()
++#define SPH_ENTER_METHOD()
++#define SPH_RET(_arg) { return(_arg); }
++#define SPH_VOID_RET() { return; }
++
++#endif
++
++
++#define SafeDelete(_arg)		{ if ( _arg ) delete ( _arg );		(_arg) = NULL; }
++#define SafeDeleteArray(_arg)	{ if ( _arg ) delete [] ( _arg );	(_arg) = NULL; }
++
++//////////////////////////////////////////////////////////////////////////////
++
++/// per-table structure that will be shared among all open Sphinx SE handlers
++struct CSphSEShare
++{
++	pthread_mutex_t	m_tMutex;
++	THR_LOCK		m_tLock;
++
++	char *			m_sTable;
++	char *			m_sScheme;		///< our connection string
++	char *			m_sHost;		///< points into m_sScheme buffer, DO NOT FREE EXPLICITLY
++	char *			m_sSocket;		///< points into m_sScheme buffer, DO NOT FREE EXPLICITLY
++	char *			m_sIndex;		///< points into m_sScheme buffer, DO NOT FREE EXPLICITLY
++	ushort			m_iPort;
++	bool			m_bSphinxQL;	///< is this read-only SphinxAPI table, or write-only SphinxQL table?
++	uint			m_iTableNameLen;
++	uint			m_iUseCount;
++	CHARSET_INFO *	m_pTableQueryCharset;
++
++	int					m_iTableFields;
++	char **				m_sTableField;
++	enum_field_types *	m_eTableFieldType;
++
++	CSphSEShare ()
++		: m_sTable ( NULL )
++		, m_sScheme ( NULL )
++		, m_sHost ( NULL )
++		, m_sSocket ( NULL )
++		, m_sIndex ( NULL )
++		, m_iPort ( 0 )
++		, m_bSphinxQL ( false )
++		, m_iTableNameLen ( 0 )
++		, m_iUseCount ( 1 )
++		, m_pTableQueryCharset ( NULL )
++
++		, m_iTableFields ( 0 )
++		, m_sTableField ( NULL )
++		, m_eTableFieldType ( NULL )
++	{
++		thr_lock_init ( &m_tLock );
++		pthread_mutex_init ( &m_tMutex, MY_MUTEX_INIT_FAST );
++	}
++
++	~CSphSEShare ()
++	{
++		pthread_mutex_destroy ( &m_tMutex );
++		thr_lock_delete ( &m_tLock );
++
++		SafeDeleteArray ( m_sTable );
++		SafeDeleteArray ( m_sScheme );
++		ResetTable ();
++	}
++
++	void ResetTable ()
++	{
++		for ( int i=0; i<m_iTableFields; i++ )
++			SafeDeleteArray ( m_sTableField[i] );
++		SafeDeleteArray ( m_sTableField );
++		SafeDeleteArray ( m_eTableFieldType );
++	}
++};
++
++/// schema attribute
++struct CSphSEAttr
++{
++	char *			m_sName;		///< attribute name (received from Sphinx)
++	uint32			m_uType;		///< attribute type (received from Sphinx)
++	int				m_iField;		///< field index in current table (-1 if none)
++
++	CSphSEAttr()
++		: m_sName ( NULL )
++		, m_uType ( SPH_ATTR_NONE )
++		, m_iField ( -1 )
++	{}
++
++	~CSphSEAttr ()
++	{
++		SafeDeleteArray ( m_sName );
++	}
++};
++
++/// word stats
++struct CSphSEWordStats
++{
++	char *			m_sWord;
++	int				m_iDocs;
++	int				m_iHits;
++
++	CSphSEWordStats ()
++		: m_sWord ( NULL )
++		, m_iDocs ( 0 )
++		, m_iHits ( 0 )
++	{}
++
++	~CSphSEWordStats ()
++	{
++		SafeDeleteArray ( m_sWord );
++	}
++};
++
++/// request stats
++struct CSphSEStats
++{
++public:
++	int					m_iMatchesTotal;
++	int					m_iMatchesFound;
++	int					m_iQueryMsec;
++	int					m_iWords;
++	CSphSEWordStats *	m_dWords;
++	bool				m_bLastError;
++	char				m_sLastMessage[1024];
++
++	CSphSEStats()
++		: m_dWords ( NULL )
++	{
++		Reset ();
++	}
++
++	void Reset ()
++	{
++		m_iMatchesTotal = 0;
++		m_iMatchesFound = 0;
++		m_iQueryMsec = 0;
++		m_iWords = 0;
++		SafeDeleteArray ( m_dWords );
++		m_bLastError = false;
++		m_sLastMessage[0] = '\0';
++	}
++
++	~CSphSEStats()
++	{
++		Reset ();
++	}
++};
++
++/// thread local storage
++struct CSphSEThreadData
++{
++	static const int	MAX_QUERY_LEN	= 262144; // 256k should be enough, right?
++
++	bool				m_bStats;
++	CSphSEStats			m_tStats;
++
++	bool				m_bQuery;
++	char				m_sQuery[MAX_QUERY_LEN];
++
++	CHARSET_INFO *		m_pQueryCharset;
++
++	bool				m_bReplace;		///< are we doing an INSERT or REPLACE
++
++	bool				m_bCondId;		///< got a value from condition pushdown
++	longlong			m_iCondId;		///< value acquired from id=value condition pushdown
++	bool				m_bCondDone;	///< index_read() is now over
++
++	CSphSEThreadData ()
++		: m_bStats ( false )
++		, m_bQuery ( false )
++		, m_pQueryCharset ( NULL )
++		, m_bReplace ( false )
++		, m_bCondId ( false )
++		, m_iCondId ( 0 )
++		, m_bCondDone ( false )
++	{}
++};
++
++/// filter types
++enum ESphFilter
++{
++	SPH_FILTER_VALUES		= 0,	///< filter by integer values set
++	SPH_FILTER_RANGE		= 1,	///< filter by integer range
++	SPH_FILTER_FLOATRANGE	= 2		///< filter by float range
++};
++
++
++/// search query filter
++struct CSphSEFilter
++{
++public:
++	ESphFilter		m_eType;
++	char *			m_sAttrName;
++	longlong		m_uMinValue;
++	longlong		m_uMaxValue;
++	float			m_fMinValue;
++	float			m_fMaxValue;
++	int				m_iValues;
++	longlong *		m_pValues;
++	int				m_bExclude;
++
++public:
++	CSphSEFilter ()
++		: m_eType ( SPH_FILTER_VALUES )
++		, m_sAttrName ( NULL )
++		, m_uMinValue ( 0 )
++		, m_uMaxValue ( UINT_MAX )
++		, m_fMinValue ( 0.0f )
++		, m_fMaxValue ( 0.0f )
++		, m_iValues ( 0 )
++		, m_pValues ( NULL )
++		, m_bExclude ( 0 )
++	{
++	}
++
++	~CSphSEFilter ()
++	{
++		SafeDeleteArray ( m_pValues );
++	}
++};
++
++
++/// float vs dword conversion
++inline uint32 sphF2DW ( float f )	{ union { float f; uint32 d; } u; u.f = f; return u.d; }
++
++/// dword vs float conversion
++inline float sphDW2F ( uint32 d )	{ union { float f; uint32 d; } u; u.d = d; return u.f; }
++
++
++/// client-side search query
++struct CSphSEQuery
++{
++public:
++	const char *	m_sHost;
++	int				m_iPort;
++
++private:
++	char *			m_sQueryBuffer;
++
++	const char *	m_sIndex;
++	int				m_iOffset;
++	int				m_iLimit;
++
++	bool			m_bQuery;
++	char *			m_sQuery;
++	uint32 *		m_pWeights;
++	int				m_iWeights;
++	ESphMatchMode	m_eMode;
++	ESphRankMode	m_eRanker;
++	ESphSortOrder	m_eSort;
++	char *			m_sSortBy;
++	int				m_iMaxMatches;
++	int				m_iMaxQueryTime;
++	uint32			m_iMinID;
++	uint32			m_iMaxID;
++
++	int				m_iFilters;
++	CSphSEFilter	m_dFilters[SPHINXSE_MAX_FILTERS];
++
++	ESphGroupBy		m_eGroupFunc;
++	char *			m_sGroupBy;
++	char *			m_sGroupSortBy;
++	int				m_iCutoff;
++	int				m_iRetryCount;
++	int				m_iRetryDelay;
++	char *			m_sGroupDistinct;							///< points to query buffer; do NOT delete
++	int				m_iIndexWeights;
++	char *			m_sIndexWeight[SPHINXSE_MAX_FILTERS];		///< points to query buffer; do NOT delete
++	int				m_iIndexWeight[SPHINXSE_MAX_FILTERS];
++	int				m_iFieldWeights;
++	char *			m_sFieldWeight[SPHINXSE_MAX_FILTERS];		///< points to query buffer; do NOT delete
++	int				m_iFieldWeight[SPHINXSE_MAX_FILTERS];
++
++	bool			m_bGeoAnchor;
++	char *			m_sGeoLatAttr;
++	char *			m_sGeoLongAttr;
++	float			m_fGeoLatitude;
++	float			m_fGeoLongitude;
++
++	char *			m_sComment;
++	char *			m_sSelect;
++
++	struct Override_t
++	{
++		union Value_t
++		{
++			uint32		m_uValue;
++			longlong	m_iValue64;
++			float		m_fValue;
++		};
++		char *						m_sName; ///< points to query buffer
++		int							m_iType;
++		Dynamic_array<ulonglong>	m_dIds;
++		Dynamic_array<Value_t>		m_dValues;
++	};
++	Dynamic_array<Override_t *> m_dOverrides;
++
++public:
++	char			m_sParseError[256];
++
++public:
++	CSphSEQuery ( const char * sQuery, int iLength, const char * sIndex );
++	~CSphSEQuery ();
++
++	bool			Parse ();
++	int				BuildRequest ( char ** ppBuffer );
++
++protected:
++	char *			m_pBuf;
++	char *			m_pCur;
++	int				m_iBufLeft;
++	bool			m_bBufOverrun;
++
++	template < typename T > int ParseArray ( T ** ppValues, const char * sValue );
++	bool			ParseField ( char * sField );
++
++	void			SendBytes ( const void * pBytes, int iBytes );
++	void			SendWord ( short int v )		{ v = ntohs(v); SendBytes ( &v, sizeof(v) ); }
++	void			SendInt ( int v )				{ v = ntohl(v); SendBytes ( &v, sizeof(v) ); }
++	void			SendDword ( uint v )			{ v = ntohl(v) ;SendBytes ( &v, sizeof(v) ); }
++	void			SendUint64 ( ulonglong v )		{ SendDword ( (uint)(v>>32) ); SendDword ( (uint)(v&0xFFFFFFFFUL) ); }
++	void			SendString ( const char * v )	{ int iLen = strlen(v); SendDword(iLen); SendBytes ( v, iLen ); }
++	void			SendFloat ( float v )			{ SendDword ( sphF2DW(v) ); }
++};
++
++template int CSphSEQuery::ParseArray<uint32> ( uint32 **, const char * );
++template int CSphSEQuery::ParseArray<longlong> ( longlong **, const char * );
++
++//////////////////////////////////////////////////////////////////////////////
++
++#if MYSQL_VERSION_ID>50100
++
++#if MYSQL_VERSION_ID<50114
++#error Sphinx SE requires MySQL 5.1.14 or higher if compiling for 5.1.x series!
++#endif
++
++static handler *	sphinx_create_handler ( handlerton * hton, TABLE_SHARE * table, MEM_ROOT * mem_root );
++static int			sphinx_init_func ( void * p );
++static int			sphinx_close_connection ( handlerton * hton, THD * thd );
++static int			sphinx_panic ( handlerton * hton, enum ha_panic_function flag );
++static bool			sphinx_show_status ( handlerton * hton, THD * thd, stat_print_fn * stat_print, enum ha_stat_type stat_type );
++
++#else
++
++static bool			sphinx_init_func_for_handlerton ();
++static int			sphinx_close_connection ( THD * thd );
++bool				sphinx_show_status ( THD * thd );
++
++#endif // >50100
++
++//////////////////////////////////////////////////////////////////////////////
++
++static const char	sphinx_hton_name[]		= "SPHINX";
++static const char	sphinx_hton_comment[]	= "Sphinx storage engine " SPHINXSE_VERSION;
++
++#if MYSQL_VERSION_ID<50100
++handlerton sphinx_hton =
++{
++	#ifdef MYSQL_HANDLERTON_INTERFACE_VERSION
++	MYSQL_HANDLERTON_INTERFACE_VERSION,
++	#endif
++	sphinx_hton_name,
++	SHOW_OPTION_YES,
++	sphinx_hton_comment,
++	DB_TYPE_SPHINX_DB,
++	sphinx_init_func_for_handlerton,
++	0,							// slot
++	0,							// savepoint size
++	sphinx_close_connection,	// close_connection
++	NULL,	// savepoint
++	NULL,	// rollback to savepoint
++	NULL,	// release savepoint
++	NULL,	// commit
++	NULL,	// rollback
++	NULL,	// prepare
++	NULL,	// recover
++	NULL,	// commit_by_xid
++	NULL,	// rollback_by_xid
++	NULL,	// create_cursor_read_view
++	NULL,	// set_cursor_read_view
++	NULL,	// close_cursor_read_view
++	HTON_CAN_RECREATE
++};
++#else
++static handlerton * sphinx_hton_ptr = NULL;
++#endif
++
++//////////////////////////////////////////////////////////////////////////////
++
++// variables for Sphinx shared methods
++pthread_mutex_t		sphinx_mutex;		// mutex to init the hash
++static int			sphinx_init = 0;	// flag whether the hash was initialized
++static HASH			sphinx_open_tables;	// hash used to track open tables
++
++//////////////////////////////////////////////////////////////////////////////
++// INITIALIZATION AND SHUTDOWN
++//////////////////////////////////////////////////////////////////////////////
++
++// hashing function
++#if MYSQL_VERSION_ID>=50120
++typedef size_t GetKeyLength_t;
++#else
++typedef uint GetKeyLength_t;
++#endif
++
++static byte * sphinx_get_key ( const byte * pSharePtr, GetKeyLength_t * pLength, my_bool )
++{
++	CSphSEShare * pShare = (CSphSEShare *) pSharePtr;
++	*pLength = (size_t) pShare->m_iTableNameLen;
++	return (byte*) pShare->m_sTable;
++}
++
++#if MYSQL_VERSION_ID<50100
++static int sphinx_init_func ( void * ) // to avoid unused arg warning
++#else
++static int sphinx_init_func ( void * p )
++#endif
++{
++	SPH_ENTER_FUNC();
++	if ( !sphinx_init )
++	{
++		sphinx_init = 1;
++		void ( pthread_mutex_init ( &sphinx_mutex, MY_MUTEX_INIT_FAST ) );
++		sphinx_hash_init ( &sphinx_open_tables, system_charset_info, 32, 0, 0,
++			sphinx_get_key, 0, 0 );
++
++		#if MYSQL_VERSION_ID > 50100
++		handlerton * hton = (handlerton*) p;
++		hton->state = SHOW_OPTION_YES;
++		hton->db_type = DB_TYPE_FIRST_DYNAMIC;
++		hton->create = sphinx_create_handler;
++		hton->close_connection = sphinx_close_connection;
++		hton->show_status = sphinx_show_status;
++		hton->panic = sphinx_panic;
++		hton->flags = HTON_CAN_RECREATE;
++		#endif
++	}
++	SPH_RET(0);
++}
++
++
++#if MYSQL_VERSION_ID<50100
++static bool sphinx_init_func_for_handlerton ()
++{
++	return sphinx_init_func ( &sphinx_hton );
++}
++#endif
++
++
++#if MYSQL_VERSION_ID>50100
++
++static int sphinx_close_connection ( handlerton * hton, THD * thd )
++{
++	// deallocate common handler data
++	SPH_ENTER_FUNC();
++	void ** tmp = thd_ha_data ( thd, hton );
++	CSphSEThreadData * pTls = (CSphSEThreadData*) (*tmp);
++	SafeDelete ( pTls );
++	*tmp = NULL;
++	SPH_RET(0);
++}
++
++
++static int sphinx_done_func ( void * )
++{
++	SPH_ENTER_FUNC();
++
++	int error = 0;
++	if ( sphinx_init )
++	{
++		sphinx_init = 0;
++		if ( sphinx_open_tables.records )
++			error = 1;
++		sphinx_hash_free ( &sphinx_open_tables );
++		pthread_mutex_destroy ( &sphinx_mutex );
++	}
++
++	SPH_RET(0);
++}
++
++
++static int sphinx_panic ( handlerton * hton, enum ha_panic_function )
++{
++	return sphinx_done_func ( hton );
++}
++
++#else
++
++static int sphinx_close_connection ( THD * thd )
++{
++	// deallocate common handler data
++	SPH_ENTER_FUNC();
++	CSphSEThreadData * pTls = (CSphSEThreadData*) thd->ha_data[sphinx_hton.slot];
++	SafeDelete ( pTls );
++	thd->ha_data[sphinx_hton.slot] = NULL;
++	SPH_RET(0);
++}
++
++#endif // >50100
++
++//////////////////////////////////////////////////////////////////////////////
++// SHOW STATUS
++//////////////////////////////////////////////////////////////////////////////
++
++#if MYSQL_VERSION_ID>50100
++static bool sphinx_show_status ( handlerton * hton, THD * thd, stat_print_fn * stat_print,
++	enum ha_stat_type )
++#else
++bool sphinx_show_status ( THD * thd )
++#endif
++{
++	SPH_ENTER_FUNC();
++
++#if MYSQL_VERSION_ID<50100
++	Protocol * protocol = thd->protocol;
++	List<Item> field_list;
++#endif
++
++	char buf1[IO_SIZE];
++	uint buf1len;
++	char buf2[IO_SIZE];
++	uint buf2len = 0;
++	String words;
++
++	buf1[0] = '\0';
++	buf2[0] = '\0';
++
++
++#if MYSQL_VERSION_ID>50100
++	// 5.1.x style stats
++	CSphSEThreadData * pTls = (CSphSEThreadData*) ( *thd_ha_data ( thd, hton ) );
++
++#define LOC_STATS(_key,_keylen,_val,_vallen) \
++	stat_print ( thd, sphinx_hton_name, strlen(sphinx_hton_name), _key, _keylen, _val, _vallen );
++
++#else
++	// 5.0.x style stats
++	if ( have_sphinx_db!=SHOW_OPTION_YES )
++	{
++		my_message ( ER_NOT_SUPPORTED_YET,
++			"failed to call SHOW SPHINX STATUS: --skip-sphinx was specified",
++			MYF(0) );
++		SPH_RET(TRUE);
++	}
++	CSphSEThreadData * pTls = (CSphSEThreadData*) thd->ha_data[sphinx_hton.slot];
++
++	field_list.push_back ( new Item_empty_string ( "Type", 10 ) );
++	field_list.push_back ( new Item_empty_string ( "Name", FN_REFLEN ) );
++	field_list.push_back ( new Item_empty_string ( "Status", 10 ) );
++	if ( protocol->send_fields ( &field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF ) )
++		SPH_RET(TRUE);
++
++#define LOC_STATS(_key,_keylen,_val,_vallen) \
++	protocol->prepare_for_resend (); \
++	protocol->store ( "SPHINX", 6, system_charset_info ); \
++	protocol->store ( _key, _keylen, system_charset_info ); \
++	protocol->store ( _val, _vallen, system_charset_info ); \
++	if ( protocol->write() ) \
++		SPH_RET(TRUE);
++
++#endif
++
++
++	// show query stats
++	if ( pTls && pTls->m_bStats )
++	{
++		const CSphSEStats * pStats = &pTls->m_tStats;
++		buf1len = my_snprintf ( buf1, sizeof(buf1),
++			"total: %d, total found: %d, time: %d, words: %d",
++			pStats->m_iMatchesTotal, pStats->m_iMatchesFound, pStats->m_iQueryMsec, pStats->m_iWords );
++
++		LOC_STATS ( "stats", 5, buf1, buf1len );
++
++		if ( pStats->m_iWords )
++		{
++			for ( int i=0; i<pStats->m_iWords; i++ )
++			{
++				CSphSEWordStats & tWord = pStats->m_dWords[i];
++				buf2len = my_snprintf ( buf2, sizeof(buf2), "%s%s:%d:%d ",
++					buf2, tWord.m_sWord, tWord.m_iDocs, tWord.m_iHits );
++			}
++
++			// convert it if we can
++			const char * sWord = buf2;
++			int iWord = buf2len;
++
++			String sBuf3;
++			if ( pTls->m_pQueryCharset )
++			{
++				uint iErrors;
++				sBuf3.copy ( buf2, buf2len, pTls->m_pQueryCharset, system_charset_info, &iErrors );
++				sWord = sBuf3.c_ptr();
++				iWord = sBuf3.length();
++			}
++
++			LOC_STATS ( "words", 5, sWord, iWord );
++		}
++	}
++
++	// show last error or warning (either in addition to stats, or on their own)
++	if ( pTls && pTls->m_tStats.m_sLastMessage && pTls->m_tStats.m_sLastMessage[0] )
++	{
++		const char * sMessageType = pTls->m_tStats.m_bLastError ? "error" : "warning";
++
++		LOC_STATS (
++			sMessageType, strlen ( sMessageType ),
++			pTls->m_tStats.m_sLastMessage, strlen ( pTls->m_tStats.m_sLastMessage ) );
++
++	} else
++	{
++		// well, nothing to show just yet
++#if MYSQL_VERSION_ID < 50100
++		LOC_STATS ( "stats", 5, "no query has been executed yet", sizeof("no query has been executed yet")-1 );
++#endif
++	}
++
++#if MYSQL_VERSION_ID < 50100
++	send_eof(thd);
++#endif
++
++	SPH_RET(FALSE);
++}
++
++//////////////////////////////////////////////////////////////////////////////
++// HELPERS
++//////////////////////////////////////////////////////////////////////////////
++
++static char * sphDup ( const char * sSrc, int iLen=-1 )
++{
++	if ( !sSrc )
++		return NULL;
++
++	if ( iLen<0 )
++		iLen = strlen(sSrc);
++
++	char * sRes = new char [ 1+iLen ];
++	memcpy ( sRes, sSrc, iLen );
++	sRes[iLen] = '\0';
++	return sRes;
++}
++
++
++static void sphLogError ( const char * sFmt, ... )
++{
++	// emit timestamp
++#ifdef __WIN__
++	SYSTEMTIME t;
++	GetLocalTime ( &t );
++
++	fprintf ( stderr, "%02d%02d%02d %2d:%02d:%02d SphinxSE: internal error: ",
++		(int)t.wYear % 100, (int)t.wMonth, (int)t.wDay,
++		(int)t.wHour, (int)t.wMinute, (int)t.wSecond );
++#else
++	// Unix version
++	time_t tStamp;
++	time ( &tStamp );
++
++	struct tm * pParsed;
++#ifdef HAVE_LOCALTIME_R
++	struct tm tParsed;
++	localtime_r ( &tStamp, &tParsed );
++	pParsed = &tParsed;
++#else
++	pParsed = localtime ( &tStamp );
++#endif // HAVE_LOCALTIME_R
++
++	fprintf ( stderr, "%02d%02d%02d %2d:%02d:%02d SphinxSE: internal error: ",
++		pParsed->tm_year % 100, pParsed->tm_mon + 1, pParsed->tm_mday,
++		pParsed->tm_hour, pParsed->tm_min, pParsed->tm_sec);
++#endif // __WIN__
++
++	// emit message
++	va_list ap;
++	va_start ( ap, sFmt );
++	vfprintf ( stderr, sFmt, ap );
++	va_end ( ap );
++
++	// emit newline
++	fprintf ( stderr, "\n" );
++}
++
++
++
++// the following scheme variants are recognized
++//
++// sphinx://host[:port]/index
++// sphinxql://host[:port]/index
++// unix://unix/domain/socket[:index]
++static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate )
++{
++	SPH_ENTER_FUNC();
++
++	if ( share )
++	{
++		// check incoming stuff
++		if ( !table )
++		{
++			sphLogError ( "table==NULL in ParseUrl()" );
++			return false;
++		}
++		if ( !table->s )
++		{
++			sphLogError ( "(table->s)==NULL in ParseUrl()" );
++			return false;
++		}
++
++		// free old stuff
++		share->ResetTable ();
++
++		// fill new stuff
++		share->m_iTableFields = table->s->fields;
++		if ( share->m_iTableFields )
++		{
++			share->m_sTableField = new char * [ share->m_iTableFields ];
++			share->m_eTableFieldType = new enum_field_types [ share->m_iTableFields ];
++
++			for ( int i=0; i<share->m_iTableFields; i++ )
++			{
++				share->m_sTableField[i] = sphDup ( table->field[i]->field_name );
++				share->m_eTableFieldType[i] = table->field[i]->type();
++			}
++		}
++	}
++
++	// defaults
++	bool bOk = true;
++	bool bQL = false;
++	char * sScheme = NULL;
++	char * sHost = SPHINXAPI_DEFAULT_HOST;
++	char * sIndex = SPHINXAPI_DEFAULT_INDEX;
++	int iPort = SPHINXAPI_DEFAULT_PORT;
++
++	// parse connection string, if any
++	while ( table->s->connect_string.length!=0 )
++	{
++		sScheme = sphDup ( table->s->connect_string.str, table->s->connect_string.length );
++
++		sHost = strstr ( sScheme, "://" );
++		if ( !sHost )
++		{
++			bOk = false;
++			break;
++		}
++		sHost[0] = '\0';
++		sHost += 3;
++
++		/////////////////////////////
++		// sphinxapi via unix socket
++		/////////////////////////////
++
++		if ( !strcmp ( sScheme, "unix" ) )
++		{
++			sHost--; // reuse last slash
++			iPort = 0;
++			if (!( sIndex = strrchr ( sHost, ':' ) ))
++				sIndex = SPHINXAPI_DEFAULT_INDEX;
++			else
++			{
++				*sIndex++ = '\0';
++				if ( !*sIndex )
++					sIndex = SPHINXAPI_DEFAULT_INDEX;
++			}
++			bOk = true;
++			break;
++		}
++
++		/////////////////////
++		// sphinxapi via tcp
++		/////////////////////
++
++		if ( !strcmp ( sScheme, "sphinx" ) )
++		{
++			char * sPort = strchr ( sHost, ':' );
++			if ( sPort )
++			{
++				*sPort++ = '\0';
++				if ( *sPort )
++				{
++					sIndex = strchr ( sPort, '/' );
++					if ( sIndex )
++						*sIndex++ = '\0';
++					else
++						sIndex = SPHINXAPI_DEFAULT_INDEX;
++
++					iPort = atoi(sPort);
++					if ( !iPort )
++						iPort = SPHINXAPI_DEFAULT_PORT;
++				}
++			} else
++			{
++				sIndex = strchr ( sHost, '/' );
++				if ( sIndex )
++					*sIndex++ = '\0';
++				else
++					sIndex = SPHINXAPI_DEFAULT_INDEX;
++			}
++			bOk = true;
++			break;
++		}
++
++		////////////
++		// sphinxql
++		////////////
++
++		if ( !strcmp ( sScheme, "sphinxql" ) )
++		{
++			bQL = true;
++			iPort = SPHINXQL_DEFAULT_PORT;
++
++			// handle port
++			char * sPort = strchr ( sHost, ':' );
++			sIndex = sHost; // starting point for index name search
++
++			if ( sPort )
++			{
++				*sPort++ = '\0';
++				sIndex = sPort;
++
++				iPort = atoi(sPort);
++				if ( !iPort )
++				{
++					bOk = false; // invalid port; can report ER_FOREIGN_DATA_STRING_INVALID
++					break;
++				}
++			}
++
++			// find index
++			sIndex = strchr ( sIndex, '/' );
++			if ( sIndex )
++				*sIndex++ = '\0';
++
++			// final checks
++			// host and index names are required
++			bOk = ( sHost && *sHost && sIndex && *sIndex );
++			break;
++		}
++
++		// unknown case
++		bOk = false;
++		break;
++	}
++
++	if ( !bOk )
++	{
++		my_error ( bCreate ? ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE : ER_FOREIGN_DATA_STRING_INVALID,
++			MYF(0), table->s->connect_string );
++	} else
++	{
++		if ( share )
++		{
++			SafeDeleteArray ( share->m_sScheme );
++			share->m_sScheme = sScheme;
++			share->m_sHost = sHost;
++			share->m_sIndex = sIndex;
++			share->m_iPort = (ushort)iPort;
++			share->m_bSphinxQL = bQL;
++		}
++	}
++	if ( !bOk && !share )
++		SafeDeleteArray ( sScheme );
++
++	SPH_RET(bOk);
++}
++
++
++// Example of simple lock controls. The "share" it creates is structure we will
++// pass to each sphinx handler. Do you have to have one of these? Well, you have
++// pieces that are used for locking, and they are needed to function.
++// Look up (or create) the per-table share for table_name under the global
++// engine mutex. Returns NULL on allocation/parse/hash failure.
++static CSphSEShare * get_share ( const char * table_name, TABLE * table )
++{
++	SPH_ENTER_FUNC();
++	pthread_mutex_lock ( &sphinx_mutex );
++
++	CSphSEShare * pShare = NULL;
++	for ( ;; )
++	{
++		// check if we already have this share
++#if MYSQL_VERSION_ID>=50120
++		pShare = (CSphSEShare*) sphinx_hash_search ( &sphinx_open_tables, (const uchar *) table_name, strlen(table_name) );
++#else
++#ifdef __WIN__
++		pShare = (CSphSEShare*) sphinx_hash_search ( &sphinx_open_tables, (const byte *) table_name, strlen(table_name) );
++#else
++		pShare = (CSphSEShare*) sphinx_hash_search ( &sphinx_open_tables, table_name, strlen(table_name) );
++#endif // win
++#endif // pre-5.1.20
++
++		if ( pShare )
++		{
++			// found; bump the refcount (free_share() drops it)
++			pShare->m_iUseCount++;
++			break;
++		}
++
++		// try to allocate new share
++		// NOTE(review): plain operator new throws std::bad_alloc rather than
++		// returning NULL, so this guard only helps with a nothrow allocator --
++		// confirm the build's allocation policy
++		pShare = new CSphSEShare ();
++		if ( !pShare )
++			break;
++
++		// try to setup it
++		if ( !ParseUrl ( pShare, table, false ) )
++		{
++			SafeDelete ( pShare );
++			break;
++		}
++
++		// presumably field #2 is the query column whose charset is needed for
++		// API-mode conversion -- confirm against the required table layout
++		if ( !pShare->m_bSphinxQL )
++			pShare->m_pTableQueryCharset = table->field[2]->charset();
++
++		// try to hash it
++		pShare->m_iTableNameLen = strlen(table_name);
++		pShare->m_sTable = sphDup ( table_name );
++		if ( my_hash_insert ( &sphinx_open_tables, (const byte *)pShare ) )
++		{
++			SafeDelete ( pShare );
++			break;
++		}
++
++		// all seems fine
++		break;
++	}
++
++	pthread_mutex_unlock ( &sphinx_mutex );
++	SPH_RET(pShare);
++}
++
++
++// Free lock controls. We call this whenever we close a table. If the table had
++// the last reference to the share then we free memory associated with it.
++// Drop one reference on pShare under the global mutex; the last owner
++// removes it from the open-tables hash and frees it. Always returns 0.
++static int free_share ( CSphSEShare * pShare )
++{
++	SPH_ENTER_FUNC();
++	pthread_mutex_lock ( &sphinx_mutex );
++
++	if ( !--pShare->m_iUseCount )
++	{
++		sphinx_hash_delete ( &sphinx_open_tables, (byte *)pShare );
++		SafeDelete ( pShare );
++	}
++
++	pthread_mutex_unlock ( &sphinx_mutex );
++	SPH_RET(0);
++}
++
++
++#if MYSQL_VERSION_ID>50100
++// MySQL 5.1+ factory: instantiate a handler in the server-supplied MEM_ROOT.
++// Also caches the handlerton pointer in the global for later lookups.
++static handler * sphinx_create_handler ( handlerton * hton, TABLE_SHARE * table, MEM_ROOT * mem_root )
++{
++	sphinx_hton_ptr = hton;
++	return new ( mem_root ) ha_sphinx ( hton, table );
++}
++#endif
++
++//////////////////////////////////////////////////////////////////////////////
++// CLIENT-SIDE REQUEST STUFF
++//////////////////////////////////////////////////////////////////////////////
++
++// Build a query object over a private copy of the raw query string.
++// Defaults match the searchd API defaults (limit 20, MATCH_ALL, etc).
++// A trailing ';' is appended so Parse() always sees a terminated clause.
++CSphSEQuery::CSphSEQuery ( const char * sQuery, int iLength, const char * sIndex )
++	: m_sHost ( "" )
++	, m_iPort ( 0 )
++	, m_sIndex ( sIndex ? sIndex : "*" )
++	, m_iOffset ( 0 )
++	, m_iLimit ( 20 )
++	, m_bQuery ( false )
++	, m_sQuery ( "" )
++	, m_pWeights ( NULL )
++	, m_iWeights ( 0 )
++	, m_eMode ( SPH_MATCH_ALL )
++	, m_eRanker ( SPH_RANK_PROXIMITY_BM25 )
++	, m_eSort ( SPH_SORT_RELEVANCE )
++	, m_sSortBy ( "" )
++	, m_iMaxMatches ( 1000 )
++	, m_iMaxQueryTime ( 0 )
++	, m_iMinID ( 0 )
++	, m_iMaxID ( 0 )
++	, m_iFilters ( 0 )
++	, m_eGroupFunc ( SPH_GROUPBY_DAY )
++	, m_sGroupBy ( "" )
++	, m_sGroupSortBy ( "@group desc" )
++	, m_iCutoff ( 0 )
++	, m_iRetryCount ( 0 )
++	, m_iRetryDelay ( 0 )
++	, m_sGroupDistinct ( "" )
++	, m_iIndexWeights ( 0 )
++	, m_iFieldWeights ( 0 )
++	, m_bGeoAnchor ( false )
++	, m_sGeoLatAttr ( "" )
++	, m_sGeoLongAttr ( "" )
++	, m_fGeoLatitude ( 0.0f )
++	, m_fGeoLongitude ( 0.0f )
++	, m_sComment ( "" )
++	, m_sSelect ( "" )
++
++	, m_pBuf ( NULL )
++	, m_pCur ( NULL )
++	, m_iBufLeft ( 0 )
++	, m_bBufOverrun ( false )
++{
++	// +2 bytes: the forced trailing ';' plus the NUL terminator
++	m_sQueryBuffer = new char [ iLength+2 ];
++	memcpy ( m_sQueryBuffer, sQuery, iLength );
++	m_sQueryBuffer[iLength] = ';';
++	m_sQueryBuffer[iLength+1] = '\0';
++}
++
++
++// Release the query buffer, weights, request buffer, and any override
++// structs accumulated by ParseField().
++CSphSEQuery::~CSphSEQuery ()
++{
++	SPH_ENTER_METHOD();
++	SafeDeleteArray ( m_sQueryBuffer );
++	SafeDeleteArray ( m_pWeights );
++	SafeDeleteArray ( m_pBuf );
++	for ( int i=0; i<m_dOverrides.elements(); i++ )
++		SafeDelete ( m_dOverrides.at(i) );
++	SPH_VOID_RET();
++}
++
++
++// Parse a list of integers out of sValue (any non-digit runs act as
++// separators; a '-' immediately preceding a number negates it) into a
++// freshly allocated array stored via *ppValues. Returns the value count
++// (0 means nothing was allocated).
++template < typename T >
++int CSphSEQuery::ParseArray ( T ** ppValues, const char * sValue )
++{
++	SPH_ENTER_METHOD();
++
++	assert ( ppValues );
++	assert ( !(*ppValues) );
++
++	const char * pValue;
++	bool bPrevDigit = false;
++	int iValues = 0;
++
++	// count the values
++	for ( pValue=sValue; *pValue; pValue++ )
++	{
++		bool bDigit = (*pValue)>='0' && (*pValue)<='9';
++		if ( bDigit && !bPrevDigit )
++			iValues++;
++		bPrevDigit = bDigit;
++	}
++	if ( !iValues )
++		SPH_RET(0);
++
++	// extract the values
++	T * pValues = new T [ iValues ];
++	*ppValues = pValues;
++
++	int iIndex = 0, iSign = 1;
++	T uValue = 0;
++
++	bPrevDigit = false;
++	// loop runs one past the NUL so the final number gets flushed by the
++	// digit-to-non-digit transition before the terminating break
++	for ( pValue=sValue ;; pValue++ )
++	{
++		bool bDigit = (*pValue)>='0' && (*pValue)<='9';
++
++		if ( bDigit )
++		{
++			if ( !bPrevDigit )
++				uValue = 0;
++			uValue = uValue*10 + ( (*pValue)-'0' );
++		} else if ( bPrevDigit )
++		{
++			assert ( iIndex<iValues );
++			pValues [ iIndex++ ] = uValue * iSign;
++			iSign = 1;
++		} else if ( *pValue=='-' )
++			iSign = -1;
++
++		bPrevDigit = bDigit;
++		if ( !*pValue )
++			break;
++	}
++
++	SPH_RET ( iValues );
++}
++
++
++// Trim leading and trailing whitespace in place; returns a pointer into s
++// (past the leading blanks) with a NUL written after the last non-blank.
++// NOTE(review): isspace() on a plain char is undefined for negative values
++// (bytes >= 0x80 when char is signed) -- an (unsigned char) cast would be
++// safer for non-ASCII input; confirm inputs are ASCII-only
++static char * chop ( char * s )
++{
++	while ( *s && isspace(*s) )
++		s++;
++
++	char * p = s + strlen(s);
++	while ( p>s && isspace ( p[-1] ) )
++		p--;
++	*p = '\0';
++
++	return s;
++}
++
++
++// True for characters allowed in attribute/index identifiers: [0-9a-zA-Z_].
++// Deliberately locale-independent (unlike isalnum()).
++static bool myisattr ( char c )
++{
++	return
++		( c>='0' && c<='9' ) ||
++		( c>='a' && c<='z' ) ||
++		( c>='A' && c<='Z' ) ||
++		c=='_';
++}
++
++
++// Parse a single semicolon-delimited clause of the SphinxSE query string.
++// A clause without an (unescaped) '=' is taken as the search query itself;
++// otherwise it is a "name=value" option which is matched against the known
++// option names and stored into the corresponding member. On error, fills
++// m_sParseError and returns false. Modifies sField in place (splitting and
++// unescaping).
++bool CSphSEQuery::ParseField ( char * sField )
++{
++	SPH_ENTER_METHOD();
++
++	// look for option name/value separator
++	char * sValue = strchr ( sField, '=' );
++	if ( !sValue || sValue==sField || sValue[-1]=='\\' )
++	{
++		// by default let's assume it's just query
++		if ( sField[0] )
++		{
++			if ( m_bQuery )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "search query already specified; '%s' is redundant", sField );
++				SPH_RET(false);
++			} else
++			{
++				m_sQuery = sField;
++				m_bQuery = true;
++
++				// unescape only 1st one
++				// (collapse backslash pairs; a lone backslash escapes the next char)
++				char *s = sField, *d = sField;
++				int iSlashes = 0;
++				while ( *s )
++				{
++					iSlashes = ( *s=='\\' ) ? iSlashes+1 : 0;
++					if ( ( iSlashes%2 )==0 ) *d++ = *s;
++					s++;
++				}
++				*d = '\0';
++			}
++		}
++		SPH_RET(true);
++	}
++
++	// split
++	*sValue++ = '\0';
++	sValue = chop ( sValue );
++	// precomputed integer form; only meaningful for numeric options
++	int iValue = atoi ( sValue );
++
++	// handle options
++	char * sName = chop ( sField );
++
++	if ( !strcmp ( sName, "query" ) )			m_sQuery = sValue;
++	else if ( !strcmp ( sName, "host" ) )		m_sHost = sValue;
++	else if ( !strcmp ( sName, "port" ) )		m_iPort = iValue;
++	else if ( !strcmp ( sName, "index" ) )		m_sIndex = sValue;
++	else if ( !strcmp ( sName, "offset" ) )		m_iOffset = iValue;
++	else if ( !strcmp ( sName, "limit" ) )		m_iLimit = iValue;
++	else if ( !strcmp ( sName, "weights" ) )	m_iWeights = ParseArray<uint32> ( &m_pWeights, sValue );
++	else if ( !strcmp ( sName, "minid" ) )		m_iMinID = iValue;
++	else if ( !strcmp ( sName, "maxid" ) )		m_iMaxID = iValue;
++	else if ( !strcmp ( sName, "maxmatches" ) )	m_iMaxMatches = iValue;
++	else if ( !strcmp ( sName, "maxquerytime" ) )	m_iMaxQueryTime = iValue;
++	else if ( !strcmp ( sName, "groupsort" ) )	m_sGroupSortBy = sValue;
++	else if ( !strcmp ( sName, "distinct" ) )	m_sGroupDistinct = sValue;
++	else if ( !strcmp ( sName, "cutoff" ) )		m_iCutoff = iValue;
++	else if ( !strcmp ( sName, "comment" ) )	m_sComment = sValue;
++	else if ( !strcmp ( sName, "select" ) )		m_sSelect = sValue;
++
++	else if ( !strcmp ( sName, "mode" ) )
++	{
++		m_eMode = SPH_MATCH_ALL;
++		if ( !strcmp ( sValue, "any" ) )			m_eMode = SPH_MATCH_ANY;
++		else if ( !strcmp ( sValue, "phrase" ) )	m_eMode = SPH_MATCH_PHRASE;
++		else if ( !strcmp ( sValue, "boolean" ) )	m_eMode = SPH_MATCH_BOOLEAN;
++		else if ( !strcmp ( sValue, "ext" ) )		m_eMode = SPH_MATCH_EXTENDED;
++		else if ( !strcmp ( sValue, "extended" ) )	m_eMode = SPH_MATCH_EXTENDED;
++		else if ( !strcmp ( sValue, "ext2" ) )		m_eMode = SPH_MATCH_EXTENDED2;
++		else if ( !strcmp ( sValue, "extended2" ) )	m_eMode = SPH_MATCH_EXTENDED2;
++		else if ( !strcmp ( sValue, "all" ) )		m_eMode = SPH_MATCH_ALL;
++		else if ( !strcmp ( sValue, "fullscan" ) )	m_eMode = SPH_MATCH_FULLSCAN;
++		else
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "unknown matching mode '%s'", sValue );
++			SPH_RET(false);
++		}
++	} else if ( !strcmp ( sName, "ranker" ) )
++	{
++		m_eRanker = SPH_RANK_PROXIMITY_BM25;
++		if ( !strcmp ( sValue, "proximity_bm25" ) )	m_eRanker = SPH_RANK_PROXIMITY_BM25;
++		else if ( !strcmp ( sValue, "bm25" ) )		m_eRanker = SPH_RANK_BM25;
++		else if ( !strcmp ( sValue, "none" ) )		m_eRanker = SPH_RANK_NONE;
++		else if ( !strcmp ( sValue, "wordcount" ) )	m_eRanker = SPH_RANK_WORDCOUNT;
++		else if ( !strcmp ( sValue, "proximity" ) )	m_eRanker = SPH_RANK_PROXIMITY;
++		else if ( !strcmp ( sValue, "matchany" ) )	m_eRanker = SPH_RANK_MATCHANY;
++		else if ( !strcmp ( sValue, "fieldmask" ) )	m_eRanker = SPH_RANK_FIELDMASK;
++		else if ( !strcmp ( sValue, "sph04" ) )		m_eRanker = SPH_RANK_SPH04;
++		else
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "unknown ranking mode '%s'", sValue );
++			SPH_RET(false);
++		}
++	} else if ( !strcmp ( sName, "sort" ) )
++	{
++		// sort value is "<mode-prefix><sort-clause>"; prefix match selects the mode
++		static const struct
++		{
++			const char *	m_sName;
++			ESphSortOrder	m_eSort;
++		} dSortModes[] =
++		{
++			{ "relevance",		SPH_SORT_RELEVANCE },
++			{ "attr_desc:",		SPH_SORT_ATTR_DESC },
++			{ "attr_asc:",		SPH_SORT_ATTR_ASC },
++			{ "time_segments:",	SPH_SORT_TIME_SEGMENTS },
++			{ "extended:",		SPH_SORT_EXTENDED },
++			{ "expr:",			SPH_SORT_EXPR }
++		};
++
++		int i;
++		const int nModes = sizeof(dSortModes)/sizeof(dSortModes[0]);
++		for ( i=0; i<nModes; i++ )
++			if ( !strncmp ( sValue, dSortModes[i].m_sName, strlen ( dSortModes[i].m_sName ) ) )
++		{
++			m_eSort = dSortModes[i].m_eSort;
++			m_sSortBy = sValue + strlen ( dSortModes[i].m_sName );
++			break;
++		}
++		if ( i==nModes )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "unknown sorting mode '%s'", sValue );
++			SPH_RET(false);
++		}
++
++	} else if ( !strcmp ( sName, "groupby" ) )
++	{
++		// groupby value is "<func>:<attr>"; prefix match selects the function
++		static const struct
++		{
++			const char *	m_sName;
++			ESphGroupBy		m_eFunc;
++		} dGroupModes[] =
++		{
++			{ "day:",	SPH_GROUPBY_DAY },
++			{ "week:",	SPH_GROUPBY_WEEK },
++			{ "month:",	SPH_GROUPBY_MONTH },
++			{ "year:",	SPH_GROUPBY_YEAR },
++			{ "attr:",	SPH_GROUPBY_ATTR },
++		};
++
++		int i;
++		const int nModes = sizeof(dGroupModes)/sizeof(dGroupModes[0]);
++		for ( i=0; i<nModes; i++ )
++			if ( !strncmp ( sValue, dGroupModes[i].m_sName, strlen ( dGroupModes[i].m_sName ) ) )
++		{
++			m_eGroupFunc = dGroupModes[i].m_eFunc;
++			m_sGroupBy = sValue + strlen ( dGroupModes[i].m_sName );
++			break;
++		}
++		if ( i==nModes )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "unknown groupby mode '%s'", sValue );
++			SPH_RET(false);
++		}
++
++	} else if ( m_iFilters<SPHINXSE_MAX_FILTERS &&
++		( !strcmp ( sName, "range" ) || !strcmp ( sName, "!range" ) || !strcmp ( sName, "floatrange" ) || !strcmp ( sName, "!floatrange" ) ) )
++	{
++		// value format: "<attr>,<min>,<max>"; malformed input is silently ignored
++		// (the break-without-increment pattern drops the half-built filter)
++		for ( ;; )
++		{
++			char * p = sName;
++			CSphSEFilter & tFilter = m_dFilters [ m_iFilters ];
++			tFilter.m_bExclude = ( *p=='!' ); if ( tFilter.m_bExclude ) p++;
++			tFilter.m_eType = ( *p=='f' ) ? SPH_FILTER_FLOATRANGE : SPH_FILTER_RANGE;
++
++			if (!( p = strchr ( sValue, ',' ) ))
++				break;
++			*p++ = '\0';
++
++			tFilter.m_sAttrName = chop ( sValue );
++			sValue = p;
++
++			if (!( p = strchr ( sValue, ',' ) ))
++				break;
++			*p++ = '\0';
++
++			if ( tFilter.m_eType==SPH_FILTER_RANGE )
++			{
++				tFilter.m_uMinValue = strtoll ( sValue, NULL, 0 );
++				tFilter.m_uMaxValue = strtoll ( p, NULL, 0 );
++			} else
++			{
++				tFilter.m_fMinValue = (float)atof(sValue);
++				tFilter.m_fMaxValue = (float)atof(p);
++			}
++
++			// all ok
++			m_iFilters++;
++			break;
++		}
++
++	} else if ( m_iFilters<SPHINXSE_MAX_FILTERS &&
++		( !strcmp ( sName, "filter" ) || !strcmp ( sName, "!filter" ) ) )
++	{
++		// value format: "<attr> <v1>,<v2>,..."; malformed input silently ignored
++		for ( ;; )
++		{
++			CSphSEFilter & tFilter = m_dFilters [ m_iFilters ];
++			tFilter.m_eType = SPH_FILTER_VALUES;
++			tFilter.m_bExclude = ( strcmp ( sName, "!filter" )==0 );
++
++			// get the attr name
++			while ( (*sValue) && !myisattr(*sValue) )
++				sValue++;
++			if ( !*sValue )
++				break;
++
++			tFilter.m_sAttrName = sValue;
++			while ( (*sValue) && myisattr(*sValue) )
++				sValue++;
++			if ( !*sValue )
++				break;
++			*sValue++ = '\0';
++
++			// get the values
++			tFilter.m_iValues = ParseArray<longlong> ( &tFilter.m_pValues, sValue );
++			if ( !tFilter.m_iValues )
++			{
++				assert ( !tFilter.m_pValues );
++				break;
++			}
++
++			// all ok
++			m_iFilters++;
++			break;
++		}
++
++	} else if ( !strcmp ( sName, "indexweights" ) || !strcmp ( sName, "fieldweights" ) )
++	{
++		// value format: "<name>,<weight>,<name>,<weight>,..."
++		bool bIndex = !strcmp ( sName, "indexweights" );
++		int * pCount = bIndex ? &m_iIndexWeights : &m_iFieldWeights;
++		char ** pNames = bIndex ? &m_sIndexWeight[0] : &m_sFieldWeight[0];
++		int * pWeights = bIndex ? &m_iIndexWeight[0] : &m_iFieldWeight[0];
++
++		*pCount = 0;
++
++		char * p = sValue;
++		while ( *p && *pCount<SPHINXSE_MAX_FILTERS )
++		{
++			// extract attr name
++			if ( !myisattr(*p) )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "%s: index name expected near '%s'", sName, p );
++				SPH_RET(false);
++			}
++
++			pNames[*pCount] = p;
++			while ( myisattr(*p) ) p++;
++
++			if ( *p!=',' )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "%s: comma expected near '%s'", sName, p );
++				SPH_RET(false);
++			}
++			*p++ = '\0';
++
++			// extract attr value
++			char * sVal = p;
++			while ( isdigit(*p) ) p++;
++			if ( p==sVal )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "%s: integer weight expected near '%s'", sName, sVal );
++				SPH_RET(false);
++			}
++			pWeights[*pCount] = atoi(sVal);
++			(*pCount)++;
++
++			if ( !*p )
++				break;
++			if ( *p!=',' )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "%s: comma expected near '%s'", sName, p );
++				SPH_RET(false);
++			}
++			p++;
++		}
++
++	} else if ( !strcmp ( sName, "geoanchor" ) )
++	{
++		// value format: "<lat-attr>,<long-attr>,<lat>,<long>"
++		m_bGeoAnchor = false;
++		for ( ;; )
++		{
++			char * sLat = sValue;
++			char * p = sValue;
++
++			if (!( p = strchr ( p, ',' ) )) break; *p++ = '\0';
++			char * sLong = p;
++
++			if (!( p = strchr ( p, ',' ) )) break; *p++ = '\0';
++			char * sLatVal = p;
++
++			if (!( p = strchr ( p, ',' ) )) break; *p++ = '\0';
++			char * sLongVal = p;
++
++			m_sGeoLatAttr = chop(sLat);
++			m_sGeoLongAttr = chop(sLong);
++			m_fGeoLatitude = (float)atof ( sLatVal );
++			m_fGeoLongitude = (float)atof ( sLongVal );
++			m_bGeoAnchor = true;
++			break;
++		}
++		if ( !m_bGeoAnchor )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "geoanchor: parse error, not enough comma-separated arguments" );
++			SPH_RET(false);
++		}
++	} else if ( !strcmp ( sName, "override" ) ) // name,type,id:value,id:value,...
++	{
++		// NOTE(review): this local sName shadows the outer option name; the
++		// shadowing is intentional here (it holds the attr name) but easy to
++		// misread when touching the error paths below
++		char * sName = NULL;
++		int iType = 0;
++		CSphSEQuery::Override_t * pOverride = NULL;
++
++		// get name and type
++		char * sRest = sValue;
++		for ( ;; )
++		{
++			sName = sRest;
++			if ( !*sName )
++				break;
++			if (!( sRest = strchr ( sRest, ',' ) ))
++				break;
++			*sRest++ = '\0';
++			char * sType = sRest;
++			if (!( sRest = strchr ( sRest, ',' ) ))
++				break;
++
++			static const struct
++			{
++				const char *	m_sName;
++				int				m_iType;
++			}
++			dAttrTypes[] =
++			{
++				{ "int",		SPH_ATTR_INTEGER },
++				{ "timestamp",	SPH_ATTR_TIMESTAMP },
++				{ "bool",		SPH_ATTR_BOOL },
++				{ "float",		SPH_ATTR_FLOAT },
++				{ "bigint",		SPH_ATTR_BIGINT }
++			};
++			// NOTE(review): signed i vs unsigned sizeof-expression comparison;
++			// harmless here but triggers -Wsign-compare
++			for ( int i=0; i<sizeof(dAttrTypes)/sizeof(*dAttrTypes); i++ )
++				if ( !strncmp ( sType, dAttrTypes[i].m_sName, sRest - sType ) )
++			{
++				iType = dAttrTypes[i].m_iType;
++				break;
++			}
++			break;
++		}
++
++		// fail
++		if ( !sName || !*sName || !iType )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "override: malformed query" );
++			SPH_RET(false);
++		}
++
++		// grab id:value pairs
++		sRest++;
++		while ( sRest )
++		{
++			char * sId = sRest;
++			if (!( sRest = strchr ( sRest, ':' ) )) break; *sRest++ = '\0';
++			if (!( sRest - sId )) break;
++
++			char * sValue = sRest;
++			if ( ( sRest = strchr ( sRest, ',' ) )!=NULL )
++				*sRest++ = '\0';
++			if ( !*sValue )
++				break;
++
++			// allocate the override lazily, on the first valid pair
++			if ( !pOverride )
++			{
++				pOverride = new CSphSEQuery::Override_t;
++				pOverride->m_sName = chop(sName);
++				pOverride->m_iType = iType;
++				m_dOverrides.append ( pOverride );
++			}
++
++			ulonglong uId = strtoull ( sId, NULL, 10 );
++			CSphSEQuery::Override_t::Value_t tValue;
++			if ( iType==SPH_ATTR_FLOAT )
++				tValue.m_fValue = (float)atof(sValue);
++			else if ( iType==SPH_ATTR_BIGINT )
++				tValue.m_iValue64 = strtoll ( sValue, NULL, 10 );
++			else
++				tValue.m_uValue = (uint32)strtoul ( sValue, NULL, 10 );
++
++			pOverride->m_dIds.append ( uId );
++			pOverride->m_dValues.append ( tValue );
++		}
++
++		if ( !pOverride )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "override: id:value mapping expected" );
++			SPH_RET(false);
++		}
++		SPH_RET(true);
++	} else
++	{
++		snprintf ( m_sParseError, sizeof(m_sParseError), "unknown parameter '%s'", sName );
++		SPH_RET(false);
++	}
++
++	// !COMMIT handle syntax errors
++
++	SPH_RET(true);
++}
++
++
++// Split the query buffer into semicolon-separated clauses and feed each to
++// ParseField(). Backslash-escaped semicolons are skipped. The constructor's
++// forced trailing ';' guarantees the final clause is also processed.
++bool CSphSEQuery::Parse ()
++{
++	SPH_ENTER_METHOD();
++	SPH_DEBUG ( "query [[ %s ]]", m_sQueryBuffer );
++
++	m_bQuery = false;
++	char * pCur = m_sQueryBuffer;
++	char * pNext = pCur;
++
++	while ( ( pNext = strchr ( pNext, ';' ) )!=NULL )
++	{
++		// handle escaped semicolons
++		if ( pNext>m_sQueryBuffer && pNext[-1]=='\\' && pNext[1]!='\0' )
++		{
++			pNext++;
++			continue;
++		}
++
++		// handle semicolon-separated clauses
++		*pNext++ = '\0';
++		if ( !ParseField ( pCur ) )
++			SPH_RET(false);
++		pCur = pNext;
++	}
++
++	SPH_DEBUG ( "q [[ %s ]]", m_sQuery );
++
++	SPH_RET(true);
++}
++
++
++// Append iBytes raw bytes to the request buffer. On insufficient space the
++// overrun flag is set and nothing is written; BuildRequest() checks the flag
++// at the end instead of each call checking a return value.
++void CSphSEQuery::SendBytes ( const void * pBytes, int iBytes )
++{
++	SPH_ENTER_METHOD();
++	if ( m_iBufLeft<iBytes )
++	{
++		m_bBufOverrun = true;
++		SPH_VOID_RET();
++	}
++
++	memcpy ( m_pCur, pBytes, iBytes );
++
++	m_pCur += iBytes;
++	m_iBufLeft -= iBytes;
++	SPH_VOID_RET();
++}
++
++
++// Serialize this query into a searchd SEARCH request packet. The exact byte
++// size is computed up front (124 fixed bytes plus variable parts), the
++// buffer is allocated, and the Send* helpers fill it; any size mismatch or
++// overrun at the end is reported as an internal error (-1). On success,
++// *ppBuffer points at the owned buffer and the packet size is returned.
++int CSphSEQuery::BuildRequest ( char ** ppBuffer )
++{
++	SPH_ENTER_METHOD();
++
++	// calc request length
++	int iReqSize = 124 + 4*m_iWeights
++		+ strlen ( m_sSortBy )
++		+ strlen ( m_sQuery )
++		+ strlen ( m_sIndex )
++		+ strlen ( m_sGroupBy )
++		+ strlen ( m_sGroupSortBy )
++		+ strlen ( m_sGroupDistinct )
++		+ strlen ( m_sComment )
++		+ strlen ( m_sSelect );
++	for ( int i=0; i<m_iFilters; i++ )
++	{
++		const CSphSEFilter & tFilter = m_dFilters[i];
++		iReqSize += 12 + strlen ( tFilter.m_sAttrName ); // string attr-name; int type; int exclude-flag
++		switch ( tFilter.m_eType )
++		{
++			case SPH_FILTER_VALUES:		iReqSize += 4 + 8*tFilter.m_iValues; break;
++			case SPH_FILTER_RANGE:		iReqSize += 16; break;
++			case SPH_FILTER_FLOATRANGE:	iReqSize += 8; break;
++		}
++	}
++	if ( m_bGeoAnchor ) // 1.14+
++		iReqSize += 16 + strlen ( m_sGeoLatAttr ) + strlen ( m_sGeoLongAttr );
++	for ( int i=0; i<m_iIndexWeights; i++ ) // 1.15+
++		iReqSize += 8 + strlen(m_sIndexWeight[i] );
++	for ( int i=0; i<m_iFieldWeights; i++ ) // 1.18+
++		iReqSize += 8 + strlen(m_sFieldWeight[i] );
++	// overrides
++	iReqSize += 4;
++	for ( int i=0; i<m_dOverrides.elements(); i++ )
++	{
++		CSphSEQuery::Override_t * pOverride = m_dOverrides.at(i);
++		const uint32 uSize = pOverride->m_iType==SPH_ATTR_BIGINT ? 16 : 12; // id64 + value
++		iReqSize += strlen ( pOverride->m_sName ) + 12 + uSize*pOverride->m_dIds.elements();
++	}
++	// select
++	iReqSize += 4;
++
++	m_iBufLeft = 0;
++	SafeDeleteArray ( m_pBuf );
++
++	m_pBuf = new char [ iReqSize ];
++	if ( !m_pBuf )
++		SPH_RET(-1);
++
++	m_pCur = m_pBuf;
++	m_iBufLeft = iReqSize;
++	m_bBufOverrun = false;
++	(*ppBuffer) = m_pBuf;
++
++	// build request
++	SendWord ( SEARCHD_COMMAND_SEARCH ); // command id
++	SendWord ( VER_COMMAND_SEARCH ); // command version
++	SendInt ( iReqSize-8 ); // packet body length
++
++	SendInt ( 1 ); // number of queries
++	SendInt ( m_iOffset );
++	SendInt ( m_iLimit );
++	SendInt ( m_eMode );
++	SendInt ( m_eRanker ); // 1.16+
++	SendInt ( m_eSort );
++	SendString ( m_sSortBy ); // sort attr
++	SendString ( m_sQuery ); // query
++	SendInt ( m_iWeights );
++	for ( int j=0; j<m_iWeights; j++ )
++		SendInt ( m_pWeights[j] ); // weights
++	SendString ( m_sIndex ); // indexes
++	SendInt ( 1 ); // id64 range follows
++	SendUint64 ( m_iMinID ); // id/ts ranges
++	SendUint64 ( m_iMaxID );
++
++	SendInt ( m_iFilters );
++	for ( int j=0; j<m_iFilters; j++ )
++	{
++		const CSphSEFilter & tFilter = m_dFilters[j];
++		SendString ( tFilter.m_sAttrName );
++		SendInt ( tFilter.m_eType );
++
++		switch ( tFilter.m_eType )
++		{
++			case SPH_FILTER_VALUES:
++				SendInt ( tFilter.m_iValues );
++				for ( int k=0; k<tFilter.m_iValues; k++ )
++					SendUint64 ( tFilter.m_pValues[k] );
++				break;
++
++			case SPH_FILTER_RANGE:
++				SendUint64 ( tFilter.m_uMinValue );
++				SendUint64 ( tFilter.m_uMaxValue );
++				break;
++
++			case SPH_FILTER_FLOATRANGE:
++				SendFloat ( tFilter.m_fMinValue );
++				SendFloat ( tFilter.m_fMaxValue );
++				break;
++		}
++
++		SendInt ( tFilter.m_bExclude );
++	}
++
++	SendInt ( m_eGroupFunc );
++	SendString ( m_sGroupBy );
++	SendInt ( m_iMaxMatches );
++	SendString ( m_sGroupSortBy );
++	SendInt ( m_iCutoff ); // 1.9+
++	SendInt ( m_iRetryCount ); // 1.10+
++	SendInt ( m_iRetryDelay );
++	SendString ( m_sGroupDistinct ); // 1.11+
++	SendInt ( m_bGeoAnchor ); // 1.14+
++	if ( m_bGeoAnchor )
++	{
++		SendString ( m_sGeoLatAttr );
++		SendString ( m_sGeoLongAttr );
++		SendFloat ( m_fGeoLatitude );
++		SendFloat ( m_fGeoLongitude );
++	}
++	SendInt ( m_iIndexWeights ); // 1.15+
++	for ( int i=0; i<m_iIndexWeights; i++ )
++	{
++		SendString ( m_sIndexWeight[i] );
++		SendInt ( m_iIndexWeight[i] );
++	}
++	SendInt ( m_iMaxQueryTime ); // 1.17+
++	SendInt ( m_iFieldWeights ); // 1.18+
++	for ( int i=0; i<m_iFieldWeights; i++ )
++	{
++		SendString ( m_sFieldWeight[i] );
++		SendInt ( m_iFieldWeight[i] );
++	}
++	SendString ( m_sComment );
++
++	// overrides
++	SendInt ( m_dOverrides.elements() );
++	for ( int i=0; i<m_dOverrides.elements(); i++ )
++	{
++		CSphSEQuery::Override_t * pOverride = m_dOverrides.at(i);
++		SendString ( pOverride->m_sName );
++		SendDword ( pOverride->m_iType );
++		SendInt ( pOverride->m_dIds.elements() );
++		for ( int j=0; j<pOverride->m_dIds.elements(); j++ )
++		{
++			SendUint64 ( pOverride->m_dIds.at(j) );
++			if ( pOverride->m_iType==SPH_ATTR_FLOAT )
++				SendFloat ( pOverride->m_dValues.at(j).m_fValue );
++			else if ( pOverride->m_iType==SPH_ATTR_BIGINT )
++				SendUint64 ( pOverride->m_dValues.at(j).m_iValue64 );
++			else
++				SendDword ( pOverride->m_dValues.at(j).m_uValue );
++		}
++	}
++
++	// select
++	SendString ( m_sSelect );
++
++	// detect buffer overruns and underruns, and report internal error
++	if ( m_bBufOverrun || m_iBufLeft!=0 || m_pCur-m_pBuf!=iReqSize )
++		SPH_RET(-1);
++
++	// all fine
++	SPH_RET ( iReqSize );
++}
++
++//////////////////////////////////////////////////////////////////////////////
++// SPHINX HANDLER
++//////////////////////////////////////////////////////////////////////////////
++
++// no per-table files on disk: the extension list is empty (NullS terminator only)
++static const char * ha_sphinx_exts[] = { NullS };
++
++
++// Handler constructor. The two signatures match the pre-/post-5.1 handler
++// base-class API; members start zeroed/NULL until open() attaches a share.
++#if MYSQL_VERSION_ID<50100
++ha_sphinx::ha_sphinx ( TABLE_ARG * table )
++	: handler ( &sphinx_hton, table )
++#else
++ha_sphinx::ha_sphinx ( handlerton * hton, TABLE_ARG * table )
++	: handler ( hton, table )
++#endif
++	, m_pShare ( NULL )
++	, m_iMatchesTotal ( 0 )
++	, m_iCurrentPos ( 0 )
++	, m_pCurrentKey ( NULL )
++	, m_iCurrentKeyLen ( 0 )
++	, m_pResponse ( NULL )
++	, m_pResponseEnd ( NULL )
++	, m_pCur ( NULL )
++	, m_bUnpackError ( false )
++	, m_iFields ( 0 )
++	, m_dFields ( NULL )
++	, m_iAttrs ( 0 )
++	, m_dAttrs ( NULL )
++	, m_bId64 ( 0 )
++	, m_dUnboundFields ( NULL )
++{
++	SPH_ENTER_METHOD();
++	// NOTE(review): force-enables condition pushdown for the whole session,
++	// overriding the user's setting -- confirm this side effect is intended
++	if ( current_thd )
++		current_thd->variables.engine_condition_pushdown = true;
++	SPH_VOID_RET();
++}
++
++
++// If frm_error() is called then we will use this to to find out what file extentions
++// exist for the storage engine. This is also used by the default rename_table and
++// delete_table method in handler.cc.
++const char ** ha_sphinx::bas_ext() const
++{
++	// empty list: this engine keeps no per-table files to rename/delete
++	return ha_sphinx_exts;
++}
++
++
++// Used for opening tables. The name will be the name of the file.
++// A table is opened when it needs to be opened. For instance
++// when a request comes in for a select on the table (tables are not
++// open and closed for each request, they are cached).
++//
++// Called from handler.cc by handler::ha_open(). The server opens all tables by
++// calling ha_open() which then calls the handler specific open().
++// Attach this handler to the table's share and init the lock data; also
++// clears this engine's per-thread ha_data slot (API differs by version).
++// Returns 0 on success, 1 when the share could not be obtained.
++int ha_sphinx::open ( const char * name, int, uint )
++{
++	SPH_ENTER_METHOD();
++	m_pShare = get_share ( name, table );
++	if ( !m_pShare )
++		SPH_RET(1);
++
++	thr_lock_data_init ( &m_pShare->m_tLock, &m_tLock, NULL );
++
++	#if MYSQL_VERSION_ID>50100
++	*thd_ha_data ( table->in_use, ht ) = NULL;
++	#else
++	table->in_use->ha_data [ sphinx_hton.slot ] = NULL;
++	#endif
++
++	SPH_RET(0);
++}
++
++
++// Open a TCP connection to sHost:uPort (or, when uPort is 0 on non-Windows,
++// a UNIX-domain socket whose path is sHost). Returns the socket fd, or -1
++// after reporting ER_CONNECT_TO_FOREIGN_DATA_SOURCE.
++int ha_sphinx::Connect ( const char * sHost, ushort uPort )
++{
++	struct sockaddr_in sin;
++#ifndef __WIN__
++	struct sockaddr_un saun;
++#endif
++
++	int iDomain = 0;
++	int iSockaddrSize = 0;
++	struct sockaddr * pSockaddr = NULL;
++
++	in_addr_t ip_addr;
++
++	if ( uPort )
++	{
++		iDomain = AF_INET;
++		iSockaddrSize = sizeof(sin);
++		pSockaddr = (struct sockaddr *) &sin;
++
++		memset ( &sin, 0, sizeof(sin) );
++		sin.sin_family = AF_INET;
++		sin.sin_port = htons(uPort);
++
++		// prepare host address: try dotted-quad first, then resolve by name
++		if ( (int)( ip_addr = inet_addr(sHost) )!=(int)INADDR_NONE )
++		{
++			memcpy ( &sin.sin_addr, &ip_addr, sizeof(ip_addr) );
++		} else
++		{
++			int tmp_errno;
++			bool bError = false;
++
++#if MYSQL_VERSION_ID>=50515
++			// NOTE(review): this getaddrinfo() path looks broken -- the hints
++			// struct tmp_hostent is passed uninitialized, bError is set on
++			// SUCCESS (!tmp_errno) with the result freed, and the later memcpy
++			// reads hp after freeaddrinfo() (or uninitialized on failure).
++			// Confirm against upstream SphinxSE before relying on name
++			// resolution with MySQL >= 5.5.15.
++			struct addrinfo tmp_hostent, *hp;
++			tmp_errno = getaddrinfo ( sHost, NULL, &tmp_hostent, &hp );
++			if ( !tmp_errno )
++			{
++				freeaddrinfo ( hp );
++				bError = true;
++			}
++#else
++			struct hostent tmp_hostent, *hp;
++			char buff2 [ GETHOSTBYNAME_BUFF_SIZE ];
++			hp = my_gethostbyname_r ( sHost, &tmp_hostent, buff2, sizeof(buff2), &tmp_errno );
++			if ( !hp )
++			{
++				my_gethostbyname_r_free();
++				bError = true;
++			}
++#endif
++
++			if ( bError )
++			{
++				char sError[256];
++				my_snprintf ( sError, sizeof(sError), "failed to resolve searchd host (name=%s)", sHost );
++
++				my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++				SPH_RET(-1);
++			}
++
++#if MYSQL_VERSION_ID>=50515
++			memcpy ( &sin.sin_addr, hp->ai_addr, Min ( sizeof(sin.sin_addr), (size_t)hp->ai_addrlen ) );
++			freeaddrinfo ( hp );
++#else
++			memcpy ( &sin.sin_addr, hp->h_addr, Min ( sizeof(sin.sin_addr), (size_t)hp->h_length ) );
++			my_gethostbyname_r_free();
++#endif
++		}
++	} else
++	{
++#ifndef __WIN__
++		// port 0 selects a UNIX-domain socket; sHost holds the path
++		iDomain = AF_UNIX;
++		iSockaddrSize = sizeof(saun);
++		pSockaddr = (struct sockaddr *) &saun;
++
++		memset ( &saun, 0, sizeof(saun) );
++		saun.sun_family = AF_UNIX;
++		strncpy ( saun.sun_path, sHost, sizeof(saun.sun_path)-1 );
++#else
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), "UNIX sockets are not supported on Windows" );
++		SPH_RET(-1);
++#endif
++	}
++
++	char sError[512];
++	int iSocket = socket ( iDomain, SOCK_STREAM, 0 );
++
++	if ( iSocket<0 )
++	{
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), "failed to create client socket" );
++		SPH_RET(-1);
++	}
++
++	if ( connect ( iSocket, pSockaddr, iSockaddrSize )<0 )
++	{
++		sphSockClose ( iSocket );
++		my_snprintf ( sError, sizeof(sError), "failed to connect to searchd (host=%s, errno=%d, port=%d)",
++			sHost, errno, (int)uPort );
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET(-1);
++	}
++
++	// NOTE(review): bare return (not SPH_RET) skips the debug-trace exit macro
++	// used on every other path in this function
++	return iSocket;
++}
++
++
++// Connect to searchd over the binary API protocol: per-query host/port
++// override the share's defaults; after connecting, read the server's
++// 4-byte version greeting and send our protocol version. Returns the
++// socket fd or -1 (error already reported via my_error).
++int ha_sphinx::ConnectAPI ( const char * sQueryHost, int iQueryPort )
++{
++	SPH_ENTER_METHOD();
++
++	const char * sHost = ( sQueryHost && *sQueryHost ) ? sQueryHost : m_pShare->m_sHost;
++	ushort uPort = iQueryPort ? (ushort)iQueryPort : m_pShare->m_iPort;
++
++	int iSocket = Connect ( sHost, uPort );
++	if ( iSocket<0 )
++		SPH_RET ( iSocket );
++
++	char sError[512];
++
++	// NOTE(review): the greeting is read raw -- never ntohl()'d and its value
++	// never checked; only a short read is treated as an error. Confirm the
++	// protocol handshake does not require validating this version.
++	int version;
++	if ( ::recv ( iSocket, (char *)&version, sizeof(version), 0 )!=sizeof(version) )
++	{
++		sphSockClose ( iSocket );
++		my_snprintf ( sError, sizeof(sError), "failed to receive searchd version (host=%s, port=%d)",
++			sHost, (int)uPort );
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET(-1);
++	}
++
++	uint uClientVersion = htonl ( SPHINX_SEARCHD_PROTO );
++	if ( ::send ( iSocket, (char*)&uClientVersion, sizeof(uClientVersion), 0 )!=sizeof(uClientVersion) )
++	{
++		sphSockClose ( iSocket );
++		my_snprintf ( sError, sizeof(sError), "failed to send client version (host=%s, port=%d)",
++			sHost, (int)uPort );
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET(-1);
++	}
++
++	SPH_RET ( iSocket );
++}
++
++
++// Closes a table. We call the free_share() function to free any resources
++// that we have allocated in the "shared" structure.
++//
++// Called from sql_base.cc, sql_select.cc, and table.cc.
++// In sql_select.cc it is only used to close up temporary tables or during
++// the process where a temporary table is converted over to being a
++// myisam table.
++// For sql_base.cc look at close_data_tables().
++int ha_sphinx::close()
++{
++	SPH_ENTER_METHOD();
++	// drop our reference on the shared table descriptor (frees it if last)
++	SPH_RET ( free_share ( m_pShare ) );
++}
++
++
++// Record the connection's last MySQL error into per-thread stats, close the
++// connection, raise iErrCode to the client, and return -1 for the caller
++// to propagate.
++int ha_sphinx::HandleMysqlError ( MYSQL * pConn, int iErrCode )
++{
++	CSphSEThreadData * pTls = GetTls ();
++	if ( pTls )
++	{
++		// NOTE(review): strncpy() does not NUL-terminate when the source fills
++		// the whole buffer -- confirm m_sLastMessage is pre-zeroed or oversized
++		strncpy ( pTls->m_tStats.m_sLastMessage, mysql_error ( pConn ), sizeof ( pTls->m_tStats.m_sLastMessage ) );
++		pTls->m_tStats.m_bLastError = true;
++	}
++
++	mysql_close ( pConn );
++
++	// NOTE(review): pTls is guarded above but dereferenced unconditionally
++	// here -- NULL dereference if GetTls() can fail; confirm and guard
++	my_error ( iErrCode, MYF(0), pTls->m_tStats.m_sLastMessage );
++	return -1;
++}
++
++
++// Handler hints: remember in TLS whether the server wants REPLACE semantics,
++// so write_row() can pick REPLACE vs INSERT. All other hints are ignored.
++int ha_sphinx::extra ( enum ha_extra_function op )
++{
++	CSphSEThreadData * pTls = GetTls();
++	if ( pTls )
++	{
++		if ( op==HA_EXTRA_WRITE_CAN_REPLACE )
++			pTls->m_bReplace = true;
++		else if ( op==HA_EXTRA_WRITE_CANNOT_REPLACE )
++			pTls->m_bReplace = false;
++	}
++	return 0;
++}
++
++
++// INSERT/REPLACE support, SphinxQL mode only: build an INSERT (or REPLACE,
++// per the extra() hint) statement from the row's fields and ship it over a
++// freshly opened MySQL-protocol connection to searchd. Returns 0 on success
++// or a MySQL error code.
++int ha_sphinx::write_row ( byte * )
++{
++	SPH_ENTER_METHOD();
++	if ( !m_pShare || !m_pShare->m_bSphinxQL )
++		SPH_RET ( HA_ERR_WRONG_COMMAND );
++
++	// SphinxQL inserts only, pretty much similar to abandoned federated
++	char sQueryBuf[1024];
++	char sValueBuf[1024];
++
++	String sQuery ( sQueryBuf, sizeof(sQueryBuf), &my_charset_bin );
++	// NOTE(review): sizeof(sQueryBuf) here looks like a copy-paste slip for
++	// sizeof(sValueBuf); harmless while both buffers are 1024 bytes, but a
++	// latent overflow if the sizes ever diverge
++	String sValue ( sValueBuf, sizeof(sQueryBuf), &my_charset_bin );
++	sQuery.length ( 0 );
++	sValue.length ( 0 );
++
++	CSphSEThreadData * pTls = GetTls ();
++	sQuery.append ( pTls && pTls->m_bReplace ? "REPLACE INTO " : "INSERT INTO " );
++	sQuery.append ( m_pShare->m_sIndex );
++	sQuery.append ( " (" );
++
++	// column list
++	for ( Field ** ppField = table->field; *ppField; ppField++ )
++	{
++		sQuery.append ( (*ppField)->field_name );
++		if ( ppField[1] )
++			sQuery.append ( ", " );
++	}
++	sQuery.append ( ") VALUES (" );
++
++	// values: NULLs become '', timestamps become unix time, everything else
++	// is quoted via String::print() escaping
++	for ( Field ** ppField = table->field; *ppField; ppField++ )
++	{
++		if ( (*ppField)->is_null() )
++		{
++			sQuery.append ( "''" );
++
++		} else
++		{
++			if ( (*ppField)->type()==MYSQL_TYPE_TIMESTAMP )
++			{
++				Item_field * pWrap = new Item_field ( *ppField ); // autofreed by query arena, I assume
++				Item_func_unix_timestamp * pConv = new Item_func_unix_timestamp ( pWrap );
++				pConv->quick_fix_field();
++				unsigned int uTs = (unsigned int) pConv->val_int();
++
++				snprintf ( sValueBuf, sizeof(sValueBuf), "'%u'", uTs );
++				sQuery.append ( sValueBuf );
++
++			} else
++			{
++				(*ppField)->val_str ( &sValue );
++				sQuery.append ( "'" );
++				sValue.print ( &sQuery );
++				sQuery.append ( "'" );
++				sValue.length(0);
++			}
++		}
++
++		if ( ppField[1] )
++			sQuery.append ( ", " );
++	}
++	sQuery.append ( ")" );
++
++	// FIXME? pretty inefficient to reconnect every time under high load,
++	// but this was intentionally written for a low load scenario..
++	MYSQL * pConn = mysql_init ( NULL );
++	if ( !pConn )
++		SPH_RET ( ER_OUT_OF_RESOURCES );
++
++	unsigned int uTimeout = 1;
++	mysql_options ( pConn, MYSQL_OPT_CONNECT_TIMEOUT, (const char*)&uTimeout );
++
++	// searchd's SphinxQL listener ignores credentials; "root"/"" is nominal
++	if ( !mysql_real_connect ( pConn, m_pShare->m_sHost, "root", "", "", m_pShare->m_iPort, m_pShare->m_sSocket, 0 ) )
++		SPH_RET ( HandleMysqlError ( pConn, ER_CONNECT_TO_FOREIGN_DATA_SOURCE ) );
++
++	if ( mysql_real_query ( pConn, sQuery.ptr(), sQuery.length() ) )
++		SPH_RET ( HandleMysqlError ( pConn, ER_QUERY_ON_FOREIGN_DATA_SOURCE ) );
++
++	// all ok!
++	mysql_close ( pConn );
++	SPH_RET(0);
++}
++
++
++static inline bool IsIntegerFieldType ( enum_field_types eType )
++{
++	return eType==MYSQL_TYPE_LONG || eType==MYSQL_TYPE_LONGLONG;
++}
++
++
++static inline bool IsIDField ( Field * pField )
++{
++	enum_field_types eType = pField->type();
++
++	if ( eType==MYSQL_TYPE_LONGLONG )
++		return true;
++
++	if ( eType==MYSQL_TYPE_LONG && ((Field_num*)pField)->unsigned_flag )
++		return true;
++
++	return false;
++}
++
++
++int ha_sphinx::delete_row ( const byte * )
++{
++	SPH_ENTER_METHOD();
++	if ( !m_pShare || !m_pShare->m_bSphinxQL )
++		SPH_RET ( HA_ERR_WRONG_COMMAND );
++
++	char sQueryBuf[1024];
++	String sQuery ( sQueryBuf, sizeof(sQueryBuf), &my_charset_bin );
++	sQuery.length ( 0 );
++
++	sQuery.append ( "DELETE FROM " );
++	sQuery.append ( m_pShare->m_sIndex );
++	sQuery.append ( " WHERE id=" );
++
++	char sValue[32];
++	snprintf ( sValue, sizeof(sValue), "%lld", table->field[0]->val_int() );
++	sQuery.append ( sValue );
++
++	// FIXME? pretty inefficient to reconnect every time under high load,
++	// but this was intentionally written for a low load scenario..
++	MYSQL * pConn = mysql_init ( NULL );
++	if ( !pConn )
++		SPH_RET ( ER_OUT_OF_RESOURCES );
++
++	unsigned int uTimeout = 1;
++	mysql_options ( pConn, MYSQL_OPT_CONNECT_TIMEOUT, (const char*)&uTimeout );
++
++	if ( !mysql_real_connect ( pConn, m_pShare->m_sHost, "root", "", "", m_pShare->m_iPort, m_pShare->m_sSocket, 0 ) )
++		SPH_RET ( HandleMysqlError ( pConn, ER_CONNECT_TO_FOREIGN_DATA_SOURCE ) );
++
++	if ( mysql_real_query ( pConn, sQuery.ptr(), sQuery.length() ) )
++		SPH_RET ( HandleMysqlError ( pConn, ER_QUERY_ON_FOREIGN_DATA_SOURCE ) );
++
++	// all ok!
++	mysql_close ( pConn );
++	SPH_RET(0);
++}
++
++
++int ha_sphinx::update_row ( const byte *, byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++// keynr is key (index) number
++// sorted is 1 if result MUST be sorted according to index
++int ha_sphinx::index_init ( uint keynr, bool )
++{
++	SPH_ENTER_METHOD();
++	active_index = keynr;
++
++	CSphSEThreadData * pTls = GetTls();
++	if ( pTls )
++		pTls->m_bCondDone = false;
++
++	SPH_RET(0);
++}
++
++
++int ha_sphinx::index_end()
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++uint32 ha_sphinx::UnpackDword ()
++{
++	if ( m_pCur+sizeof(uint32)>m_pResponseEnd ) // NOLINT
++	{
++		m_pCur = m_pResponseEnd;
++		m_bUnpackError = true;
++		return 0;
++	}
++
++	uint32 uRes = ntohl ( sphUnalignedRead ( *(uint32*)m_pCur ) );
++	m_pCur += sizeof(uint32); // NOLINT
++	return uRes;
++}
++
++
++char * ha_sphinx::UnpackString ()
++{
++	uint32 iLen = UnpackDword ();
++	if ( !iLen )
++		return NULL;
++
++	if ( m_pCur+iLen>m_pResponseEnd )
++	{
++		m_pCur = m_pResponseEnd;
++		m_bUnpackError = true;
++		return NULL;
++	}
++
++	char * sRes = new char [ 1+iLen ];
++	memcpy ( sRes, m_pCur, iLen );
++	sRes[iLen] = '\0';
++	m_pCur += iLen;
++	return sRes;
++}
++
++
++static inline const char * FixNull ( const char * s )
++{
++	return s ? s : "(null)";
++}
++
++
++bool ha_sphinx::UnpackSchema ()
++{
++	SPH_ENTER_METHOD();
++
++	// cleanup
++	if ( m_dFields )
++		for ( int i=0; i<(int)m_iFields; i++ )
++			SafeDeleteArray ( m_dFields[i] );
++	SafeDeleteArray ( m_dFields );
++
++	// unpack network packet
++	uint32 uStatus = UnpackDword ();
++	char * sMessage = NULL;
++
++	if ( uStatus!=SEARCHD_OK )
++	{
++		sMessage = UnpackString ();
++		CSphSEThreadData * pTls = GetTls ();
++		if ( pTls )
++		{
++			snprintf ( pTls->m_tStats.m_sLastMessage, sizeof(pTls->m_tStats.m_sLastMessage), "%s", FixNull ( sMessage ) );
++			pTls->m_tStats.m_bLastError = ( uStatus==SEARCHD_ERROR );
++		}
++
++		if ( uStatus==SEARCHD_ERROR )
++		{
++			char sError[1024];
++			my_snprintf ( sError, sizeof(sError), "searchd error: %s", FixNull ( sMessage ) );
++			my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError );
++			SafeDeleteArray ( sMessage );
++			SPH_RET ( false );
++		}
++	}
++
++	m_iFields = UnpackDword ();
++	m_dFields = new char * [ m_iFields ];
++	if ( !m_dFields )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: UnpackSchema() failed (fields alloc error)" );
++		SPH_RET(false);
++	}
++
++	for ( uint32 i=0; i<m_iFields; i++ )
++		m_dFields[i] = UnpackString ();
++
++	SafeDeleteArray ( m_dAttrs );
++	m_iAttrs = UnpackDword ();
++	m_dAttrs = new CSphSEAttr [ m_iAttrs ];
++	if ( !m_dAttrs )
++	{
++		for ( int i=0; i<(int)m_iFields; i++ )
++			SafeDeleteArray ( m_dFields[i] );
++		SafeDeleteArray ( m_dFields );
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: UnpackSchema() failed (attrs alloc error)" );
++		SPH_RET(false);
++	}
++
++	for ( uint32 i=0; i<m_iAttrs; i++ )
++	{
++		m_dAttrs[i].m_sName = UnpackString ();
++		m_dAttrs[i].m_uType = UnpackDword ();
++		if ( m_bUnpackError ) // m_sName may be null
++			break;
++
++		m_dAttrs[i].m_iField = -1;
++		for ( int j=SPHINXSE_SYSTEM_COLUMNS; j<m_pShare->m_iTableFields; j++ )
++		{
++			const char * sTableField = m_pShare->m_sTableField[j];
++			const char * sAttrField = m_dAttrs[i].m_sName;
++			if ( m_dAttrs[i].m_sName[0]=='@' )
++			{
++				const char * sAtPrefix = "_sph_";
++				if ( strncmp ( sTableField, sAtPrefix, strlen(sAtPrefix) ) )
++					continue;
++				sTableField += strlen(sAtPrefix);
++				sAttrField++;
++			}
++
++			if ( !strcasecmp ( sAttrField, sTableField ) )
++			{
++				// we're almost good, but
++				// let's enforce that timestamp columns can only receive timestamp attributes
++				if ( m_pShare->m_eTableFieldType[j]!=MYSQL_TYPE_TIMESTAMP || m_dAttrs[i].m_uType==SPH_ATTR_TIMESTAMP )
++					m_dAttrs[i].m_iField = j;
++				break;
++			}
++		}
++	}
++
++	m_iMatchesTotal = UnpackDword ();
++
++	m_bId64 = UnpackDword ();
++	if ( m_bId64 && m_pShare->m_eTableFieldType[0]!=MYSQL_TYPE_LONGLONG )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: 1st column must be bigint to accept 64-bit DOCIDs" );
++		SPH_RET(false);
++	}
++
++	// network packet unpacked; build unbound fields map
++	SafeDeleteArray ( m_dUnboundFields );
++	m_dUnboundFields = new int [ m_pShare->m_iTableFields ];
++
++	for ( int i=0; i<m_pShare->m_iTableFields; i++ )
++	{
++		if ( i<SPHINXSE_SYSTEM_COLUMNS )
++			m_dUnboundFields[i] = SPH_ATTR_NONE;
++
++		else if ( m_pShare->m_eTableFieldType[i]==MYSQL_TYPE_TIMESTAMP )
++			m_dUnboundFields[i] = SPH_ATTR_TIMESTAMP;
++
++		else
++			m_dUnboundFields[i] = SPH_ATTR_INTEGER;
++	}
++
++	for ( uint32 i=0; i<m_iAttrs; i++ )
++		if ( m_dAttrs[i].m_iField>=0 )
++			m_dUnboundFields [ m_dAttrs[i].m_iField ] = SPH_ATTR_NONE;
++
++	if ( m_bUnpackError )
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: UnpackSchema() failed (unpack error)" );
++
++	SPH_RET ( !m_bUnpackError );
++}
++
++
++bool ha_sphinx::UnpackStats ( CSphSEStats * pStats )
++{
++	assert ( pStats );
++
++	char * pCurSave = m_pCur;
++	for ( uint i=0; i<m_iMatchesTotal && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT
++	{
++		m_pCur += m_bId64 ? 12 : 8; // skip id+weight
++		for ( uint32 i=0; i<m_iAttrs && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT
++		{
++			if ( m_dAttrs[i].m_uType & SPH_ATTR_MULTI )
++			{
++				// skip MVA list
++				uint32 uCount = UnpackDword ();
++				m_pCur += uCount*4;
++			} else // skip normal value
++				m_pCur += m_dAttrs[i].m_uType==SPH_ATTR_BIGINT ? 8 : 4;
++		}
++	}
++
++	pStats->m_iMatchesTotal = UnpackDword ();
++	pStats->m_iMatchesFound = UnpackDword ();
++	pStats->m_iQueryMsec = UnpackDword ();
++	pStats->m_iWords = UnpackDword ();
++
++	if ( m_bUnpackError )
++		return false;
++
++	SafeDeleteArray ( pStats->m_dWords );
++	if ( pStats->m_iWords<0 || pStats->m_iWords>=SPHINXSE_MAX_KEYWORDSTATS )
++		return false;
++	pStats->m_dWords = new CSphSEWordStats [ pStats->m_iWords ];
++	if ( !pStats->m_dWords )
++		return false;
++
++	for ( int i=0; i<pStats->m_iWords; i++ )
++	{
++		CSphSEWordStats & tWord = pStats->m_dWords[i];
++		tWord.m_sWord = UnpackString ();
++		tWord.m_iDocs = UnpackDword ();
++		tWord.m_iHits = UnpackDword ();
++	}
++
++	if ( m_bUnpackError )
++		return false;
++
++	m_pCur = pCurSave;
++	return true;
++}
++
++
++/// condition pushdown implementation, to properly intercept WHERE clauses on my columns
++const COND * ha_sphinx::cond_push ( const COND * cond )
++{
++	// catch the simplest case: query_column="some text"
++	for ( ;; )
++	{
++		if ( cond->type()!=COND::FUNC_ITEM )
++			break;
++
++		Item_func * condf = (Item_func *)cond;
++		if ( condf->functype()!=Item_func::EQ_FUNC || condf->argument_count()!=2 )
++			break;
++
++		// get my tls
++		CSphSEThreadData * pTls = GetTls ();
++		if ( !pTls )
++			break;
++
++		Item ** args = condf->arguments();
++		if ( !m_pShare->m_bSphinxQL )
++		{
++			// on non-QL tables, intercept query=value condition for SELECT
++			if (!( args[0]->type()==COND::FIELD_ITEM && args[1]->type()==COND::STRING_ITEM ))
++				break;
++
++			Item_field * pField = (Item_field *) args[0];
++			if ( pField->field->field_index!=2 ) // FIXME! magic key index
++				break;
++
++			// copy the query, and let know that we intercepted this condition
++			Item_string * pString = (Item_string *) args[1];
++			pTls->m_bQuery = true;
++			strncpy ( pTls->m_sQuery, pString->str_value.c_ptr(), sizeof(pTls->m_sQuery) );
++			pTls->m_sQuery[sizeof(pTls->m_sQuery)-1] = '\0';
++			pTls->m_pQueryCharset = pString->str_value.charset();
++
++		} else
++		{
++			if (!( args[0]->type()==COND::FIELD_ITEM && args[1]->type()==COND::INT_ITEM ))
++				break;
++
++			// on QL tables, intercept id=value condition for DELETE
++			Item_field * pField = (Item_field *) args[0];
++			if ( pField->field->field_index!=0 ) // FIXME! magic key index
++				break;
++
++			Item_int * pVal = (Item_int *) args[1];
++			pTls->m_iCondId = pVal->val_int();
++			pTls->m_bCondId = true;
++		}
++
++		// we intercepted this condition
++		return NULL;
++	}
++
++	// don't change anything
++	return cond;
++}
++
++
++/// condition popup
++void ha_sphinx::cond_pop ()
++{
++	CSphSEThreadData * pTls = GetTls ();
++	if ( pTls )
++		pTls->m_bQuery = false;
++}
++
++
++/// get TLS (maybe allocate it, too)
++CSphSEThreadData * ha_sphinx::GetTls()
++{
++	// where do we store that pointer in today's version?
++	CSphSEThreadData ** ppTls;
++#if MYSQL_VERSION_ID>50100
++	ppTls = (CSphSEThreadData**) thd_ha_data ( table->in_use, ht );
++#else
++	ppTls = (CSphSEThreadData**) &current_thd->ha_data[sphinx_hton.slot];
++#endif // >50100
++
++	// allocate if needed
++	if ( !*ppTls )
++		*ppTls = new CSphSEThreadData ();
++
++	// errors will be handled by caller
++	return *ppTls;
++}
++
++
++// Positions an index cursor to the index specified in the handle. Fetches the
++// row if available. If the key value is null, begin at the first key of the
++// index.
++int ha_sphinx::index_read ( byte * buf, const byte * key, uint key_len, enum ha_rkey_function )
++{
++	SPH_ENTER_METHOD();
++	char sError[256];
++
++	// set new data for thd->ha_data, it is used in show_status
++	CSphSEThreadData * pTls = GetTls();
++	if ( !pTls )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: TLS malloc() failed" );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++	pTls->m_tStats.Reset ();
++
++	// sphinxql table, just return the key once
++	if ( m_pShare->m_bSphinxQL )
++	{
++		// over and out
++		if ( pTls->m_bCondDone )
++			SPH_RET ( HA_ERR_END_OF_FILE );
++
++		// return a value from pushdown, if any
++		if ( pTls->m_bCondId )
++		{
++			table->field[0]->store ( pTls->m_iCondId, 1 );
++			pTls->m_bCondDone = true;
++			SPH_RET(0);
++		}
++
++		// return a value from key
++		longlong iRef = 0;
++		if ( key_len==4 )
++			iRef = uint4korr ( key );
++		else if ( key_len==8 )
++			iRef = uint8korr ( key );
++		else
++		{
++			my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: unexpected key length" );
++			SPH_RET ( HA_ERR_END_OF_FILE );
++		}
++
++		table->field[0]->store ( iRef, 1 );
++		pTls->m_bCondDone = true;
++		SPH_RET(0);
++	}
++
++	// parse query
++	if ( pTls->m_bQuery )
++	{
++		// we have a query from condition pushdown
++		m_pCurrentKey = (const byte *) pTls->m_sQuery;
++		m_iCurrentKeyLen = strlen(pTls->m_sQuery);
++	} else
++	{
++		// just use the key (might be truncated)
++		m_pCurrentKey = key+HA_KEY_BLOB_LENGTH;
++		m_iCurrentKeyLen = uint2korr(key); // or maybe key_len?
++		pTls->m_pQueryCharset = m_pShare ? m_pShare->m_pTableQueryCharset : NULL;
++	}
++
++	CSphSEQuery q ( (const char*)m_pCurrentKey, m_iCurrentKeyLen, m_pShare->m_sIndex );
++	if ( !q.Parse () )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), q.m_sParseError );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	// do connect
++	int iSocket = ConnectAPI ( q.m_sHost, q.m_iPort );
++	if ( iSocket<0 )
++		SPH_RET ( HA_ERR_END_OF_FILE );
++
++	// my buffer
++	char * pBuffer; // will be free by CSphSEQuery dtor; do NOT free manually
++	int iReqLen = q.BuildRequest ( &pBuffer );
++
++	if ( iReqLen<=0 )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: q.BuildRequest() failed" );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	// send request
++	::send ( iSocket, pBuffer, iReqLen, 0 );
++
++	// receive reply
++	char sHeader[8];
++	int iGot = ::recv ( iSocket, sHeader, sizeof(sHeader), RECV_FLAGS );
++	if ( iGot!=sizeof(sHeader) )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "failed to receive response header (searchd went away?)" );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	short int uRespStatus = ntohs ( sphUnalignedRead ( *(short int*)( &sHeader[0] ) ) );
++	short int uRespVersion = ntohs ( sphUnalignedRead ( *(short int*)( &sHeader[2] ) ) );
++	uint uRespLength = ntohl ( sphUnalignedRead ( *(uint *)( &sHeader[4] ) ) );
++	SPH_DEBUG ( "got response header (status=%d version=%d length=%d)",
++		uRespStatus, uRespVersion, uRespLength );
++
++	SafeDeleteArray ( m_pResponse );
++	if ( uRespLength<=SPHINXSE_MAX_ALLOC )
++		m_pResponse = new char [ uRespLength+1 ];
++
++	if ( !m_pResponse )
++	{
++		my_snprintf ( sError, sizeof(sError), "bad searchd response length (length=%u)", uRespLength );
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	int iRecvLength = 0;
++	while ( iRecvLength<(int)uRespLength )
++	{
++		int iRecv = ::recv ( iSocket, m_pResponse+iRecvLength, uRespLength-iRecvLength, RECV_FLAGS );
++		if ( iRecv<0 )
++			break;
++		iRecvLength += iRecv;
++	}
++
++	::closesocket ( iSocket );
++	iSocket = -1;
++
++	if ( iRecvLength!=(int)uRespLength )
++	{
++		my_snprintf ( sError, sizeof(sError), "net read error (expected=%d, got=%d)", uRespLength, iRecvLength );
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	// we'll have a message, at least
++	pTls->m_bStats = true;
++
++	// parse reply
++	m_iCurrentPos = 0;
++	m_pCur = m_pResponse;
++	m_pResponseEnd = m_pResponse + uRespLength;
++	m_bUnpackError = false;
++
++	if ( uRespStatus!=SEARCHD_OK )
++	{
++		char * sMessage = UnpackString ();
++		if ( !sMessage )
++		{
++			my_snprintf ( sError, sizeof(sError), "no valid response from searchd (status=%d, resplen=%d)", uRespStatus, uRespLength );
++			my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError );
++			SPH_RET ( HA_ERR_END_OF_FILE );
++		}
++
++		snprintf ( pTls->m_tStats.m_sLastMessage, sizeof(pTls->m_tStats.m_sLastMessage), "%s", sMessage );
++		SafeDeleteArray ( sMessage );
++
++		if ( uRespStatus!=SEARCHD_WARNING )
++		{
++			my_snprintf ( sError, sizeof(sError), "searchd error: %s", pTls->m_tStats.m_sLastMessage );
++			my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError );
++
++			pTls->m_tStats.m_bLastError = true;
++			SPH_RET ( HA_ERR_END_OF_FILE );
++		}
++	}
++
++	if ( !UnpackSchema () )
++		SPH_RET ( HA_ERR_END_OF_FILE );
++
++	if ( !UnpackStats ( &pTls->m_tStats ) )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: UnpackStats() failed" );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	SPH_RET ( get_rec ( buf, key, key_len ) );
++}
++
++
++// Positions an index cursor to the index specified in key. Fetches the
++// row if any. This is only used to read whole keys.
++int ha_sphinx::index_read_idx ( byte *, uint, const byte *, uint, enum ha_rkey_function )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++// Used to read forward through the index.
++int ha_sphinx::index_next ( byte * buf )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( get_rec ( buf, m_pCurrentKey, m_iCurrentKeyLen ) );
++}
++
++
++int ha_sphinx::index_next_same ( byte * buf, const byte * key, uint keylen )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( get_rec ( buf, key, keylen ) );
++}
++
++
++int ha_sphinx::get_rec ( byte * buf, const byte *, uint )
++{
++	SPH_ENTER_METHOD();
++
++	if ( m_iCurrentPos>=m_iMatchesTotal )
++	{
++		SafeDeleteArray ( m_pResponse );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	#if MYSQL_VERSION_ID>50100
++	my_bitmap_map * org_bitmap = dbug_tmp_use_all_columns ( table, table->write_set );
++	#endif
++	Field ** field = table->field;
++
++	// unpack and return the match
++	longlong uMatchID = UnpackDword ();
++	if ( m_bId64 )
++		uMatchID = ( uMatchID<<32 ) + UnpackDword();
++	uint32 uMatchWeight = UnpackDword ();
++
++	field[0]->store ( uMatchID, 1 );
++	field[1]->store ( uMatchWeight, 1 );
++	field[2]->store ( (const char*)m_pCurrentKey, m_iCurrentKeyLen, &my_charset_bin );
++
++	for ( uint32 i=0; i<m_iAttrs; i++ )
++	{
++		longlong iValue64 = 0;
++		uint32 uValue = UnpackDword ();
++		if ( m_dAttrs[i].m_uType==SPH_ATTR_BIGINT )
++			iValue64 = ( (longlong)uValue<<32 ) | UnpackDword();
++		if ( m_dAttrs[i].m_iField<0 )
++		{
++			// skip MVA
++			if ( m_dAttrs[i].m_uType & SPH_ATTR_MULTI )
++				for ( ; uValue>0 && !m_bUnpackError; uValue-- )
++					UnpackDword();
++			continue;
++		}
++
++		Field * af = field [ m_dAttrs[i].m_iField ];
++		switch ( m_dAttrs[i].m_uType )
++		{
++			case SPH_ATTR_INTEGER:
++			case SPH_ATTR_ORDINAL:
++			case SPH_ATTR_BOOL:
++				af->store ( uValue, 1 );
++				break;
++
++			case SPH_ATTR_FLOAT:
++				af->store ( sphDW2F(uValue) );
++				break;
++
++			case SPH_ATTR_TIMESTAMP:
++				if ( af->type()==MYSQL_TYPE_TIMESTAMP )
++					longstore ( af->ptr, uValue ); // because store() does not accept timestamps
++				else
++					af->store ( uValue, 1 );
++				break;
++
++			case SPH_ATTR_BIGINT:
++				af->store ( iValue64, 0 );
++				break;
++
++			case ( SPH_ATTR_MULTI | SPH_ATTR_INTEGER ):
++				if ( uValue==0 )
++				{
++					// shortcut, empty MVA set
++					af->store ( "", 0, &my_charset_bin );
++
++				} else
++				{
++					// convert MVA set to comma-separated string
++					char sBuf[1024]; // FIXME! magic size
++					char * pCur = sBuf;
++
++					for ( ; uValue>0 && !m_bUnpackError; uValue-- )
++					{
++						uint32 uEntry = UnpackDword ();
++						if ( pCur < sBuf+sizeof(sBuf)-16 ) // 10 chars per 32bit value plus some safety bytes
++						{
++							snprintf ( pCur, sBuf+sizeof(sBuf)-pCur, "%u", uEntry );
++						while ( *pCur ) pCur++;
++							if ( uValue>1 )
++								*pCur++ = ','; // non-trailing commas
++						}
++					}
++
++					af->store ( sBuf, pCur-sBuf, &my_charset_bin );
++				}
++				break;
++
++			default:
++				my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: unhandled attr type" );
++				SafeDeleteArray ( m_pResponse );
++				SPH_RET ( HA_ERR_END_OF_FILE );
++		}
++	}
++
++	if ( m_bUnpackError )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: response unpacker failed" );
++		SafeDeleteArray ( m_pResponse );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	// zero out unmapped fields
++	for ( int i=SPHINXSE_SYSTEM_COLUMNS; i<(int)table->s->fields; i++ )
++		if ( m_dUnboundFields[i]!=SPH_ATTR_NONE )
++			switch ( m_dUnboundFields[i] )
++	{
++		case SPH_ATTR_INTEGER:		table->field[i]->store ( 0, 1 ); break;
++		case SPH_ATTR_TIMESTAMP:	longstore ( table->field[i]->ptr, 0 ); break;
++		default:
++			my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0),
++				"INTERNAL ERROR: unhandled unbound field type %d", m_dUnboundFields[i] );
++			SafeDeleteArray ( m_pResponse );
++			SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	memset ( buf, 0, table->s->null_bytes );
++	m_iCurrentPos++;
++
++	#if MYSQL_VERSION_ID > 50100
++	dbug_tmp_restore_column_map ( table->write_set, org_bitmap );
++	#endif
++
++	SPH_RET(0);
++}
++
++
++// Used to read backwards through the index.
++int ha_sphinx::index_prev ( byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++// index_first() asks for the first key in the index.
++//
++// Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
++// and sql_select.cc.
++int ha_sphinx::index_first ( byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_END_OF_FILE );
++}
++
++// index_last() asks for the last key in the index.
++//
++// Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
++// and sql_select.cc.
++int ha_sphinx::index_last ( byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++int ha_sphinx::rnd_init ( bool )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++int ha_sphinx::rnd_end()
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++int ha_sphinx::rnd_next ( byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_END_OF_FILE );
++}
++
++
++void ha_sphinx::position ( const byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_VOID_RET();
++}
++
++
++// This is like rnd_next, but you are given a position to use
++// to determine the row. The position will be of the type that you stored in
++// ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key
++// or position you saved when position() was called.
++// Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc.
++int ha_sphinx::rnd_pos ( byte *, byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++#if MYSQL_VERSION_ID>=50030
++int ha_sphinx::info ( uint )
++#else
++void ha_sphinx::info ( uint )
++#endif
++{
++	SPH_ENTER_METHOD();
++
++	if ( table->s->keys>0 )
++		table->key_info[0].rec_per_key[0] = 1;
++
++	#if MYSQL_VERSION_ID>50100
++	stats.records = 20;
++	#else
++	records = 20;
++	#endif
++
++#if MYSQL_VERSION_ID>=50030
++	SPH_RET(0);
++#else
++	SPH_VOID_RET();
++#endif
++}
++
++
++int ha_sphinx::reset ()
++{
++	SPH_ENTER_METHOD();
++	CSphSEThreadData * pTls = GetTls ();
++	if ( pTls )
++		pTls->m_bQuery = false;
++	SPH_RET(0);
++}
++
++
++int ha_sphinx::delete_all_rows()
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++// First you should go read the section "locking functions for mysql" in
++// lock.cc to understand this.
++// This create a lock on the table. If you are implementing a storage engine
++// that can handle transacations look at ha_berkely.cc to see how you will
++// want to go about doing this. Otherwise you should consider calling flock()
++// here.
++//
++// Called from lock.cc by lock_external() and unlock_external(). Also called
++// from sql_table.cc by copy_data_between_tables().
++int ha_sphinx::external_lock ( THD *, int )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++THR_LOCK_DATA ** ha_sphinx::store_lock ( THD *, THR_LOCK_DATA ** to,
++	enum thr_lock_type lock_type )
++{
++	SPH_ENTER_METHOD();
++
++	if ( lock_type!=TL_IGNORE && m_tLock.type==TL_UNLOCK )
++		m_tLock.type = lock_type;
++
++	*to++ = &m_tLock;
++	SPH_RET(to);
++}
++
++
++int ha_sphinx::delete_table ( const char * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++// Renames a table from one name to another from alter table call.
++//
++// If you do not implement this, the default rename_table() is called from
++// handler.cc and it will delete all files with the file extentions returned
++// by bas_ext().
++//
++// Called from sql_table.cc by mysql_rename_table().
++int ha_sphinx::rename_table ( const char *, const char * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++// Given a starting key, and an ending key estimate the number of rows that
++// will exist between the two. end_key may be empty which in case determine
++// if start_key matches any rows.
++//
++// Called from opt_range.cc by check_quick_keys().
++ha_rows ha_sphinx::records_in_range ( uint, key_range *, key_range * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(3); // low number to force index usage
++}
++
++
++// create() is called to create a database. The variable name will have the name
++// of the table. When create() is called you do not need to worry about opening
++// the table. Also, the FRM file will have already been created so adjusting
++// create_info will not do you any good. You can overwrite the frm file at this
++// point if you wish to change the table definition, but there are no methods
++// currently provided for doing that.
++//
++// Called from handle.cc by ha_create_table().
++int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
++{
++	SPH_ENTER_METHOD();
++	char sError[256];
++
++	CSphSEShare tInfo;
++	if ( !ParseUrl ( &tInfo, table, true ) )
++		SPH_RET(-1);
++
++	// check SphinxAPI table
++	for ( ; !tInfo.m_bSphinxQL; )
++	{
++		// check system fields (count and types)
++		if ( table->s->fields<SPHINXSE_SYSTEM_COLUMNS )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: there MUST be at least %d columns",
++				name, SPHINXSE_SYSTEM_COLUMNS );
++			break;
++		}
++
++		if ( !IsIDField ( table->field[0] ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 1st column (docid) MUST be unsigned integer or bigint", name );
++			break;
++		}
++
++		if ( !IsIntegerFieldType ( table->field[1]->type() ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 2nd column (weight) MUST be integer or bigint", name );
++			break;
++		}
++
++		enum_field_types f2 = table->field[2]->type();
++		if ( f2!=MYSQL_TYPE_VARCHAR
++			&& f2!=MYSQL_TYPE_BLOB && f2!=MYSQL_TYPE_MEDIUM_BLOB && f2!=MYSQL_TYPE_LONG_BLOB && f2!=MYSQL_TYPE_TINY_BLOB )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 3rd column (search query) MUST be varchar or text", name );
++			break;
++		}
++
++		// check attributes
++		int i;
++		for ( i=3; i<(int)table->s->fields; i++ )
++		{
++			enum_field_types eType = table->field[i]->type();
++			if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
++			{
++				my_snprintf ( sError, sizeof(sError), "%s: %dth column (attribute %s) MUST be integer, bigint, timestamp, varchar, or float",
++					name, i+1, table->field[i]->field_name );
++				break;
++			}
++		}
++
++		if ( i!=(int)table->s->fields )
++			break;
++
++		// check index
++		if (
++			table->s->keys!=1 ||
++			table->key_info[0].key_parts!=1 ||
++			strcasecmp ( table->key_info[0].key_part[0].field->field_name, table->field[2]->field_name ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: there must be an index on '%s' column",
++				name, table->field[2]->field_name );
++			break;
++		}
++
++		// all good
++		sError[0] = '\0';
++		break;
++	}
++
++	// check SphinxQL table
++	for ( ; tInfo.m_bSphinxQL; )
++	{
++		sError[0] = '\0';
++
++		// check that 1st column is id, is of int type, and has an index
++		if ( strcmp ( table->field[0]->field_name, "id" ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 1st column must be called 'id'", name );
++			break;
++		}
++
++		if ( !IsIDField ( table->field[0] ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be INT UNSIGNED or BIGINT", name );
++			break;
++		}
++
++		// check index
++		if (
++			table->s->keys!=1 ||
++			table->key_info[0].key_parts!=1 ||
++			strcasecmp ( table->key_info[0].key_part[0].field->field_name, "id" ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be indexed", name );
++			break;
++		}
++
++		// check column types
++		for ( int i=1; i<(int)table->s->fields; i++ )
++		{
++			enum_field_types eType = table->field[i]->type();
++			if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
++			{
++				my_snprintf ( sError, sizeof(sError), "%s: column %d (%s) is of unsupported type (use int/bigint/timestamp/varchar/float)",
++					name, i+1, table->field[i]->field_name );
++				break;
++			}
++		}
++		if ( sError[0] )
++			break;
++
++		// all good
++		break;
++	}
++
++	// report and bail
++	if ( sError[0] )
++	{
++		my_error ( ER_CANT_CREATE_TABLE, MYF(0), sError, -1 );
++		SPH_RET(-1);
++	}
++
++	SPH_RET(0);
++}
++
++// show functions
++
++#if MYSQL_VERSION_ID<50100
++#define SHOW_VAR_FUNC_BUFF_SIZE 1024
++#endif
++
++CSphSEStats * sphinx_get_stats ( THD * thd, SHOW_VAR * out )
++{
++#if MYSQL_VERSION_ID>50100
++	if ( sphinx_hton_ptr )
++	{
++		CSphSEThreadData *pTls = (CSphSEThreadData *) *thd_ha_data ( thd, sphinx_hton_ptr );
++
++		if ( pTls && pTls->m_bStats )
++			return &pTls->m_tStats;
++	}
++#else
++	CSphSEThreadData *pTls = (CSphSEThreadData *) thd->ha_data[sphinx_hton.slot];
++	if ( pTls && pTls->m_bStats )
++		return &pTls->m_tStats;
++#endif
++
++	out->type = SHOW_CHAR;
++	out->value = "";
++	return 0;
++}
++
++int sphinx_showfunc_total ( THD * thd, SHOW_VAR * out, char * )
++{
++	CSphSEStats * pStats = sphinx_get_stats ( thd, out );
++	if ( pStats )
++	{
++		out->type = SHOW_INT;
++		out->value = (char *) &pStats->m_iMatchesTotal;
++	}
++	return 0;
++}
++
++int sphinx_showfunc_total_found ( THD * thd, SHOW_VAR * out, char * )
++{
++	CSphSEStats * pStats = sphinx_get_stats ( thd, out );
++	if ( pStats )
++	{
++		out->type = SHOW_INT;
++		out->value = (char *) &pStats->m_iMatchesFound;
++	}
++	return 0;
++}
++
++int sphinx_showfunc_time ( THD * thd, SHOW_VAR * out, char * )
++{
++	CSphSEStats * pStats = sphinx_get_stats ( thd, out );
++	if ( pStats )
++	{
++		out->type = SHOW_INT;
++		out->value = (char *) &pStats->m_iQueryMsec;
++	}
++	return 0;
++}
++
++int sphinx_showfunc_word_count ( THD * thd, SHOW_VAR * out, char * )
++{
++	CSphSEStats * pStats = sphinx_get_stats ( thd, out );
++	if ( pStats )
++	{
++		out->type = SHOW_INT;
++		out->value = (char *) &pStats->m_iWords;
++	}
++	return 0;
++}
++
++int sphinx_showfunc_words ( THD * thd, SHOW_VAR * out, char * sBuffer )
++{
++#if MYSQL_VERSION_ID>50100
++	if ( sphinx_hton_ptr )
++	{
++		CSphSEThreadData * pTls = (CSphSEThreadData *) *thd_ha_data ( thd, sphinx_hton_ptr );
++#else
++	{
++		CSphSEThreadData * pTls = (CSphSEThreadData *) thd->ha_data[sphinx_hton.slot];
++#endif
++		if ( pTls && pTls->m_bStats )
++		{
++			CSphSEStats * pStats = &pTls->m_tStats;
++			if ( pStats && pStats->m_iWords )
++			{
++				uint uBuffLen = 0;
++
++				out->type = SHOW_CHAR;
++				out->value = sBuffer;
++
++				// the following is partially based on code in sphinx_show_status()
++				sBuffer[0] = 0;
++				for ( int i=0; i<pStats->m_iWords; i++ )
++				{
++					CSphSEWordStats & tWord = pStats->m_dWords[i];
++					uBuffLen = my_snprintf ( sBuffer, SHOW_VAR_FUNC_BUFF_SIZE, "%s%s:%d:%d ", sBuffer,
++						tWord.m_sWord, tWord.m_iDocs, tWord.m_iHits );
++				}
++
++				if ( uBuffLen > 0 )
++				{
++					// trim last space
++					sBuffer [ --uBuffLen ] = 0;
++
++					if ( pTls->m_pQueryCharset )
++					{
++						// String::c_ptr() will nul-terminate the buffer.
++						//
++						// NOTE: It's not entirely clear whether this conversion is necessary at all.
++
++						String sConvert;
++						uint iErrors;
++						sConvert.copy ( sBuffer, uBuffLen, pTls->m_pQueryCharset, system_charset_info, &iErrors );
++						memcpy ( sBuffer, sConvert.c_ptr(), sConvert.length() + 1 );
++					}
++				}
++
++				return 0;
++			}
++		}
++	}
++
++	out->type = SHOW_CHAR;
++	out->value = "";
++	return 0;
++}
++
++int sphinx_showfunc_error ( THD * thd, SHOW_VAR * out, char * )
++{
++	CSphSEStats * pStats = sphinx_get_stats ( thd, out );
++	if ( pStats && pStats->m_bLastError )
++	{
++		out->type = SHOW_CHAR;
++		out->value = pStats->m_sLastMessage;
++	}
++	return 0;
++}
++
++#if MYSQL_VERSION_ID>50100
++struct st_mysql_storage_engine sphinx_storage_engine =
++{
++	MYSQL_HANDLERTON_INTERFACE_VERSION
++};
++
++struct st_mysql_show_var sphinx_status_vars[] =
++{
++	{"sphinx_total",		(char *)sphinx_showfunc_total,			SHOW_FUNC},
++	{"sphinx_total_found",	(char *)sphinx_showfunc_total_found,	SHOW_FUNC},
++	{"sphinx_time",			(char *)sphinx_showfunc_time,			SHOW_FUNC},
++	{"sphinx_word_count",	(char *)sphinx_showfunc_word_count,		SHOW_FUNC},
++	{"sphinx_words",		(char *)sphinx_showfunc_words,			SHOW_FUNC},
++	{"sphinx_error",		(char *)sphinx_showfunc_error,			SHOW_FUNC},
++	{0, 0, (enum_mysql_show_type)0}
++};
++
++
++mysql_declare_plugin(sphinx)
++{
++	MYSQL_STORAGE_ENGINE_PLUGIN,
++	&sphinx_storage_engine,
++	sphinx_hton_name,
++	"Sphinx developers",
++	sphinx_hton_comment,
++	PLUGIN_LICENSE_GPL,
++	sphinx_init_func, // Plugin Init
++	sphinx_done_func, // Plugin Deinit
++	0x0001, // 0.1
++	sphinx_status_vars,
++	NULL,
++	NULL
++}
++mysql_declare_plugin_end;
++
++#endif // >50100
++
++//
++// $Id: ha_sphinx.cc 2752 2011-03-29 08:21:05Z tomat $
++//
+diff -uNr storage/sphinx/ha_sphinx.h storage/sphinx/ha_sphinx.h
+--- storage/sphinx/ha_sphinx.h	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/ha_sphinx.h	2011-10-13 00:59:59.282957578 +0200
+@@ -0,0 +1,169 @@
++//
++// $Id: ha_sphinx.h 2428 2010-08-03 11:38:09Z shodan $
++//
++
++#ifdef USE_PRAGMA_INTERFACE
++#pragma interface // gcc class implementation
++#endif
++
++
++#if MYSQL_VERSION_ID>=50515
++#define TABLE_ARG	TABLE_SHARE
++#elif MYSQL_VERSION_ID>50100
++#define TABLE_ARG	st_table_share
++#else
++#define TABLE_ARG	st_table
++#endif
++
++
++#if MYSQL_VERSION_ID>=50120
++typedef uchar byte;
++#endif
++
++
++/// forward decls
++class THD;
++struct CSphReqQuery;
++struct CSphSEShare;
++struct CSphSEAttr;
++struct CSphSEStats;
++struct CSphSEThreadData;
++
++/// Sphinx SE handler class
++class ha_sphinx : public handler
++{
++protected:
++	THR_LOCK_DATA	m_tLock;				///< MySQL lock
++
++	CSphSEShare *	m_pShare;				///< shared lock info
++
++	uint			m_iMatchesTotal;
++	uint			m_iCurrentPos;
++	const byte *	m_pCurrentKey;
++	uint			m_iCurrentKeyLen;
++
++	char *			m_pResponse;			///< searchd response storage
++	char *			m_pResponseEnd;			///< searchd response storage end (points to wilderness!)
++	char *			m_pCur;					///< current position into response
++	bool			m_bUnpackError;			///< any errors while unpacking response
++
++public:
++#if MYSQL_VERSION_ID<50100
++					ha_sphinx ( TABLE_ARG * table_arg ); // NOLINT
++#else
++					ha_sphinx ( handlerton * hton, TABLE_ARG * table_arg );
++#endif
++					~ha_sphinx () {}
++
++	const char *	table_type () const		{ return "SPHINX"; }	///< SE name for display purposes
++	const char *	index_type ( uint )		{ return "HASH"; }		///< index type name for display purposes
++	const char **	bas_ext () const;								///< my file extensions
++
++	#if MYSQL_VERSION_ID>50100
++	ulonglong		table_flags () const	{ return HA_CAN_INDEX_BLOBS; }			///< bitmap of implemented flags (see handler.h for more info)
++	#else
++	ulong			table_flags () const	{ return HA_CAN_INDEX_BLOBS; }			///< bitmap of implemented flags (see handler.h for more info)
++	#endif
++
++	ulong			index_flags ( uint, uint, bool ) const	{ return 0; }	///< bitmap of flags that says how SE implements indexes
++	uint			max_supported_record_length () const	{ return HA_MAX_REC_LENGTH; }
++	uint			max_supported_keys () const				{ return 1; }
++	uint			max_supported_key_parts () const		{ return 1; }
++	uint			max_supported_key_length () const		{ return MAX_KEY_LENGTH; }
++	uint			max_supported_key_part_length () const	{ return MAX_KEY_LENGTH; }
++
++	#if MYSQL_VERSION_ID>50100
++	virtual double	scan_time ()	{ return (double)( stats.records+stats.deleted )/20.0 + 10; }	///< called in test_quick_select to determine if indexes should be used
++	#else
++	virtual double	scan_time ()	{ return (double)( records+deleted )/20.0 + 10; }				///< called in test_quick_select to determine if indexes should be used
++	#endif
++
++	virtual double	read_time ( ha_rows rows )	{ return (double)rows/20.0 + 1; }					///< index read time estimate
++
++public:
++	int				open ( const char * name, int mode, uint test_if_locked );
++	int				close ();
++
++	int				write_row ( byte * buf );
++	int				update_row ( const byte * old_data, byte * new_data );
++	int				delete_row ( const byte * buf );
++	int				extra ( enum ha_extra_function op );
++
++	int				index_init ( uint keynr, bool sorted ); // 5.1.x
++	int				index_init ( uint keynr ) { return index_init ( keynr, false ); } // 5.0.x
++
++	int				index_end ();
++	int				index_read ( byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag );
++	int				index_read_idx ( byte * buf, uint idx, const byte * key, uint key_len, enum ha_rkey_function find_flag );
++	int				index_next ( byte * buf );
++	int				index_next_same ( byte * buf, const byte * key, uint keylen );
++	int				index_prev ( byte * buf );
++	int				index_first ( byte * buf );
++	int				index_last ( byte * buf );
++
++	int				get_rec ( byte * buf, const byte * key, uint keylen );
++
++	int				rnd_init ( bool scan );
++	int				rnd_end ();
++	int				rnd_next ( byte * buf );
++	int				rnd_pos ( byte * buf, byte * pos );
++	void			position ( const byte * record );
++
++#if MYSQL_VERSION_ID>=50030
++	int				info ( uint );
++#else
++	void			info ( uint );
++#endif
++
++	int				reset();
++	int				external_lock ( THD * thd, int lock_type );
++	int				delete_all_rows ();
++	ha_rows			records_in_range ( uint inx, key_range * min_key, key_range * max_key );
++
++	int				delete_table ( const char * from );
++	int				rename_table ( const char * from, const char * to );
++	int				create ( const char * name, TABLE * form, HA_CREATE_INFO * create_info );
++
++	THR_LOCK_DATA **		store_lock ( THD * thd, THR_LOCK_DATA ** to, enum thr_lock_type lock_type );
++
++public:
++	virtual const COND *	cond_push ( const COND *cond );
++	virtual void			cond_pop ();
++
++private:
++	uint32			m_iFields;
++	char **			m_dFields;
++
++	uint32			m_iAttrs;
++	CSphSEAttr *	m_dAttrs;
++	int				m_bId64;
++
++	int *			m_dUnboundFields;
++
++private:
++	int				Connect ( const char * sQueryHost, ushort uPort );
++	int				ConnectAPI ( const char * sQueryHost, int iQueryPort );
++	int				HandleMysqlError ( struct st_mysql * pConn, int iErrCode );
++
++	uint32			UnpackDword ();
++	char *			UnpackString ();
++	bool			UnpackSchema ();
++	bool			UnpackStats ( CSphSEStats * pStats );
++
++	CSphSEThreadData *	GetTls ();
++};
++
++
++#if MYSQL_VERSION_ID < 50100
++bool sphinx_show_status ( THD * thd );
++#endif
++
++int sphinx_showfunc_total_found ( THD *, SHOW_VAR *, char * );
++int sphinx_showfunc_total ( THD *, SHOW_VAR *, char * );
++int sphinx_showfunc_time ( THD *, SHOW_VAR *, char * );
++int sphinx_showfunc_word_count ( THD *, SHOW_VAR *, char * );
++int sphinx_showfunc_words ( THD *, SHOW_VAR *, char * );
++
++//
++// $Id: ha_sphinx.h 2428 2010-08-03 11:38:09Z shodan $
++//
+diff -uNr storage/sphinx/INSTALL storage/sphinx/INSTALL
+--- storage/sphinx/INSTALL	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/INSTALL	2010-07-07 18:12:02.000000000 +0200
+@@ -0,0 +1,48 @@
++Building MySQL with SphinxSE
++=============================
++
++Note: BUILD/autorun.sh step on Linux might malfunction with some
++versions of automake; autorun.sh will not fail but the build will.
++automake 1.9.6 is known to work.
++
++
++
++MySQL 5.0.x on Linux
++---------------------
++
++tar zxvf mysql-5.0.91.tar.gz
++cp -R mysqlse mysql-5.0.91/sql/sphinx
++cd mysql-5.0.91
++
++patch -p1 -i sql/sphinx/sphinx.5.0.91.diff
++sh BUILD/autorun.sh
++./configure --with-sphinx-storage-engine
++make
++
++
++
++MySQL 5.1.x on Linux
++---------------------
++
++tar zxvf mysql-5.1.47.tar.gz
++cp -R -p mysqlse mysql-5.1.47/storage/sphinx
++cd mysql-5.1.47
++
++sh BUILD/autorun.sh
++./configure --with-plugins=sphinx
++make
++
++
++
++MySQL 5.0.x on Windows
++-----------------------
++
++tar zxvf mysql-5.0.91.tar.gz
++cp -R mysqlse mysql-5.0.91/sql/sphinx
++cd mysql-5.0.91
++
++patch -p1 -i sql/sphinx/sphinx.5.0.91.diff
++win/configure.js WITH_SPHINX_STORAGE_ENGINE
++win/build-vs8
++
++--eof--
+diff -uNr storage/sphinx/Makefile.am storage/sphinx/Makefile.am
+--- storage/sphinx/Makefile.am	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/Makefile.am	2009-02-13 22:26:46.000000000 +0100
+@@ -0,0 +1,59 @@
++# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
++# 
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the License, or
++# (at your option) any later version.
++# 
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++# 
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software
++# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++
++#called from the top level Makefile
++
++MYSQLDATAdir =          $(localstatedir)
++MYSQLSHAREdir =         $(pkgdatadir)
++MYSQLBASEdir=           $(prefix)
++MYSQLLIBdir=            $(pkglibdir)
++pkgplugindir =          $(pkglibdir)/plugin
++INCLUDES =              -I$(top_srcdir)/include -I$(top_builddir)/include \
++			-I$(top_srcdir)/regex \
++			-I$(top_srcdir)/sql \
++                        -I$(srcdir)
++SUBDIRS =				../../include ../../mysys ../../strings ../../dbug ../../extra
++WRAPLIBS=
++
++LDADD =
++
++DEFS= @DEFS@ \
++      -D_REENTRANT -D_PTHREADS -DENGINE -DSTORAGE_ENGINE -DMYSQL_SERVER
++
++noinst_HEADERS =	ha_sphinx.h
++
++EXTRA_LTLIBRARIES =	ha_sphinx.la
++pkgplugin_LTLIBRARIES = @plugin_sphinx_shared_target@ sphinx.la
++
++ha_sphinx_la_LDFLAGS =	-module -rpath $(MYSQLLIBdir)
++ha_sphinx_la_CXXFLAGS=	$(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
++ha_sphinx_la_CFLAGS =	$(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
++ha_sphinx_la_SOURCES =	ha_sphinx.cc
++
++sphinx_la_LDFLAGS = -module
++sphinx_la_CXXFLAGS = $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
++sphinx_la_CFLAGS = $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
++sphinx_la_SOURCES = snippets_udf.cc
++
++EXTRA_LIBRARIES =	libsphinx.a
++noinst_LIBRARIES =	@plugin_sphinx_static_target@
++libsphinx_a_CXXFLAGS =	$(AM_CFLAGS)
++libsphinx_a_CFLAGS =	$(AM_CFLAGS)
++libsphinx_a_SOURCES=	ha_sphinx.cc
++
++EXTRA_DIST =		cmakelists.txt
++# Don't update the files from bitkeeper
++%::SCCS/s.%
+diff -uNr storage/sphinx/make-patch.sh storage/sphinx/make-patch.sh
+--- storage/sphinx/make-patch.sh	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/make-patch.sh	2008-09-05 20:06:30.000000000 +0200
+@@ -0,0 +1,36 @@
++#!/bin/sh
++
++OUT=$1
++ORIG=$2
++NEW=$3
++
++if [ ! \( "$1" -a "$2" -a "$3" \) ]; then
++	echo "$0 <patch> <original> <new>"
++	exit 1
++fi
++
++FILES='
++/config/ac-macros/ha_sphinx.m4
++/configure.in
++/libmysqld/Makefile.am
++/sql/handler.cc
++/sql/handler.h
++/sql/Makefile.am
++/sql/mysqld.cc
++/sql/mysql_priv.h
++/sql/set_var.cc
++/sql/sql_lex.h
++/sql/sql_parse.cc
++/sql/sql_yacc.yy
++/sql/structs.h
++/sql/sql_show.cc
++'
++
++rm -f $OUT
++if [ -e $OUT ]; then
++	exit 1
++fi
++
++for name in $FILES; do
++	diff -BNru "$ORIG$name" "$NEW$name" >> $OUT
++done
+diff -uNr storage/sphinx/plug.in storage/sphinx/plug.in
+--- storage/sphinx/plug.in	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/plug.in	2006-06-07 09:28:43.000000000 +0200
+@@ -0,0 +1,5 @@
++MYSQL_STORAGE_ENGINE(sphinx,,  [Sphinx Storage Engine],
++        [Sphinx Storage Engines], [max,max-no-ndb])
++MYSQL_PLUGIN_DIRECTORY(sphinx, [storage/sphinx])
++MYSQL_PLUGIN_STATIC(sphinx,    [libsphinx.a])
++MYSQL_PLUGIN_DYNAMIC(sphinx,   [ha_sphinx.la])
+diff -uNr storage/sphinx/snippets_udf.cc storage/sphinx/snippets_udf.cc
+--- storage/sphinx/snippets_udf.cc	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/snippets_udf.cc	2011-01-01 03:33:06.000000000 +0100
+@@ -0,0 +1,768 @@
++//
++// $Id: snippets_udf.cc 2616 2011-01-01 02:33:06Z shodan $
++//
++
++//
++// Copyright (c) 2001-2011, Andrew Aksyonoff
++// Copyright (c) 2008-2011, Sphinx Technologies Inc
++// All rights reserved
++//
++// This program is free software; you can redistribute it and/or modify
++// it under the terms of the GNU General Public License. You should have
++// received a copy of the GPL license along with this program; if you
++// did not, you can find it at http://www.gnu.org/
++//
++
++#include <stdio.h>
++#include <string.h>
++#include <assert.h>
++
++#include <sys/un.h>
++#include <netdb.h>
++
++#include <mysql_version.h>
++
++#if MYSQL_VERSION_ID>50100
++#include "mysql_priv.h"
++#include <mysql/plugin.h>
++#else
++#include "../mysql_priv.h"
++#endif
++
++#include <mysys_err.h>
++#include <my_sys.h>
++
++#if MYSQL_VERSION_ID>=50120
++typedef uchar byte;
++#endif
++
++/// partially copy-pasted stuff that should be moved elsewhere
++
++#if UNALIGNED_RAM_ACCESS
++
++/// pass-through wrapper
++template < typename T > inline T sphUnalignedRead ( const T & tRef )
++{
++	return tRef;
++}
++
++/// pass-through wrapper
++template < typename T > void sphUnalignedWrite ( void * pPtr, const T & tVal )
++{
++	*(T*)pPtr = tVal;
++}
++
++#else
++
++/// unaligned read wrapper for some architectures (eg. SPARC)
++template < typename T >
++inline T sphUnalignedRead ( const T & tRef )
++{
++	T uTmp;
++	byte * pSrc = (byte *) &tRef;
++	byte * pDst = (byte *) &uTmp;
++	for ( int i=0; i<(int)sizeof(T); i++ )
++		*pDst++ = *pSrc++;
++	return uTmp;
++}
++
++/// unaligned write wrapper for some architectures (eg. SPARC)
++template < typename T >
++void sphUnalignedWrite ( void * pPtr, const T & tVal )
++{
++	byte * pDst = (byte *) pPtr;
++	byte * pSrc = (byte *) &tVal;
++	for ( int i=0; i<(int)sizeof(T); i++ )
++		*pDst++ = *pSrc++;
++}
++
++#endif
++
++#define SPHINXSE_MAX_ALLOC			(16*1024*1024)
++
++#define SafeDelete(_arg)		{ if ( _arg ) delete ( _arg );		(_arg) = NULL; }
++#define SafeDeleteArray(_arg)	{ if ( _arg ) delete [] ( _arg );	(_arg) = NULL; }
++
++#define Min(a,b) ((a)<(b)?(a):(b))
++
++typedef unsigned int DWORD;
++
++inline DWORD sphF2DW ( float f ) { union { float f; uint32 d; } u; u.f = f; return u.d; }
++
++static char * sphDup ( const char * sSrc, int iLen=-1 )
++{
++	if ( !sSrc )
++		return NULL;
++
++	if ( iLen<0 )
++		iLen = strlen(sSrc);
++
++	char * sRes = new char [ 1+iLen ];
++	memcpy ( sRes, sSrc, iLen );
++	sRes[iLen] = '\0';
++	return sRes;
++}
++
++static inline void sphShowErrno ( const char * sCall )
++{
++	char sError[256];
++	snprintf ( sError, sizeof(sError), "%s() failed: [%d] %s", sCall, errno, strerror(errno) );
++	my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError );
++}
++
++static const bool sphReportErrors = true;
++
++static bool sphSend ( int iFd, const char * pBuffer, int iSize, bool bReportErrors = false )
++{
++	assert ( pBuffer );
++	assert ( iSize > 0 );
++
++	const int iResult = send ( iFd, pBuffer, iSize, 0 );
++	if ( iResult != iSize )
++	{
++		if ( bReportErrors ) sphShowErrno("send");
++		return false;
++	}
++	return true;
++}
++
++static bool sphRecv ( int iFd, char * pBuffer, int iSize, bool bReportErrors = false )
++{
++	assert ( pBuffer );
++	assert ( iSize > 0 );
++	
++	while ( iSize )
++	{
++		const int iResult = recv ( iFd, pBuffer, iSize, 0 );
++		if ( iResult > 0 )
++		{
++			iSize -= iResult;
++			pBuffer += iSize;
++		}
++		else if ( iResult == 0 )
++		{
++			if ( bReportErrors )
++				my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), "recv() failed: disconnected" );
++			return false;
++		}
++		else
++		{
++			if ( bReportErrors ) sphShowErrno("recv");
++			return false;
++		}
++	}
++	return true;
++}
++
++enum
++{
++	SPHINX_SEARCHD_PROTO		= 1,
++
++	SEARCHD_COMMAND_SEARCH		= 0,
++	SEARCHD_COMMAND_EXCERPT		= 1,
++
++	VER_COMMAND_SEARCH		= 0x116,
++	VER_COMMAND_EXCERPT		= 0x100,
++};
++
++/// known answers
++enum
++{
++	SEARCHD_OK		= 0,	///< general success, command-specific reply follows
++	SEARCHD_ERROR	= 1,	///< general failure, error message follows
++	SEARCHD_RETRY	= 2,	///< temporary failure, error message follows, client should retry later
++	SEARCHD_WARNING	= 3		///< general success, warning message and command-specific reply follow
++};
++
++#define SPHINXSE_DEFAULT_SCHEME		"sphinx"
++#define SPHINXSE_DEFAULT_HOST		"127.0.0.1"
++#define SPHINXSE_DEFAULT_PORT		9312
++#define SPHINXSE_DEFAULT_INDEX		"*"
++
++class CSphBuffer
++{
++private:
++	bool m_bOverrun;
++	int m_iSize;
++	int m_iLeft;
++	char * m_pBuffer;
++	char * m_pCurrent;
++
++public:
++	CSphBuffer ( const int iSize )
++		: m_bOverrun ( false )
++		, m_iSize ( iSize )
++		, m_iLeft ( iSize )
++	{
++		assert ( iSize > 0 );
++		m_pBuffer = new char[iSize];
++		m_pCurrent = m_pBuffer;
++	}
++
++	~CSphBuffer ()
++	{
++		SafeDelete ( m_pBuffer );
++	}
++
++	const char * Ptr() const { return m_pBuffer; }
++
++	bool Finalize()
++	{
++		return !( m_bOverrun || m_iLeft != 0 || m_pCurrent - m_pBuffer != m_iSize );
++	}
++	
++	void SendBytes ( const void * pBytes, int iBytes );
++	
++	void SendWord ( short int v )					{ v = ntohs(v); SendBytes ( &v, sizeof(v) ); }
++	void SendInt ( int v )							{ v = ntohl(v); SendBytes ( &v, sizeof(v) ); }
++	void SendDword ( DWORD v )						{ v = ntohl(v) ;SendBytes ( &v, sizeof(v) ); }
++	void SendUint64 ( ulonglong v )					{ SendDword ( uint(v>>32) ); SendDword ( uint(v&0xFFFFFFFFUL) ); }
++	void SendString ( const char * v )				{ SendString ( v, strlen(v) ); }
++	void SendString ( const char * v, int iLen )	{ SendDword(iLen); SendBytes ( v, iLen ); }
++	void SendFloat ( float v )						{ SendDword ( sphF2DW(v) ); }
++};
++
++void CSphBuffer::SendBytes ( const void * pBytes, int iBytes )
++{
++	if ( m_iLeft < iBytes )
++	{
++		m_bOverrun = true;
++		return;
++	}
++
++	memcpy ( m_pCurrent, pBytes, iBytes );
++
++	m_pCurrent += iBytes;
++	m_iLeft -= iBytes;
++}
++
++struct CSphUrl
++{
++	char * m_sBuffer;
++	char * m_sFormatted;
++	
++	char * m_sScheme;
++	char * m_sHost;
++	char * m_sIndex;
++	
++	int m_iPort;
++	
++	CSphUrl()
++		: m_sBuffer ( NULL )
++		, m_sFormatted ( NULL )
++		, m_sScheme ( SPHINXSE_DEFAULT_SCHEME )
++		, m_sHost ( SPHINXSE_DEFAULT_HOST )
++		, m_sIndex ( SPHINXSE_DEFAULT_INDEX )
++		, m_iPort ( SPHINXSE_DEFAULT_PORT )
++	{}
++	
++	~CSphUrl()
++	{
++		SafeDeleteArray ( m_sFormatted );
++		SafeDeleteArray ( m_sBuffer );
++	}
++	
++	bool Parse ( const char * sUrl, int iLen );
++	int Connect();
++	const char * Format();
++};
++
++const char * CSphUrl::Format()
++{
++	if ( !m_sFormatted )
++	{
++		int iSize = 15 + strlen(m_sHost) + strlen(m_sIndex);
++		m_sFormatted = new char [ iSize ];
++		if ( m_iPort )
++			snprintf ( m_sFormatted, iSize, "inet://%s:%d/%s", m_sHost, m_iPort, m_sIndex );
++		else
++			snprintf ( m_sFormatted, iSize, "unix://%s/%s", m_sHost, m_sIndex );
++	}
++	return m_sFormatted;
++}
++
++// the following scheme variants are recognized
++//
++// inet://host/index
++// inet://host:port/index
++// unix://unix/domain/socket:index
++// unix://unix/domain/socket
++bool CSphUrl::Parse ( const char * sUrl, int iLen )
++{
++	bool bOk = true;
++	while ( iLen )
++	{
++		bOk = false;
++		
++		m_sBuffer = sphDup ( sUrl, iLen );
++		m_sScheme = m_sBuffer;
++		
++		m_sHost = strstr ( m_sBuffer, "://" );
++		if ( !m_sHost )
++			break;
++		m_sHost[0] = '\0';
++		m_sHost += 2;
++		
++		if ( !strcmp ( m_sScheme, "unix" ) )
++		{
++			// unix-domain socket
++			m_iPort = 0;
++			if (!( m_sIndex = strrchr ( m_sHost, ':' ) ))
++				m_sIndex = SPHINXSE_DEFAULT_INDEX;
++			else
++			{
++				*m_sIndex++ = '\0';
++				if ( !*m_sIndex )
++					m_sIndex = SPHINXSE_DEFAULT_INDEX;
++			}
++			bOk = true;
++			break;
++		}
++		if( strcmp ( m_sScheme, "sphinx" ) != 0 && strcmp ( m_sScheme, "inet" ) != 0 )
++			break;
++
++		// inet
++		m_sHost++;
++		char * sPort = strchr ( m_sHost, ':' );
++		if ( sPort )
++		{
++			*sPort++ = '\0';
++			if ( *sPort )
++			{
++				m_sIndex = strchr ( sPort, '/' );
++				if ( m_sIndex )
++					*m_sIndex++ = '\0'; 
++				else
++					m_sIndex = SPHINXSE_DEFAULT_INDEX;
++				
++				m_iPort = atoi(sPort);
++				if ( !m_iPort )
++					m_iPort = SPHINXSE_DEFAULT_PORT;
++			}
++		} else
++		{
++			m_sIndex = strchr ( m_sHost, '/' );
++			if ( m_sIndex )
++				*m_sIndex++ = '\0';
++			else
++				m_sIndex = SPHINXSE_DEFAULT_INDEX;
++		}
++
++		bOk = true;
++		break;
++	}
++	
++	return bOk;
++}
++
++int CSphUrl::Connect()
++{
++	struct sockaddr_in sin;
++#ifndef __WIN__
++	struct sockaddr_un saun;
++#endif
++
++	int iDomain = 0;
++	int iSockaddrSize = 0;
++	struct sockaddr * pSockaddr = NULL;
++
++	in_addr_t ip_addr;
++
++	if ( m_iPort )
++	{
++		iDomain = AF_INET;
++		iSockaddrSize = sizeof(sin);
++		pSockaddr = (struct sockaddr *) &sin;
++
++		memset ( &sin, 0, sizeof(sin) );
++		sin.sin_family = AF_INET;
++		sin.sin_port = htons(m_iPort);
++		
++		// resolve address
++		if ( (int)( ip_addr=inet_addr(m_sHost) ) != (int)INADDR_NONE )
++			memcpy ( &sin.sin_addr, &ip_addr, sizeof(ip_addr) );
++		else
++		{
++			int tmp_errno;
++			struct hostent tmp_hostent, *hp;
++			char buff2 [ GETHOSTBYNAME_BUFF_SIZE ];
++			
++			hp = my_gethostbyname_r ( m_sHost, &tmp_hostent,
++									  buff2, sizeof(buff2), &tmp_errno );
++			if ( !hp )
++			{ 
++				my_gethostbyname_r_free();
++				
++				char sError[256];
++				snprintf ( sError, sizeof(sError), "failed to resolve searchd host (name=%s)", m_sHost );
++				
++				my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++				return -1;
++			}
++			
++			memcpy ( &sin.sin_addr, hp->h_addr, Min ( sizeof(sin.sin_addr), (size_t)hp->h_length ) );
++			my_gethostbyname_r_free();
++		}
++	}
++	else
++	{
++#ifndef __WIN__
++		iDomain = AF_UNIX;
++		iSockaddrSize = sizeof(saun);
++		pSockaddr = (struct sockaddr *) &saun;
++
++		memset ( &saun, 0, sizeof(saun) );
++		saun.sun_family = AF_UNIX;
++		strncpy ( saun.sun_path, m_sHost, sizeof(saun.sun_path)-1 );
++#else
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), "Unix-domain sockets are not supported on Windows" );
++		return -1;
++#endif
++	}
++
++	// connect to searchd and exchange versions
++	uint uServerVersion;
++	uint uClientVersion = htonl ( SPHINX_SEARCHD_PROTO );
++	int iSocket = -1;
++	char * pError = NULL;
++	do
++	{
++		iSocket = socket ( iDomain, SOCK_STREAM, 0 );
++		if ( iSocket == -1 )
++		{
++			pError = "Failed to create client socket";
++			break;
++		}
++	
++		if ( connect ( iSocket, pSockaddr, iSockaddrSize ) == -1)
++		{
++			pError = "Failed to connect to searchd";
++			break;
++		}
++
++		if ( !sphRecv ( iSocket, (char *)&uServerVersion, sizeof(uServerVersion) ) )
++		{
++			pError = "Failed to receive searchd version";
++			break;
++		}
++		
++		if ( !sphSend ( iSocket, (char *)&uClientVersion, sizeof(uClientVersion) ) )
++		{
++			pError = "Failed to send client version";
++			break;
++		}
++	}
++	while(0);
++
++	// fixme: compare versions?
++
++	if ( pError )
++	{
++		char sError[1024];
++		snprintf ( sError, sizeof(sError), "%s [%d] %s", Format(), errno, strerror(errno) );
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++
++		if ( iSocket != -1 )
++			close ( iSocket );
++		
++		return -1;
++	}
++
++	return iSocket;
++}
++
++struct CSphResponse
++{
++	char * m_pBuffer;
++	char * m_pBody;
++
++	CSphResponse ()
++		: m_pBuffer ( NULL )
++		, m_pBody ( NULL )
++	{}
++
++	CSphResponse ( DWORD uSize )
++		: m_pBody ( NULL )
++	{
++		m_pBuffer = new char[uSize];
++	}
++
++	~CSphResponse ()
++	{
++		SafeDeleteArray ( m_pBuffer );
++	}
++	
++	static CSphResponse * Read ( int iSocket, int iClientVersion );
++};
++
++CSphResponse *
++CSphResponse::Read ( int iSocket, int iClientVersion )
++{
++	char sHeader[8];
++	if ( !sphRecv ( iSocket, sHeader, sizeof(sHeader) ) )
++		return NULL;
++
++	int iStatus   = ntohs ( sphUnalignedRead ( *(short int *) &sHeader[0] ) );
++	int iVersion  = ntohs ( sphUnalignedRead ( *(short int *) &sHeader[2] ) );
++	DWORD uLength = ntohl ( sphUnalignedRead ( *(DWORD *)     &sHeader[4] ) );
++
++	if ( iVersion < iClientVersion ) // fixme: warn
++		;
++
++	if ( uLength <= SPHINXSE_MAX_ALLOC )
++	{
++		CSphResponse * pResponse = new CSphResponse ( uLength );
++		if ( !sphRecv ( iSocket, pResponse->m_pBuffer, uLength ) )
++		{
++			SafeDelete ( pResponse );
++			return NULL;
++		}
++
++		pResponse->m_pBody = pResponse->m_pBuffer;
++		if ( iStatus != SEARCHD_OK )
++		{
++			DWORD uSize = ntohl ( *(DWORD *)pResponse->m_pBuffer );
++			if ( iStatus == SEARCHD_WARNING )
++				pResponse->m_pBody += uSize; // fixme: report the warning somehow
++			else
++			{
++				char * sMessage = sphDup ( pResponse->m_pBuffer + sizeof(DWORD), uSize );
++				my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sMessage );
++				SafeDelete ( sMessage );
++				SafeDelete ( pResponse );
++				return NULL;
++			}
++		}
++		return pResponse;
++	}
++	return NULL;
++}
++
++/// udf
++
++extern "C"
++{
++	my_bool sphinx_snippets_init ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sMessage );
++	void sphinx_snippets_deinit ( UDF_INIT * pUDF );
++	char * sphinx_snippets ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sResult, unsigned long * pLength, char * pIsNull, char * sError );
++};
++
++#define MAX_MESSAGE_LENGTH 255
++#define MAX_RESULT_LENGTH 255
++
++struct CSphSnippets
++{
++	CSphUrl m_tUrl;
++	CSphResponse * m_pResponse;
++
++	int m_iBeforeMatch;
++	int m_iAfterMatch;
++	int m_iChunkSeparator;
++	int m_iLimit;
++	int m_iAround;
++	int m_iFlags;
++
++	CSphSnippets()
++		: m_pResponse(NULL)
++		, m_iBeforeMatch(0)
++		, m_iAfterMatch(0)
++		, m_iChunkSeparator(0)
++		  // defaults
++		, m_iLimit(256)
++		, m_iAround(5)
++		, m_iFlags(1)
++	{
++	}
++
++	~CSphSnippets()
++	{
++		SafeDelete ( m_pResponse );
++	}
++};
++
++#define KEYWORD(NAME) else if ( strncmp ( NAME, pArgs->attributes[i], pArgs->attribute_lengths[i] ) == 0 )
++
++#define CHECK_TYPE(TYPE)											\
++	if ( pArgs->arg_type[i] != TYPE )								\
++	{																\
++		snprintf ( sMessage, MAX_MESSAGE_LENGTH,					\
++				   "%.*s argument must be a string",				\
++				   (int)pArgs->attribute_lengths[i],				\
++				   pArgs->attributes[i] );							\
++		bFail = true;												\
++		break;														\
++	}																\
++	if ( TYPE == STRING_RESULT && !pArgs->args[i] )					\
++	{																\
++		snprintf ( sMessage, MAX_MESSAGE_LENGTH,					\
++				   "%.*s argument must be constant (and not NULL)",	\
++				   (int)pArgs->attribute_lengths[i],				\
++				   pArgs->attributes[i] );							\
++		bFail = true;												\
++		break;														\
++	}
++
++#define STRING CHECK_TYPE(STRING_RESULT)
++#define INT CHECK_TYPE(INT_RESULT); int iValue = *(long long *)pArgs->args[i]
++
++my_bool sphinx_snippets_init ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sMessage )
++{
++	if ( pArgs->arg_count < 3 )
++	{
++		strncpy ( sMessage, "insufficient arguments", MAX_MESSAGE_LENGTH );
++		return 1;
++	}
++
++	bool bFail = false;
++	CSphSnippets * pOpts = new CSphSnippets;
++	for ( uint i = 0; i < pArgs->arg_count; i++ )
++	{
++		if ( i < 3 )
++		{
++			if ( pArgs->arg_type[i] != STRING_RESULT )
++			{
++				strncpy ( sMessage, "first three arguments must be of string type", MAX_MESSAGE_LENGTH );
++				bFail = true;
++				break;
++			}
++		}
++		KEYWORD("sphinx")
++		{
++			STRING;
++			if ( !pOpts->m_tUrl.Parse ( pArgs->args[i], pArgs->lengths[i] ) )
++			{
++				strncpy ( sMessage, "failed to parse connection string", MAX_MESSAGE_LENGTH );
++				bFail = true;
++				break;
++			}
++		}
++		KEYWORD("before_match")		{ STRING; pOpts->m_iBeforeMatch = i; }
++		KEYWORD("after_match")		{ STRING; pOpts->m_iAfterMatch = i; }
++		KEYWORD("chunk_separator")	{ STRING; pOpts->m_iChunkSeparator = i; }
++		KEYWORD("limit")			{ INT; pOpts->m_iLimit = iValue; }
++		KEYWORD("around")			{ INT; pOpts->m_iAround = iValue; }
++		KEYWORD("exact_phrase")		{ INT; if ( iValue ) pOpts->m_iFlags |= 2; }
++		KEYWORD("single_passage")	{ INT; if ( iValue ) pOpts->m_iFlags |= 4; }
++		KEYWORD("use_boundaries")	{ INT; if ( iValue ) pOpts->m_iFlags |= 8; }
++		KEYWORD("weight_order")		{ INT; if ( iValue ) pOpts->m_iFlags |= 16; }
++		else
++		{
++			snprintf ( sMessage, MAX_MESSAGE_LENGTH, "unrecognized argument: %.*s",
++					   (int)pArgs->attribute_lengths[i], pArgs->attributes[i] );
++			bFail = true;
++			break;
++		}
++	}
++	
++	if ( bFail )
++	{
++		SafeDelete ( pOpts );
++		return 1;
++	}
++	pUDF->ptr = (char *)pOpts;
++	return 0;
++}
++
++#undef STRING
++#undef INT
++#undef KEYWORD
++#undef CHECK_TYPE
++
++#define ARG(i) pArgs->args[i], pArgs->lengths[i]
++#define ARG_LEN(VAR, LEN) ( VAR ? pArgs->lengths[VAR] : LEN )
++
++#define SEND_STRING(INDEX, DEFAULT)							\
++	if ( INDEX )											\
++		tBuffer.SendString ( ARG(INDEX) );					\
++	else													\
++		tBuffer.SendString ( DEFAULT, sizeof(DEFAULT) - 1 );
++
++
++char * sphinx_snippets ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sResult, unsigned long * pLength, char * pIsNull, char * pError )
++{
++	CSphSnippets * pOpts = (CSphSnippets *)pUDF->ptr;
++	assert ( pOpts );
++
++	if ( !pArgs->args[0] || !pArgs->args[1] || !pArgs->args[2] )
++	{
++		*pIsNull = 1;
++		return sResult;
++	}
++
++	const int iSize =
++		8 + // header
++		8 +
++		4 + pArgs->lengths[1] + // index
++		4 + pArgs->lengths[2] + // words
++		4 + ARG_LEN ( pOpts->m_iBeforeMatch, 3 ) +
++		4 + ARG_LEN ( pOpts->m_iAfterMatch, 4 ) +
++		4 + ARG_LEN ( pOpts->m_iChunkSeparator, 5 ) +
++		12 +
++		4 + pArgs->lengths[0]; // document
++
++	CSphBuffer tBuffer(iSize);
++
++	tBuffer.SendWord ( SEARCHD_COMMAND_EXCERPT );
++	tBuffer.SendWord ( VER_COMMAND_EXCERPT );
++	tBuffer.SendDword ( iSize - 8 );
++
++	tBuffer.SendDword ( 0 );
++	tBuffer.SendDword ( pOpts->m_iFlags );
++
++	tBuffer.SendString ( ARG(1) ); // index
++	tBuffer.SendString ( ARG(2) ); // words
++
++	SEND_STRING ( pOpts->m_iBeforeMatch, "<b>" );
++	SEND_STRING ( pOpts->m_iAfterMatch, "</b>" );
++	SEND_STRING ( pOpts->m_iChunkSeparator, " ... " );
++
++	tBuffer.SendInt ( pOpts->m_iLimit );
++	tBuffer.SendInt ( pOpts->m_iAround );
++
++	// single document
++	tBuffer.SendInt ( 1 );
++	tBuffer.SendString ( ARG(0) );
++
++	int iSocket = -1;
++	do
++	{
++		if ( !tBuffer.Finalize() )
++		{
++			my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: failed to build request" );
++			break;
++		}
++		
++		iSocket = pOpts->m_tUrl.Connect();
++		if ( iSocket == -1 ) break;
++		if ( !sphSend ( iSocket, tBuffer.Ptr(), iSize, sphReportErrors ) ) break;
++
++		CSphResponse * pResponse = CSphResponse::Read ( iSocket, 0x100 );
++		if ( !pResponse ) break;
++
++		close ( iSocket );
++		pOpts->m_pResponse = pResponse;
++		*pLength = ntohl( *(DWORD *)pResponse->m_pBody );
++		return pResponse->m_pBody + sizeof(DWORD);
++	}
++	while(0);
++
++	if ( iSocket != -1 )
++		close ( iSocket );
++
++	*pError = 1;
++	return sResult;
++}
++
++#undef SEND_STRING
++#undef ARG_LEN	
++#undef ARG
++
++void sphinx_snippets_deinit ( UDF_INIT * pUDF )
++{
++	CSphSnippets * pOpts = (CSphSnippets *)pUDF->ptr;
++	SafeDelete ( pOpts );
++}
++
++//
++// $Id: snippets_udf.cc 2616 2011-01-01 02:33:06Z shodan $
++//
diff --git a/component/mysql-5.5/mysql_create_system_tables__no_test.patch b/component/mysql-5.5/mysql_create_system_tables__no_test.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8d917ea1ad57f02c3e9e0bc94038c58815e7d6a2
--- /dev/null
+++ b/component/mysql-5.5/mysql_create_system_tables__no_test.patch
@@ -0,0 +1,27 @@
+# 33_scripts__mysql_create_system_tables__no_test.dpatch by  <ch@debian.org>
+
+A user with no password prevents a normal user from login under certain
+circumstances as it is checked first.
+See http://bugs.debian.org/301741
+and http://bugs.mysql.com/bug.php?id=6901
+
+--- scripts/mysql_system_tables_data.sql	2008-12-04 22:59:44.000000000 +0100
++++ scripts/mysql_system_tables_data.sql	2008-12-04 23:00:07.000000000 +0100
+@@ -26,8 +26,6 @@
+ -- Fill "db" table with default grants for anyone to
+ -- access database 'test' and 'test_%' if "db" table didn't exist
+ CREATE TEMPORARY TABLE tmp_db LIKE db;
+-INSERT INTO tmp_db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y');
+-INSERT INTO tmp_db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y');
+ INSERT INTO db SELECT * FROM tmp_db WHERE @had_db_table=0;
+ DROP TABLE tmp_db;
+ 
+@@ -40,8 +38,6 @@
+ REPLACE INTO tmp_user SELECT @current_hostname,'root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','' FROM dual WHERE LOWER( @current_hostname) != 'localhost';
+ REPLACE INTO tmp_user VALUES ('127.0.0.1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','');
+ REPLACE INTO tmp_user VALUES ('::1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','');
+-INSERT INTO tmp_user (host,user) VALUES ('localhost','');
+-INSERT INTO tmp_user (host,user) SELECT @current_hostname,'' FROM dual WHERE LOWER(@current_hostname ) != 'localhost';
+ INSERT INTO user SELECT * FROM tmp_user WHERE @had_user_table=0;
+ DROP TABLE tmp_user;
+ 
diff --git a/component/ncurses/buildout.cfg b/component/ncurses/buildout.cfg
index 3944e63180e01cc713d5e03575518e202f2c6102..63dea92837fb3305b67ef0d18aa2ded492126ccb 100644
--- a/component/ncurses/buildout.cfg
+++ b/component/ncurses/buildout.cfg
@@ -15,10 +15,9 @@ configure-options =
   --enable-rpath
 # tricky way to rerun with --enable-widec
 make-targets =
-  install && (for i in curses unctrl eti form menu panel term; do ln -sf ncurses/$i.h ${buildout:parts-directory}/${:_buildout_section_name_}/include/$i.h; done) && ./configure ${:configure-options} --enable-widec && make install
+  install && (for i in curses unctrl eti form menu panel term; do ln -sf ncurses/$i.h ${buildout:parts-directory}/${:_buildout_section_name_}/include/$i.h; done; ln -sf libncurses.so ${buildout:parts-directory}/${:_buildout_section_name_}/lib/libcurses.so) && ./configure ${:configure-options} --enable-widec && make ${:make-options} install
 # pass dummy LDCONFIG to skip needless calling of ldconfig by non-root user
 environment =
-  LDFLAGS =-Wl,--as-needed
   LDCONFIG=/bin/echo
 make-options =
   -j1
diff --git a/component/openldap/buildout.cfg b/component/openldap/buildout.cfg
index c09a6692618d188d0dac7894abc17c1adf896be3..d8ea71c8143970f9817091c7931e7edf93f58b4d 100644
--- a/component/openldap/buildout.cfg
+++ b/component/openldap/buildout.cfg
@@ -7,8 +7,8 @@ extends =
 
 [openldap]
 recipe = hexagonit.recipe.cmmi
-url = ftp://ftp.openldap.org/pub/OpenLDAP/openldap-release/openldap-2.4.25.tgz
-md5sum = ec63f9c2add59f323a0459128846905b
+url = ftp://ftp.openldap.org/pub/OpenLDAP/openldap-release/openldap-2.4.26.tgz
+md5sum = f36f3086031dd56ae94f722ffae8df5e
 configure-options =
   --disable-static
   --disable-slapd
diff --git a/component/percona-toolkit/buildout.cfg b/component/percona-toolkit/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..847ec6540d30a7def4386edb47925a8d5a5f03a8
--- /dev/null
+++ b/component/percona-toolkit/buildout.cfg
@@ -0,0 +1,16 @@
+[buildout]
+extends =
+  ../perl/buildout.cfg
+  ../perl-DBI/buildout.cfg
+  ../perl-DBD-mariadb/buildout.cfg
+parts =
+  percona-toolkit
+
+[percona-toolkit]
+recipe = hexagonit.recipe.cmmi
+depends =
+  ${perl:version}
+url = http://www.percona.com/redir/downloads/percona-toolkit/percona-toolkit-1.0.1.tar.gz
+md5sum = 1d843b1b3ebd2eacfa3bf95ef2a00557
+configure-command =
+  ${perl:location}/bin/perl Makefile.PL
diff --git a/component/perl-DBD-MySQL/buildout.cfg b/component/perl-DBD-MySQL/buildout.cfg
index f9475e66a10316e536c2311dcd8c1de85ce210fe..8be6ddab2e236de40d2bbb2e6d541fd8bccea604 100644
--- a/component/perl-DBD-MySQL/buildout.cfg
+++ b/component/perl-DBD-MySQL/buildout.cfg
@@ -1,20 +1,11 @@
 [buildout]
 extends =
-  ../perl/buildout.cfg
-  ../perl-DBI/buildout.cfg
   ../mysql-tritonn-5.0/buildout.cfg
-  ../zlib/buildout.cfg
-  ../openssl/buildout.cfg
+  ../perl-DBD-common/buildout.cfg
+
 parts =
   perl-DBD-MySQL
 
-[perl-DBD-MySQL-patch]
-recipe = hexagonit.recipe.download
-md5sum = e12e9233f20b0370cfcf5228ea767fbc
-url = ${:_profile_base_location_}/${:filename}
-filename = DBD-mysql-4.019.rpathsupport.patch
-download-only = true
-
 [perl-DBD-MySQL]
 recipe = hexagonit.recipe.cmmi
 version = 4.019
diff --git a/component/perl-DBD-MySQL/DBD-mysql-4.019.rpathsupport.patch b/component/perl-DBD-common/DBD-mysql-4.019.rpathsupport.patch
similarity index 100%
rename from component/perl-DBD-MySQL/DBD-mysql-4.019.rpathsupport.patch
rename to component/perl-DBD-common/DBD-mysql-4.019.rpathsupport.patch
diff --git a/component/perl-DBD-common/buildout.cfg b/component/perl-DBD-common/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..4f2229756543b9463705f16fded5c5e6dafb1c4f
--- /dev/null
+++ b/component/perl-DBD-common/buildout.cfg
@@ -0,0 +1,16 @@
+# Please use perl-DBD-MySQL or perl-DBD-mariadb
+[buildout]
+extends =
+  ../perl/buildout.cfg
+  ../perl-DBI/buildout.cfg
+  ../zlib/buildout.cfg
+  ../openssl/buildout.cfg
+
+[perl-DBD-MySQL-patch]
+recipe = hexagonit.recipe.download
+md5sum = e12e9233f20b0370cfcf5228ea767fbc
+url = ${:_profile_base_location_}/${:filename}
+filename = DBD-mysql-4.019.rpathsupport.patch
+download-only = true
+
+
diff --git a/component/perl-DBD-mariadb/buildout.cfg b/component/perl-DBD-mariadb/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..ac379cb8763273a029a7468868de56b8afdfba03
--- /dev/null
+++ b/component/perl-DBD-mariadb/buildout.cfg
@@ -0,0 +1,23 @@
+[buildout]
+extends =
+  ../mariadb/buildout.cfg
+  ../perl-DBD-common/buildout.cfg
+
+parts =
+  perl-DBD-mariadb
+
+[perl-DBD-mariadb]
+recipe = hexagonit.recipe.cmmi
+version = 4.019
+depends =
+  ${perl:version}
+  ${perl-DBI:version}
+url = http://search.cpan.org/CPAN/authors/id/C/CA/CAPTTOFU/DBD-mysql-4.019.tar.gz
+md5sum = 566d98ab8ffac9626a31f6f6d455558e
+patches =
+  ${perl-DBD-MySQL-patch:location}/${perl-DBD-MySQL-patch:filename}
+patch-options = -p1
+configure-command =
+  ${perl:location}/bin/perl Makefile.PL --mysql_config=${mariadb:location}/bin/mysql_config
+environment =
+  OTHERLDFLAGS=-Wl,-rpath=${zlib:location}/lib -Wl,-rpath=${mariadb:location}/lib/mysql -Wl,-rpath=${openssl:location}/lib
\ No newline at end of file
diff --git a/component/perl/buildout.cfg b/component/perl/buildout.cfg
index 8673f03b367d88516c532944ac0b609f9f2710ec..c27ca6d56574ea9b77333eb06b1448e96d34a88b 100644
--- a/component/perl/buildout.cfg
+++ b/component/perl/buildout.cfg
@@ -14,9 +14,11 @@ filename = ${:_buildout_section_name_}
 
 [perl]
 recipe = hexagonit.recipe.cmmi
-version = 5.14.1
+depends =
+  ${gdbm:version}
+version = 5.14.2
 url = http://www.cpan.org/src/5.0/perl-${:version}.tar.bz2
-md5sum = 97cd306a2c22929cc141a09568f43bb0
+md5sum = 04a4c5d3c1f9f19d77daff8e8cd19a26
 siteprefix = ${buildout:parts-directory}/site_${:_buildout_section_name_}
 patch-options = -p1
 patches =
diff --git a/component/python-2.6/buildout.cfg b/component/python-2.6/buildout.cfg
index 6755b652e1f468a03320d81336b3a63443e91693..cf1f1634707b5ee91f7942ada40d0789bd1ece64 100644
--- a/component/python-2.6/buildout.cfg
+++ b/component/python-2.6/buildout.cfg
@@ -24,6 +24,8 @@ filename = python-2.6.6-no_system_inc_dirs.patch
 
 [python2.6]
 recipe = hexagonit.recipe.cmmi
+depends =
+  ${gdbm:version}
 # This is actually the default setting for prefix, but we can't use it in
 # other settings in this part if we don't set it explicitly here.
 prefix = ${buildout:parts-directory}/${:_buildout_section_name_}
diff --git a/component/python-2.7/buildout.cfg b/component/python-2.7/buildout.cfg
index 765488f74e0c1a4202ae3966454747b41a0ed630..e4a5f5cbcb7aa889f61c359bcf94003f8ac93737 100644
--- a/component/python-2.7/buildout.cfg
+++ b/component/python-2.7/buildout.cfg
@@ -38,6 +38,8 @@ python = python2.7
 
 [python2.7common]
 recipe = hexagonit.recipe.cmmi
+depends =
+  ${gdbm:version}
 # This is actually the default setting for prefix, but we can't use it in
 # other settings in this part if we don't set it explicitly here.
 prefix = ${buildout:parts-directory}/${:_buildout_section_name_}
diff --git a/component/rdiff-backup/buildout.cfg b/component/rdiff-backup/buildout.cfg
index b6d15bd56f379e0b5b51b72a81e1a2b95fd6af33..e3678ddff245a30206b10486528aa677677509dd 100644
--- a/component/rdiff-backup/buildout.cfg
+++ b/component/rdiff-backup/buildout.cfg
@@ -13,7 +13,7 @@ library-dirs =
   ${librsync:location}/lib/
 rpath =
   ${librsync:location}/lib/
-find-links = http://download.savannah.nongnu.org/releases/rdiff-backup/rdiff-backup-1.0.5.tar.gz
+find-links = http://pkgs.fedoraproject.org/repo/pkgs/rdiff-backup/rdiff-backup-1.0.5.tar.gz/fa2a165fa07a94be52c52e3545bc7758/rdiff-backup-1.0.5.tar.gz
 
 [rdiff-backup]
 # Scripts only generation part for rdiff-backup
diff --git a/component/slapos/buildout.cfg b/component/slapos/buildout.cfg
index 849f5f2bc929ab49de51f8e03dd10efa14b6de8a..bb5a10873ce5cdf1b4c6dac2c2cc8bba02dad4b8 100644
--- a/component/slapos/buildout.cfg
+++ b/component/slapos/buildout.cfg
@@ -98,6 +98,7 @@ initialization =
 
 # control scripts generation in order to avoid reinstalling bin/buildout
 scripts =
+  bang = slapos.bang:main
   generate-signature-key = slapos.signature:run
   slapconsole = slapos.console:run
   slapos-request = slapos.console:request
@@ -111,38 +112,37 @@ scripts =
   slapproxy = slapos.proxy:main
 
 [versions]
-zc.buildout = 1.5.3-dev-SlapOS-009
+zc.buildout = 1.6.0-dev-SlapOS-003
 Jinja2 = 2.6
-Werkzeug = 0.7.1
+Werkzeug = 0.8.1
 collective.recipe.template = 1.9
 hexagonit.recipe.cmmi = 1.5.0
-lxml = 2.3
+lxml = 2.3.2
 meld3 = 0.6.7
-netaddr = 0.7.5
+netaddr = 0.7.6
 setuptools = 0.6c12dev-r88846
-slapos.core = 0.14
-slapos.libnetworkcache = 0.9
+slapos.core = 0.20
+slapos.libnetworkcache = 0.10
 xml-marshaller = 0.9.7
 z3c.recipe.scripts = 1.0.1
 zc.recipe.egg = 1.3.2
 
 # Required by:
-# slapos.core==0.14
-Flask = 0.7.2
+# slapos.core==0.20
+Flask = 0.8
 
 # Required by:
 # hexagonit.recipe.cmmi==1.5.0
 hexagonit.recipe.download = 1.5.0
 
 # Required by:
-# slapos.core==0.14
-netifaces = 0.5
+# slapos.core==0.20
+netifaces = 0.6
 
 # Required by:
-# slapos.core==0.14
+# slapos.core==0.20
 supervisor = 3.0a10
 
 # Required by:
-# slapos.core==0.14
-zope.interface = 3.7.0
-
+# slapos.core==0.20
+zope.interface = 3.8.0
diff --git a/component/sphinx/buildout.cfg b/component/sphinx/buildout.cfg
index 10257b2e948376e12e1ba236accd964f40697cd4..bb8b0aa7c192c88870a8c49586f4aeb530ca15f3 100644
--- a/component/sphinx/buildout.cfg
+++ b/component/sphinx/buildout.cfg
@@ -18,8 +18,8 @@ filename = sphinx-1.10-beta-snowball.patch
 
 [sphinx]
 recipe = hexagonit.recipe.cmmi
-url = http://sphinxsearch.com/files/sphinx-2.0.1-beta.tar.gz
-md5sum = 95c217d81d0b7a4ff73d5297318c3481
+url = http://sphinxsearch.com/files/sphinx-2.0.2-beta.tar.gz
+md5sum = fafe0f1a71d0ded32404c067eba7d0b3
 configure-options =
   --with-mysql
   --with-mysql-includes=${mariadb:location}/include/mysql
diff --git a/component/sqlite3/buildout.cfg b/component/sqlite3/buildout.cfg
index 168ca9b5fde2c29861cb07c30e45bc441723ce57..05422fa1cb0f5e68e0159c688cb4ff0318af0257 100644
--- a/component/sqlite3/buildout.cfg
+++ b/component/sqlite3/buildout.cfg
@@ -5,8 +5,8 @@ parts =
 
 [sqlite3]
 recipe = hexagonit.recipe.cmmi
-url = http://www.sqlite.org/sqlite-autoconf-3070800.tar.gz
-md5sum = 6bfb46d73caaa1bbbcd2b52184b6c542
+url = http://www.sqlite.org/sqlite-autoconf-3070900.tar.gz
+md5sum = dce303524736fe89a76b8ed29d566352
 configure-options =
   --disable-static
   --enable-readline
diff --git a/component/stunnel/buildout.cfg b/component/stunnel/buildout.cfg
index ffbb02717e94d029ce1312a6506cc46097fee400..607e0c794cf329926097383bf0e2c863025bcc7e 100644
--- a/component/stunnel/buildout.cfg
+++ b/component/stunnel/buildout.cfg
@@ -17,8 +17,8 @@ filename = stunnel-4-hooks.py
 
 [stunnel-4]
 recipe = hexagonit.recipe.cmmi
-url = http://mirror.bit.nl/stunnel/stunnel-4.44.tar.gz
-md5sum = c9dd51fc02b913ce5bf7b3fc12f9cb4a
+url = http://mirror.bit.nl/stunnel/stunnel-4.50.tar.gz
+md5sum = d68b4565294496a8bdf23c728a679f53
 pre-configure-hook = ${stunnel-4-hook-download:location}/${stunnel-4-hook-download:filename}:pre_configure_hook
 configure-options =
   --enable-ipv6
diff --git a/component/xtrabackup/buildout.cfg b/component/xtrabackup/buildout.cfg
index 753376d34c0620b1809677a32c637de1e3c12695..6769e684398de27631b19fe9c324c9d954188360 100644
--- a/component/xtrabackup/buildout.cfg
+++ b/component/xtrabackup/buildout.cfg
@@ -18,7 +18,7 @@ parts =
 [xtrabackup-build-patch-download]
 recipe = hexagonit.recipe.download
 url = ${:_profile_base_location_}/${:filename}
-md5sum = 95b2c2ef625f88d85bf8876269a19372
+md5sum = b1536fe65e32592e4a0a14bf3b159885
 download-only = true
 filename = xtrabackup-1.6.2_build.patch
 
@@ -29,17 +29,33 @@ md5sum = d642ea7b30d1322a516fbece4ee100e0
 download-only = true
 filename = ${:_buildout_section_name_}
 
+[mysql-5.1-download]
+recipe = hexagonit.recipe.download
+version = 5.1.56
+url = http://s3.amazonaws.com/percona.com/downloads/community/mysql-${:version}.tar.gz
+md5sum = 15161d67f4830aad3a8a89e083749d49
+download-only = true
+filename = mysql-${:version}.tar.gz
+
+[libtar-download]
+recipe = hexagonit.recipe.download
+version = 1.2.11
+url = http://s3.amazonaws.com/percona.com/downloads/community/libtar-${:version}.tar.gz
+md5sum = 604238e8734ce6e25347a58c4f1a1d7e
+download-only = true
+filename = libtar-${:version}.tar.gz
+
 [xtrabackup]
 recipe = hexagonit.recipe.cmmi
-url = http://www.percona.com/redir/downloads/XtraBackup/xtrabackup-1.6.2/source/xtrabackup-1.6.2.tar.gz
-md5sum = 933243ae362156c98f1211eb87b3b4ea
+url = http://www.percona.com/downloads/XtraBackup/XtraBackup-1.6.3/source/xtrabackup-1.6.3.tar.gz
+md5sum = d0b827fd18cd76416101eb7b7c56a311
 make-binary = true
 patches =
   ${xtrabackup-build-patch-download:location}/${xtrabackup-build-patch-download:filename}
   ${allow_force_ibbackup.patch:location}/${allow_force_ibbackup.patch:filename}
 patch-options = -p1
 location = ${buildout:parts-directory}/${:_buildout_section_name_}
-configure-command = utils/build.sh innodb51_builtin ${:location} ${libtool:location}
+configure-command = ln -sf ${mysql-5.1-download:location}/${mysql-5.1-download:filename} ${libtar-download:location}/${libtar-download:filename} . && utils/build.sh innodb51_builtin ${:location} ${libtool:location}
 environment =
   CPPFLAGS =-I${zlib:location}/include -I${ncurses:location}/include -I${readline:location}/include
   LDFLAGS =-L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib -L${ncurses:location}/lib -Wl,-rpath=${ncurses:location}/lib -L${readline:location}/lib -Wl,-rpath=${readline:location}/lib
diff --git a/component/xtrabackup/xtrabackup-1.6.2_build.patch b/component/xtrabackup/xtrabackup-1.6.2_build.patch
index 41c2188fa9429d8c9db685233aafd83931f0a57c..5357f52128860733e3e97836fa0bd81f3730649b 100644
--- a/component/xtrabackup/xtrabackup-1.6.2_build.patch
+++ b/component/xtrabackup/xtrabackup-1.6.2_build.patch
@@ -27,17 +27,16 @@ diff -ur xtrabackup-1.6.2.orig/utils/build.sh xtrabackup-1.6.2/utils/build.sh
      exit -1
  }
  
-@@ -79,7 +81,12 @@
+@@ -79,7 +81,11 @@
  {
      echo "Configuring the server"
      cd $server_dir
 -    BUILD/autorun.sh
-+    libtoolize -c -f
-+    aclocal -I $libtool_location/share/aclocal -I config/ac-macros
-+    autoheader
-+    automake -c -a -f
-+    autoconf
-+    touch sql/sql_yacc.yy
++    aclocal -I $libtool_location/share/aclocal -I config/ac-macros || die "Can't execute aclocal"
++    autoheader || die "Can't execute autoheader"
++    libtoolize --automake --force --copy || die "Can't execute libtoolize"
++    automake --add-missing --force  --copy || die "Can't execute automake"
++    autoconf || die "Can't execute autoconf"
      eval $configure_cmd
  
      echo "Building the server"
@@ -141,12 +140,3 @@ diff -ur xtrabackup-1.6.2.orig/utils/build.sh xtrabackup-1.6.2/utils/build.sh
  	;;
  
  "innodb55" | "5.5")
-@@ -230,7 +279,7 @@
- 
- 	build_server
- 
--	build_xtrabackup
-+	build_xtrabackup 
- 
- 	build_tar4ibd
- 	;;
diff --git a/setup.py b/setup.py
index f38885a1487ebfc6aa6b3501023766fb8d456bbd..90ba14b5d0766c3f8dbff553b7e0d71c65440802 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 import glob
 import os
 
-version = '0.32-dev'
+version = '0.39-dev'
 name = 'slapos.cookbook'
 long_description = open("README.txt").read() + "\n" + \
     open("CHANGES.txt").read() + "\n"
@@ -43,7 +43,6 @@ setup(name=name,
           'certificate_authority.request = slapos.recipe.certificate_authority:Request',
           'cron = slapos.recipe.dcron:Recipe',
           'cron.d = slapos.recipe.dcron:Part',
-          'download = slapos.recipe.download:Recipe',
           'davstorage = slapos.recipe.davstorage:Recipe',
           'duplicity = slapos.recipe.duplicity:Recipe',
           'erp5 = slapos.recipe.erp5:Recipe',
diff --git a/slapos/recipe/README.download.txt b/slapos/recipe/README.download.txt
deleted file mode 100644
index a74c946c5cad90d54a1a8b7a8541c8b9bb4687b5..0000000000000000000000000000000000000000
--- a/slapos/recipe/README.download.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-download
-========
-
-Extremely simple recipe to download using zc.buildout download utility.
-
-Usage
------
-
-::
-
-  [buildout]
-  parts =
-    download
-
-  [download]
-  recipe = slapos.cookbook:download
-  url = https://some.url/file
-
-Such profile will download https://some.url/file and put it in
-buildout:parts-directory/download/download
-
-filename parameter can be used to change destination named filename.
-
-destination parameter allows to put explicit destination.
-
-md5sum parameter allows pass md5sum.
-
-mode (octal, so for rw-r--r-- use 0644) allows to set mode
-
-Exposes target attribute which is path to downloaded file.
-
-Notes
------
-
-This recipe suffers from buildout download utility issue, which will do not
-try to redownload resource with wrong md5sum.
diff --git a/slapos/recipe/download.py b/slapos/recipe/download.py
deleted file mode 100644
index e5a602a77fd7341d27d2136ab6f9482f4bf4935f..0000000000000000000000000000000000000000
--- a/slapos/recipe/download.py
+++ /dev/null
@@ -1,72 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
-#
-# WARNING: This program as such is intended to be used by professional
-# programmers who take the whole responsibility of assessing all potential
-# consequences resulting from its eventual inadequacies and bugs
-# End users who are looking for a ready-to-use solution with commercial
-# guarantees and support are strongly adviced to contract a Free Software
-# Service Company
-#
-# This program is Free Software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 3
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
-#
-##############################################################################
-import os
-import logging
-import shutil
-import zc.buildout
-class Recipe:
-  def __init__(self, buildout, name, options):
-    self.buildout = buildout
-    self.name = name
-    self.options = options
-    self.logger = logging.getLogger(self.name)
-    if 'filename' in self.options and 'destination' in self.options:
-      raise zc.buildout.UserError('Parameters filename and destination are '
-          'exclusive.')
-    self.parts = None
-    self.destination = self.options.get('destination', None)
-    if self.destination is None:
-      self.parts = os.path.join(self.buildout['buildout']['parts-directory'],
-          self.name)
-      self.destination = os.path.join(self.parts, self.options.get('filename',
-        self.name))
-    options['target'] = self.destination
-
-  def install(self):
-    if self.parts is not None:
-      if not os.path.isdir(self.parts):
-        os.mkdir(self.parts)
-    download = zc.buildout.download.Download(self.buildout['buildout'],
-        hash_name=True)
-    path, is_temp = download(self.options['url'],
-        md5sum=self.options.get('md5sum'))
-    if os.path.exists(self.destination):
-      os.unlink(self.destination)
-    shutil.copy(path, self.destination)
-    mode = self.options.get('mode')
-    if mode is not None:
-      mode = int(mode, 8)
-      os.chmod(self.destination, mode)
-      self.logger.debug('Mode of %r set to 0%o.' % (self.destination, mode))
-    self.logger.debug('Downloaded %r and saved to %r.' % (self.options['url'],
-      self.destination))
-    if self.parts is not None:
-      return [self.parts]
-    else:
-      return []
-
-  update = install
diff --git a/slapos/recipe/erp5/__init__.py b/slapos/recipe/erp5/__init__.py
index c867bf6d1d1a4e86b574edef95cefb3e9d0c5bf2..672bf979235765949a032f5c903dab5aaaeaab51 100644
--- a/slapos/recipe/erp5/__init__.py
+++ b/slapos/recipe/erp5/__init__.py
@@ -668,7 +668,15 @@ SSLCARevocationPath %(ca_crl)s"""
     # maxconn should be set as the maximum thread we have per zope, like this
     #      haproxy will manage the queue of request with the possibility to
     #      move a request to another node if the initially selected one is dead
-    server_template = """  server %(name)s %(address)s cookie %(name)s check inter 3s rise 1 fall 2 maxconn %(cluster_zope_thread_amount)s"""
+    # maxqueue is the number of waiting requests in the queue of every zope client.
+    #      It allows to make sure that there is not a zope client handling all
+    #      the work while other clients are doing nothing. This was happening
+    #      even though we have round robin distribution because when a node dies
+    #      some seconds, all requests are dispatched to other nodes, and then users
+    #      stick in other nodes and are not coming back. Please note this option
+    #      is not an issue if you have more than (maxqueue * node_quantity) requests
+    #      because haproxy will handle a top-level queue
+    server_template = """  server %(name)s %(address)s cookie %(name)s check inter 3s rise 1 fall 2 maxqueue 5 maxconn %(cluster_zope_thread_amount)s"""
     config = dict(name=name, ip=ip, port=port,
         server_check_path=server_check_path,)
     i = 1
@@ -781,7 +789,12 @@ SSLCARevocationPath %(ca_crl)s"""
       kumo_conf = {}
     # XXX Conversion server and memcache server coordinates are not relevant
     # for pure site creation.
-    mysql_connection_string = "%(mysql_database)s@%(ip)s:%(tcp_port)s %(mysql_user)s %(mysql_password)s" % mysql_conf
+    assert mysql_conf['mysql_user'] and mysql_conf['mysql_password'], \
+        "ZMySQLDA requires a user and a password for socket connections"
+    # XXX Use socket access to prevent unwanted connections to original MySQL
+    #     server when cloning an existing ERP5 instance.
+    #     TCP will be required if MySQL is in a different partition/server.
+    mysql_connection_string = "%(mysql_database)s %(mysql_user)s %(mysql_password)s %(socket)s" % mysql_conf
 
     bt5_list = self.parameter_dict.get("bt5_list", "").split() or default_bt5_list
     bt5_repository_list = self.parameter_dict.get("bt5_repository_list", "").split() \
@@ -1146,7 +1159,7 @@ SSLCARevocationPath %(ca_crl)s"""
   def installMysqlServer(self, ip, port, database='erp5', user='user',
       test_database='test_erp5', test_user='test_user', template_filename=None,
       parallel_test_database_amount=100, mysql_conf=None, with_backup=True,
-      with_maatkit=True):
+      with_percona_toolkit=True):
     if mysql_conf is None:
       mysql_conf = {}
     backup_directory = self.createBackupDirectory('mysql')
@@ -1185,6 +1198,8 @@ SSLCARevocationPath %(ca_crl)s"""
           mysql_conf))
 
     mysql_script_list = []
+    mysql_script_list.append(pkg_resources.resource_string(__name__,
+                   'template/mysql-init-function.sql.in'))
     for x_database, x_user, x_password in \
           [(mysql_conf['mysql_database'],
             mysql_conf['mysql_user'],
@@ -1194,7 +1209,7 @@ SSLCARevocationPath %(ca_crl)s"""
             mysql_conf['mysql_test_password']),
           ] + mysql_conf['mysql_parallel_test_dict']:
       mysql_script_list.append(pkg_resources.resource_string(__name__,
-                     'template/initmysql.sql.in') % {
+                     'template/mysql-init-database.sql.in') % {
                         'mysql_database': x_database,
                         'mysql_user': x_user,
                         'mysql_password': x_password})
@@ -1255,29 +1270,46 @@ SSLCARevocationPath %(ca_crl)s"""
       open(mysql_backup_cron, 'w').write('0 0 * * * ' + backup_controller)
       self.path_list.append(mysql_backup_cron)
 
-    if with_maatkit:
+    if with_percona_toolkit:
-      # maatkit installation
+      # percona toolkit installation
-      for mk_script_name in (
-          'mk-variable-advisor',
-          'mk-table-usage',
-          'mk-visual-explain',
-          'mk-config-diff',
-          'mk-deadlock-logger',
-          'mk-error-log',
-          'mk-index-usage',
-          'mk-query-advisor',
+      for pt_script_name in (
+          'pt-archiver',
+          'pt-config-diff',
+          'pt-deadlock-logger',
+          'pt-duplicate-key-checker',
+          'pt-fifo-split',
+          'pt-find',
+          'pt-fk-error-logger',
+          'pt-heartbeat',
+          'pt-index-usage',
+          'pt-kill',
+          'pt-log-player',
+          'pt-online-schema-change',
+          'pt-query-advisor',
+          'pt-query-digest',
+          'pt-show-grants',
+          'pt-slave-delay',
+          'pt-slave-find',
+          'pt-slave-restart',
+          'pt-table-checksum',
+          'pt-table-sync',
+          'pt-tcp-model',
+          'pt-trend',
+          'pt-upgrade',
+          'pt-variable-advisor',
+          'pt-visual-explain',
           ):
-        mk_argument_list = [self.options['perl_binary'],
-            self.options['%s_binary' % mk_script_name],
+        pt_argument_list = [self.options['perl_binary'],
+            self.options['%s_binary' % pt_script_name],
             '--defaults-file=%s' % mysql_conf_path,
             '--socket=%s' %mysql_conf['socket'].strip(), '--user=root',
             ]
         environment = dict(PATH='%s' % self.bin_directory)
-        mk_exe = zc.buildout.easy_install.scripts([(
-          mk_script_name,'slapos.recipe.librecipe.execute', 'executee')],
+        pt_exe = zc.buildout.easy_install.scripts([(
+          pt_script_name,'slapos.recipe.librecipe.execute', 'executee')],
           self.ws, sys.executable, self.bin_directory, arguments=[
-            mk_argument_list, environment])[0]
-        self.path_list.append(mk_exe)
+            pt_argument_list, environment])[0]
+        self.path_list.append(pt_exe)
 
     # The return could be more explicit database, user ...
     return mysql_conf
diff --git a/slapos/recipe/erp5/template/haproxy.cfg.in b/slapos/recipe/erp5/template/haproxy.cfg.in
index aa8f8a865ecb58d166cfebcc911c681780651ae8..a732c3bc89b926dbb70b787ee32f9c2c957294d8 100644
--- a/slapos/recipe/erp5/template/haproxy.cfg.in
+++ b/slapos/recipe/erp5/template/haproxy.cfg.in
@@ -22,12 +22,12 @@ defaults
   timeout connect 5s
   # As requested in haproxy doc, make this "at least equal to timeout server".
   timeout client 305s
-  # Use "option httpclose" to not preserve client & server persistent connections
+  # Use "option forceclose" to not preserve client & server persistent connections
   # while handling every incoming request individually, dispatching them one after
   # another to servers, in HTTP close mode. This is really needed when haproxy
   # is configured with maxconn to 1, without this options browser are unable
   # to render a page
-  option httpclose
+  option forceclose
 
 listen %(name)s %(ip)s:%(port)s
   cookie  SERVERID insert
diff --git a/slapos/recipe/erp5/template/my.cnf.in b/slapos/recipe/erp5/template/my.cnf.in
index 632d35c996e4badff3d09e5b6f84ef4dbc6b252f..25d951e863b594e5ac3ca2709897da3763c45754 100644
--- a/slapos/recipe/erp5/template/my.cnf.in
+++ b/slapos/recipe/erp5/template/my.cnf.in
@@ -15,12 +15,17 @@ socket = %(socket)s
 datadir = %(data_directory)s
 pid-file = %(pid_file)s
 log-error = %(error_log)s
-log-slow-file = %(slow_query_log)s
+slow_query_log
+slow_query_log_file = %(slow_query_log)s
 long_query_time = 5
 max_allowed_packet = 128M
 query_cache_size = 32M
 
-plugin-load = ha_innodb_plugin.so
+plugin-load = ha_groonga.so;ha_sphinx.so
+
+# By default only 100 connections are allowed, when using zeo
+# we may have much more connections
+# max_connections = 1000
 
 # The following are important to configure and depend a lot on to the size of
 # your database and the available resources.
diff --git a/slapos/recipe/erp5/template/initmysql.sql.in b/slapos/recipe/erp5/template/mysql-init-database.sql.in
similarity index 100%
rename from slapos/recipe/erp5/template/initmysql.sql.in
rename to slapos/recipe/erp5/template/mysql-init-database.sql.in
diff --git a/slapos/recipe/erp5/template/mysql-init-function.sql.in b/slapos/recipe/erp5/template/mysql-init-function.sql.in
new file mode 100644
index 0000000000000000000000000000000000000000..c4d0cbde46c01372b3808db3601e517e14788b4e
--- /dev/null
+++ b/slapos/recipe/erp5/template/mysql-init-function.sql.in
@@ -0,0 +1,5 @@
+USE mysql;
+DROP FUNCTION IF EXISTS last_insert_grn_id;
+CREATE FUNCTION last_insert_grn_id RETURNS INTEGER SONAME 'ha_groonga.so';
+DROP FUNCTION IF EXISTS sphinx_snippets;
+CREATE FUNCTION sphinx_snippets RETURNS STRING SONAME 'sphinx.so';
diff --git a/slapos/recipe/erp5testnode/SlapOSControler.py b/slapos/recipe/erp5testnode/SlapOSControler.py
deleted file mode 100644
index e9ed42f711874efb143e05787cf30f4cc6787eac..0000000000000000000000000000000000000000
--- a/slapos/recipe/erp5testnode/SlapOSControler.py
+++ /dev/null
@@ -1,94 +0,0 @@
-import slapos.slap, subprocess, os, time
-from xml_marshaller import xml_marshaller
-
-class SlapOSControler(object):
-
-  def __init__(self, config, process_group_pid_set=None):
-    self.config = config
-    # By erasing everything, we make sure that we are able to "update"
-    # existing profiles. This is quite dirty way to do updates...
-    if os.path.exists(config['proxy_database']):
-      os.unlink(config['proxy_database'])
-    proxy = subprocess.Popen([config['slapproxy_binary'],
-      config['slapos_config']], close_fds=True, preexec_fn=os.setsid)
-    process_group_pid_set.add(proxy.pid)
-    # XXX: dirty, giving some time for proxy to being able to accept
-    # connections
-    time.sleep(10)
-    slap = slapos.slap.slap()
-    slap.initializeConnection(config['master_url'])
-    # register software profile
-    self.software_profile = config['custom_profile_path']
-    slap.registerSupply().supply(
-        self.software_profile,
-        computer_guid=config['computer_id'])
-    computer = slap.registerComputer(config['computer_id'])
-    # create partition and configure computer
-    partition_reference = config['partition_reference']
-    partition_path = os.path.join(config['instance_root'], partition_reference)
-    if not os.path.exists(partition_path):
-      os.mkdir(partition_path)
-      os.chmod(partition_path, 0750)
-    computer.updateConfiguration(xml_marshaller.dumps({
- 'address': config['ipv4_address'],
- 'instance_root': config['instance_root'],
- 'netmask': '255.255.255.255',
- 'partition_list': [{'address_list': [{'addr': config['ipv4_address'],
-                                       'netmask': '255.255.255.255'},
-                                      {'addr': config['ipv6_address'],
-                                       'netmask': 'ffff:ffff:ffff::'},
-                      ],
-                     'path': partition_path,
-                     'reference': partition_reference,
-                     'tap': {'name': partition_reference},
-                     }
-                    ],
- 'reference': config['computer_id'],
- 'software_root': config['software_root']}))
-
-  def runSoftwareRelease(self, config, environment, process_group_pid_set=None,
-                         stdout=None, stderr=None):
-    print "SlapOSControler.runSoftwareRelease"
-    cpu_count = os.sysconf("SC_NPROCESSORS_ONLN")
-    os.putenv('MAKEFLAGS', '-j%s' % cpu_count)
-    os.environ['PATH'] = environment['PATH']
-    slapgrid = subprocess.Popen([config['slapgrid_software_binary'], '-v', '-c',
-      #'--buildout-parameter',"'-U -N' -o",
-      config['slapos_config']],
-      stdout=stdout, stderr=stderr,
-      close_fds=True, preexec_fn=os.setsid)
-    process_group_pid_set.add(slapgrid.pid)
-    slapgrid.wait()
-    stdout.seek(0)
-    stderr.seek(0)
-    process_group_pid_set.remove(slapgrid.pid)
-    status_dict = {'status_code':slapgrid.returncode,
-                    'stdout':stdout.read(),
-                    'stderr':stderr.read()}
-    stdout.close()
-    stderr.close()
-    return status_dict
-
-  def runComputerPartition(self, config, environment,
-                           process_group_pid_set=None,
-                           stdout=None, stderr=None):
-    print "SlapOSControler.runSoftwareRelease"
-    slap = slapos.slap.slap()
-    slap.registerOpenOrder().request(self.software_profile,
-        partition_reference='testing partition',
-        partition_parameter_kw=config['instance_dict'])
-    slapgrid = subprocess.Popen([config['slapgrid_partition_binary'],
-      config['slapos_config'], '-c', '-v'],
-      stdout=stdout, stderr=stderr,
-      close_fds=True, preexec_fn=os.setsid)
-    process_group_pid_set.add(slapgrid.pid)
-    slapgrid.wait()
-    stdout.seek(0)
-    stderr.seek(0)
-    process_group_pid_set.remove(slapgrid.pid)
-    status_dict = {'status_code':slapgrid.returncode,
-                    'stdout':stdout.read(),
-                    'stderr':stderr.read()}
-    stdout.close()
-    stderr.close()
-    return status_dict
diff --git a/slapos/recipe/erp5testnode/Updater.py b/slapos/recipe/erp5testnode/Updater.py
deleted file mode 100644
index 83ea60a1e7738bdd55316ce4bb7c5937f838203c..0000000000000000000000000000000000000000
--- a/slapos/recipe/erp5testnode/Updater.py
+++ /dev/null
@@ -1,189 +0,0 @@
-import os, sys, subprocess, re, threading
-from testnode import SubprocessError
-
-_format_command_search = re.compile("[[\\s $({?*\\`#~';<>&|]").search
-_format_command_escape = lambda s: "'%s'" % r"'\''".join(s.split("'"))
-def format_command(*args, **kw):
-  cmdline = []
-  for k, v in sorted(kw.items()):
-    if _format_command_search(v):
-      v = _format_command_escape(v)
-    cmdline.append('%s=%s' % (k, v))
-  for v in args:
-    if _format_command_search(v):
-      v = _format_command_escape(v)
-    cmdline.append(v)
-  return ' '.join(cmdline)
-
-def subprocess_capture(p, quiet=False):
-  def readerthread(input, output, buffer):
-    while True:
-      data = input.readline()
-      if not data:
-        break
-      output(data)
-      buffer.append(data)
-  if p.stdout:
-    stdout = []
-    output = quiet and (lambda data: None) or sys.stdout.write
-    stdout_thread = threading.Thread(target=readerthread,
-                                     args=(p.stdout, output, stdout))
-    stdout_thread.setDaemon(True)
-    stdout_thread.start()
-  if p.stderr:
-    stderr = []
-    stderr_thread = threading.Thread(target=readerthread,
-                                     args=(p.stderr, sys.stderr.write, stderr))
-    stderr_thread.setDaemon(True)
-    stderr_thread.start()
-  if p.stdout:
-    stdout_thread.join()
-  if p.stderr:
-    stderr_thread.join()
-  p.wait()
-  return (p.stdout and ''.join(stdout),
-          p.stderr and ''.join(stderr))
-
-GIT_TYPE = 'git'
-SVN_TYPE = 'svn'
-
-class Updater(object):
-
-  _git_cache = {}
-  realtime_output = True
-  stdin = file(os.devnull)
-
-  def __init__(self, repository_path, revision=None, git_binary=None):
-    self.revision = revision
-    self._path_list = []
-    self.repository_path = repository_path
-    self.git_binary = git_binary
-
-  def getRepositoryPath(self):
-    return self.repository_path
-
-  def getRepositoryType(self):
-    try:
-      return self.repository_type
-    except AttributeError:
-      # guess the type of repository we have
-      if os.path.isdir(os.path.join(
-                       self.getRepositoryPath(), '.git')):
-        repository_type = GIT_TYPE
-      elif os.path.isdir(os.path.join(
-                       self.getRepositoryPath(), '.svn')):
-        repository_type = SVN_TYPE
-      else:
-        raise NotImplementedError
-      self.repository_type = repository_type
-      return repository_type
-
-  def deletePycFiles(self, path):
-    """Delete *.pyc files so that deleted/moved files can not be imported"""
-    for path, dir_list, file_list in os.walk(path):
-      for file in file_list:
-        if file[-4:] in ('.pyc', '.pyo'):
-          # allow several processes clean the same folder at the same time
-          try:
-            os.remove(os.path.join(path, file))
-          except OSError, e:
-            if e.errno != errno.ENOENT:
-              raise
-
-  def spawn(self, *args, **kw):
-    quiet = kw.pop('quiet', False)
-    env = kw and dict(os.environ, **kw) or None
-    command = format_command(*args, **kw)
-    print '\n$ ' + command
-    sys.stdout.flush()
-    p = subprocess.Popen(args, stdin=self.stdin, stdout=subprocess.PIPE,
-                         stderr=subprocess.PIPE, env=env,
-                         cwd=self.getRepositoryPath())
-    if self.realtime_output:
-      stdout, stderr = subprocess_capture(p, quiet)
-    else:
-      stdout, stderr = p.communicate()
-      if not quiet:
-        sys.stdout.write(stdout)
-      sys.stderr.write(stderr)
-    result = dict(status_code=p.returncode, command=command,
-                  stdout=stdout, stderr=stderr)
-    if p.returncode:
-      raise SubprocessError(result)
-    return result
-
-  def _git(self, *args, **kw):
-    return self.spawn(self.git_binary, *args, **kw)['stdout'].strip()
-
-  def _git_find_rev(self, ref):
-    try:
-      return self._git_cache[ref]
-    except KeyError:
-      if os.path.exists('.git/svn'):
-        r = self._git('svn', 'find-rev', ref)
-        assert r
-        self._git_cache[ref[0] != 'r' and 'r%u' % int(r) or r] = ref
-      else:
-        r = self._git('rev-list', '--topo-order', '--count', ref), ref
-      self._git_cache[ref] = r
-      return r
-
-  def getRevision(self, *path_list):
-    if not path_list:
-      path_list = self._path_list
-    if self.getRepositoryType() == GIT_TYPE:
-      h = self._git('log', '-1', '--format=%H', '--', *path_list)
-      return self._git_find_rev(h)
-    elif self.getRepositoryType() == SVN_TYPE:
-      stdout = self.spawn('svn', 'info', *path_list)['stdout']
-      return str(max(map(int, SVN_CHANGED_REV.findall(stdout))))
-    raise NotImplementedError
-
-  def checkout(self, *path_list):
-    if not path_list:
-      path_list = '.',
-    revision = self.revision
-    if self.getRepositoryType() == GIT_TYPE:
-      # edit .git/info/sparse-checkout if you want sparse checkout
-      if revision:
-        if type(revision) is str:
-          h = revision
-        else:
-          h = revision[1]
-        if h != self._git('rev-parse', 'HEAD'):
-          self.deletePycFiles('.')
-          self._git('reset', '--merge', h)
-      else:
-        self.deletePycFiles('.')
-        if os.path.exists('.git/svn'):
-          self._git('svn', 'rebase')
-        else:
-          self._git('pull', '--ff-only')
-        self.revision = self._git_find_rev(self._git('rev-parse', 'HEAD'))
-    elif self.getRepositoryType() == SVN_TYPE:
-      # following code allows sparse checkout
-      def svn_mkdirs(path):
-        path = os.path.dirname(path)
-        if path and not os.path.isdir(path):
-          svn_mkdirs(path)
-          self.spawn(*(args + ['--depth=empty', path]))
-      for path in path_list:
-        args = ['svn', 'up', '--force', '--non-interactive']
-        if revision:
-          args.append('-r%s' % revision)
-        svn_mkdirs(path)
-        args += '--set-depth=infinity', path
-        self.deletePycFiles(path)
-        try:
-          status_dict = self.spawn(*args)
-        except SubprocessError, e:
-          if 'cleanup' not in e.stderr:
-            raise
-          self.spawn('svn', 'cleanup', path)
-          status_dict = self.spawn(*args)
-        if not revision:
-          self.revision = revision = SVN_UP_REV.findall(
-            status_dict['stdout'].splitlines()[-1])[0]
-    else:
-      raise NotImplementedError
-    self._path_list += path_list
diff --git a/slapos/recipe/erp5testnode/__init__.py b/slapos/recipe/erp5testnode/__init__.py
index cd8f0f53b69913c533cfb7b276a8fe0c50a923aa..af7177dc2b0ef95e2c56373f4c37a9aa10182f97 100644
--- a/slapos/recipe/erp5testnode/__init__.py
+++ b/slapos/recipe/erp5testnode/__init__.py
@@ -31,129 +31,79 @@ import zc.buildout
 import zc.recipe.egg
 import sys
 
-CONFIG = dict(
-  proxy_port='5000',
-  computer_id='COMPUTER',
-  partition_reference='test0',
-)
-
 class Recipe(BaseSlapRecipe):
   def __init__(self, buildout, name, options):
     self.egg = zc.recipe.egg.Egg(buildout, options['recipe'], options)
     BaseSlapRecipe.__init__(self, buildout, name, options)
 
-  def installSlapOs(self):
+  def _install(self):
+    self.requirements, self.ws = self.egg.working_set()
+    path_list = []
+    CONFIG = dict()
     CONFIG['slapos_directory'] = self.createDataDirectory('slapos')
     CONFIG['working_directory'] = self.createDataDirectory('testnode')
-    CONFIG['software_root'] = os.path.join(CONFIG['slapos_directory'],
-        'software')
-    CONFIG['instance_root'] = os.path.join(CONFIG['slapos_directory'],
-        'instance')
-    CONFIG['proxy_database'] = os.path.join(CONFIG['slapos_directory'],
-        'proxy.db')
+    CONFIG['test_suite_directory'] = self.createDataDirectory('test_suite')
     CONFIG['proxy_host'] = self.getLocalIPv4Address()
-    CONFIG['master_url'] = 'http://%s:%s' % (CONFIG['proxy_host'],
-        CONFIG['proxy_port'])
-    self._createDirectory(CONFIG['software_root'])
-    self._createDirectory(CONFIG['instance_root'])
-    CONFIG['slapos_config'] = self.createConfigurationFile('slapos.cfg',
-        self.substituteTemplate(pkg_resources.resource_filename(__name__,
-          'template/slapos.cfg.in'), CONFIG))
-    self.path_list.append(CONFIG['slapos_config'])
-
-  def setupRunningWrapper(self):
-    self.path_list.extend(zc.buildout.easy_install.scripts([(
-      'testnode',
-        __name__+'.testnode', 'run')], self.ws,
-          sys.executable, self.wrapper_directory, arguments=[
-            dict(
-              computer_id=CONFIG['computer_id'],
-              instance_dict=eval(self.parameter_dict.get('instance_dict', '{}')),
-              instance_root=CONFIG['instance_root'],
-              ipv4_address=self.getLocalIPv4Address(),
-              ipv6_address=self.getGlobalIPv6Address(),
-              master_url=CONFIG['master_url'],
-              profile_path=self.parameter_dict['profile_path'],
-              proxy_database=CONFIG['proxy_database'],
-              proxy_port=CONFIG['proxy_port'],
-              slapgrid_partition_binary=self.options['slapgrid_partition_binary'],
-              slapgrid_software_binary=self.options['slapgrid_software_binary'],
-              slapos_config=CONFIG['slapos_config'],
-              slapproxy_binary=self.options['slapproxy_binary'],
-              git_binary=self.options['git_binary'],
-              software_root=CONFIG['software_root'],
-              working_directory=CONFIG['working_directory'],
-              vcs_repository_list=eval(self.parameter_dict.get('vcs_repository_list'),),
-              node_quantity=self.parameter_dict.get('node_quantity', '1'),
-              test_suite_master_url=self.parameter_dict.get(
-                                'test_suite_master_url', None),
-              test_suite=self.parameter_dict.get('test_suite'),
-              test_suite_title=self.parameter_dict.get('test_suite_title'),
-              test_node_title=self.parameter_dict.get('test_node_title'),
-              project_title=self.parameter_dict.get('project_title'),
-              bin_directory=self.bin_directory,
-              # botenvironemnt is splittable string of key=value to substitute
-              # environment of running bot
-              bot_environment=self.parameter_dict.get('bot_environment', ''),
-              partition_reference=CONFIG['partition_reference'],
-              environment=dict(PATH=os.environ['PATH']),
-              vcs_authentication_list=eval(self.parameter_dict.get(
-                     'vcs_authentication_list', 'None')),
-            )
-          ]))
-
-  def installLocalGit(self):
-    git_dict = dict(git_binary = self.options['git_binary'])
-    git_dict.update(self.parameter_dict)
-    double_slash_end_position = 1
-    # XXX, this should be provided by slapos
-    print "bin_directory : %r" % self.bin_directory
-    home_directory = os.path.join(*os.path.split(self.bin_directory)[0:-1])
-    print "home_directory : %r" % home_directory
-    git_dict.setdefault("git_server_name", "git.erp5.org")
-    if git_dict.get('vcs_authentication_list', None) is not None:
-      vcs_authentication_list = eval(git_dict['vcs_authentication_list'])
-      netrc_file = open(os.path.join(home_directory, '.netrc'), 'w')
-      for vcs_authentication_dict in vcs_authentication_list:
-        netrc_file.write("""
-machine %(host)s
-login %(user_name)s
-password %(password)s
-""" % vcs_authentication_dict)
-      netrc_file.close()
+    CONFIG['proxy_port'] = '5000'
+    CONFIG['log_directory'] = self.createDataDirectory('testnodelog')
+    CONFIG['run_directory'] = self.createDataDirectory('testnoderun')
+    CONFIG['test_suite_title'] = self.parameter_dict.get('test_suite_title')
+    CONFIG['test_node_title'] = self.parameter_dict.get('test_node_title')
+    CONFIG['test_suite'] = self.parameter_dict.get('test_suite')
+    CONFIG['node_quantity'] = self.parameter_dict.get('node_quantity', '1')
+    CONFIG['project_title'] = self.parameter_dict.get('project_title')
+    CONFIG['ipv4_address'] = self.getLocalIPv4Address()
+    CONFIG['ipv6_address'] = self.getGlobalIPv6Address()
+    CONFIG['test_suite_master_url'] = self.parameter_dict.get(
+                                'test_suite_master_url', None)
+    CONFIG['git_binary'] = self.options['git_binary']
+    CONFIG['slapgrid_partition_binary'] = self.options[
+      'slapgrid_partition_binary']
+    CONFIG['slapgrid_software_binary'] = self.options[
+      'slapgrid_software_binary']
+    CONFIG['slapproxy_binary'] = self.options['slapproxy_binary']
+    CONFIG['zip_binary'] = self.options['zip_binary']
+    CONFIG['PATH'] = os.environ['PATH']
+    additional_bt5_repository_id = \
+        self.parameter_dict.get('additional_bt5_repository_id')
 
-  def installLocalRepository(self):
-    self.installLocalGit()
+    CONFIG['bt5_path'] = None
+    if additional_bt5_repository_id is not None:
+      CONFIG['bt5_path'] = ""
+      additional_bt5_repository_id_list = additional_bt5_repository_id.split(",")
+      for id in additional_bt5_repository_id_list:
+        id_path = os.path.join(CONFIG['slapos_directory'], id)
+        bt_path = os.path.join(id_path, "bt5")
+        CONFIG['bt5_path'] += "%s,%s," % (id_path, bt_path)
+    CONFIG['instance_dict'] = ''
+    if 'instance_dict' in self.parameter_dict:
+      CONFIG['instance_dict'] = '[instance_dict]\n'
+      for k,v in eval(self.parameter_dict['instance_dict']).iteritems():
+        CONFIG['instance_dict'] += '%s = %s\n' % (k,v)
 
-  def installLocalZip(self):
-    zip = os.path.join(self.bin_directory, 'zip')
-    if os.path.lexists(zip):
-      os.unlink(zip)
-    os.symlink(self.options['zip_binary'], zip)
-
-  def installLocalPython(self):
-    """Installs local python fully featured with eggs"""
-    self.path_list.extend(zc.buildout.easy_install.scripts([], self.ws,
-          sys.executable, self.bin_directory, scripts=None,
-          interpreter='python'))
-
-  def installLocalRunUnitTest(self):
-    link = os.path.join(self.bin_directory, 'runUnitTest')
-    destination = os.path.join(CONFIG['instance_root'],
-        CONFIG['partition_reference'], 'bin', 'runUnitTest')
-    if os.path.lexists(link):
-      if not os.readlink(link) != destination:
-        os.unlink(link)
-    if not os.path.lexists(link):
-      os.symlink(destination, link)
-
-  def _install(self):
-    self.requirements, self.ws = self.egg.working_set()
-    self.path_list = []
-    self.installSlapOs()
-    self.setupRunningWrapper()
-    self.installLocalRepository()
-    self.installLocalZip()
-    self.installLocalPython()
-    self.installLocalRunUnitTest()
-    return self.path_list
+    CONFIG['repository_list'] = ''
+    i = 0
+    for repository in eval(self.parameter_dict['vcs_repository_list']):
+      CONFIG['repository_list'] += '[vcs_repository_%s]\n' % i
+      CONFIG['repository_list'] += 'url = %s\n' % repository['url']
+      if 'branch' in repository:
+        CONFIG['repository_list'] += 'branch = %s\n' % repository['branch']
+      if 'profile_path' in repository:
+        CONFIG['repository_list'] += 'profile_path = %s\n' % repository[
+          'profile_path']
+      if 'buildout_section_id' in repository:
+        CONFIG['repository_list'] += 'buildout_section_id = %s\n' % repository[
+          'buildout_section_id']
+      CONFIG['repository_list'] += '\n'
+      i += 1
+    testnode_config = self.createConfigurationFile('erp5testnode.cfg',
+        self.substituteTemplate(pkg_resources.resource_filename(__name__,
+          'template/erp5testnode.cfg.in'), CONFIG))
+    testnode_log = os.path.join(self.log_directory, 'erp5testnode.log')
+    wrapper = zc.buildout.easy_install.scripts([('erp5testnode',
+     'slapos.recipe.librecipe.execute', 'executee')], self.ws, sys.executable,
+      self.wrapper_directory, arguments=[[self.options['testnode'], '-l',
+      testnode_log, testnode_config], {'GIT_SSL_NO_VERIFY': '1'}])[0]
+    path_list.append(testnode_config)
+    path_list.append(wrapper)
+    return path_list
diff --git a/slapos/recipe/erp5testnode/template/erp5testnode.cfg.in b/slapos/recipe/erp5testnode/template/erp5testnode.cfg.in
new file mode 100644
index 0000000000000000000000000000000000000000..08c4619a211392c6e48094b9b0f333e4ceaa9bb6
--- /dev/null
+++ b/slapos/recipe/erp5testnode/template/erp5testnode.cfg.in
@@ -0,0 +1,31 @@
+[testnode]
+slapos_directory = %(slapos_directory)s
+working_directory = %(slapos_directory)s
+test_suite_directory = %(test_suite_directory)s
+log_directory = %(log_directory)s
+run_directory = %(run_directory)s
+proxy_host = %(proxy_host)s
+proxy_port = %(proxy_port)s
+test_suite_title = %(test_suite_title)s
+test_suite = %(test_suite)s
+node_quantity = %(node_quantity)s
+test_node_title = %(test_node_title)s
+project_title= %(project_title)s
+ipv4_address = %(ipv4_address)s
+ipv6_address = %(ipv6_address)s
+test_suite_master_url = %(test_suite_master_url)s
+bt5_path = %(bt5_path)s
+
+# Binaries
+git_binary = %(git_binary)s
+slapgrid_partition_binary = %(slapgrid_partition_binary)s
+slapgrid_software_binary = %(slapgrid_software_binary)s
+slapproxy_binary = %(slapproxy_binary)s
+zip_binary = %(zip_binary)s
+
+[environment]
+PATH = %(PATH)s
+
+%(instance_dict)s
+
+%(repository_list)s
diff --git a/slapos/recipe/erp5testnode/template/slapos.cfg.in b/slapos/recipe/erp5testnode/template/slapos.cfg.in
deleted file mode 100644
index 713f719a322502bca230db83a0c2aa4c6678607c..0000000000000000000000000000000000000000
--- a/slapos/recipe/erp5testnode/template/slapos.cfg.in
+++ /dev/null
@@ -1,10 +0,0 @@
-[slapos]
-software_root = %(software_root)s
-instance_root = %(instance_root)s
-master_url = %(master_url)s
-computer_id = %(computer_id)s
-
-[slapproxy]
-host = %(proxy_host)s
-port = %(proxy_port)s
-database_uri = %(proxy_database)s
diff --git a/slapos/recipe/erp5testnode/testnode.py b/slapos/recipe/erp5testnode/testnode.py
deleted file mode 100644
index f090a3a8668fcc91f4a1012dcac5593156a0334b..0000000000000000000000000000000000000000
--- a/slapos/recipe/erp5testnode/testnode.py
+++ /dev/null
@@ -1,245 +0,0 @@
-from xml_marshaller import xml_marshaller
-import os, xmlrpclib, time, imp
-from glob import glob
-import signal
-import slapos.slap
-import subprocess
-import sys
-import socket
-import pprint
-from SlapOSControler import SlapOSControler
-
-
-class SubprocessError(EnvironmentError):
-  def __init__(self, status_dict):
-    self.status_dict = status_dict
-  def __getattr__(self, name):
-    return self.status_dict[name]
-  def __str__(self):
-    return 'Error %i' % self.status_code
-
-
-from Updater import Updater
-
-process_group_pid_set = set()
-process_pid_file_list = []
-process_command_list = []
-def sigterm_handler(signal, frame):
-  for pgpid in process_group_pid_set:
-    try:
-      os.killpg(pgpid, signal.SIGTERM)
-    except:
-      pass
-  for pid_file in process_pid_file_list:
-    try:
-      os.kill(int(open(pid_file).read().strip()), signal.SIGTERM)
-    except:
-      pass
-  for p in process_command_list:
-    try:
-      subprocess.call(p)
-    except:
-      pass
-  sys.exit(1)
-
-signal.signal(signal.SIGTERM, sigterm_handler)
-
-def safeRpcCall(function, *args):
-  retry = 64
-  while True:
-    try:
-      return function(*args)
-    except (socket.error, xmlrpclib.ProtocolError), e:
-      print >>sys.stderr, e
-      pprint.pprint(args, file(function._Method__name, 'w'))
-      time.sleep(retry)
-      retry += retry >> 1
-
-def getInputOutputFileList(config, command_name):
-  stdout = open(os.path.join(
-                config['instance_root'],'.%s_out' % command_name),
-                'w+')
-  stdout.write("%s\n" % command_name)
-  stderr = open(os.path.join(
-                config['instance_root'],'.%s_err' % command_name),
-                'w+')
-  return (stdout, stderr)
-
-slapos_controler = None
-
-def run(args):
-  config = args[0]
-  slapgrid = None
-  supervisord_pid_file = os.path.join(config['instance_root'], 'var', 'run',
-        'supervisord.pid')
-  subprocess.check_call([config['git_binary'],
-                "config", "--global", "http.sslVerify", "false"])
-  previous_revision = None
-
-  run_software = True
-  # Write our own software.cfg to use the local repository
-  custom_profile_path = os.path.join(config['working_directory'], 'software.cfg')
-  config['custom_profile_path'] = custom_profile_path
-  vcs_repository_list = config['vcs_repository_list']
-  profile_content = None
-  assert len(vcs_repository_list), "we must have at least one repository"
-  for vcs_repository in vcs_repository_list:
-    url = vcs_repository['url']
-    buildout_section_id = vcs_repository.get('buildout_section_id', None)
-    repository_id = buildout_section_id or \
-                                  url.split('/')[-1].split('.')[0]
-    repository_path = os.path.join(config['working_directory'],repository_id)
-    vcs_repository['repository_id'] = repository_id
-    vcs_repository['repository_path'] = repository_path
-    if profile_content is None:
-      profile_content = """
-[buildout]
-extends = %(software_config_path)s
-""" %  {'software_config_path': os.path.join(repository_path,
-                                          config['profile_path'])}
-    if not(buildout_section_id is None):
-      profile_content += """
-[%(buildout_section_id)s]
-repository = %(repository_path)s
-branch = %(branch)s
-""" %  {'buildout_section_id': buildout_section_id,
-        'repository_path' : repository_path,
-        'branch' : vcs_repository.get('branch','master')}
-
-  custom_profile = open(custom_profile_path, 'w')
-  custom_profile.write(profile_content)
-  custom_profile.close()
-  config['repository_path'] = repository_path
-  sys.path.append(repository_path)
-  test_suite_title = config['test_suite_title'] or config['test_suite']
-
-  retry_software = False
-  try:
-    while True:
-      # kill processes from previous loop if any
-      try:
-        for pgpid in process_group_pid_set:
-          try:
-            os.killpg(pgpid, signal.SIGTERM)
-          except:
-            pass
-        process_group_pid_set.clear()
-        full_revision_list = []
-        # Make sure we have local repository
-        for vcs_repository in vcs_repository_list:
-          repository_path = vcs_repository['repository_path']
-          repository_id = vcs_repository['repository_id']
-          if not os.path.exists(repository_path):
-            parameter_list = [config['git_binary'], 'clone',
-                              vcs_repository['url']]
-            if vcs_repository.get('branch') is not None:
-              parameter_list.extend(['-b',vcs_repository.get('branch')])
-            parameter_list.append(repository_path)
-            subprocess.check_call(parameter_list)
-          # Make sure we have local repository
-          updater = Updater(repository_path, git_binary=config['git_binary'])
-          updater.checkout()
-          revision = "-".join(updater.getRevision())
-          full_revision_list.append('%s=%s' % (repository_id, revision))
-        revision = ','.join(full_revision_list)
-        if previous_revision == revision:
-          time.sleep(120)
-          if not(retry_software):
-            continue
-        retry_software = False
-        previous_revision = revision
-
-        print config
-        portal_url = config['test_suite_master_url']
-        test_result_path = None
-        test_result = (test_result_path, revision)
-        if portal_url:
-          if portal_url[-1] != '/':
-            portal_url += '/'
-          portal = xmlrpclib.ServerProxy("%s%s" %
-                      (portal_url, 'portal_task_distribution'),
-                      allow_none=1)
-          master = portal.portal_task_distribution
-          assert master.getProtocolRevision() == 1
-          test_result = safeRpcCall(master.createTestResult,
-            config['test_suite'], revision, [],
-            False, test_suite_title,
-            config['test_node_title'], config['project_title'])
-        print "testnode, test_result : %r" % (test_result,)
-        if test_result:
-          test_result_path, test_revision = test_result
-          if revision != test_revision:
-            for i, repository_revision in enumerate(test_revision.split(',')):
-              vcs_repository = vcs_repository_list[i]
-              repository_path = vcs_repository['repository_path']
-              # other testnodes on other boxes are already ready to test another
-              # revision
-              updater = Updater(repository_path, git_binary=config['git_binary'],
-                                revision=repository_revision.split('-')[1])
-              updater.checkout()
-
-          # Now prepare the installation of SlapOS and create instance
-          slapos_controler = SlapOSControler(config,
-            process_group_pid_set=process_group_pid_set)
-          for method_name in ("runSoftwareRelease", "runComputerPartition"):
-            stdout, stderr = getInputOutputFileList(config, method_name)
-            slapos_method = getattr(slapos_controler, method_name)
-            status_dict = slapos_method(config,
-              environment=config['environment'],
-              process_group_pid_set=process_group_pid_set,
-              stdout=stdout, stderr=stderr
-              )
-            if status_dict['status_code'] != 0:
-              break
-          if status_dict['status_code'] != 0:
-            safeRpcCall(master.reportTaskFailure,
-              test_result_path, status_dict, config['test_node_title'])
-            retry_software = True
-            continue
-
-          partition_path = os.path.join(config['instance_root'],
-                                        config['partition_reference'])
-          run_test_suite_path = os.path.join(partition_path, 'bin',
-                                            'runTestSuite')
-          if not os.path.exists(run_test_suite_path):
-            raise ValueError('No %r provided' % run_test_suite_path)
-
-          run_test_suite_revision = revision
-          if isinstance(revision, tuple):
-            revision = ','.join(revision)
-          # Deal with Shebang size limitation
-          file_object = open(run_test_suite_path, 'r')
-          line = file_object.readline()
-          file_object.close()
-          invocation_list = []
-          if line[:2] == '#!':
-            invocation_list = line[2:].split()
-          invocation_list.extend([run_test_suite_path,
-                                  '--test_suite', config['test_suite'],
-                                  '--revision', revision,
-                                  '--test_suite_title', test_suite_title,
-                                  '--node_quantity', config['node_quantity'],
-                                  '--master_url', config['test_suite_master_url']])
-          run_test_suite = subprocess.Popen(invocation_list)
-          process_group_pid_set.add(run_test_suite.pid)
-          run_test_suite.wait()
-          process_group_pid_set.remove(run_test_suite.pid)
-      except SubprocessError:
-        time.sleep(120)
-        continue
-
-  finally:
-    # Nice way to kill *everything* generated by run process -- process
-    # groups working only in POSIX compilant systems
-    # Exceptions are swallowed during cleanup phase
-    print "going to kill %r" % (process_group_pid_set,)
-    for pgpid in process_group_pid_set:
-      try:
-        os.killpg(pgpid, signal.SIGTERM)
-      except:
-        pass
-    try:
-      if os.path.exists(supervisord_pid_file):
-        os.kill(int(open(supervisord_pid_file).read().strip()), signal.SIGTERM)
-    except:
-      pass
\ No newline at end of file
diff --git a/slapos/recipe/kvm/__init__.py b/slapos/recipe/kvm/__init__.py
index 40c123135c85fddd8b483295c3664e09872f2899..e4d5688562bd5d1590f8922d144ef582d86965ec 100644
--- a/slapos/recipe/kvm/__init__.py
+++ b/slapos/recipe/kvm/__init__.py
@@ -59,7 +59,8 @@ class Recipe(BaseSlapRecipe):
 
     # Install the socket_connection_attempt script
     catcher = zc.buildout.easy_install.scripts(
-      [('check_port_listening', __name__ + 'socket_connection_attempt', 'connection_attempt')],
+      [('check_port_listening', 'slapos.recipe.kvm.socket_connection_attempt',
+        'connection_attempt')],
       self.ws,
       sys.executable,
       self.bin_directory,
@@ -84,11 +85,11 @@ class Recipe(BaseSlapRecipe):
 
     self.linkBinary()
     self.computer_partition.setConnectionDict(dict(
-        url = "https://[%s]:%s/vnc.html?host=[%s]&port=%s&encrypt=1" % (noVNC_conf['source_ip'],
-                                                     noVNC_conf['source_port'],
-                                                     noVNC_conf['source_ip'],
-                                                     noVNC_conf['source_port']
-                                                     ),
+        url = "https://[%s]:%s/vnc_auto.html?host=[%s]&port=%s&encrypt=1" % (
+            noVNC_conf['source_ip'],
+            noVNC_conf['source_port'],
+            noVNC_conf['source_ip'],
+            noVNC_conf['source_port']),
         password = kvm_conf['vnc_passwd']))
 
     return self.path_list
@@ -155,7 +156,8 @@ class Recipe(BaseSlapRecipe):
 
     # Instanciate KVM
     kvm_template_location = pkg_resources.resource_filename(
-      __name__, 'template/kvm_run.in')
+                                             __name__, os.path.join(
+                                             'template', 'kvm_run.in'))
 
     kvm_runner_path = self.createRunningWrapper("kvm",
           self.substituteTemplate(kvm_template_location,
@@ -165,7 +167,9 @@ class Recipe(BaseSlapRecipe):
 
     # Instanciate KVM controller
     kvm_controller_template_location = pkg_resources.resource_filename(
-      __name__, 'template/kvm_controller_run.in')
+                                             __name__, os.path.join(
+                                             'template',
+                                             'kvm_controller_run.in' ))
 
     kvm_controller_runner_path = self.createRunningWrapper("kvm_controller",
           self.substituteTemplate(kvm_controller_template_location,
@@ -205,34 +209,24 @@ class Recipe(BaseSlapRecipe):
     """
 
     noVNC_conf = {}
-   
+
     noVNC_conf['source_ip']   = source_ip
     noVNC_conf['source_port'] = source_port
-    
-    # Install numpy.
-    # XXX-Cedric : this looks like a hack. Do we have better solution, knowing
-    # That websockify is not an egg?
-    numpy = zc.buildout.easy_install.install(['numpy'], self.options['eggs-directory'])
-    environment = dict(PYTHONPATH='%s' % numpy.entries[0])
-    
-    # Instanciate Websockify
-    websockify_runner_path = zc.buildout.easy_install.scripts([('websockify',
-      'slapos.recipe.librecipe.execute', 'executee_wait')], self.ws,
-      sys.executable, self.wrapper_directory, arguments=[
-        [sys.executable.strip(),
-         self.options['websockify_path'],
-         '--web',
-         self.options['noVNC_location'],
-         '--key=%s' % (self.key_path),
-         '--cert=%s' % (self.certificate_path),
-         '--ssl-only',
-         '%s:%s' % (source_ip, source_port),
-         '%s:%s' % (target_ip, target_port)],
-        [self.certificate_path, self.key_path],
-        environment]
-       )[0]
-
-    self.path_list.append(websockify_runner_path)
+
+    execute_arguments = [[
+        self.options['websockify'].strip(),
+        '--web',
+        self.options['noVNC_location'],
+        '--key=%s' % (self.key_path),
+        '--cert=%s' % (self.certificate_path),
+        '--ssl-only',
+        '%s:%s' % (source_ip, source_port),
+        '%s:%s' % (target_ip, target_port)],
+        [self.certificate_path, self.key_path]]
+
+    self.path_list.extend(zc.buildout.easy_install.scripts([('websockify',
+      'slapos.recipe.librecipe.execute', 'execute_wait')], self.ws, sys.executable,
+      self.wrapper_directory, arguments=execute_arguments))
 
     # Add noVNC promise
     self.port_listening_promise_conf.update(hostname=noVNC_conf['source_ip'],
diff --git a/slapos/recipe/kvm/certificate_authority.py b/slapos/recipe/kvm/certificate_authority.py
index 8caffc0c21b2852f259a9b6f85bda5e311965c47..d05a460649c01edefd09e5caa09f3feab899ddcf 100755
--- a/slapos/recipe/kvm/certificate_authority.py
+++ b/slapos/recipe/kvm/certificate_authority.py
@@ -2,6 +2,7 @@ import os
 import subprocess
 import time
 import ConfigParser
+import uuid
 
 
 def popenCommunicate(command_list, input=None):
@@ -42,9 +43,10 @@ class CertificateAuthority:
     try:
       # no CA, let us create new one
       popenCommunicate([self.openssl_binary, 'req', '-nodes', '-config',
-          self.openssl_configuration, '-new', '-x509', '-extensions',
-          'v3_ca', '-keyout', self.key, '-out', self.certificate,
-          '-days', '10950'], 'Automatic Certificate Authority\n')
+          self.openssl_configuration, '-new', '-x509', '-extensions', 'v3_ca',
+          '-keyout', self.key, '-out', self.certificate, '-days', '10950'],
+          # Authority name will be random, so no instance has the same issuer
+          'Certificate Authority %s\n' % uuid.uuid1())
     except:
       try:
         for f in file_list:
diff --git a/slapos/recipe/lamp/template/my.cnf.in b/slapos/recipe/lamp/template/my.cnf.in
index 043fb3ad56bbea3b3e35766a9b019cd8dbf7cc77..00eb280232a8e41206dccdca6be7fe15e58a1591 100644
--- a/slapos/recipe/lamp/template/my.cnf.in
+++ b/slapos/recipe/lamp/template/my.cnf.in
@@ -15,12 +15,13 @@ socket = %(socket)s
 datadir = %(data_directory)s
 pid-file = %(pid_file)s
 log-error = %(error_log)s
-log-slow-file = %(slow_query_log)s
+slow_query_log
+slow_query_log_file = %(slow_query_log)s
 long_query_time = 5
 max_allowed_packet = 128M
 query_cache_size = 32M
 
-plugin-load = ha_innodb_plugin.so
+plugin-load = ha_groonga.so;ha_sphinx.so
 
 # The following are important to configure and depend a lot on to the size of
 # your database and the available resources.
diff --git a/slapos/recipe/mysql/template/my.cnf.in b/slapos/recipe/mysql/template/my.cnf.in
index 82af0f3e0289c98810dd6616bb5487cb07b0a33c..00eb280232a8e41206dccdca6be7fe15e58a1591 100644
--- a/slapos/recipe/mysql/template/my.cnf.in
+++ b/slapos/recipe/mysql/template/my.cnf.in
@@ -15,12 +15,13 @@ socket = %(socket)s
 datadir = %(data_directory)s
 pid-file = %(pid_file)s
 log-error = %(error_log)s
-#log-slow-file = %(slow_query_log)s
+slow_query_log
+slow_query_log_file = %(slow_query_log)s
 long_query_time = 5
 max_allowed_packet = 128M
 query_cache_size = 32M
 
-plugin-load = ha_innodb_plugin.so
+plugin-load = ha_groonga.so;ha_sphinx.so
 
 # The following are important to configure and depend a lot on to the size of
 # your database and the available resources.
diff --git a/slapos/recipe/vifib.py b/slapos/recipe/vifib.py
index 55ec2243b508aeb3e05114585825ca9e0e59f59d..7bd11b6bac0eec2e3318a84438f6d7989ef8c859 100644
--- a/slapos/recipe/vifib.py
+++ b/slapos/recipe/vifib.py
@@ -147,7 +147,10 @@ class Recipe(slapos.recipe.erp5.Recipe):
         conversion_server_conf,
       # as installERP5Site is not trusted (yet) and this recipe is production
       # ready expose more information
-      mysql_url='%(mysql_database)s@%(ip)s:%(tcp_port)s %(mysql_user)s %(mysql_password)s' % mysql_conf,
+      # XXX Use socket access to prevent unwanted connections to original MySQL
+      #     server when cloning an existing ERP5 instance.
+      #     TCP will be required if MySQL is in a different partition/server.
+      mysql_url='%(mysql_database)s %(mysql_user)s %(mysql_password)s %(socket)s' % mysql_conf,
     ))
     return self.path_list
 
@@ -200,7 +203,10 @@ class Recipe(slapos.recipe.erp5.Recipe):
         conversion_server_conf,
       # as installERP5Site is not trusted (yet) and this recipe is production
       # ready expose more information
-      mysql_url='%(mysql_database)s@%(ip)s:%(tcp_port)s %(mysql_user)s %(mysql_password)s' % mysql_conf,
+      # XXX Use socket access to prevent unwanted connections to original MySQL
+      #     server when cloning an existing ERP5 instance.
+      #     TCP will be required if MySQL is in a different partition/server.
+      mysql_url='%(mysql_database)s %(mysql_user)s %(mysql_password)s %(socket)s' % mysql_conf,
     ))
     return self.path_list
 
diff --git a/slapos/recipe/xwiki/template/my.cnf.in b/slapos/recipe/xwiki/template/my.cnf.in
index 09171fb6d9837199f4030f574791df31c251dc54..00eb280232a8e41206dccdca6be7fe15e58a1591 100644
--- a/slapos/recipe/xwiki/template/my.cnf.in
+++ b/slapos/recipe/xwiki/template/my.cnf.in
@@ -15,58 +15,39 @@ socket = %(socket)s
 datadir = %(data_directory)s
 pid-file = %(pid_file)s
 log-error = %(error_log)s
-log-slow-queries = %(slow_query_log)s
+slow_query_log
+slow_query_log_file = %(slow_query_log)s
 long_query_time = 5
-skip-locking
-key_buffer = 384M
 max_allowed_packet = 128M
-table_cache = 512
-sort_buffer_size = 2M
-read_buffer_size = 2M
-read_rnd_buffer_size = 8M
-myisam_sort_buffer_size = 64M
-thread_cache_size = 8
 query_cache_size = 32M
-# Try number of CPU's*2 for thread_concurrency
-thread_concurrency = 8
 
-# Replication Master Server (default)
-# binary logging is required for replication
-log-bin=mysql-bin
+plugin-load = ha_groonga.so;ha_sphinx.so
 
-# required unique id between 1 and 2^32 - 1
-# defaults to 1 if master-host is not set
-# but will not function as a master if omitted
-server-id	= 1
+# The following are important to configure and depend a lot on to the size of
+# your database and the available resources.
+#innodb_buffer_pool_size = 4G
+#innodb_log_file_size = 256M
+#innodb_log_buffer_size = 8M
+
+# Some dangerous settings you may want to uncomment if you only want
+# performance or less disk access. Useful for unit tests.
+#innodb_flush_log_at_trx_commit = 0
+#innodb_flush_method = nosync
+#innodb_doublewrite = 0
+#sync_frm = 0
+
+# Uncomment the following if you need binary logging, which is recommended
+# on production instances (either for replication or incremental backups).
+#log-bin=mysql-bin
 
 # Force utf8 usage
 collation_server = utf8_unicode_ci
 character_set_server = utf8
-default-character-set = utf8
 skip-character-set-client-handshake
 
-[mysqldump]
-quick
-max_allowed_packet = 16M
-
 [mysql]
 no-auto-rehash
-# Remove the next comment character if you are not familiar with SQL
-#safe-updates
 socket = %(socket)s
 
-[isamchk]
-key_buffer = 256M
-sort_buffer_size = 256M
-read_buffer = 2M
-write_buffer = 2M
-
-[myisamchk]
-key_buffer = 256M
-sort_buffer_size = 256M
-read_buffer = 2M
-write_buffer = 2M
-
 [mysqlhotcopy]
 interactive-timeout
-
diff --git a/software/erp5/instance.cfg b/software/erp5/instance.cfg
index 05e6988f5ef6f3efeefc51139834f45128b12032..26a17f269ee1969d14e195b3633fb30b47a79a87 100644
--- a/software/erp5/instance.cfg
+++ b/software/erp5/instance.cfg
@@ -34,14 +34,31 @@ tidstorage_repozo_binary = ${buildout:bin-directory}/tidstorage_repozo
 tidstoraged_binary = ${buildout:bin-directory}/tidstoraged
 xtrabackup_binary = ${xtrabackup:location}/bin/xtrabackup_51
 zabbix_agentd_binary = ${zabbix-agent:location}/sbin/zabbix_agentd
-mk-variable-advisor_binary = ${perl:siteprefix}/bin/mk-variable-advisor
-mk-table-usage_binary = ${perl:siteprefix}/bin/mk-table-usage
-mk-visual-explain_binary = ${perl:siteprefix}/bin/mk-visual-explain
-mk-config-diff_binary = ${perl:siteprefix}/bin/mk-config-diff
-mk-deadlock-logger_binary = ${perl:siteprefix}/bin/mk-deadlock-logger
-mk-error-log_binary = ${perl:siteprefix}/bin/mk-error-log
-mk-index-usage_binary = ${perl:siteprefix}/bin/mk-index-usage
-mk-query-advisor_binary = ${perl:siteprefix}/bin/mk-query-advisor
+pt-archiver_binary = ${perl:siteprefix}/bin/pt-archiver
+pt-config-diff_binary = ${perl:siteprefix}/bin/pt-config-diff
+pt-deadlock-logger_binary = ${perl:siteprefix}/bin/pt-deadlock-logger
+pt-duplicate-key-checker_binary = ${perl:siteprefix}/bin/pt-duplicate-key-checker
+pt-fifo-split_binary = ${perl:siteprefix}/bin/pt-fifo-split
+pt-find_binary = ${perl:siteprefix}/bin/pt-find
+pt-fk-error-logger_binary = ${perl:siteprefix}/bin/pt-fk-error-logger
+pt-heartbeat_binary = ${perl:siteprefix}/bin/pt-heartbeat
+pt-index-usage_binary = ${perl:siteprefix}/bin/pt-index-usage
+pt-kill_binary = ${perl:siteprefix}/bin/pt-kill
+pt-log-player_binary = ${perl:siteprefix}/bin/pt-log-player
+pt-online-schema-change_binary = ${perl:siteprefix}/bin/pt-online-schema-change
+pt-query-advisor_binary = ${perl:siteprefix}/bin/pt-query-advisor
+pt-query-digest_binary = ${perl:siteprefix}/bin/pt-query-digest
+pt-show-grants_binary = ${perl:siteprefix}/bin/pt-show-grants
+pt-slave-delay_binary = ${perl:siteprefix}/bin/pt-slave-delay
+pt-slave-find_binary = ${perl:siteprefix}/bin/pt-slave-find
+pt-slave-restart_binary = ${perl:siteprefix}/bin/pt-slave-restart
+pt-table-checksum_binary = ${perl:siteprefix}/bin/pt-table-checksum
+pt-table-sync_binary = ${perl:siteprefix}/bin/pt-table-sync
+pt-tcp-model_binary = ${perl:siteprefix}/bin/pt-tcp-model
+pt-trend_binary = ${perl:siteprefix}/bin/pt-trend
+pt-upgrade_binary = ${perl:siteprefix}/bin/pt-upgrade
+pt-variable-advisor_binary = ${perl:siteprefix}/bin/pt-variable-advisor
+pt-visual-explain_binary = ${perl:siteprefix}/bin/pt-visual-explain
 
 # cloudooo specific configuration
 ooo_binary_path = ${libreoffice-bin:location}/program
@@ -55,6 +72,7 @@ link_binary_list =
   ${coreutils:location}/bin/ls
   ${coreutils:location}/bin/tr
   ${coreutils:location}/bin/uname
+  ${gettext:location}/lib/gettext/hostname
   ${git:location}/bin/git
   ${graphviz:location}/bin/dot
   ${grep:location}/bin/grep
diff --git a/software/erp5/software.cfg b/software/erp5/software.cfg
index 2bf19d74558ca1fcac42908f8a7feb36f4ba0ab3..8ed1387ebe1fc05bcdc483bd09682e9a95da5abb 100644
--- a/software/erp5/software.cfg
+++ b/software/erp5/software.cfg
@@ -28,7 +28,7 @@ configurator_bt5_list = erp5_core_proxy_field_legacy erp5_full_text_myisam_catal
 # Default template for erp5 instance.
 recipe = slapos.recipe.template
 url = ${:_profile_base_location_}/instance.cfg
-md5sum = 53f225e13bf7ebcd88bbc2b038c83b6f
+md5sum = 07f09cca8ad4d7858bb40d723998a889
 output = ${buildout:directory}/template.cfg
 mode = 0644
 
@@ -40,27 +40,75 @@ md5sum = cbe1d75339c6cb20e1aef818797face1
 output = ${buildout:directory}/schema.json
 mode = 0644
 
+[networkcache]
+# signature certificates of the following uploaders.
+#   Romain Courteaud
+#   Sebastien Robin
+#   Kazuhiko Shiozaki
+signature-certificate-list =
+  -----BEGIN CERTIFICATE-----
+  MIIB4DCCAUkCADANBgkqhkiG9w0BAQsFADA5MQswCQYDVQQGEwJGUjEZMBcGA1UE
+  CBMQRGVmYXVsdCBQcm92aW5jZTEPMA0GA1UEChMGTmV4ZWRpMB4XDTExMDkxNTA5
+  MDAwMloXDTEyMDkxNTA5MDAwMlowOTELMAkGA1UEBhMCRlIxGTAXBgNVBAgTEERl
+  ZmF1bHQgUHJvdmluY2UxDzANBgNVBAoTBk5leGVkaTCBnzANBgkqhkiG9w0BAQEF
+  AAOBjQAwgYkCgYEApYZv6OstoqNzxG1KI6iE5U4Ts2Xx9lgLeUGAMyfJLyMmRLhw
+  boKOyJ9Xke4dncoBAyNPokUR6iWOcnPHtMvNOsBFZ2f7VA28em3+E1JRYdeNUEtX
+  Z0s3HjcouaNAnPfjFTXHYj4um1wOw2cURSPuU5dpzKBbV+/QCb5DLheynisCAwEA
+  ATANBgkqhkiG9w0BAQsFAAOBgQBCZLbTVdrw3RZlVVMFezSHrhBYKAukTwZrNmJX
+  mHqi2tN8tNo6FX+wmxUUAf3e8R2Ymbdbn2bfbPpcKQ2fG7PuKGvhwMG3BlF9paEC
+  q7jdfWO18Zp/BG7tagz0jmmC4y/8akzHsVlruo2+2du2freE8dK746uoMlXlP93g
+  QUUGLQ==
+  -----END CERTIFICATE-----
+  -----BEGIN CERTIFICATE-----
+  MIIB8jCCAVugAwIBAgIJAPu2zchZ2BxoMA0GCSqGSIb3DQEBBQUAMBIxEDAOBgNV
+  BAMMB3RzeGRldjMwHhcNMTExMDE0MTIxNjIzWhcNMTIxMDEzMTIxNjIzWjASMRAw
+  DgYDVQQDDAd0c3hkZXYzMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCrPbh+
+  YGmo6mWmhVb1vTqX0BbeU0jCTB8TK3i6ep3tzSw2rkUGSx3niXn9LNTFNcIn3MZN
+  XHqbb4AS2Zxyk/2tr3939qqOrS4YRCtXBwTCuFY6r+a7pZsjiTNddPsEhuj4lEnR
+  L8Ax5mmzoi9nE+hiPSwqjRwWRU1+182rzXmN4QIDAQABo1AwTjAdBgNVHQ4EFgQU
+  /4XXREzqBbBNJvX5gU8tLWxZaeQwHwYDVR0jBBgwFoAU/4XXREzqBbBNJvX5gU8t
+  LWxZaeQwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQA07q/rKoE7fAda
+  FED57/SR00OvY9wLlFEF2QJ5OLu+O33YUXDDbGpfUSF9R8l0g9dix1JbWK9nQ6Yd
+  R/KCo6D0sw0ZgeQv1aUXbl/xJ9k4jlTxmWbPeiiPZEqU1W9wN5lkGuLxV4CEGTKU
+  hJA/yXa1wbwIPGvX3tVKdOEWPRXZLg==
+  -----END CERTIFICATE-----
+  -----BEGIN CERTIFICATE-----
+  MIIB7jCCAVegAwIBAgIJAJWA0jQ4o9DGMA0GCSqGSIb3DQEBBQUAMA8xDTALBgNV
+  BAMMBHg2MXMwIBcNMTExMTI0MTAyNDQzWhgPMjExMTEwMzExMDI0NDNaMA8xDTAL
+  BgNVBAMMBHg2MXMwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANdJNiFsRlkH
+  vq2kHP2zdxEyzPAWZH3CQ3Myb3F8hERXTIFSUqntPXDKXDb7Y/laqjMXdj+vptKk
+  3Q36J+8VnJbSwjGwmEG6tym9qMSGIPPNw1JXY1R29eF3o4aj21o7DHAkhuNc5Tso
+  67fUSKgvyVnyH4G6ShQUAtghPaAwS0KvAgMBAAGjUDBOMB0GA1UdDgQWBBSjxFUE
+  RfnTvABRLAa34Ytkhz5vPzAfBgNVHSMEGDAWgBSjxFUERfnTvABRLAa34Ytkhz5v
+  PzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAFLDS7zNhlrQYSQO5KIj
+  z2RJe3fj4rLPklo3TmP5KLvendG+LErE2cbKPqnhQ2oVoj6u9tWVwo/g03PMrrnL
+  KrDm39slYD/1KoE5kB4l/p6KVOdeJ4I6xcgu9rnkqqHzDwI4v7e8/D3WZbpiFUsY
+  vaZhjNYKWQf79l6zXfOvphzJ
+  -----END CERTIFICATE-----
+
+[erp5]
+revision = 7d57428a5cfd0fceed70acb9e86cf274558d606c
+
 [versions]
 MySQL-python = 1.2.3
 Paste = 1.7.5.1
-PasteScript = 1.7.4.2
+PasteScript = 1.7.5
 Products.CMFActionIcons = 2.1.3
 Products.CMFCalendar = 2.2.2
-Products.CMFCore = 2.2.4
+Products.CMFCore = 2.2.5
 Products.CMFDefault = 2.2.2
 Products.CMFTopic = 2.2.1
 Products.CMFUid = 2.2.1
-Products.DCWorkflowGraph = 0.4nxd001
+Products.DCWorkflowGraph = 0.4.1
 Products.ExternalEditor = 1.1.0
-Products.GenericSetup = 1.6.3
+Products.GenericSetup = 1.6.4
 Products.MimetypesRegistry = 2.0.3
-Products.PluggableAuthService = 1.7.5
+Products.PluggableAuthService = 1.7.6
 Products.PluginRegistry = 1.3b1
 Products.TIDStorage = 5.4.7.dev-r45842
 Products.Zelenium = 1.0.3
 StructuredText = 2.11.1
-Werkzeug = 0.7.1
-buildout-versions = 1.6
+Werkzeug = 0.8.1
 cElementTree = 1.0.5-20051216
 chardet = 1.0.1
 cloudooo = 1.2.3
@@ -73,40 +121,40 @@ elementtree = 1.2.7-20070827-preview
 erp5.recipe.cmmiforcei686 = 0.1.3
 erp5diff = 0.8.1.5
 eventlet = 0.9.16
-feedparser = 5.0.1
+feedparser = 5.1
 five.localsitemanager = 2.0.5
 greenlet = 0.3.1
 hexagonit.recipe.cmmi = 1.5.0
 hexagonit.recipe.download = 1.5.0
-http-parser = 0.7.0
-ipdb = 0.6
+http-parser = 0.7.1
+ipdb = 0.6.1
+ipython = 0.11
 meld3 = 0.6.7
 ordereddict = 1.1
 paramiko = 1.7.7.1
 plone.recipe.command = 1.1
 ply = 3.4
-psutil = 0.3.0
-pycrypto = 2.3
-python-ldap = 2.4.3
+psutil = 0.4.0
+python-ldap = 2.4.6
 python-memcached = 1.47
-restkit = 3.3.1
+restkit = 3.3.2
 rtjp-eventlet = 0.3.2
-slapos.cookbook = 0.25
-slapos.recipe.build = 0.6
-slapos.recipe.template = 1.1
+slapos.cookbook = 0.38
+slapos.recipe.build = 0.7
+slapos.recipe.template = 2.2
 threadframe = 0.2
 timerserver = 2.0.2
 urlnorm = 1.1.2
 uuid = 1.30
-validictory = 0.7.1
+validictory = 0.7.2
 xupdate-processor = 0.4
 
 # Required by:
-# slapos.core==0.14
-Flask = 0.7.2
+# slapos.core==0.20
+Flask = 0.8
 
 # Required by:
-# PasteScript==1.7.4.2
+# PasteScript==1.7.5
 # cloudooo==1.2.3
 PasteDeploy = 1.5.0
 
@@ -116,48 +164,60 @@ WSGIUtils = 0.7
 
 # Required by:
 # cloudooo==1.2.3
-# slapos.core==0.14
+# slapos.core==0.20
 argparse = 1.1
 
-# Required by:
-# slapos.recipe.template==1.1
-collective.recipe.template = 1.9
-
 # Required by:
 # SOAPpy==0.12.0nxd001
 fpconst = 0.7.2
 
 # Required by:
-# ipdb==0.6
-ipython = 0.11
-
-# Required by:
-# slapos.cookbook==0.25
+# slapos.cookbook==0.38
 netaddr = 0.7.6
 
 # Required by:
-# slapos.core==0.14
-netifaces = 0.4
+# slapos.core==0.20
+netifaces = 0.6
 
 # Required by:
 # cloudooo==1.2.3
 python-magic = 0.4.0.1
 
 # Required by:
-# slapos.cookbook==0.25
-# slapos.core==0.14
-# zc.buildout==1.5.3-dev-SlapOS-010
+# Products.CMFActionIcons==2.1.3
+# Products.CMFCalendar==2.2.2
+# Products.CMFCore==2.2.5
+# Products.CMFDefault==2.2.2
+# Products.CMFTopic==2.2.1
+# Products.CMFUid==2.2.1
+# Products.DCWorkflow==2.2.3nxd002
+# Products.DCWorkflowGraph==0.4.1
+# Products.ExternalEditor==1.1.0
+# Products.GenericSetup==1.6.4
+# Products.MimetypesRegistry==2.0.3
+# Products.PluggableAuthService==1.7.6
+# Products.PluginRegistry==1.3b1
+# Products.TIDStorage==5.4.7.dev-r45842
+# Products.Zelenium==1.0.3
+# Zope2==2.12.20
+# five.localsitemanager==2.0.5
+# python-ldap==2.4.6
+# slapos.cookbook==0.38
+# slapos.core==0.20
+# zc.buildout==1.6.0-dev-SlapOS-003
 # zc.recipe.egg==1.2.2
+# zope.deprecation==3.4.0
+# zope.structuredtext==3.4.0
 setuptools = 0.6c12dev-r88846
 
 # Required by:
-# slapos.cookbook==0.25
-slapos.core = 0.14
+# slapos.cookbook==0.38
+slapos.core = 0.20
 
 # Required by:
-# slapos.core==0.14
+# slapos.core==0.20
 supervisor = 3.0a10
 
 # Required by:
-# slapos.cookbook==0.25
+# slapos.cookbook==0.38
 xml-marshaller = 0.9.7
diff --git a/software/erp5testnode/instance.cfg b/software/erp5testnode/instance.cfg
index 2b0c318416e2e56226958a4c1174bfadc828c8db..5c75e01d537f21a362dc5e16e83070017998f5af 100644
--- a/software/erp5testnode/instance.cfg
+++ b/software/erp5testnode/instance.cfg
@@ -8,11 +8,11 @@ develop-eggs-directory = ${buildout:develop-eggs-directory}
 [testnode]
 recipe = slapos.cookbook:erp5testnode
 
-buildbot_binary = ${buildout:bin-directory}/buildbot
+git_binary = ${git:location}/bin/git
 slapgrid_partition_binary = ${buildout:bin-directory}/slapgrid-cp
 slapgrid_software_binary = ${buildout:bin-directory}/slapgrid-sr
 slapproxy_binary = ${buildout:bin-directory}/slapproxy
 svn_binary = ${subversion:location}/bin/svn
-git_binary = ${git:location}/bin/git
 svnversion_binary = ${subversion:location}/bin/svnversion
+testnode = ${buildout:bin-directory}/testnode
 zip_binary = ${zip:location}/bin/zip
diff --git a/software/erp5testnode/software.cfg b/software/erp5testnode/software.cfg
index 3fdec1ac67996817ffdbf8143f38dd0b00b8e6f1..9ba59340addc93cc488bbe1eca2424b4120ceb65 100644
--- a/software/erp5testnode/software.cfg
+++ b/software/erp5testnode/software.cfg
@@ -1,8 +1,4 @@
 [buildout]
-extensions =
-  slapos.rebootstrap
-  slapos.zcbworkarounds
-  mr.developer
 
 find-links = http://www.nexedi.org/static/packages/source/slapos.buildout/
     http://dist.repoze.org
@@ -14,10 +10,10 @@ include-site-packages = false
 exec-sitecustomize = false
 
 versions = versions
-rebootstrap-section = python2.6
 
 extends =
-  ../../component/python-2.6/buildout.cfg
+  ../../stack/shacache-client.cfg
+  ../../component/python-2.7/buildout.cfg
   ../../component/subversion/buildout.cfg
   ../../component/git/buildout.cfg
   ../../component/lxml-python/buildout.cfg
@@ -25,37 +21,142 @@ extends =
 
 parts =
   template
-  bootstrap
+  lxml-python
   eggs
   subversion
   zip
   git
 
-[bootstrap]
-recipe = zc.recipe.egg
-eggs = zc.buildout
-suffix =
-scripts =
-    buildout=bootstrap2.6
-arguments = sys.argv[1:] + ["bootstrap"]
-
-[rebootstrap]
-section = python2.6
-version = 1
+# Separate from site eggs
+allowed-eggs-from-site-packages =
+include-site-packages = false
+exec-sitecustomize = false
 
-[versions]
-# Use SlapOS patched zc.buildout
-zc.buildout = 1.5.3-dev-SlapOS-001
+# Use only quite well working sites.
+allow-hosts =
+  *.nexedi.org
+  *.python.org
+  *.sourceforge.net
+  dist.repoze.org
+  effbot.org
+  github.com
+  peak.telecommunity.com
+  psutil.googlecode.com
+  www.dabeaz.com
 
 [eggs]
 recipe = zc.recipe.egg
 eggs =
   ${lxml-python:egg}
+  zc.buildout
+  slapos.libnetworkcache
   slapos.core
   slapos.cookbook
+  erp5.util[testnode]
+
+scripts =
+  testnode = erp5.util.testnode:main
+  slapgrid-cp = slapos.grid.slapgrid:runComputerPartition
+  slapgrid-sr = slapos.grid.slapgrid:runSoftwareRelease
+  slapproxy = slapos.proxy:main
+
+python = python2.7
+
+[lxml-python]
+python = python2.7
 
 [template]
 recipe = slapos.recipe.template
 url = ${:_profile_base_location_}/instance.cfg
 output = ${buildout:directory}/template.cfg
 mode = 0644
+md5sum = 08e3f92bce41efc5bfe044bb9d354786
+
+[networkcache]
+# Romain Courteaud + Sebastien Robin signature certificate
+signature-certificate-list =
+  -----BEGIN CERTIFICATE-----
+  MIIB4DCCAUkCADANBgkqhkiG9w0BAQsFADA5MQswCQYDVQQGEwJGUjEZMBcGA1UE
+  CBMQRGVmYXVsdCBQcm92aW5jZTEPMA0GA1UEChMGTmV4ZWRpMB4XDTExMDkxNTA5
+  MDAwMloXDTEyMDkxNTA5MDAwMlowOTELMAkGA1UEBhMCRlIxGTAXBgNVBAgTEERl
+  ZmF1bHQgUHJvdmluY2UxDzANBgNVBAoTBk5leGVkaTCBnzANBgkqhkiG9w0BAQEF
+  AAOBjQAwgYkCgYEApYZv6OstoqNzxG1KI6iE5U4Ts2Xx9lgLeUGAMyfJLyMmRLhw
+  boKOyJ9Xke4dncoBAyNPokUR6iWOcnPHtMvNOsBFZ2f7VA28em3+E1JRYdeNUEtX
+  Z0s3HjcouaNAnPfjFTXHYj4um1wOw2cURSPuU5dpzKBbV+/QCb5DLheynisCAwEA
+  ATANBgkqhkiG9w0BAQsFAAOBgQBCZLbTVdrw3RZlVVMFezSHrhBYKAukTwZrNmJX
+  mHqi2tN8tNo6FX+wmxUUAf3e8R2Ymbdbn2bfbPpcKQ2fG7PuKGvhwMG3BlF9paEC
+  q7jdfWO18Zp/BG7tagz0jmmC4y/8akzHsVlruo2+2du2freE8dK746uoMlXlP93g
+  QUUGLQ==
+  -----END CERTIFICATE-----
+  -----BEGIN CERTIFICATE-----
+  MIIB8jCCAVugAwIBAgIJAPu2zchZ2BxoMA0GCSqGSIb3DQEBBQUAMBIxEDAOBgNV
+  BAMMB3RzeGRldjMwHhcNMTExMDE0MTIxNjIzWhcNMTIxMDEzMTIxNjIzWjASMRAw
+  DgYDVQQDDAd0c3hkZXYzMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCrPbh+
+  YGmo6mWmhVb1vTqX0BbeU0jCTB8TK3i6ep3tzSw2rkUGSx3niXn9LNTFNcIn3MZN
+  XHqbb4AS2Zxyk/2tr3939qqOrS4YRCtXBwTCuFY6r+a7pZsjiTNddPsEhuj4lEnR
+  L8Ax5mmzoi9nE+hiPSwqjRwWRU1+182rzXmN4QIDAQABo1AwTjAdBgNVHQ4EFgQU
+  /4XXREzqBbBNJvX5gU8tLWxZaeQwHwYDVR0jBBgwFoAU/4XXREzqBbBNJvX5gU8t
+  LWxZaeQwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQA07q/rKoE7fAda
+  FED57/SR00OvY9wLlFEF2QJ5OLu+O33YUXDDbGpfUSF9R8l0g9dix1JbWK9nQ6Yd
+  R/KCo6D0sw0ZgeQv1aUXbl/xJ9k4jlTxmWbPeiiPZEqU1W9wN5lkGuLxV4CEGTKU
+  hJA/yXa1wbwIPGvX3tVKdOEWPRXZLg==
+  -----END CERTIFICATE-----
+
+[versions]
+# Use SlapOS patched zc.buildout
+zc.buildout = 1.6.0-dev-SlapOS-003
+
+Jinja2 = 2.6
+Werkzeug = 0.8.2
+erp5.util = 0.3
+hexagonit.recipe.cmmi = 1.5.0
+lxml = 2.3.2
+meld3 = 0.6.7
+slapos.cookbook = 0.38
+slapos.core = 0.21
+slapos.libnetworkcache = 0.11
+slapos.recipe.template = 2.2
+
+# Required by:
+# slapos.core==0.21
+Flask = 0.8
+
+# Required by:
+# slapos.cookbook==0.38
+PyXML = 0.8.5
+
+# Required by:
+# hexagonit.recipe.cmmi==1.5.0
+hexagonit.recipe.download = 1.5.0
+
+# Required by:
+# slapos.cookbook==0.38
+netaddr = 0.7.6
+
+# Required by:
+# slapos.core==0.21
+netifaces = 0.6
+
+# Required by:
+# erp5.util==0.3
+# slapos.cookbook==0.38
+# slapos.core==0.21
+# slapos.libnetworkcache==0.11
+# zc.buildout==1.6.0-dev-SlapOS-003
+setuptools = 0.6c12dev-r88846
+
+# Required by:
+# slapos.core==0.21
+supervisor = 3.0a12
+
+# Required by:
+# slapos.cookbook==0.38
+xml-marshaller = 0.9.7
+
+# Required by:
+# slapos.cookbook==0.38
+zc.recipe.egg = 1.3.2
+
+# Required by:
+# slapos.core==0.21
+zope.interface = 3.8.0
diff --git a/software/kvm/instance.cfg b/software/kvm/instance.cfg
index 92692ed826463fee89291823830c4c861c656fd2..65041a3f9aff4c2f506ad283d6b5520aec5233ce 100644
--- a/software/kvm/instance.cfg
+++ b/software/kvm/instance.cfg
@@ -11,7 +11,7 @@ qemu_path = ${kvm:location}/bin/qemu-system-x86_64
 qemu_img_path = ${kvm:location}/bin/qemu-img
 #slapmonitor_path = ${buildout:bin-directory}/slapmonitor
 #slapreport_path = ${buildout:bin-directory}/slapreport
-websockify_path = ${noVNC:location}/utils/wsproxy.py
+websockify = ${buildout:directory}/bin/websockify
 noVNC_location = ${noVNC:location}
 openssl_binary = ${openssl:location}/bin/openssl
 rdiff_backup_binary = ${buildout:bin-directory}/rdiff-backup
diff --git a/software/kvm/software.cfg b/software/kvm/software.cfg
index fb79bf26505f9210754f1410e64b12722a89c62e..541d80f78fdac489e51223edd05641e564ccdf69 100644
--- a/software/kvm/software.cfg
+++ b/software/kvm/software.cfg
@@ -1,77 +1,183 @@
 [buildout]
+extensions =
+  buildout-versions
+
 extends =
-  ../../stack/kvm.cfg
+  ../../component/dcron/buildout.cfg
+  ../../component/git/buildout.cfg
+  ../../component/gnutls/buildout.cfg
+  ../../component/libpng/buildout.cfg
+  ../../component/libuuid/buildout.cfg
+  ../../component/lxml-python/buildout.cfg
+  ../../component/noVNC/buildout.cfg
+  ../../component/openssl/buildout.cfg
+  ../../component/python-2.7/buildout.cfg
+  ../../component/rdiff-backup/buildout.cfg
+  ../../stack/shacache-client.cfg
 
+develop =
+  ${:parts-directory}/websockify
+
+parts =
+  template
+  kvm
+  eggs
+  check-local-eggs
+
+find-links +=
+  http://www.nexedi.org/static/packages/source/slapos.buildout/
+
+versions = versions
+
+# Use only quite well working sites.
+allow-hosts =
+  *.nexedi.org
+  *.python.org
+  *.sourceforge.net
+  alastairs-place.net
+  dist.repoze.org
+  effbot.org
+  github.com
+  peak.telecommunity.com
+  psutil.googlecode.com
+  www.dabeaz.com
+  www.owlfish.com
+
+#XXX-Cedric : Currently, one can only access KVM using noVNC.
+#             Ideally one should be able to access KVM by using either noVNC or VNC.
+#             Problem is: there is no native crypto support in web browsers, so we
+#             have to disable SSL in the qemu builtin VNC server and make it available
+#             only on localhost, so that only noVNC can listen to it.
+
+#XXX-Cedric: Check status of https://github.com/kanaka/noVNC/issues/13 to see
+#            when qemu has builtin support for websockets in its VNC server, in order
+#            to get rid of Websockify (socket <-> websocket proxy server) when ready.
+#            May solve the previous XXX depending on the implementation.
+
+#XXX-Cedric: Check status of
+#            https://www.tiolive.com/nexedi/bug_module/20110819-11F4F70 so that
+#            Chrome >= 14 and Firefox >= 7 can access noVNC. (should be solved)
+
+#XXX-Cedric : add list of keyboard layouts (azerty/us querty/...) parameter to qemu
+
+[kvm]
+recipe = hexagonit.recipe.cmmi
+url = http://downloads.sourceforge.net/project/kvm/qemu-kvm/0.15.1/qemu-kvm-0.15.1.tar.gz
+md5sum = 8800a7d6b3aa4a168ea7f78dc66c0320
+configure-options =
+  --disable-sdl
+  --disable-xen
+  --enable-vnc-tls
+  --disable-vnc-sasl
+  --disable-curses
+  --disable-curl
+  --enable-kvm
+  --disable-docs
+  --enable-vnc-png
+  --disable-vnc-jpeg
+  --extra-cflags="-I${gnutls:location}/include -I${libuuid:location}/include -I${zlib:location}/include -I${libpng:location}/include"
+  --extra-ldflags="-Wl,-rpath -Wl,${glib:location}/lib -L${glib:location}/lib -Wl,-rpath -Wl,${gnutls:location}/lib -L${gnutls:location}/lib -L${gettext:location}/lib -Wl,-rpath -Wl,${gettext:location}/lib -Wl,-rpath -Wl,${libpng:location}/lib -L${libpng:location}/lib -L${libuuid:location}/lib -Wl,-rpath -Wl,${libuuid:location}/lib -L${zlib:location}/lib -Wl,-rpath -Wl,${zlib:location}/lib -lpng -lz -lgnutls"
+  --disable-werror
+environment =
+  PATH=${pkgconfig:location}/bin:%(PATH)s
+  PKG_CONFIG_PATH=${gnutls:location}/lib/pkgconfig:${glib:location}/lib/pkgconfig
+
+[websockify]
+# XXX-Cedric : use official egg from pypi when it is released
+recipe = plone.recipe.command
+stop-on-error = true
+commit = e7363f43443deb9982bdb5c3db50eec475584b06
+repository = https://github.com/desaintmartin/websockify.git
+location = ${buildout:parts-directory}/${:_buildout_section_name_}
+git-binary = ${git:location}/bin/git
+command = export GIT_SSL_NO_VERIFY=true; (${:git-binary} clone --quiet ${:repository} ${:location} && cd ${:location} && ${:git-binary} reset --hard ${:commit}) || (rm -fr ${:location}; exit 1)
+update-command =
+
+[check-local-eggs]
+recipe = plone.recipe.command
+stop-on-error = true
+update-command = ${:command}
+command = grep parts ${buildout:develop-eggs-directory}/websockify.egg-link
+depends = ${eggs:dummy}
+
+[eggs]
+python = python2.7
+recipe = z3c.recipe.scripts
+dummy =
+  ${websockify:location}
+eggs =
+  ${lxml-python:egg}
+  slapos.cookbook
+  websockify
+
 [template]
 recipe = slapos.recipe.template
 url = ${:_profile_base_location_}/instance.cfg
-md5sum = d899f2111aab18ad25776f35ed49a91b
+md5sum = 298b146e4efce41bfd58b3f85d064ff1
 output = ${buildout:directory}/template.cfg
 mode = 0644
 
-[kvmsource]
-command =
-  (${git:location}/bin/git clone --quiet http://git.erp5.org/repos/slapos.kvm.git ${:location} && cd ${:location} && ${git:location}/bin/git reset --hard 94ee45cc02e69798cac8209d2296fd1751125018) || (rm -fr ${:location} ; exit 1)
-update-command =
-
 [versions]
+zc.buildout = 1.5.3-dev-SlapOS-010
+
+slapos.cookbook = 0.37
 Jinja2 = 2.6
-Werkzeug = 0.7.1
-buildout-versions = 1.6
+Werkzeug = 0.8.1
+buildout-versions = 1.7
 hexagonit.recipe.cmmi = 1.5.0
-lxml = 2.3
+lxml = 2.3.2
 meld3 = 0.6.7
-numpy = 1.6.1
 plone.recipe.command = 1.1
-slapos.recipe.template = 1.1
+slapos.recipe.template = 2.2
+z3c.recipe.scripts = 1.0.1
 
 # Required by:
-# slapos.core==0.14
-Flask = 0.7.2
+# slapos.core==0.20
+Flask = 0.8
 
 # Required by:
-# slapos.cookbook==0.20
+# slapos.cookbook==0.37
 PyXML = 0.8.4
 
-# Required by:
-# slapos.recipe.template==1.1
-collective.recipe.template = 1.9
-
 # Required by:
 # hexagonit.recipe.cmmi==1.5.0
 hexagonit.recipe.download = 1.5.0
 
 # Required by:
-# slapos.cookbook==0.20
-netaddr = 0.7.5
+# slapos.cookbook==0.37
+netaddr = 0.7.6
 
 # Required by:
-# slapos.core==0.14
-netifaces = 0.5
+# slapos.core==0.20
+netifaces = 0.6
+
+# Required by:
+# websockify==0.1-dev
+numpy = 1.6.1
 
 # Required by:
-# slapos.cookbook==0.20
-# slapos.core==0.14
-# zc.buildout==1.5.3-dev-SlapOS-005
+# slapos.cookbook==0.37
+# slapos.core==0.20
+# zc.buildout==1.5.3-dev-SlapOS-010
 # zc.recipe.egg==1.3.2
 setuptools = 0.6c12dev-r88846
 
 # Required by:
-# slapos.cookbook==0.20
-slapos.core = 0.14
+# slapos.cookbook==0.37
+slapos.core = 0.20
 
 # Required by:
-# slapos.core==0.14
+# slapos.core==0.20
 supervisor = 3.0a10
 
 # Required by:
-# slapos.cookbook==0.20
+# slapos.cookbook==0.37
 xml-marshaller = 0.9.7
 
 # Required by:
-# slapos.cookbook==0.20
+# slapos.cookbook==0.37
 zc.recipe.egg = 1.3.2
 
 # Required by:
-# slapos.core==0.14
-zope.interface = 3.7.0
+# slapos.core==0.20
+zope.interface = 3.8.0
\ No newline at end of file
diff --git a/software/lamp-template/software.cfg b/software/lamp-template/software.cfg
index b48476b2610d5cb450c85f025e1b8b02c828c7eb..41ff1212c7f7ac62cfdceccaa86b79834a3ac0a5 100644
--- a/software/lamp-template/software.cfg
+++ b/software/lamp-template/software.cfg
@@ -10,15 +10,15 @@ parts =
   downloadcache-workaround
   
 extends =
-  http://git.erp5.org/gitweb/slapos.git/blob_plain/refs/tags/slapos-0.50:/stack/lamp.cfg
-  http://git.erp5.org/gitweb/slapos.git/blob_plain/refs/tags/slapos-0.50:/stack/shacache-client.cfg
+  http://git.erp5.org/gitweb/slapos.git/blob_plain/refs/tags/slapos-0.60:/stack/lamp.cfg
+  http://git.erp5.org/gitweb/slapos.git/blob_plain/refs/tags/slapos-0.60:/stack/shacache-client.cfg
 
 [application]
 recipe = slapos.recipe.build:download-unpacked
 url = Student shall put here url of zipped or tarballed web page or application
 md5sum = Student may put here md5sum of this file, this is good idea
 #If provided tarball does not contain top directory, option shall be changed to false
-#strip-top-level-dir = true | false
+strip-top-level-dir = true
 #extract-directory = 
 
 [application-template]
diff --git a/software/lamp-template/template/wp-config.php.in b/software/lamp-template/template/wp-config.php.in
new file mode 100644
index 0000000000000000000000000000000000000000..217ef3acdee00329e2f760e8746f1b2e48440bab
--- /dev/null
+++ b/software/lamp-template/template/wp-config.php.in
@@ -0,0 +1,90 @@
+<?php
+/**
+ * The base configurations of the WordPress.
+ *
+ * This file has the following configurations: MySQL settings, Table Prefix,
+ * Secret Keys, WordPress Language, and ABSPATH. You can find more information
+ * by visiting {@link http://codex.wordpress.org/Editing_wp-config.php Editing
+ * wp-config.php} Codex page. You can get the MySQL settings from your web host.
+ *
+ * This file is used by the wp-config.php creation script during the
+ * installation. You don't have to use the web site, you can just copy this file
+ * to "wp-config.php" and fill in the values.
+ *
+ * @package WordPress
+ */
+
+// ** MySQL settings - You can get this info from your web host ** //
+/** The name of the database for WordPress */
+define('DB_NAME', 'Put here mysql database name template key');
+
+/** MySQL database username */
+define('DB_USER', 'Put here mysql user template key');
+
+/** MySQL database password */
+define('DB_PASSWORD', 'Put here mysql password template key');
+
+/** MySQL hostname */
+define('DB_HOST', 'Put here mysql host template key');
+
+/** Database Charset to use in creating database tables. */
+define('DB_CHARSET', 'utf8');
+
+/** The Database Collate type. Don't change this if in doubt. */
+define('DB_COLLATE', '');
+
+/**#@+
+ * Authentication Unique Keys and Salts.
+ *
+ * Change these to different unique phrases!
+ * You can generate these using the {@link https://api.wordpress.org/secret-key/1.1/salt/ WordPress.org secret-key service}
+ * You can change these at any point in time to invalidate all existing cookies. This will force all users to have to log in again.
+ *
+ * @since 2.6.0
+ */
+define('AUTH_KEY',         'put your unique phrase here');
+define('SECURE_AUTH_KEY',  'put your unique phrase here');
+define('LOGGED_IN_KEY',    'put your unique phrase here');
+define('NONCE_KEY',        'put your unique phrase here');
+define('AUTH_SALT',        'put your unique phrase here');
+define('SECURE_AUTH_SALT', 'put your unique phrase here');
+define('LOGGED_IN_SALT',   'put your unique phrase here');
+define('NONCE_SALT',       'put your unique phrase here');
+
+/**#@-*/
+
+/**
+ * WordPress Database Table prefix.
+ *
+ * You can have multiple installations in one database if you give each a unique
+ * prefix. Only numbers, letters, and underscores please!
+ */
+$table_prefix  = 'wp_';
+
+/**
+ * WordPress Localized Language, defaults to English.
+ *
+ * Change this to localize WordPress. A corresponding MO file for the chosen
+ * language must be installed to wp-content/languages. For example, install
+ * de_DE.mo to wp-content/languages and set WPLANG to 'de_DE' to enable German
+ * language support.
+ */
+define('WPLANG', '');
+
+/**
+ * For developers: WordPress debugging mode.
+ *
+ * Change this to true to enable the display of notices during development.
+ * It is strongly recommended that plugin and theme developers use WP_DEBUG
+ * in their development environments.
+ */
+define('WP_DEBUG', false);
+
+/* That's all, stop editing! Happy blogging. */
+
+/** Absolute path to the WordPress directory. */
+if ( !defined('ABSPATH') )
+	define('ABSPATH', dirname(__FILE__) . '/');
+
+/** Sets up WordPress vars and included files. */
+require_once(ABSPATH . 'wp-settings.php');
diff --git a/software/phpmyadmin/software.cfg b/software/phpmyadmin/software.cfg
index d9c2d198745912809cd4421da52407450211a7b0..98f4c2fc650e7250c8c97d6e5c2a280301895fa7 100644
--- a/software/phpmyadmin/software.cfg
+++ b/software/phpmyadmin/software.cfg
@@ -40,13 +40,6 @@ location = config.inc.php
 egg = slapos.cookbook
 module = lamp.simple
 
-[template]
-# Default template for the instance.
-recipe = slapos.recipe.template
-url = ${:_profile_base_location_}/instance.cfg
-#md5sum = Student shall put md5 of instance.cfg here
-output = ${buildout:directory}/template.cfg
-mode = 0644
 
 [instance-recipe-egg]
 recipe = zc.recipe.egg
diff --git a/software/slaprunner/software.cfg b/software/slaprunner/software.cfg
index 63d1c2ad3a174f45a3aa84af09e07881ca1d85a3..90ca6f89c14d4c37cdc373ef4e3bf153d763224d 100644
--- a/software/slaprunner/software.cfg
+++ b/software/slaprunner/software.cfg
@@ -36,19 +36,80 @@ eggs +=
   slapos.core
 
 [versions]
-slapos.cookbook = 0.12
+# Use SlapOS patched zc.buildout
+zc.buildout = 1.5.3-dev-SlapOS-010
 
-# Required by slapos.cookbook==0.12
-slapos.core = 0.8
-collective.recipe.template = 1.8
-netaddr = 0.7.5
-xml-marshaller = 0.9.7
-setuptools = 0.6c12dev-r88795
+# Pinned to old version (2.4 does not compile well everywhere)
+pycrypto = 2.3
 
+Jinja2 = 2.6
+Werkzeug = 0.8.1
+apache-libcloud = 0.5.2
+buildout-versions = 1.7
 hexagonit.recipe.cmmi = 1.5.0
+meld3 = 0.6.7
+slapos.cookbook = 0.31
+slapos.libnetworkcache = 0.10
+slapos.recipe.template = 2.2
+slapos.toolbox = 0.8
+
+# Required by:
+# slapos.core==0.18
+Flask = 0.8
+
+# Required by:
+# slapos.cookbook==0.31
+PyXML = 0.8.4
+
+# Required by:
+# hexagonit.recipe.cmmi==1.5.0
 hexagonit.recipe.download = 1.5.0
-plone.recipe.command = 1.1
-slapos.libnetworkcache = 0.2
 
-# Use SlapOS patched zc.buildout
-zc.buildout = 1.5.3-dev-SlapOS-009
+# Required by:
+# slapos.cookbook==0.31
+# slapos.core==0.18
+# xml-marshaller==0.9.7
+lxml = 2.3.1
+
+# Required by:
+# slapos.cookbook==0.31
+netaddr = 0.7.6
+
+# Required by:
+# slapos.core==0.18
+netifaces = 0.6
+
+# Required by:
+# slapos.toolbox==0.8
+paramiko = 1.7.7.1
+
+# Required by:
+# slapos.toolbox==0.8
+psutil = 0.3.0
+
+# Required by:
+# slapos.cookbook==0.31
+# slapos.core==0.18
+# zc.buildout==1.5.3-dev-SlapOS-010
+# zc.recipe.egg==1.3.2
+setuptools = 0.6c12dev-r88846
+
+# Required by:
+# slapos.cookbook==0.31
+slapos.core = 0.18
+
+# Required by:
+# slapos.core==0.18
+supervisor = 3.0a10
+
+# Required by:
+# slapos.cookbook==0.31
+xml-marshaller = 0.9.7
+
+# Required by:
+# slapos.cookbook==0.31
+zc.recipe.egg = 1.3.2
+
+# Required by:
+# slapos.core==0.18
+zope.interface = 3.8.0
\ No newline at end of file
diff --git a/software/vifib/software.cfg b/software/vifib/software.cfg
index 7a503c5c1f6b297ce0f4049d1c87f7cbbca9edc8..8a9e6c3f5286d45d9c0c9ebeb4b0f8acc1f17d7d 100644
--- a/software/vifib/software.cfg
+++ b/software/vifib/software.cfg
@@ -4,9 +4,23 @@ extends =
 
 parts +=
   vifib
+  check-slapos.core
+
+develop =
+  ${:parts-directory}/vifib
+
+[check-slapos.core]
+recipe = plone.recipe.command
+stop-on-error = true
+update-command = ${:command}
+command = grep parts ${buildout:develop-eggs-directory}/slapos.core.egg-link
 
 [eggs]
-eggs += slapos.core
+dummy =
+  ${vifib:location}
+eggs +=
+  suds
+  slapos.core
 
 [instance-recipe]
 module = vifib
@@ -20,8 +34,7 @@ repository_id_list += vifib/master
 [vifib]
 <= erp5
 repository = http://git.erp5.org/repos/slapos.core.git
-# tag: vifib-0.2
-revision = f42ad28f0aa47d8cdb028ce6a1796eb7ef6f066e
+revision =
 
 [local-bt5-repository]
 # XXX: workaround for zc.buildout bug, as list += ends up with adding new entry
diff --git a/stack/erp5.cfg b/stack/erp5.cfg
index 1e378dc309e3d2f1d690340656a9d0b26ad8bc45..a5f0b40cfba521ee69da41ee13d841f9b5f67a07 100644
--- a/stack/erp5.cfg
+++ b/stack/erp5.cfg
@@ -20,6 +20,7 @@ allow-hosts =
   *.nexedi.org
   *.python.org
   *.sourceforge.net
+  alastairs-place.net
   dist.repoze.org
   effbot.org
   github.com
@@ -30,7 +31,7 @@ allow-hosts =
 
 extends =
 # Exact version of Zope
-  http://svn.zope.org/repos/main/Zope/tags/2.12.19/versions.cfg
+  http://svn.zope.org/repos/main/Zope/tags/2.12.20/versions.cfg
   ../component/logrotate/buildout.cfg
   ../component/dcron/buildout.cfg
   ../component/file/buildout.cfg
@@ -46,11 +47,12 @@ extends =
   ../component/kumo/buildout.cfg
   ../component/libreoffice-bin/buildout.cfg
   ../component/lxml-python/buildout.cfg
-  ../component/maatkit/buildout.cfg
+  ../component/percona-toolkit/buildout.cfg
   ../component/mariadb/buildout.cfg
   ../component/memcached/buildout.cfg
   ../component/mysql-python/buildout.cfg
   ../component/pdftk/buildout.cfg
+  ../component/pycrypto-python/buildout.cfg
   ../component/pysvn-python/buildout.cfg
   ../component/python-2.6/buildout.cfg
   ../component/python-2.7/buildout.cfg
@@ -89,6 +91,7 @@ parts =
   libpng
   ghostscript
   mariadb
+  mroonga-mariadb
   sphinx
   imagemagick
   kumo
@@ -98,6 +101,9 @@ parts =
   tesseract
   hookbox
   bootstrap2.6
+  perl-DBD-mariadb
+  perl-DBI
+  percona-toolkit
   zabbix-agent
   pdftk
   dcron
@@ -218,7 +224,7 @@ location = ${buildout:parts-directory}/${:_buildout_section_name_}
 stop-on-error = true
 repository = http://git.erp5.org/repos/erp5.git
 branch = master
-revision = f1bc8fdc0e4ce17530b32468c2affda8a6e9e896
+revision =
 command = ${git:location}/bin/git clone --quiet -b ${:branch} ${:repository} ${:location} && if [ -n ${:revision} ]; then cd ${:location} && ${git:location}/bin/git reset --quiet --hard ${:revision} ; fi
 update-command = cd ${:location} && ${git:location}/bin/git pull --quiet && if [ -n ${:revision} ]; then cd ${:location} && ${git:location}/bin/git reset --quiet --hard ${:revision} ; fi
 
@@ -304,7 +310,7 @@ eggs =
   ${lxml-python:egg}
   ${python-ldap-python:egg}
   ${pysvn-python:egg}
-  pycrypto
+  ${pycrypto-python:egg}
   PyXML
   SOAPpy
   cElementTree
@@ -406,7 +412,7 @@ scripts =
 
 [versions]
 # Use SlapOS patched zc.buildout
-zc.buildout = 1.5.3-dev-SlapOS-010
+zc.buildout = 1.6.0-dev-SlapOS-003
 
 # pin Acquisition and Products.DCWorkflow to Nexedi flavour of eggs
 Acquisition = 2.13.7nxd001
diff --git a/stack/kvm.cfg b/stack/kvm.cfg
deleted file mode 100644
index 6c6af9c6f842bdc6d30c079b722fa5888578eec5..0000000000000000000000000000000000000000
--- a/stack/kvm.cfg
+++ /dev/null
@@ -1,104 +0,0 @@
-[buildout]
-extends =
-  shacache-client.cfg
-  ../component/python-2.7/buildout.cfg
-  ../component/lxml-python/buildout.cfg
-  ../component/git/buildout.cfg
-  ../component/zlib/buildout.cfg
-  ../component/readline/buildout.cfg
-  ../component/ncurses/buildout.cfg
-  ../component/libuuid/buildout.cfg
-  ../component/noVNC/buildout.cfg
-  ../component/openssl/buildout.cfg
-  ../component/rdiff-backup/buildout.cfg
-  ../component/dcron/buildout.cfg
-  ../component/libpng/buildout.cfg
-
-#XXX-Cedric : Currently, one can only access to KVM using noVNC.
-#             Ideally one should be able to access KVM by using either NoVNC or VNC.
-#             Problem is : no native crypto support in web browsers. So we have to disable ssl
-#             In qemu builtin vnc server, and make it available only for localhost 
-#             so that only novnc can listen to it.
-
-#XXX-Cedric: Check status of https://github.com/kanaka/noVNC/issues/13 to see
-#            When qemu has builtin support for websockets in vnc server to get rid of 
-#            Websockify (socket <-> websocket proxy server) when it is ready.
-#            May solve previous XXX depending on the implementation.
-
-parts =
-  template
-  gnutls
-  kvm
-  eggs
-
-find-links +=
-  http://www.nexedi.org/static/packages/source/slapos.buildout/
-
-versions = versions
-
-[gpg-error]
-recipe = hexagonit.recipe.cmmi
-url = ftp://ftp.gnupg.org/gcrypt/libgpg-error/libgpg-error-1.10.tar.gz
-md5sum = 7c2710ef439f82ac429b88fec88e9a4c
-
-[gcrypt]
-recipe = hexagonit.recipe.cmmi
-url = ftp://ftp.gnupg.org/gcrypt/libgcrypt/libgcrypt-1.4.6.tar.gz
-md5sum = bfd45922eefb8a24d598af77366220d4
-configure-options =
-  --with-gpg-error-prefix=${gpg-error:location}
-environment =
-  CPPFLAGS=-I${gpg-error:location}/include
-  LDFLAGS=-Wl,-rpath -Wl,${gpg-error:location}/lib -Wl,${gpg-error:location}/lib/libgpg-error.so.0
-
-[gnutls]
-recipe = hexagonit.recipe.cmmi
-url = ftp://ftp.gnupg.org/gcrypt/gnutls/gnutls-2.8.6.tar.bz2
-md5sum = eb0a6d7d3cb9ac684d971c14f9f6d3ba
-configure-options =
-  --with-libgcrypt-prefix=${gcrypt:location}
-environment =
-  CPPFLAGS=-I${zlib:location}/include -I${readline:location}/include -I${ncurses:location}/include -I${ncurses:location}/include/ncursesw -I${gcrypt:location}/include -I${gpg-error:location}/include
-  LDFLAGS=-L${readline:location}/lib -L${ncurses:location}/lib -L${gcrypt:location}/lib -Wl,-rpath -Wl,${zlib:location}/lib -Wl,-rpath -Wl,${readline:location}/lib -Wl,-rpath -Wl,${ncurses:location}/lib -Wl,-rpath -Wl,${gcrypt:location}/lib -Wl,-rpath -Wl,${gpg-error:location}/lib -Wl,${gcrypt:location}/lib/libgcrypt.so.11
-  PKG_CONFIG=${zlib:location}/lib/pkgconfig
-
-[kvm]
-recipe = hexagonit.recipe.cmmi
-path = ${kvmsource:location}/
-configure-options =
-  --disable-sdl
-  --disable-xen
-  --enable-vnc-tls
-  --disable-vnc-sasl
-  --disable-curses
-  --disable-curl
-  --enable-kvm
-  --disable-docs
-  --enable-vnc-png
-  --disable-vnc-jpeg
-  --extra-cflags="-I${gnutls:location}/include -I${libuuid:location}/include -I${zlib:location}/include -I${libpng:location}/include"
-  --extra-ldflags="-Wl,-rpath -Wl,${gnutls:location}/lib -L${gnutls:location}/lib -Wl,-rpath -Wl,${libpng:location}/lib -L${libpng:location}/lib -L${libuuid:location}/lib -Wl,-rpath -Wl,${libuuid:location}/lib -L${zlib:location}/lib -Wl,-rpath -Wl,${zlib:location}/lib -lpng -lz -lgnutls"
-  --disable-werror
-environment =
-  PKG_CONFIG_PATH=${gnutls:location}/lib/pkgconfig
-
-[kvmsource]
-recipe=plone.recipe.command
-location = ${buildout:parts-directory}/${:_buildout_section_name_}
-stop-on-error = true
-#tag = slapos-v0.1
-command =
-  (${git:location}/bin/git clone --quiet http://git.erp5.org/repos/slapos.kvm.git ${:location} ) || (rm -fr ${:location} ; exit 1)
-update-command =
- cd ${:location} && ${git:location}/bin/git pull --quiet origin master
-
-[eggs]
-python = python2.7
-recipe = z3c.recipe.scripts
-eggs =
-  ${lxml-python:egg}
-  slapos.cookbook
-  numpy
-
-[versions]
-zc.buildout = 1.5.3-dev-SlapOS-009
diff --git a/stack/nbd.cfg b/stack/nbd.cfg
index 9d5d8787b6ea53b8cc958c97553f038a6f36596a..5dde4b493faffda46c2ac9c9f9c7cae430922496 100644
--- a/stack/nbd.cfg
+++ b/stack/nbd.cfg
@@ -37,6 +37,8 @@ version = 3
 eggs = slapos.libnetworkcache
 
 [nbdserver]
+# XXX-Cedric : use official tarball from kvm website
+#              (new kvm code does not seem to need special patch)
 recipe = hexagonit.recipe.cmmi
 path = ${nbdserversource:location}/
 configure-options =