diff --git a/component/apache-php/buildout.cfg b/component/apache-php/buildout.cfg
index 520bfbbfbe5431ab781e65001ed10dd4e3b35d49..71ea1daa372e2eb23dec0e51d0c22c3a006b7624 100644
--- a/component/apache-php/buildout.cfg
+++ b/component/apache-php/buildout.cfg
@@ -3,47 +3,57 @@ parts = apache-php
 
 extends = 
   ../apache/buildout.cfg
+  ../cclient/buildout.cfg
+  ../curl/buildout.cfg  
+  ../freetype/buildout.cfg
+  ../gettext/buildout.cfg
+  ../libiconv/buildout.cfg
+  ../libjpeg/buildout.cfg
+  ../libpng/buildout.cfg
+  ../libtool/buildout.cfg
+  ../mariadb/buildout.cfg
+  ../openldap/buildout.cfg
+  ../pkgconfig/buildout.cfg
+  ../zip/buildout.cfg
 
 [apache-php]
 # Note: Shall react on each build of apache and reinstall itself
 recipe = hexagonit.recipe.cmmi
-url = http://fr2.php.net/get/php-5.3.6.tar.bz2/from/this/mirror
-md5sum = 2286f5a82a6e8397955a0025c1c2ad98
+url = http://fr2.php.net/distributions/php-5.3.8.tar.gz
+md5sum = f4ce40d5d156ca66a996dbb8a0e7666a
 configure-options =
   --with-apxs2=${apache:location}/bin/apxs
   --with-libxml-dir=${libxml2:location}
   --with-mysql=${mariadb:location}
   --with-zlib-dir=${zlib:location}
   --with-mcrypt=${libmcrypt:location}
-  --enable-mbstring
-  --enable-session
-  --disable-all
-
-environment =
-  PKG_CONFIG_PATH=${libxml2:location}/lib/pkgconfig
-  PATH=${libxml2:location}/bin:%(PATH)s
-  LDFLAGS =-L${libtool:location}/lib -Wl,-rpath -Wl,${libtool:location}/lib -L${mariadb:location}/lib -Wl,-rpath -Wl,${mariadb:location}/lib -L${zlib:location}/lib -Wl,-rpath -Wl,${zlib:location}/lib -L${libmcrypt:location}/lib -Wl,-rpath -Wl,${libmcrypt:location}/lib
-
-[apache-php-xmlrpc-gd]
-# Note: Shall react on each build of apache and reinstall itself
-recipe = hexagonit.recipe.cmmi
-url = http://fr2.php.net/get/php-5.3.6.tar.bz2/from/this/mirror
-md5sum = 2286f5a82a6e8397955a0025c1c2ad98
-configure-options =
-  --with-apxs2=${apache:location}/bin/apxs
-  --with-libxml-dir=${libxml2:location}
   --with-gd
-  --with-zlib-dir=${zlib:location}
-  --with-mcrypt=${libmcrypt:location}
-  --with-xmlrpc=${xml-rpc:location}
+  --with-jpeg-dir=${libjpeg:location}
+  --with-png-dir=${libpng:location}
+  --enable-gd-native-ttf
+  --with-ttf
+  --with-freetype-dir=${freetype:location}
+  --with-pdo-mysql=mysqlnd
+  --with-mysqli=mysqlnd
+  --with-curl=${curl:location}
+  --with-zip-dir=${zip:location}
+  --with-imap=${cclient:location}  
+  --with-iconv-dir=${libiconv:location}
+  --with-gettext=${gettext:location}
+  --with-ldap=${openldap:location}
+  --with-imap-ssl
+  --with-openssl=${openssl:location}
+  --enable-libxml
   --enable-mbstring
   --enable-session
-  --disable-all
+  --enable-exif
+  --enable-zip
+  --enable-ftp
 
 environment =
-  PKG_CONFIG_PATH=${libxml2:location}/lib/pkgconfig
-  PATH=${libxml2:location}/bin:%(PATH)s
-  LDFLAGS =-L${xml-rpc:location}/lib -Wl,-rpath -Wl,${xml-rpc:location}/lib -L${zlib:location}/lib -Wl,-rpath -Wl,${zlib:location}/lib -L${libmcrypt:location}/lib -Wl,-rpath -Wl,${libmcrypt:location}/lib
+  PKG_CONFIG_PATH=${libxml2:location}/lib/pkgconfig:${openssl:location}/lib/pkgconfig
+  PATH=${pkgconfig:location}/bin:${libxml2:location}/bin:%(PATH)s
+  LDFLAGS =-L${libtool:location}/lib -Wl,-rpath -Wl,${libtool:location}/lib -L${mariadb:location}/lib -Wl,-rpath -Wl,${mariadb:location}/lib -L${zlib:location}/lib -Wl,-rpath -Wl,${zlib:location}/lib -L${libmcrypt:location}/lib -Wl,-rpath -Wl,${libmcrypt:location}/lib
 
 
 [libmcrypt]
diff --git a/component/apache/buildout.cfg b/component/apache/buildout.cfg
index 68136b181a73d0fad1195dc39084a99b69ee7bc5..335e87046b48c54b261a22ab808b6a07cf8f1c3a 100644
--- a/component/apache/buildout.cfg
+++ b/component/apache/buildout.cfg
@@ -16,8 +16,8 @@ extends =
 [apache-no-ssl]
 # inspired on http://old.aclark.net/team/aclark/blog/a-lamp-buildout-for-wordpress-and-other-php-apps/
 recipe = hexagonit.recipe.cmmi
-url = http://mir2.ovh.net/ftp.apache.org/dist//httpd/httpd-2.2.19.tar.bz2
-md5sum = 832f96a6ec4b8fc7cf49b9efd4e89060
+url = http://mir2.ovh.net/ftp.apache.org/dist//httpd/httpd-2.2.21.tar.bz2
+md5sum = 1696ae62cd879ab1d4dd9ff021a470f2
 configure-options = --enable-authn-alias
                     --enable-bucketeer
                     --enable-cache
@@ -41,14 +41,15 @@ configure-options = --enable-authn-alias
                     --enable-proxy-http
                     --enable-proxy-scgi
                     --enable-so
+                    --enable-dav
+                    --enable-dav-fs
                     --disable-ssl
                     --with-included-apr
                     --with-z=${zlib:location}
                     --with-expat=${libexpat:location}
                     --with-pcre=${pcre:location}
                     --with-sqlite3=${sqlite3:location}
-                    --with-dbm=gdbm
-                    --with-gdm=${gdbm:location}
+                    --with-gdbm=${gdbm:location}
                     --without-ssl
                     --without-lber
                     --without-ldap
@@ -65,8 +66,8 @@ configure-options = --enable-authn-alias
 [apache]
 # inspired on http://old.aclark.net/team/aclark/blog/a-lamp-buildout-for-wordpress-and-other-php-apps/
 recipe = hexagonit.recipe.cmmi
-url = http://mir2.ovh.net/ftp.apache.org/dist//httpd/httpd-2.2.19.tar.bz2
-md5sum = 832f96a6ec4b8fc7cf49b9efd4e89060
+url = http://mir2.ovh.net/ftp.apache.org/dist//httpd/httpd-2.2.21.tar.bz2
+md5sum = 1696ae62cd879ab1d4dd9ff021a470f2
 configure-options = --disable-static
                     --enable-authn-alias
                     --enable-bucketeer
@@ -90,6 +91,8 @@ configure-options = --disable-static
                     --enable-proxy-ftp
                     --enable-proxy-http
                     --enable-proxy-scgi
+                    --enable-dav
+                    --enable-dav-fs
                     --enable-so
                     --enable-ssl
                     --with-included-apr
@@ -98,8 +101,7 @@ configure-options = --disable-static
                     --with-expat=${libexpat:location}
                     --with-pcre=${pcre:location}
                     --with-sqlite3=${sqlite3:location}
-                    --with-dbm=gdbm
-                    --with-gdm=${gdbm:location}
+                    --with-gdbm=${gdbm:location}
                     --without-lber
                     --without-ldap
                     --without-ndbm
diff --git a/component/bash/buildout.cfg b/component/bash/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..ce64a8f59a9cc47aa1fee7b6d26a267b94fb6ade
--- /dev/null
+++ b/component/bash/buildout.cfg
@@ -0,0 +1,8 @@
+[buildout]
+parts =
+  bash
+
+[bash]
+recipe = hexagonit.recipe.cmmi
+url = ftp://ftp.gnu.org/gnu/bash/bash-4.2.tar.gz
+md5sum = 3fb927c7c33022f1c327f14a81c0d4b0
diff --git a/component/cclient/buildout.cfg b/component/cclient/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a8095323436aa2915cd2536a3f0dcd3689149a9b
--- /dev/null
+++ b/component/cclient/buildout.cfg
@@ -0,0 +1,40 @@
+# libc-client - UW IMAP server
+# ftp://ftp.cac.washington.edu/imap/
+
+[buildout]
+extends =
+  ../openssl/buildout.cfg
+
+parts =
+  cclient-patch
+  cclient
+
+[cclient-patch]
+recipe = slapos.recipe.download
+url = ${:_profile_base_location_}/imap-2007f.patch
+# md5sum of this patch may be added here; doing so is recommended
+download-only = True
+location = ${buildout:parts-directory}/${:_buildout_section_name_}
+filename = imap-2007f.patch
+
+[cclient]
+recipe = hexagonit.recipe.cmmi
+url = ftp://ftp.cac.washington.edu/imap/imap-2007f.tar.gz
+configure-command = true
+keep-compile-dir = true
+# cclient does not support parallel compilation
+make-options = 
+  slx
+  SSLTYPE=unix
+  SSLDIR=${openssl:location}
+  SSLCRYPTO=-lcrypto
+  IP=6
+  SSLLIB=${openssl:location}/lib
+  EXTRACFLAGS=-fPIC
+  CCLIENT=${buildout:parts-directory}
+  -j1
+
+patches = 
+  ${cclient-patch:location}/${cclient-patch:filename}
+
+patch-options = -p1
diff --git a/component/cclient/imap-2007f.patch b/component/cclient/imap-2007f.patch
new file mode 100644
index 0000000000000000000000000000000000000000..50b64970cdb305698f305ab9bf83a9741b66023c
--- /dev/null
+++ b/component/cclient/imap-2007f.patch
@@ -0,0 +1,32 @@
+--- old/Makefile	2011-09-22 13:19:53.000000000 +0100
++++ new/Makefile	2011-09-23 11:29:12.405271442 +0100
+@@ -580,7 +580,6 @@
+ 	@echo +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 	@echo
+ 	@echo Do you want to continue this build anyway?  Type y or n please:
+-	@$(SH) -c 'read x; case "$$x" in y) exit 0;; *) (make nounenc;exit 1);; esac'
+ 
+ nounenc:
+ 	@echo +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+@@ -639,7 +638,7 @@
+ 	@echo +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 	@echo
+ 	@echo Do you want to build with IPv6 anyway?  Type y or n please:
+-	@$(SH) -c 'read x; case "$$x" in y) exit 0;; *) (make noip6;exit 1);; esac'
++	#@$(SH) -c 'read x; case "$$x" in y) exit 0;; *) (make noip6;exit 1);; esac'
+ 	@echo OK, I will remember that you really want to build with IPv6.
+ 	@echo You will not see this message again.
+ 	@$(TOUCH) ip6
+@@ -731,6 +730,12 @@
+ 	$(SH) -c '$(RM) an ua OSTYPE SPECIALS c-client mtest imapd ipopd mailutil mlock dmail tmail || true'
+ 	$(CD) tools;$(MAKE) clean
+ 
++install:
++	install -v -d $(CCLIENT)/cclient/include
++	ln -svf $(CCLIENT)/cclient__compile__/imap-2007f/c-client $(CCLIENT)/cclient/include
++	install -v -d $(CCLIENT)/cclient/lib
++	ln -svf $(CCLIENT)/cclient__compile__/imap-2007f/c-client/c-client.a $(CCLIENT)/cclient/lib/libc-client.a
++
+ 
+ # A monument to a hack of long ago and far away...
+ love:
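
The install target added above copies nothing; it just symlinks the build results out of the compile directory that keep-compile-dir preserves. As a sketch (assuming the default ${buildout:parts-directory} layout and the CCLIENT value passed via make-options), the tree that PHP's --with-imap=${cclient:location} later relies on is:

  parts/cclient/include/c-client   -> parts/cclient__compile__/imap-2007f/c-client
  parts/cclient/lib/libc-client.a  -> parts/cclient__compile__/imap-2007f/c-client/c-client.a
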
diff --git a/component/dropbear/buildout.cfg b/component/dropbear/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..dbb0709203192325335ec072f569234d5c9b8484
--- /dev/null
+++ b/component/dropbear/buildout.cfg
@@ -0,0 +1,47 @@
+########################################################
+# Dropbear - a secure shell client and server for *nix #
+#                                                      #
+# http://matt.ucc.asn.au/dropbear/dropbear.html        #
+########################################################
+
+[buildout]
+extends = 
+  ../zlib/buildout.cfg
+
+parts =
+  dropbear
+
+[dropbear-userspace-patch]
+recipe = hexagonit.recipe.download
+md5sum = 89f575b9a9586b04ef9073c9c3af13ae
+url = ${:_profile_base_location_}/${:filename}
+filename = userspace.patch
+download-only = true
+
+[dropbear-ipv6-patch]
+recipe = hexagonit.recipe.download
+md5sum = b30dd58d68829a80eee69188134382ef
+url = ${:_profile_base_location_}/${:filename}
+filename = ipv6-support.patch
+download-only = true
+
+[dropbear]
+recipe = hexagonit.recipe.cmmi
+md5sum = 0284ea239083f04c8b874e08e1aca243
+url = http://matt.ucc.asn.au/dropbear/dropbear-0.53.1.tar.bz2
+
+configure-options =
+  --prefix=${buildout:parts-directory}/${:_buildout_section_name_}
+  --with-zlib=${zlib:location}
+  CFLAGS="-DENABLE_SINGLEUSER "
+
+environment =
+  CPPFLAGS =-I${zlib:location}/include
+  LDFLAGS =-Wl,-rpath=${zlib:location}/lib -L${zlib:location}/lib
+
+patches=
+  ${dropbear-userspace-patch:location}/${dropbear-userspace-patch:filename}
+  ${dropbear-ipv6-patch:location}/${dropbear-ipv6-patch:filename}
+
+patch-options=
+  -p1
diff --git a/component/dropbear/ipv6-support.patch b/component/dropbear/ipv6-support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5dc7100773bf11e223c2f0562fd99071019ee0bd
--- /dev/null
+++ b/component/dropbear/ipv6-support.patch
@@ -0,0 +1,57 @@
+@@ -0,0 +1,56 @@
+--- dropbear-0.52.orig/svr-runopts.c	Wed May 13 20:56:03 2009
++++ dropbear-0.52/svr-runopts.c	Wed May 13 22:20:22 2009
+@@ -311,27 +311,39 @@
+ static void addportandaddress(char* spec) {
+ 
+ 	char *myspec = NULL;
++	char *p = NULL;
+ 
+ 	if (svr_opts.portcount < DROPBEAR_MAX_PORTS) {
+ 
+ 		/* We don't free it, it becomes part of the runopt state */
+ 		myspec = m_strdup(spec);
+ 
+-		/* search for ':', that separates address and port */
+-		svr_opts.ports[svr_opts.portcount] = strchr(myspec, ':');
+-
+-		if (svr_opts.ports[svr_opts.portcount] == NULL) {
+-			/* no ':' -> the whole string specifies just a port */
+-			svr_opts.ports[svr_opts.portcount] = myspec;
+-		} else {
+-			/* Split the address/port */
+-			svr_opts.ports[svr_opts.portcount][0] = '\0'; 
+-			svr_opts.ports[svr_opts.portcount]++;
++		/* [ipv6]:port */
++		if (myspec[0] == '[' && (p = strchr(myspec, ']')) != NULL && *(p+1) == ':') {
++			*p = '\0';
++			p+=2;
++			myspec++;
++			svr_opts.ports[svr_opts.portcount] = p;
+ 			svr_opts.addresses[svr_opts.portcount] = myspec;
+-		}
+-
+-		if (svr_opts.addresses[svr_opts.portcount] == NULL) {
+-			/* no address given -> fill in the default address */
++		} else if ((p = strchr(myspec, '.')) != NULL) {
++			if ((p = strchr(p, ':')) == NULL) {
++				/* ipv4 */
++				svr_opts.ports[svr_opts.portcount] = m_strdup(DROPBEAR_DEFPORT);
++				svr_opts.addresses[svr_opts.portcount] = myspec;
++			} else {
++				/* ipv4:port */
++				*p = '\0';
++				p++;
++				svr_opts.ports[svr_opts.portcount] = p;
++				svr_opts.addresses[svr_opts.portcount] = myspec;
++			}
++		} else if ((p = strchr(myspec, ':')) != NULL && (p = strchr(p, ':')) != NULL) {
++			/* ipv6 */
++			svr_opts.ports[svr_opts.portcount] = m_strdup(DROPBEAR_DEFPORT);
++			svr_opts.addresses[svr_opts.portcount] = myspec;
++		} else {
++			/* port */
++			svr_opts.ports[svr_opts.portcount] = myspec;
+ 			svr_opts.addresses[svr_opts.portcount] = m_strdup(DROPBEAR_DEFADDRESS);
+ 		}
+ 
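
For reference, the rewritten addportandaddress() above resolves -p specifications roughly as follows (worked examples; "default" stands for DROPBEAR_DEFPORT or DROPBEAR_DEFADDRESS):

  [2001:db8::1]:2222  ->  address 2001:db8::1, port 2222
  2001:db8::1         ->  address 2001:db8::1, default port
  192.0.2.1:2222      ->  address 192.0.2.1, port 2222
  192.0.2.1           ->  address 192.0.2.1, default port
  2222                ->  default address, port 2222

In particular, an IPv6 address with an explicit port must be written in the [address]:port form, which the stock parser (a split at the first ':') could not handle.
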
diff --git a/component/dropbear/userspace.patch b/component/dropbear/userspace.patch
new file mode 100644
index 0000000000000000000000000000000000000000..376c85b08e48c25a76d6d239af25c48a28cf7124
--- /dev/null
+++ b/component/dropbear/userspace.patch
@@ -0,0 +1,141 @@
+diff --git a/options.h b/options.h
+index d309ab4..63048e1 100644
+--- a/options.h
++++ b/options.h
+@@ -287,6 +287,12 @@ be overridden at runtime with -I. 0 disables idle timeouts */
+ /* The default path. This will often get replaced by the shell */
+ #define DEFAULT_PATH "/usr/bin:/bin"
+ 
++/* The prefix of dropbear environment variable overriding. */
++#define DROPBEAR_OVERRIDE_PREFIX "DROPBEAR_OVERRIDE_"
++#define DROPBEAR_OVERRIDE_PASSWORD DROPBEAR_OVERRIDE_PREFIX "PASSWORD"
++#define DROPBEAR_OVERRIDE_HOME DROPBEAR_OVERRIDE_PREFIX "HOME"
++#define DROPBEAR_OVERRIDE_SHELL DROPBEAR_OVERRIDE_PREFIX "SHELL"
++
+ /* Some other defines (that mostly should be left alone) are defined
+  * in sysoptions.h */
+ #include "sysoptions.h"
+diff --git a/runopts.h b/runopts.h
+index 83b5861..f8999b9 100644
+--- a/runopts.h
++++ b/runopts.h
+@@ -86,6 +86,15 @@ typedef struct svr_runopts {
+ 	int noauthpass;
+ 	int norootpass;
+ 
++#ifdef ENABLE_SINGLEUSER
++	int singleuser;
++	struct {
++		char *password;
++		char *home;
++		char *shell;
++	} singleuserenv;
++#endif /* ifdef ENABLE_SINGLEUSER */
++
+ #ifdef ENABLE_SVR_REMOTETCPFWD
+ 	int noremotetcp;
+ #endif
+diff --git a/svr-auth.c b/svr-auth.c
+index 87e3c5e..adb2e8b 100644
+--- a/svr-auth.c
++++ b/svr-auth.c
+@@ -126,6 +126,14 @@ void recv_msg_userauth_request() {
+ 
+ 	
+ 	username = buf_getstring(ses.payload, &userlen);
++#ifdef ENABLE_SINGLEUSER
++	/* If userspace enabled, ignore username */
++	if (svr_opts.singleuser) {
++		m_free(username);
++		/* Get the current login of the user running dropbear */
++		username = m_strdup(getlogin());
++	}
++#endif /* ifdef ENABLE_SINGLEUSER */
+ 	servicename = buf_getstring(ses.payload, &servicelen);
+ 	methodname = buf_getstring(ses.payload, &methodlen);
+ 
+@@ -228,6 +236,18 @@ static int checkusername(unsigned char *username, unsigned int userlen) {
+ 			}
+ 			authclear();
+ 			fill_passwd(username);
++#ifdef ENABLE_SINGLEUSER
++			if (svr_opts.singleuser) {
++				if (svr_opts.singleuserenv.home != NULL) {
++					m_free(ses.authstate.pw_dir);
++					ses.authstate.pw_dir = m_strdup(svr_opts.singleuserenv.home);
++				}
++				if (svr_opts.singleuserenv.shell != NULL) {
++					m_free(ses.authstate.pw_shell);
++					ses.authstate.pw_shell = m_strdup(svr_opts.singleuserenv.shell);
++				}
++			}
++#endif /* ifdef ENABLE_SINGLEUSER */
+ 			ses.authstate.username = m_strdup(username);
+ 	}
+ 
+diff --git a/svr-runopts.c b/svr-runopts.c
+index 2e836d2..1c21d7c 100644
+--- a/svr-runopts.c
++++ b/svr-runopts.c
+@@ -83,6 +83,9 @@ static void printhelp(const char * progname) {
+ 					"-W <receive_window_buffer> (default %d, larger may be faster, max 1MB)\n"
+ 					"-K <keepalive>  (0 is never, default %d)\n"
+ 					"-I <idle_timeout>  (0 is never, default %d)\n"
++#ifdef ENABLE_SINGLEUSER
++					"-n		Enable the single user mode.\n"
++#endif /* ifdef ENABLE_SINGLEUSER */
+ #ifdef DEBUG_TRACE
+ 					"-v		verbose (compiled with DEBUG_TRACE)\n"
+ #endif
+@@ -128,6 +131,12 @@ void svr_getopts(int argc, char ** argv) {
+ #ifndef DISABLE_ZLIB
+ 	opts.enable_compress = 1;
+ #endif
++#ifdef ENABLE_SINGLEUSER
++	svr_opts.singleuser = 0;
++	svr_opts.singleuserenv.password = NULL;
++	svr_opts.singleuserenv.home = NULL;
++	svr_opts.singleuserenv.shell = NULL;
++#endif /* ifdef ENABLE_SINGLEUSER */
+ 	/* not yet
+ 	opts.ipv4 = 1;
+ 	opts.ipv6 = 1;
+@@ -242,6 +251,17 @@ void svr_getopts(int argc, char ** argv) {
+ 				case 'u':
+ 					/* backwards compatibility with old urandom option */
+ 					break;
++#ifdef ENABLE_SINGLEUSER
++				case 'n':
++#ifndef ENABLE_SINGLEUSER_ROOT
++					/* If current user is root */
++					if (getuid() == 0) {
++						dropbear_log(LOG_ERR, "Can't enable singleuser mode as root.");
++					}
++#endif /* ifndef ENABLE_SINGLEUSER_ROOT */
++					svr_opts.singleuser = 1;
++					break;
++#endif /* ifdef ENABLE_SINGLEUSER */
+ #ifdef DEBUG_TRACE
+ 				case 'v':
+ 					debug_trace = 1;
+@@ -313,6 +333,20 @@ void svr_getopts(int argc, char ** argv) {
+ 		}
+ 		opts.idle_timeout_secs = val;
+ 	}
++#ifdef ENABLE_SINGLEUSER
++	if (svr_opts.singleuser) {
++		dropbear_log(LOG_INFO, "Starting dropbear as single user mode.");
++		svr_opts.singleuserenv.password = getenv(DROPBEAR_OVERRIDE_PASSWORD);
++		svr_opts.singleuserenv.home = getenv(DROPBEAR_OVERRIDE_HOME);
++		if (svr_opts.singleuserenv.home != NULL) {
++			dropbear_log(LOG_INFO, "Single user home is '%s'", svr_opts.singleuserenv.home);
++		}
++		svr_opts.singleuserenv.shell = getenv(DROPBEAR_OVERRIDE_SHELL);
++		if (svr_opts.singleuserenv.shell != NULL) {
++			dropbear_log(LOG_INFO, "Single user shell is '%s'", svr_opts.singleuserenv.shell);
++		}
++	}
++#endif /* ifdef ENABLE_SINGLEUSER */
+ }
+ 
+ static void addportandaddress(char* spec) {
diff --git a/component/duplicity/buildout.cfg b/component/duplicity/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..2cc9e3d1402ad75ef477698db3b84ee55723a084
--- /dev/null
+++ b/component/duplicity/buildout.cfg
@@ -0,0 +1,32 @@
+[buildout]
+extends =
+  ../librsync/buildout.cfg
+  ../patch/buildout.cfg
+parts =
+  duplicity
+
+[duplicity]
+recipe = slapos.recipe.build
+url = http://code.launchpad.net/duplicity/0.6-series/0.6.15/+download/duplicity-0.6.15.tar.gz
+md5sum = 88f3c990f41fde86cd7d5af5a1bc7b81
+patch = ${:_profile_base_location_}/duplicity-ipv6.patch 32c44816a9a59401e233ef622bf50223
+script =
+  import os
+  import tempfile
+  import sys
+  workingdir = guessworkdir(self.extract(self.download(%(url)r, %(md5sum)r)))
+  os.chdir(workingdir)
+  self.applyPatchList('${:patch}', patch_binary='${patch:location}/bin/patch', patch_options='-p0', cwd=workingdir)
+  call([sys.executable, os.path.join(workingdir, 'setup.py'),
+       'build_ext',
+       '--include-dirs', os.path.join('${librsync:location}', 'include'),
+       '--library-dirs', os.path.join('${librsync:location}', 'lib'),
+       '--rpath', os.path.join('${librsync:location}', 'lib')])
+  call([sys.executable, os.path.join(workingdir, 'setup.py'),
+        'install',
+        '--prefix', %(location)r])
+  os.chdir(os.path.join(%(location)r, 'bin'))
+  paths = [os.path.join(%(location)r, 'lib', 'python%%s.%%s' %% sys.version_info[:2], 'site-packages')]
+  from zc.buildout.easy_install import scripts
+  scripts([('duplicity', '__builtin__', 'execfile')], [], sys.executable, '${buildout:bin-directory}', arguments='%%r' %% os.path.abspath('duplicity'), extra_paths=paths)
+  scripts([('rdiffdir', '__builtin__', 'execfile')], [], sys.executable, '${buildout:bin-directory}', arguments='%%r' %% os.path.abspath('rdiffdir'), extra_paths=paths)
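
The two scripts() calls above do not reinstall duplicity as an egg; they only generate thin wrappers in ${buildout:bin-directory} that run the scripts installed by setup.py with librsync's bindings importable. As a sketch (paths are illustrative; the exact template comes from zc.buildout.easy_install), the generated bin/duplicity looks approximately like:

  #!/path/to/slapos/python
  import sys
  # site-packages created by the 'install --prefix' step above (illustrative path)
  sys.path[0:0] = ['/path/to/parts/duplicity/lib/pythonX.Y/site-packages']
  import __builtin__
  if __name__ == '__main__':
      # execute the real duplicity script in-process
      __builtin__.execfile('/path/to/parts/duplicity/bin/duplicity')
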
diff --git a/component/duplicity/duplicity-ipv6.patch b/component/duplicity/duplicity-ipv6.patch
new file mode 100644
index 0000000000000000000000000000000000000000..00cfc5e17be4515c7842a247236a74419b06ea5b
--- /dev/null
+++ b/component/duplicity/duplicity-ipv6.patch
@@ -0,0 +1,33 @@
+=== modified file 'src/urlparse_2_5.py'
+--- src/urlparse_2_5.py	2011-03-06 15:12:33 +0000
++++ src/urlparse_2_5.py	2011-08-31 14:21:06 +0000
+@@ -109,18 +109,19 @@
+     password = property(get_password)
+ 
+     def get_hostname(self):
+-        netloc = self.netloc
+-        if "@" in netloc:
+-            netloc = _rsplit(netloc, "@", 1)[1]
+-        if ":" in netloc:
+-            netloc = netloc.split(":", 1)[0]
+-        return netloc.lower() or None
++        netloc = self.netloc.split('@')[-1]
++        if '[' in netloc and ']' in netloc:
++            return netloc.split(']')[0][1:].lower()
++        elif ':' in netloc:
++            return netloc.split(':')[0].lower()
++        elif netloc == '':
++            return None
++        else:
++            return netloc.lower()
+     hostname = property(get_hostname)
+ 
+     def get_port(self):
+-        netloc = self.netloc
+-        if "@" in netloc:
+-            netloc = _rsplit(netloc, "@", 1)[1]
++        netloc = self.netloc.split('@')[-1].split(']')[-1]
+         if ":" in netloc:
+             port = netloc.split(":", 1)[1]
+             return int(port, 10)
+
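
A worked example of the patched parsing above: for ftp://user@[2001:db8::1]:2121/backup, get_hostname() takes the part after '@', sees the brackets and returns 2001:db8::1, while get_port() drops everything up to ']' and returns 2121; a plain host.example.com:2121 netloc still yields host.example.com and 2121 as before.
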
diff --git a/component/fonts/buildout.cfg b/component/fonts/buildout.cfg
index 773dece97b4799e8c80eac1d7dbeda62115e82a5..0a8a59c527feb748bb15d7ffb0631bc4d4d4ef67 100644
--- a/component/fonts/buildout.cfg
+++ b/component/fonts/buildout.cfg
@@ -12,8 +12,8 @@ location = ${buildout:parts-directory}/${:_buildout_section_name_}
 [liberation-fonts]
 recipe = hexagonit.recipe.download
 strip-top-level-dir = true
-url = https://fedorahosted.org/releases/l/i/liberation-fonts/liberation-fonts-ttf-1.06.0.20100721.tar.gz
-md5sum = ca4870d899fd7e943ffc310a5421ad4d
+url = https://fedorahosted.org/releases/l/i/liberation-fonts/liberation-fonts-ttf-1.07.0.tar.gz
+md5sum = 8150db1c6e27cacdfd524b563b85b69e
 destination = ${fonts:location}/${:_buildout_section_name_}
 
 # IPAex Font - Japanese fonts provided by IPA
diff --git a/component/freetype/buildout.cfg b/component/freetype/buildout.cfg
index 85cd748becc0197695057494f7cc854b409b1a9d..2ca184fa05b9b4145983a94c44f7e31210a82fb5 100644
--- a/component/freetype/buildout.cfg
+++ b/component/freetype/buildout.cfg
@@ -10,8 +10,8 @@ parts =
 
 [freetype]
 recipe = hexagonit.recipe.cmmi
-url = http://download.savannah.gnu.org/releases/freetype/freetype-2.4.5.tar.bz2
-md5sum = 90428a6d8ec4876cd1eb94858c2a59b0
+url = http://download.savannah.gnu.org/releases/freetype/freetype-2.4.6.tar.bz2
+md5sum = 5e6510613f612809d2d7862592b92ab7
 configure-options =
   --disable-static
 environment =
diff --git a/component/gcc/buildout.cfg b/component/gcc/buildout.cfg
index f263f978db8a5e02f99f1cedd8926747db1db72a..86cde9fdb07ef242de88bc530413aa57f771a58a 100644
--- a/component/gcc/buildout.cfg
+++ b/component/gcc/buildout.cfg
@@ -3,22 +3,11 @@
 [buildout]
 extends =
   ../m4/buildout.cfg
+  ../gmp/buildout.cfg
   ../zip/buildout.cfg
 
 parts =
-  gcc-java
-
-[gmp]
-recipe = hexagonit.recipe.cmmi
-url = ftp://ftp.gmplib.org/pub/gmp-5.0.2/gmp-5.0.2.tar.bz2
-md5sum = 0bbaedc82fb30315b06b1588b9077cd3
-# GMP does not correctly detect achitecture so it have to be given
-# as hexagonit.recipe.cmmi is using shell expansion in subproceses
-# backticks are working
-configure-options =
-  --build=`uname -m`-linux
-environment =
-  PATH=${m4:location}/bin:%(PATH)s
+  gcc
 
 [mpfr]
 recipe = hexagonit.recipe.cmmi
@@ -26,9 +15,9 @@ url = http://www.mpfr.org/mpfr-3.0.1/mpfr-3.0.1.tar.bz2
 md5sum = bfbecb2eacb6d48432ead5cfc3f7390a
 configure-options =
   --with-gmp=${gmp:location}
+  --disable-static
 environment =
-  CPPFLAGS =-I${gmp:location}/include
-  LDFLAGS =-L${gmp:location}/lib -Wl,-rpath=${gmp:location}/lib
+  LDFLAGS=-Wl,-rpath=${gmp:location}/lib
 
 [mpc]
 recipe = hexagonit.recipe.cmmi
@@ -37,9 +26,34 @@ md5sum = 0d6acab8d214bd7d1fbbc593e83dd00d
 configure-options =
   --with-gmp=${gmp:location}
   --with-mpfr=${mpfr:location}
+  --disable-static
 environment =
-  CPPFLAGS =-I${mpfr:location}/include -I${gmp:location}/include
-  LDFLAGS =-L${mpfr:location}/lib -Wl,-rpath=${mpfr:location}/lib -L${gmp:location}/lib -Wl,-rpath=${gmp:location}/lib
+  LDFLAGS=-Wl,-rpath=${mpfr:location}/lib -Wl,-rpath=${gmp:location}/lib
+
+[ppl]
+recipe = hexagonit.recipe.cmmi
+# we should use version 0.10.x for gcc-4.5
+url = http://www.cs.unipr.it/ppl/Download/ftp/releases/0.10.2/ppl-0.10.2.tar.bz2
+md5sum = 5667111f53150618b0fa522ffc53fc3e
+configure-options =
+  --with-libgmp-prefix=${gmp:location}
+  --with-libgmpxx-prefix=${gmp:location}
+  --disable-static
+environment =
+  PATH=${m4:location}/bin:%(PATH)s
+  LDFLAGS=-Wl,-rpath=${gmp:location}/lib
+
+[cloog-ppl]
+recipe = hexagonit.recipe.cmmi
+url = ftp://gcc.gnu.org/pub/gcc/infrastructure/cloog-ppl-0.15.9.tar.gz
+md5sum = 806e001d1b1a6b130069ff6274900af5
+configure-options =
+  --with-gmp=${gmp:location}
+  --with-ppl=${ppl:location}
+  --disable-static
+environment =
+  PATH=${m4:location}/bin:%(PATH)s
+  LDFLAGS=-Wl,-rpath=${gmp:location}/lib -Wl,-rpath=${ppl:location}/lib
 
 [ecj]
 recipe = hexagonit.recipe.download
@@ -53,33 +67,42 @@ recipe = hexagonit.recipe.download
 url = http://www.mirrorservice.org/sites/sourceware.org/pub/gcc/releases/gcc-4.5.3/gcc-4.5.3.tar.bz2
 md5sum = 8e0b5c12212e185f3e4383106bfa9cc6
 strip-top-level-dir = True
-destination = ${gcc-java-source:location}
+destination = ${gcc-source:location}
 
 [gcc-java-download]
 recipe = hexagonit.recipe.download
 url = http://www.mirrorservice.org/sites/sourceware.org/pub/gcc/releases/gcc-4.5.3/gcc-java-4.5.3.tar.bz2
 md5sum = 08e045fdbdc22ac9af3aec3b8d16dbab
 strip-top-level-dir = True
-destination = ${gcc-java-source:location}
+destination = ${gcc-source:location}
 ignore-existing = true
 
-[gcc-java-source]
+[gcc-source]
 location = ${buildout:parts-directory}/${:_buildout_section_name_}
 
-[gcc-java]
+[gcc-multiarch.patch]
+recipe = hexagonit.recipe.download
+md5sum = 819e6735270c8a432b0512b49f40219f
+url = ${:_profile_base_location_}/${:filename}
+filename = ${:_buildout_section_name_}
+download-only = true
+
+[gcc-java-minimal]
 depends =
   ${gcc-download:location}
   ${gcc-java-download:location}
 recipe = hexagonit.recipe.cmmi
-path = ${gcc-java-source:location}
+path = ${gcc-source:location}
 md5sum = bb3265edf0fa7543e50cedb93e04e427
+patches =
+  ${gcc-multiarch.patch:location}/${gcc-multiarch.patch:filename}
+patch-options = -p2
 configure-command = make clean \\; make distclean \\; ./configure
 # GMP does not correctly detect the architecture, so it has to be given
 # explicitly; as hexagonit.recipe.cmmi uses shell expansion in subprocesses,
 # backticks work
 configure-options =
   --disable-bootstrap
-  --build=`uname -m`-linux
   --enable-languages=java
   --disable-multilib
   --with-gmp=${gmp:location}
@@ -87,6 +110,8 @@ configure-options =
   --with-mpc=${mpc:location}
   --with-ecj-jar=${ecj:location}/${ecj:filename}
   --prefix=${buildout:parts-directory}/${:_buildout_section_name_}
+  --without-ppl
+  --without-cloog
 
 environment =
   CPPFLAGS =-I${mpfr:location}/include -I${gmp:location}/include -I${mpc:location}/include
@@ -94,3 +119,35 @@ environment =
   PATH=${zip:location}/bin:%(PATH)s
 # make install does not work when several cores are used
 make-targets = install -j1
+
+[gcc]
+depends =
+  ${gcc-download:location}
+  ${gcc-java-download:location}
+recipe = hexagonit.recipe.cmmi
+path = ${gcc-source:location}
+md5sum = bb3265edf0fa7543e50cedb93e04e427
+patches =
+  ${gcc-multiarch.patch:location}/${gcc-multiarch.patch:filename}
+patch-options = -p2
+configure-command = make clean \\; make distclean \\; ./configure
+# GMP does not correctly detect the architecture, so it has to be given
+# explicitly; as hexagonit.recipe.cmmi uses shell expansion in subprocesses,
+# backticks work
+configure-options =
+  --disable-bootstrap
+  --enable-languages="c,c++,java"
+  --disable-multilib
+  --with-gmp=${gmp:location}
+  --with-mpfr=${mpfr:location}
+  --with-mpc=${mpc:location}
+  --with-ppl=${ppl:location}
+  --with-cloog=${cloog-ppl:location}
+  --with-ecj-jar=${ecj:location}/${ecj:filename}
+  --prefix=${buildout:parts-directory}/${:_buildout_section_name_}
+
+environment =
+  LDFLAGS=-Wl,-rpath=${mpfr:location}/lib -Wl,-rpath=${gmp:location}/lib -Wl,-rpath=${mpc:location}/lib -Wl,-rpath=${ppl:location}/lib -Wl,-rpath=${cloog-ppl:location}/lib
+  PATH=${zip:location}/bin:%(PATH)s
+# make install does not work when several cores are used
+make-targets = install -j1
diff --git a/component/gcc/gcc-multiarch.patch b/component/gcc/gcc-multiarch.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c6c8bc79ac95204efdbfd64035aecb8e3d461cba
--- /dev/null
+++ b/component/gcc/gcc-multiarch.patch
@@ -0,0 +1,309 @@
+# DP: Add multiarch support to GCC.
+# DP:
+# DP: Convert the multilib option to a target triplet,
+# DP: add multiarch include directories and libraries path:
+# DP:	/usr/local/include/<arch>-linux-gnu
+# DP:	/usr/include/<arch>-linux-gnu
+# DP:	/usr/lib/<arch>-linux-gnu
+# DP: to the system paths.
+# DP:
+# DP: Original patch:
+# DP: http://anonscm.debian.org/viewvc/gcccvs/branches/sid/gcc-4.5/debian/patches/gcc-multiarch.diff?revision=5086&view=co
+
+2011-08-05 Arnaud Fontaine <arnaud.fontaine@nexedi.com>
+	* Enable multiarch unconditionally.
+
+2011-03-08  Steve Langasek <steve.langasek@linaro.org>
+	* Canonicalize x86 to i386 everywhere, not i486/i686
+
+2009-03-24  Arthur Loiret  <aloiret@debian.org>
+
+	* configure.ac: Handle --enable-multiarch and --with-multiarch-defaults.
+	* config.gcc: Define MULTIARCH_DEFAULTS if multiarch is enabled.
+	* config.in [!USED_FOR_TARGET]: Undef ENABLE_MULTIARCH.
+	* gcc.c: include multiarch.h.
+	(set_multiarch_dir): New function. Adds the multiarch directories to
+	the library path.
+	[ENABLE_MULTIARCH]: Use it.
+	* cppdefault.c [LOCAL_INCLUDE_DIR, STANDARD_INCLUDE_DIR] Add an include
+	directory for multiarch directories.
+	* incpath.c: include multiarch.h
+	[ENABLE_MULTIARCH]: Add the multiarch directory to include directories.
+	* Makefile.in (MULTIARCH_H): New. Use it for incpath.o and gcc.o.
+	* multiarch.h: New file.
+---
+ gcc/Makefile.in  |    7 ++--
+ gcc/config.gcc   |    9 +++++
+ gcc/config.in    |    4 ++
+ gcc/configure.ac |   13 ++++++++
+ gcc/cppdefault.c |    6 +++
+ gcc/gcc.c        |   41 ++++++++++++++++++++++++
+ gcc/incpath.c    |   28 ++++++++++++++++
+ gcc/multiarch.h  |   91 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 8 files changed, 196 insertions(+), 3 deletions(-)
+
+--- a/src/gcc/gcc.c.orig	2009-12-20
++++ b/src/gcc/gcc.c	2009-12-20
+@@ -71,6 +71,7 @@
+ #include "system.h"
+ #include "coretypes.h"
+ #include "multilib.h" /* before tm.h */
++#include "multiarch.h"
+ #include "tm.h"
+ #include <signal.h>
+ #if ! defined( SIGCHLD ) && defined( SIGCLD )
+@@ -375,6 +376,7 @@
+ static int used_arg (const char *, int);
+ static int default_arg (const char *, int);
+ static void set_multilib_dir (void);
++static void set_multiarch_dir (void);
+ static void print_multilib_info (void);
+ static void perror_with_name (const char *);
+ static void fatal_ice (const char *, ...) ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+@@ -7354,6 +7358,9 @@
+       xputenv (XOBFINISH (&collect_obstack, char *));
+     }
+ 
++  /* Add the multiarch directories to libraries path.  */
++  set_multiarch_dir ();
++
+   /* Warn about any switches that no pass was interested in.  */
+ 
+   for (i = 0; (int) i < n_switches; i++)
+@@ -8515,6 +8524,25 @@
+     multilib_os_dir = multilib_dir;
+ }
+ 
++/* Add the multiarch directories to libraries path. This uses the converted
++   multiarch triplet from the multilib value.
++   For example, if the target supports -m32/-m64 as multilib option and
++   defaults to 64, it will add /usr/lib/$triplet_target64/lib to library
++   path if either -m64 or no multilib option at all is set. And it will
++   add /usr/lib/$triplet_target32 if -m32 is set. Triplets are defined in
++   multiarch.def.  */
++
++static void
++set_multiarch_dir (void)
++{
++  const char *path;
++
++  path = concat (STANDARD_STARTFILE_PREFIX_2, MULTIARCH_DEFAULTS,
++    dir_separator_str, NULL);
++  add_prefix (&startfile_prefixes, path, NULL,
++    PREFIX_PRIORITY_LAST, 0, 1);
++}
++
+ /* Print out the multiple library subdirectory selection
+    information.  This prints out a series of lines.  Each line looks
+    like SUBDIRECTORY;@OPTION@OPTION, with as many options as is
+--- a/src/gcc/config.gcc.orig	2009-12-20
++++ b/src/gcc/config.gcc	2009-12-20
+@@ -3371,3 +3371,10 @@
+ 		target_cpu_default=$target_cpu_default2
+ 	fi
+ fi
++
++multiarch_defaults=`echo ${target_noncanonical} | sed -e 's/unknown-//'`
++multiarch_define="__`echo ${multiarch_defaults} | tr '-' '_'`__"
++if test x${with_multiarch_defaults} != x; then
++	multiarch_defaults=${with_multiarch_defaults}
++fi
++tm_defines="${tm_defines} ${multiarch_define}=1 MULTIARCH_DEFAULTS=\\\"${multiarch_defaults}\\\""
+--- a/src/gcc/Makefile.in.orig	2009-12-20
++++ b/src/gcc/Makefile.in	2009-12-20
+@@ -949,6 +949,7 @@
+ 		$(HASHTAB_H)
+ PLUGIN_H = plugin.h $(GCC_PLUGIN_H)
+ PLUGIN_VERSION_H = plugin-version.h configargs.h
++MULTIARCH_H = multiarch.h
+ 
+ #
+ # Now figure out from those variables how to compile and link.
+@@ -1955,8 +1956,8 @@
+ 	-cp -p $^ $(srcdir)
+ 
+ incpath.o: incpath.c incpath.h $(CONFIG_H) $(SYSTEM_H) $(CPPLIB_H) \
+-		intl.h prefix.h coretypes.h $(TM_H) cppdefault.h $(TARGET_H) \
+-		$(MACHMODE_H)
++		intl.h prefix.h coretypes.h $(TM_H) cppdefault.h $(MULTIARCH_H) \
++		$(TARGET_H) $(MACHMODE_H)
+ 
+ c-decl.o : c-decl.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) \
+     $(RTL_H) $(C_TREE_H) $(GGC_H) $(TARGET_H) $(FLAGS_H) $(FUNCTION_H) output.h \
+@@ -2107,7 +2108,7 @@
+ 
+ gcc.o: gcc.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) intl.h multilib.h \
+     Makefile $(lang_specs_files) specs.h prefix.h $(GCC_H) $(FLAGS_H) \
+-    configargs.h $(OBSTACK_H) opts.h
++    configargs.h $(OBSTACK_H) opts.h $(MULTIARCH_H)
+ 	(SHLIB_LINK='$(SHLIB_LINK)'; \
+ 	$(COMPILER) $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) \
+   $(DRIVER_DEFINES) \
+--- a/src/gcc/incpath.c.orig	2009-11-30
++++ b/src/gcc/incpath.c	2009-12-20
+@@ -30,6 +30,7 @@
+ #include "intl.h"
+ #include "incpath.h"
+ #include "cppdefault.h"
++#include "multiarch.h"
+ 
+ /* Microsoft Windows does not natively support inodes.
+    VMS has non-numeric inodes.  */
+@@ -132,6 +133,7 @@
+   const struct default_include *p;
+   int relocated = cpp_relocated();
+   size_t len;
++  const char *multiarch;
+ 
+   if (iprefix && (len = cpp_GCC_INCLUDE_DIR_len) != 0)
+     {
+@@ -150,8 +154,15 @@
+ 	      if (!strncmp (p->fname, cpp_GCC_INCLUDE_DIR, len))
+ 		{
+ 		  char *str = concat (iprefix, p->fname + len, NULL);
++		  if (p->multilib == 1 && imultilib)
++		    str = concat (str, dir_separator_str, imultilib, NULL);
++		  else if (p->multilib == 2)
++		    {
++		      multiarch = multilib_to_multiarch (imultilib);
++		      if (!multiarch)
++			continue;
++		      str = concat (str, dir_separator_str, multiarch, NULL);
++		    }
+-		  if (p->multilib && imultilib)
+-		    str = concat (str, dir_separator_str, imultilib, NULL);
+ 		  add_path (str, SYSTEM, p->cxx_aware, false);
+ 		}
+ 	    }
+@@ -195,8 +211,15 @@
+ 	  else
+ 	    str = update_path (p->fname, p->component);
+ 
++	  if (p->multilib == 1 && imultilib)
++	    str = concat (str, dir_separator_str, imultilib, NULL);
++	  else if (p->multilib == 2)
++	    {
++	      multiarch = multilib_to_multiarch (imultilib);
++	      if (!multiarch)
++		continue;
++	      str = concat (str, dir_separator_str, multiarch, NULL);
++	    }
+-	  if (p->multilib && imultilib)
+-	    str = concat (str, dir_separator_str, imultilib, NULL);
+ 
+ 	  add_path (str, SYSTEM, p->cxx_aware, false);
+ 	}
+--- a/src/gcc/multiarch.h.orig	2009-12-20
++++ b/src/gcc/multiarch.h	2009-12-20
+@@ -0,0 +1,93 @@
++/* Header for multiarch handling (include directories, libraries path).
++   Copyright (C) 2009 Free Software Foundation, Inc.
++   Contributed by Arthur Loiret <aloiret@debian.org>
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#ifndef GCC_MULTIARCH_H
++#define GCC_MULTIARCH_H
++
++#include "tm.h"
++
++struct multiarch_mapping
++{
++  const char *const multilib;
++  const char *const multiarch;
++};
++
++const struct multiarch_mapping multiarch_mappings[] = {
++  { "", MULTIARCH_DEFAULTS },
++# if defined(__x86_64_linux_gnu__)
++  { "32",  "i386-linux-gnu" },
++# endif
++# if defined(__i486_linux_gnu__) || defined(__i686_linux_gnu__)
++  { "64",  "x86_64-linux-gnu" },
++# endif
++# if defined(__powerpc64_linux_gnu__)
++  { "32",  "powerpc-linux-gnu" },
++# endif
++# if defined(__powerpc_linux_gnu__)
++  { "64",  "powerpc64-linux-gnu" },
++# endif
++# if defined(__sparc64_linux_gnu__)
++  { "32",  "sparc-linux-gnu" },
++# endif
++# if defined(__sparc_linux_gnu__)
++  { "64",  "sparc64-linux-gnu" },
++# endif
++# if defined(__s390x_linux_gnu__)
++  { "31",  "s390-linux-gnu" },
++# endif
++# if defined(__s390_linux_gnu__)
++  { "64",  "s390x-linux-gnu" },
++# endif
++# if defined(__mips_linux_gnu__)
++  { "n32",  "mips64-linux-gnuabin32" },
++  { "64",  "mips64-linux-gnuabi64" },
++# endif
++# if defined(__mipsel_linux_gnu__)
++  { "n32",  "mips64el-linux-gnuabin32" },
++  { "64",  "mips64el-linux-gnuabi64" },
++# endif
++# if defined(__x86_64_kfreebsd_gnu__)
++  { "32",  "i386-kfreebsd-gnu" },
++# endif
++# if defined(__sh4_linux_gnu__)
++  { "m4",  "sh4-linux-gnu" },
++  { "m4-nofpu",  "sh4_nofpu-linux-gnu" },
++# endif
++  { 0, 0 }
++};
++
++/* Convert the multilib option to the corresponding target triplet.
++   See multiarch.def and config.gcc for multilib/multiarch pairs.
++   When the default multilib is used, the corresponding multilib/multiarch
++   pair is { "", $target_tripplet }.  */
++static inline const char*
++multilib_to_multiarch (const char *imultilib)
++{
++  const struct multiarch_mapping *p;
++
++  for (p = multiarch_mappings; p->multiarch; p++)
++    {
++      if (!strcmp(p->multilib, imultilib ? imultilib : ""))
++	return p->multiarch;
++    }
++  return NULL;
++}
++
++#endif /* GCC_MULTIARCH_H */
+--- a/src/gcc/cppdefault.c.orig	2008-07-21
++++ b/src/gcc/cppdefault.c	2009-12-20
+@@ -60,6 +60,7 @@
+ #endif
+ #ifdef LOCAL_INCLUDE_DIR
+     /* /usr/local/include comes before the fixincluded header files.  */
++    { LOCAL_INCLUDE_DIR, 0, 0, 1, 1, 2 },
+     { LOCAL_INCLUDE_DIR, 0, 0, 1, 1, 0 },
+ #endif
+ #ifdef PREFIX_INCLUDE_DIR
+@@ -95,6 +98,7 @@
+ #endif
+ #ifdef STANDARD_INCLUDE_DIR
+     /* /usr/include comes dead last.  */
++    { STANDARD_INCLUDE_DIR, STANDARD_INCLUDE_COMPONENT, 0, 0, 1, 2 },
+     { STANDARD_INCLUDE_DIR, STANDARD_INCLUDE_COMPONENT, 0, 0, 1, 0 },
+ #endif
+     { 0, 0, 0, 0, 0, 0 }
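
As a concrete example of the mapping above (assuming an x86_64-linux-gnu toolchain, so MULTIARCH_DEFAULTS is "x86_64-linux-gnu"): with no multilib option the empty multilib entry maps to the default triplet, and /usr/local/include/x86_64-linux-gnu plus /usr/include/x86_64-linux-gnu are searched before the plain directories; with -m32 the "32" multilib is selected, multilib_to_multiarch() returns "i386-linux-gnu", and the i386-linux-gnu include directories are used instead.
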
diff --git a/component/git/buildout.cfg b/component/git/buildout.cfg
index 1e670cc291b08036098cbde64c075db991905888..9ff6bc1e69df7d302cd9b69298f1c36264f03874 100644
--- a/component/git/buildout.cfg
+++ b/component/git/buildout.cfg
@@ -13,7 +13,9 @@ parts =
 
 [git]
 recipe = hexagonit.recipe.cmmi
-url = http://kernel.org/pub/software/scm/git/git-1.7.4.5.tar.bz2
+# url = http://kernel.org/pub/software/scm/git/git-1.7.4.5.tar.bz2
+# Circumvent kernel.org downtime
+url = http://ftp.free.fr/mirrors/ftp.kernel.org/software/scm/git/git-1.7.4.5.tar.bz2
 md5sum = 2fa6c4c847ed87523cf55de54af457eb
 configure-options =
   --with-curl=${curl:location}
diff --git a/component/gmp/buildout.cfg b/component/gmp/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..b47bfc8b155963d9fe9fa7e51c73eca1d1eec12f
--- /dev/null
+++ b/component/gmp/buildout.cfg
@@ -0,0 +1,23 @@
+[buildout]
+extends =
+  ../m4/buildout.cfg
+
+parts =
+  gmp
+
+[gmp]
+recipe = hexagonit.recipe.cmmi
+# we should use version 4.x for ppl-0.10
+url = ftp://ftp.gmplib.org/pub/gmp-4.3.2/gmp-4.3.2.tar.bz2
+md5sum = dd60683d7057917e34630b4a787932e8
+# GMP does not correctly detect the architecture, so it has to be given
+# explicitly; as hexagonit.recipe.cmmi uses shell expansion in subprocesses,
+# backticks work
+configure-options =
+  --build=`uname -m`-linux
+  --enable-cxx
+  --disable-static
+environment =
+  PATH=${m4:location}/bin:%(PATH)s
+
+
diff --git a/component/groonga/buildout.cfg b/component/groonga/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..45608460ecf6affa1aa6219d7bc1be05539cf9a7
--- /dev/null
+++ b/component/groonga/buildout.cfg
@@ -0,0 +1,16 @@
+# mroonga - a MySQL storage engine using full-text search engine groonga
+# http://mroonga.github.com/
+# http://groonga.org/
+
+[buildout]
+parts =
+  groonga
+
+[groonga]
+recipe = hexagonit.recipe.cmmi
+url = http://packages.groonga.org/source/groonga/groonga-1.2.5.tar.gz
+md5sum = 7e608406677b7a3f91e287acc0c718c0
+configure-options =
+  --disable-static
+  --disable-glibtest
+  --without-mecab
diff --git a/component/groonga/groonga-storage-engine-0.4.mariadb.patch b/component/groonga/groonga-storage-engine-0.4.mariadb.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6d0ba557de20258c4f8bdfb22e119244e105e716
--- /dev/null
+++ b/component/groonga/groonga-storage-engine-0.4.mariadb.patch
@@ -0,0 +1,14 @@
+diff -ur groonga-storage-engine-0.4.orig/configure groonga-storage-engine-0.4/configure
+--- groonga-storage-engine-0.4.orig/configure	2010-11-24 06:23:50.000000000 +0100
++++ groonga-storage-engine-0.4/configure	2011-01-01 16:01:07.000000000 +0100
+@@ -13925,8 +13925,8 @@
+     as_fn_error "failed to run \"$ac_mysql_config\": $plugindir" "$LINENO" 5
+   fi
+   MYSQL_INC="$MYSQL_INC $($ac_mysql_config --include)"
+-  ac_mysql_major_version="`$ac_mysql_config --version | cut -b 1-3`"
+-  if test "$ac_mysql_major_version" = "5.1"; then
++  ac_mysql_major_version="`$ac_mysql_config --version | cut -b 1,3`"
++  if test $ac_mysql_major_version -lt 55; then
+     MYSQL51="-DMYSQL51"
+ 
+   fi
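
A worked example of the modified check above: for a MariaDB 5.3.1 tree, mysql_config --version prints 5.3.1, cut -b 1,3 extracts "53", and 53 -lt 55 holds, so MYSQL51 stays defined (as it was for plain 5.1 servers), while a 5.5 or later server yields 55 or more and the define is dropped.
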
diff --git a/component/haproxy/buildout.cfg b/component/haproxy/buildout.cfg
index afedb5094de01d49f5d0038dc70029a3ca5f4cff..59333f0d7e52f778236d6de711577a07aa98126a 100644
--- a/component/haproxy/buildout.cfg
+++ b/component/haproxy/buildout.cfg
@@ -3,8 +3,8 @@ parts = haproxy
 
 [haproxy]
 recipe = hexagonit.recipe.cmmi
-url = http://haproxy.1wt.eu/download/1.4/src/haproxy-1.4.15.tar.gz
-md5sum = c1b4fc6028c6d8e23dde8c91ff47eabe
+url = http://haproxy.1wt.eu/download/1.4/src/haproxy-1.4.18.tar.gz
+md5sum = 4ac88bb1a76c4b84ed4f6131183bedbe
 configure-command = true
 # If the system is running on Linux 2.6, we use "linux26" as the TARGET,
 # otherwise use "generic".
diff --git a/component/imagemagick/buildout.cfg b/component/imagemagick/buildout.cfg
index a54d5a37b934beacb1c5c3bc221f338265d7e6af..216d2389dfdef7b923e854a9be3f5d5b0559e491 100644
--- a/component/imagemagick/buildout.cfg
+++ b/component/imagemagick/buildout.cfg
@@ -35,8 +35,8 @@ filename = imagemagick-6.6.7-4-without-lzma.patch
 
 [imagemagick]
 recipe = hexagonit.recipe.cmmi
-url = ftp://ftp.imagemagick.org/pub/ImageMagick/ImageMagick-6.6.9-10.tar.bz2
-md5sum = 985bd453c3e502f2771af5329c1cc384
+url = ftp://ftp.imagemagick.org/pub/ImageMagick/ImageMagick-6.7.2-10.tar.bz2
+md5sum = 073ec5d7f2a22db96a0e87e4322b75f9
 configure-options =
   --disable-static
   --without-x
diff --git a/component/java/buildout.cfg b/component/java/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..fdfd350db67e0bcf9a5f392e12a8ecb6e411850d
--- /dev/null
+++ b/component/java/buildout.cfg
@@ -0,0 +1,92 @@
+[buildout]
+
+parts =
+  java
+
+[jdk-6u27-no-user-interaction-patch]
+recipe = hexagonit.recipe.download
+url = ${:_profile_base_location_}/${:filename}
+download-only = true
+filename = jdk-6u27-no-user-interaction.patch
+md5sum = 4c4303240647a114d07f3c411b2e6b5b
+
+[java]
+<= java-re
+
+[java-re]
+<= java-re-1.6.0
+
+[java-sdk]
+<= java-sdk-1.6.0
+
+[java-re-1.6.0]
+recipe = slapos.recipe.build
+slapos_promisee =
+  directory:bin
+  directory:lib
+  directory:man
+  directory:plugin
+  directory:javaws
+  file:lib/rt.jar
+  file:bin/java
+x86 = http://javadl.sun.com/webapps/download/AutoDL?BundleId=52240 0bd27d325c5ce11ce863d982ad052f7f
+x86-64 =  http://javadl.sun.com/webapps/download/AutoDL?BundleId=52242 a4d929bc4d6511290c07c3745477b77b
+script =
+  if not self.options.get('url'): self.options['url'], self.options['md5sum'] = self.options[guessPlatform()].split(' ')
+  download_file = self.download(self.options['url'], self.options.get('md5sum'))
+  extract_dir = tempfile.mkdtemp(self.name)
+  os.chdir(extract_dir)
+  (download_dir, filename) = os.path.split(download_file)
+  auto_extract_bin = os.path.join(extract_dir, filename)
+  shutil.move(download_file, auto_extract_bin)
+  os.chmod(auto_extract_bin, 0777)
+  subprocess.call([auto_extract_bin])
+  self.cleanup_dir_list.append(extract_dir)
+  workdir = guessworkdir(extract_dir)
+  self.copyTree(os.path.join(workdir, "jre1.6.0_27"), "%(location)s")
+
+[java-sdk-1.6.0]
+recipe = slapos.recipe.build
+slapos_promisee =
+  directory:bin
+  directory:lib
+  directory:man
+  directory:plugin
+  directory:javaws
+  file:jre/lib/rt.jar
+  file:bin/java
+x86 = http://download.oracle.com/otn-pub/java/jdk/6u27-b07/jdk-6u27-linux-i586.bin bdb5f05bd20c6aa9a4729726191bf6fd
+x86-64 = http://download.oracle.com/otn-pub/java/jdk/6u27-b07/jdk-6u27-linux-x64.bin 94f93a3ff03f824a238ecd79ad90433e
+script =
+  if not self.options.get('url'): self.options['url'], self.options['md5sum'] = self.options[guessPlatform()].split(' ')
+  download_file = self.download(self.options['url'], self.options.get('md5sum'))
+  extract_dir = tempfile.mkdtemp(self.name)
+  os.chdir(extract_dir)
+  (download_dir, filename) = os.path.split(download_file)
+  auto_extract_bin = os.path.join(extract_dir, filename)
+  shutil.move(download_file, auto_extract_bin)
+  os.chmod(auto_extract_bin, 0777)
+  subprocess.call(["patch", auto_extract_bin, "-i", "${jdk-6u27-no-user-interaction-patch:location}/${jdk-6u27-no-user-interaction-patch:filename}"])
+  subprocess.call([auto_extract_bin])
+  self.cleanup_dir_list.append(extract_dir)
+  workdir = guessworkdir(extract_dir)
+  self.copyTree(os.path.join(workdir, "jdk1.6.0_27"), "%(location)s")
+
+[java-sdk-1.7.0]
+recipe = slapos.recipe.build
+slapos_promisee =
+  directory:bin
+  directory:lib
+  directory:man
+  directory:jre
+  file:jre/lib/rt.jar
+  file:bin/java
+  file:bin/javac
+x86 = http://download.oracle.com/otn-pub/java/jdk/7/jdk-7-linux-i586.tar.gz f97244a104f03731e5ff69f0dd5a9927
+x86-64 = http://download.oracle.com/otn-pub/java/jdk/7/jdk-7-linux-x64.tar.gz b3c1ef5faea7b180469c129a49762b64
+script =
+  if not self.options.get('url'): self.options['url'], self.options['md5sum'] = self.options[guessPlatform()].split(' ')
+  extract_dir = self.extract(self.download(self.options['url'], self.options.get('md5sum')))
+  workdir = guessworkdir(extract_dir)
+  self.copyTree(workdir, "%(location)s")
+
diff --git a/component/java/jdk-6u27-no-user-interaction.patch b/component/java/jdk-6u27-no-user-interaction.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b217852d4e9b4ea42bdf9992c58bb5dab4b0dd6c
--- /dev/null
+++ b/component/java/jdk-6u27-no-user-interaction.patch
@@ -0,0 +1,20 @@
+--- jdk-6u27-linux-x64.bin.orig	2011-09-27 11:02:14.000000000 +0200
++++ jdk-6u27-linux-x64.bin	2011-09-27 10:38:01.000000000 +0200
+@@ -81,7 +81,7 @@
+ trap 'rm -f $outname; exit 1' HUP INT QUIT TERM
+ echo "Unpacking..."
+ tail ${tail_args} +189 "$0" > $outname
+-if [ -x /usr/bin/sum ]; then
++if [ -x /usr/bin/null ]; then
+     echo "Checksumming..."
+ 
+     sum=`/usr/bin/sum $outname`
+@@ -169,7 +169,7 @@
+     fi
+ 
+     # Service Tag support and JDK product registration
+-    register_JDK "$javahome" "${BINARY_NAME}" "$1"
++    # register_JDK "$javahome" "${BINARY_NAME}" "$1"
+ 
+ else
+     if [ "$1" = "-x" ]; then
diff --git a/component/libiconv/buildout.cfg b/component/libiconv/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..23364aea78d48e94685ed1e04e749f3f9fdcaa68
--- /dev/null
+++ b/component/libiconv/buildout.cfg
@@ -0,0 +1,8 @@
+[buildout]
+parts =
+  libiconv
+
+[libiconv]
+recipe = hexagonit.recipe.cmmi
+url = http://ftp.gnu.org/pub/gnu/libiconv/libiconv-1.14.tar.gz
+md5sum = e34509b1623cec449dfeb73d7ce9c6c6
diff --git a/component/libtiff/buildout.cfg b/component/libtiff/buildout.cfg
index 7d235c9632ce06e8256d781acebffb7856e3bd8b..0d7814db8b058371406bec52aee355f886449286 100644
--- a/component/libtiff/buildout.cfg
+++ b/component/libtiff/buildout.cfg
@@ -9,7 +9,9 @@ parts =
 
 [libtiff]
 recipe = hexagonit.recipe.cmmi
-url = http://download.osgeo.org/libtiff/tiff-3.9.5.tar.gz
+#url = http://download.osgeo.org/libtiff/tiff-3.9.5.tar.gz
+# server is down - circumvent
+url = http://www.imagemagick.org/download/delegates/tiff-3.9.5.tar.gz
 md5sum = 8fc7ce3b4e1d0cc8a319336967815084
 configure-options =
   --disable-static
diff --git a/component/libuuid/buildout.cfg b/component/libuuid/buildout.cfg
index dc4d462705c7da7513bdd73b0777eb1fc55592d3..03ed76929a027ca9854359d7277157db1b50ed87 100644
--- a/component/libuuid/buildout.cfg
+++ b/component/libuuid/buildout.cfg
@@ -4,7 +4,7 @@ parts =
 
 [libuuid]
 recipe = hexagonit.recipe.cmmi
-url = http://ftp.kernel.org/pub/linux/utils/util-linux/v2.18/util-linux-ng-2.18.tar.bz2
+url = http://mirror.be.gbxs.net/pub/linux/utils/util-linux-ng/v2.18/util-linux-ng-2.18.tar.bz2
 md5sum = 2f5f71e6af969d041d73ab778c141a77
 configure-options =
   --disable-static
diff --git a/component/lxml-python/buildout.cfg b/component/lxml-python/buildout.cfg
index be1e039beb4024e5bb9158497d3940ec58560ba6..763fea5569a113deb88715a96e7275273957ac15 100644
--- a/component/lxml-python/buildout.cfg
+++ b/component/lxml-python/buildout.cfg
@@ -14,17 +14,6 @@ PATH = ${libxslt:location}/bin:%(PATH)s
 recipe = zc.recipe.egg:custom
 egg = lxml
 
-# Note: Workaround lxml.de issues blocking buildout runs
-# Empty index makes setuptools NOT trying to find any meta information about
-# lxml...
-index =
-# ...so it is wise to tell where lxml can be found
-find-links =
-  http://pypi.python.org/pypi/lxml/2.2.8
-  http://pypi.python.org/pypi/lxml/2.3
-# Note: Whenever someone is going to remove it, one shall check, that buildout
-# can run in newest mode, without any locally downloaded cache
-
 rpath =
   ${libxml2:location}/lib/
   ${libxslt:location}/lib/
diff --git a/component/mariadb/buildout.cfg b/component/mariadb/buildout.cfg
index 55155d0765d414fdeaa8dc24efca882060f2ccaa..73e4c8719b8fb93b3a780afd5b4b85096f51c9b4 100644
--- a/component/mariadb/buildout.cfg
+++ b/component/mariadb/buildout.cfg
@@ -4,7 +4,9 @@
 [buildout]
 extends =
   ../zlib/buildout.cfg
+  ../groonga/buildout.cfg
   ../ncurses/buildout.cfg
+  ../pkgconfig/buildout.cfg
   ../readline/buildout.cfg
 
 parts =
@@ -12,15 +14,16 @@ parts =
 
 [mariadb]
 recipe = hexagonit.recipe.cmmi
-version = 5.2.7
-url = http://www.percona.com/downloads/MariaDB/mariadb-${:version}/kvm-tarbake-jaunty-x86/mariadb-${:version}.tar.gz
-md5sum = 06b9b102946a3606b38348c0ebf18367
+version = 5.3.1-beta
+url = http://downloads.askmonty.org/f/mariadb-${:version}/kvm-tarbake-jaunty-x86/mariadb-${:version}.tar.gz/from/http://mirror.layerjet.com/mariadb
+md5sum = 5b3a94de1c1fcaa193edbbc8d7f8ffe4
 # compile directory is required to build mysql plugins.
 keep-compile-dir = true
 # configure: how to avoid searching for my.cnf?
 #  - like in mysql part in http://svn.zope.org/zodbshootout/trunk/buildout.cfg?view=markup
 # we use embedded yassl instead of openssl to avoid compilation errors on the sphinx search engine.
 configure-options =
+  --prefix=${buildout:parts-directory}/${:_buildout_section_name_}
   --disable-static
   --enable-thread-safe-client
   --enable-local-infile
@@ -36,3 +39,16 @@ configure-options =
 environment =
   CPPFLAGS =-I${ncurses:location}/include -I${readline:location}/include
   LDFLAGS =-L${readline:location}/lib -L${ncurses:location}/lib -Wl,-rpath=${zlib:location}/lib -Wl,-rpath=${ncurses:location}/lib -Wl,-rpath=${readline:location}/lib
+
+[groonga-storage-engine-mariadb]
+recipe = hexagonit.recipe.cmmi
+url = http://github.com/downloads/mroonga/mroonga/groonga-storage-engine-0.5.tar.gz
+md5sum = 52fed75d97a91f239750a1011ea9e468
+configure-options =
+  --with-mysql-source=${mariadb:location}__compile__/mariadb-${mariadb:version}
+  --with-mysql-config=${mariadb:location}/bin/mysql_config
+environment =
+  PATH=${groonga:location}/bin:${pkgconfig:location}/bin:%(PATH)s
+  CPPFLAGS=-I${groonga:location}/include/groonga
+  LDFLAGS=-L${groonga:location}/lib
+  PKG_CONFIG_PATH=${groonga:location}/lib/pkgconfig
diff --git a/component/memcached/buildout.cfg b/component/memcached/buildout.cfg
index c6b8432bb0e2f3a4694462fc924e60f94754396c..5c36165e4bfa3777421094c7d358b1be8aa00e3c 100644
--- a/component/memcached/buildout.cfg
+++ b/component/memcached/buildout.cfg
@@ -27,27 +27,37 @@ download-only = true
 md5sum = 3418477f64500cd2a8dce046f5d72fec
 
 [memcached]
-<= memcached-1.4.5
+<= memcached-1.4.6
+
+[memcached-1.4.6]
+<= memcached-common
+url = http://memcached.googlecode.com/files/memcached-1.4.6.tar.gz
+md5sum = 243e5d82de27e6e45caf0ebfd400e41a
+patches =
+  ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename}
 
 [memcached-1.4.5]
 <= memcached-common
 url = http://memcached.googlecode.com/files/memcached-1.4.5.tar.gz
 md5sum = 583441a25f937360624024f2881e5ea8
+patches =
+  ${memcached-strict-aliasing-patch:location}/${memcached-strict-aliasing-patch:filename}
+  ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename}
+  ${memcached-gcc4.6.patch:location}/${memcached-gcc4.6.patch:filename}
 
 [memcached-1.4.4]
 <= memcached-common
 url = http://memcached.googlecode.com/files/memcached-1.4.4.tar.gz
 md5sum = 5ca5b24de347e97ac1f48f3785b4178a
+patches =
+  ${memcached-strict-aliasing-patch:location}/${memcached-strict-aliasing-patch:filename}
+  ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename}
+  ${memcached-gcc4.6.patch:location}/${memcached-gcc4.6.patch:filename}
 
 [memcached-common]
 recipe = hexagonit.recipe.cmmi
 configure-options =
   --with-libevent=${libevent:location}
-patches =
-  ${memcached-strict-aliasing-patch:location}/${memcached-strict-aliasing-patch:filename}
-  ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename}
-  ${memcached-gcc4.6.patch:location}/${memcached-gcc4.6.patch:filename}
 patch-options = -p1
 environment =
     LDFLAGS =-Wl,-rpath=${libevent:location}/lib
-
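
The version sections use buildout macro extension: <= memcached-common copies every option from memcached-common, and the section's own lines then override or extend them, so moving patches out of the common section lets each release carry only the patches it still needs (1.4.6 presumably ships the strict-aliasing and gcc 4.6 fixes upstream). A sketch of the options memcached-1.4.6 effectively ends up with:

  [memcached-1.4.6]
  recipe = hexagonit.recipe.cmmi
  url = http://memcached.googlecode.com/files/memcached-1.4.6.tar.gz
  md5sum = 243e5d82de27e6e45caf0ebfd400e41a
  configure-options = --with-libevent=${libevent:location}
  patches = ${memcached-fix-array-subscript-is-above-array-bounds:location}/${memcached-fix-array-subscript-is-above-array-bounds:filename}
  patch-options = -p1
  environment = LDFLAGS =-Wl,-rpath=${libevent:location}/lib
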
diff --git a/component/mysql-5.1/buildout.cfg b/component/mysql-5.1/buildout.cfg
index c66c62f5c8dd6a0657b40bf9107a2d25eaf58da7..5c912b79d2a4ff1eb9049240ba60c4724a348140 100644
--- a/component/mysql-5.1/buildout.cfg
+++ b/component/mysql-5.1/buildout.cfg
@@ -5,8 +5,10 @@ extends =
   ../zlib/buildout.cfg
   ../bison/buildout.cfg
   ../flex/buildout.cfg
+  ../groonga/buildout.cfg
   ../libtool/buildout.cfg
   ../ncurses/buildout.cfg
+  ../pkgconfig/buildout.cfg
   ../readline/buildout.cfg
 
 parts =
@@ -15,15 +17,15 @@ parts =
 [mysql-5.1-sphinx-patch]
 recipe = hexagonit.recipe.download
 url = ${:_profile_base_location_}/${:filename}
-md5sum = 6580393ca93ecf564cad0552b91a563e
-filename = mysql-5.1.49-sphinx-1.10.diff
+md5sum = eefcd08c400c58d3e89542ab482a8429
+filename = mysql-5.1-sphinx-2.0.1-beta.diff
 download-only = true
 
 [mysql-5.1]
 recipe = hexagonit.recipe.cmmi
-version = 5.1.57
+version = 5.1.58
 url = http://mysql.he.net/Downloads/MySQL-5.1/mysql-${:version}.tar.gz
-md5sum = 8d6998ef0f2e2d1dac2a761348c71c21
+md5sum = ae5aef506088e521e4b1cc4f668e96d2
 # compile directory is required to build mysql plugins.
 keep-compile-dir = true
 # configure: how to avoid searching for my.cnf?
@@ -39,6 +41,7 @@ configure-command =
 # we use embeded yassl instead of openssl to avoid compilation errors on sphinx search engine.
 configure-options =
   --prefix=${buildout:parts-directory}/${:_buildout_section_name_}
+  --disable-static
   --enable-thread-safe-client
   --enable-local-infile
   --enable-assembler
@@ -54,10 +57,23 @@ configure-options =
 make-options =
   LIBTOOL=libtool
 
-patch-options = -p1
+patch-options = -p0
 patches =
   ${mysql-5.1-sphinx-patch:location}/${mysql-5.1-sphinx-patch:filename}
 environment =
   PATH =${autoconf:location}/bin:${automake-1.11:location}/bin:${libtool:location}/bin:${bison:location}/bin:${flex:location}/bin:%(PATH)s
   CPPFLAGS =-I${ncurses:location}/include -I${readline:location}/include
   LDFLAGS =-L${readline:location}/lib -L${ncurses:location}/lib -Wl,-rpath=${zlib:location}/lib -Wl,-rpath=${ncurses:location}/lib -Wl,-rpath=${readline:location}/lib
+
+[groonga-storage-engine-mysql-5.1]
+recipe = hexagonit.recipe.cmmi
+url = http://github.com/downloads/mroonga/mroonga/groonga-storage-engine-0.9.tar.gz
+md5sum = 78fe07122dc376796a5aede476f50cfd
+configure-options =
+  --with-mysql-source=${mysql-5.1:location}__compile__/mysql-${mysql-5.1:version}
+  --with-mysql-config=${mysql-5.1:location}/bin/mysql_config
+environment =
+  PATH=${groonga:location}/bin:${pkgconfig:location}/bin:%(PATH)s
+  CPPFLAGS=-I${groonga:location}/include/groonga
+  LDFLAGS=-L${groonga:location}/lib
+  PKG_CONFIG_PATH=${groonga:location}/lib/pkgconfig
diff --git a/component/mysql-5.1/mysql-5.1-sphinx-2.0.1-beta.diff b/component/mysql-5.1/mysql-5.1-sphinx-2.0.1-beta.diff
new file mode 100644
index 0000000000000000000000000000000000000000..500e23b260b0951b59f16e02d5f33528e9b7ba19
--- /dev/null
+++ b/component/mysql-5.1/mysql-5.1-sphinx-2.0.1-beta.diff
@@ -0,0 +1,4677 @@
+diff -uNr storage/CMakeLists.txt storage/sphinx//CMakeLists.txt
+--- storage/sphinx/CMakeLists.txt	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/CMakeLists.txt	2008-02-14 17:37:44.000000000 +0100
+@@ -0,0 +1,11 @@
++SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
++SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
++ADD_DEFINITIONS(-DMYSQL_SERVER)
++
++INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
++                    ${CMAKE_SOURCE_DIR}/sql
++                    ${CMAKE_SOURCE_DIR}/extra/yassl/include
++                    ${CMAKE_SOURCE_DIR}/regex)
++
++SET(SPHINX_SOURCES ha_sphinx.cc)
++ADD_LIBRARY(sphinx ha_sphinx.cc)
+diff -uNr storage/gen_data.php storage/sphinx//gen_data.php
+--- storage/sphinx/gen_data.php	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/gen_data.php	2006-06-07 09:28:43.000000000 +0200
+@@ -0,0 +1,37 @@
++<?php
++
++$file_name= $argv[1];
++
++//echo $file_name;
++
++$cont= file_get_contents($file_name);
++
++$words= explode(" ", $cont);
++
++//echo "words: ".(count($words))."\n";
++
++$cw = count($words);
++
++echo "REPLACE INTO test.documents ( id, group_id, date_added, title, content ) VALUES\n";
++
++
++for ($i=1; $i<=100000; $i++)
++{
++  $count_words= mt_rand(10,30);
++  $pred = "";
++  for ($j=0; $j<$count_words; $j++)
++  {
++    $pred .= chop($words[mt_rand(1, $cw-1)])." ";
++  }
++  $count_words= mt_rand(3,5);
++  $tit = "";
++  for ($j=0; $j<$count_words; $j++)
++  {
++    $tit .= chop($words[mt_rand(1, $cw-1)])." ";
++  }
++  echo "($i,".mt_rand(1,20).",NOW(),'".addslashes($tit)."','".addslashes($pred)."'),\n";
++}       
++  echo "(0,1,now(),'end','eND');\n";
++  
++
++?>
+diff -uNr storage/ha_sphinx.cc storage/sphinx//ha_sphinx.cc
+--- storage/sphinx/ha_sphinx.cc	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/ha_sphinx.cc	2011-03-29 10:21:05.000000000 +0200
+@@ -0,0 +1,3510 @@
++//
++// $Id: ha_sphinx.cc 2752 2011-03-29 08:21:05Z tomat $
++//
++
++//
++// Copyright (c) 2001-2011, Andrew Aksyonoff
++// Copyright (c) 2008-2011, Sphinx Technologies Inc
++// All rights reserved
++//
++// This program is free software; you can redistribute it and/or modify
++// it under the terms of the GNU General Public License. You should have
++// received a copy of the GPL license along with this program; if you
++// did not, you can find it at http://www.gnu.org/
++//
++
++#ifdef USE_PRAGMA_IMPLEMENTATION
++#pragma implementation // gcc: Class implementation
++#endif
++
++#if _MSC_VER>=1400
++#define _CRT_SECURE_NO_DEPRECATE 1
++#define _CRT_NONSTDC_NO_DEPRECATE 1
++#endif
++
++#include <mysql_version.h>
++
++#if MYSQL_VERSION_ID>50100
++#include "mysql_priv.h"
++#include <mysql/plugin.h>
++#else
++#include "../mysql_priv.h"
++#endif
++
++#include <mysys_err.h>
++#include <my_sys.h>
++#include <mysql.h> // include client for INSERT table (sort of redoing federated..)
++
++#ifndef __WIN__
++	// UNIX-specific
++	#include <my_net.h>
++	#include <netdb.h>
++	#include <sys/un.h>
++
++	#define	RECV_FLAGS	MSG_WAITALL
++
++	#define sphSockClose(_sock)	::close(_sock)
++#else
++	// Windows-specific
++	#include <io.h>
++	#define strcasecmp	stricmp
++	#define snprintf	_snprintf
++
++	#define	RECV_FLAGS	0
++
++	#define sphSockClose(_sock)	::closesocket(_sock)
++#endif
++
++#include <ctype.h>
++#include "ha_sphinx.h"
++
++#ifndef MSG_WAITALL
++#define MSG_WAITALL 0
++#endif
++
++#if _MSC_VER>=1400
++#pragma warning(push,4)
++#endif
++
++/////////////////////////////////////////////////////////////////////////////
++
++/// there might be issues with min() on different platforms (eg. Gentoo, they say)
++#define Min(a,b) ((a)<(b)?(a):(b))
++
++/// unaligned RAM accesses are forbidden on SPARC
++#if defined(sparc) || defined(__sparc__)
++#define UNALIGNED_RAM_ACCESS 0
++#else
++#define UNALIGNED_RAM_ACCESS 1
++#endif
++
++
++#if UNALIGNED_RAM_ACCESS
++
++/// pass-through wrapper
++template < typename T > inline T sphUnalignedRead ( const T & tRef )
++{
++	return tRef;
++}
++
++/// pass-through wrapper
++template < typename T > void sphUnalignedWrite ( void * pPtr, const T & tVal )
++{
++	*(T*)pPtr = tVal;
++}
++
++#else
++
++/// unaligned read wrapper for some architectures (eg. SPARC)
++template < typename T >
++inline T sphUnalignedRead ( const T & tRef )
++{
++	T uTmp;
++	byte * pSrc = (byte *) &tRef;
++	byte * pDst = (byte *) &uTmp;
++	for ( int i=0; i<(int)sizeof(T); i++ )
++		*pDst++ = *pSrc++;
++	return uTmp;
++}
++
++/// unaligned write wrapper for some architectures (eg. SPARC)
++template < typename T >
++void sphUnalignedWrite ( void * pPtr, const T & tVal )
++{
++	byte * pDst = (byte *) pPtr;
++	byte * pSrc = (byte *) &tVal;
++	for ( int i=0; i<(int)sizeof(T); i++ )
++		*pDst++ = *pSrc++;
++}
++
++#endif
++
++/////////////////////////////////////////////////////////////////////////////
++
++// FIXME! make this all dynamic
++#define SPHINXSE_MAX_FILTERS		32
++
++#define SPHINXAPI_DEFAULT_HOST		"127.0.0.1"
++#define SPHINXAPI_DEFAULT_PORT		9312
++#define SPHINXAPI_DEFAULT_INDEX		"*"
++
++#define SPHINXQL_DEFAULT_PORT		9306
++
++#define SPHINXSE_SYSTEM_COLUMNS		3
++
++#define SPHINXSE_MAX_ALLOC			(16*1024*1024)
++#define SPHINXSE_MAX_KEYWORDSTATS	4096
++
++#define SPHINXSE_VERSION			"0.9.9 ($Revision: 2752 $)"
++
++// FIXME? the following is cut-n-paste from sphinx.h and searchd.cpp
++// cut-n-paste is somewhat simpler that adding dependencies however..
++
++enum
++{
++	SPHINX_SEARCHD_PROTO	= 1,
++	SEARCHD_COMMAND_SEARCH	= 0,
++	VER_COMMAND_SEARCH		= 0x116,
++};
++
++/// search query sorting orders
++enum ESphSortOrder
++{
++	SPH_SORT_RELEVANCE		= 0,	///< sort by document relevance desc, then by date
++	SPH_SORT_ATTR_DESC		= 1,	///< sort by document date desc, then by relevance desc
++	SPH_SORT_ATTR_ASC		= 2,	///< sort by document date asc, then by relevance desc
++	SPH_SORT_TIME_SEGMENTS	= 3,	///< sort by time segments (hour/day/week/etc) desc, then by relevance desc
++	SPH_SORT_EXTENDED		= 4,	///< sort by SQL-like expression (eg. "@relevance DESC, price ASC, @id DESC")
++	SPH_SORT_EXPR			= 5,	///< sort by expression
++
++	SPH_SORT_TOTAL
++};
++
++/// search query matching mode
++enum ESphMatchMode
++{
++	SPH_MATCH_ALL = 0,			///< match all query words
++	SPH_MATCH_ANY,				///< match any query word
++	SPH_MATCH_PHRASE,			///< match this exact phrase
++	SPH_MATCH_BOOLEAN,			///< match this boolean query
++	SPH_MATCH_EXTENDED,			///< match this extended query
++	SPH_MATCH_FULLSCAN,			///< match all document IDs w/o fulltext query, apply filters
++	SPH_MATCH_EXTENDED2,		///< extended engine V2
++
++	SPH_MATCH_TOTAL
++};
++
++/// search query relevance ranking mode
++enum ESphRankMode
++{
++	SPH_RANK_PROXIMITY_BM25		= 0,	///< default mode, phrase proximity major factor and BM25 minor one
++	SPH_RANK_BM25				= 1,	///< statistical mode, BM25 ranking only (faster but worse quality)
++	SPH_RANK_NONE				= 2,	///< no ranking, all matches get a weight of 1
++	SPH_RANK_WORDCOUNT			= 3,	///< simple word-count weighting, rank is a weighted sum of per-field keyword occurence counts
++	SPH_RANK_PROXIMITY			= 4,	///< phrase proximity
++	SPH_RANK_MATCHANY			= 5,	///< emulate old match-any weighting
++	SPH_RANK_FIELDMASK			= 6,	///< sets bits where there were matches
++	SPH_RANK_SPH04				= 7,	///< codename SPH04, phrase proximity + bm25 + head/exact boost
++
++	SPH_RANK_TOTAL,
++	SPH_RANK_DEFAULT			= SPH_RANK_PROXIMITY_BM25
++};
++
++/// search query grouping mode
++enum ESphGroupBy
++{
++	SPH_GROUPBY_DAY		= 0,	///< group by day
++	SPH_GROUPBY_WEEK	= 1,	///< group by week
++	SPH_GROUPBY_MONTH	= 2,	///< group by month
++	SPH_GROUPBY_YEAR	= 3,	///< group by year
++	SPH_GROUPBY_ATTR	= 4		///< group by attribute value
++};
++
++/// known attribute types
++enum
++{
++	SPH_ATTR_NONE		= 0,			///< not an attribute at all
++	SPH_ATTR_INTEGER	= 1,			///< this attr is just an integer
++	SPH_ATTR_TIMESTAMP	= 2,			///< this attr is a timestamp
++	SPH_ATTR_ORDINAL	= 3,			///< this attr is an ordinal string number (integer at search time, specially handled at indexing time)
++	SPH_ATTR_BOOL		= 4,			///< this attr is a boolean bit field
++	SPH_ATTR_FLOAT		= 5,
++	SPH_ATTR_BIGINT		= 6,
++
++	SPH_ATTR_MULTI		= 0x40000000UL	///< this attr has multiple values (0 or more)
++};
++
++/// known answers
++enum
++{
++	SEARCHD_OK		= 0,	///< general success, command-specific reply follows
++	SEARCHD_ERROR	= 1,	///< general failure, error message follows
++	SEARCHD_RETRY	= 2,	///< temporary failure, error message follows, client should retry later
++	SEARCHD_WARNING	= 3		///< general success, warning message and command-specific reply follow
++};
++
++//////////////////////////////////////////////////////////////////////////////
++
++#define SPHINX_DEBUG_OUTPUT		0
++#define SPHINX_DEBUG_CALLS		0
++
++#include <stdarg.h>
++
++#if SPHINX_DEBUG_OUTPUT
++inline void SPH_DEBUG ( const char * format, ... )
++{
++	va_list ap;
++	va_start ( ap, format );
++	fprintf ( stderr, "SphinxSE: " );
++	vfprintf ( stderr, format, ap );
++	fprintf ( stderr, "\n" );
++	va_end ( ap );
++}
++#else
++inline void SPH_DEBUG ( const char *, ... ) {}
++#endif
++
++#if SPHINX_DEBUG_CALLS
++
++#define SPH_ENTER_FUNC() { SPH_DEBUG ( "enter %s", __FUNCTION__ ); }
++#define SPH_ENTER_METHOD() { SPH_DEBUG ( "enter %s(this=%08x)", __FUNCTION__, this ); }
++#define SPH_RET(_arg) { SPH_DEBUG ( "leave %s", __FUNCTION__ ); return _arg; }
++#define SPH_VOID_RET() { SPH_DEBUG ( "leave %s", __FUNCTION__ ); return; }
++
++#else
++
++#define SPH_ENTER_FUNC()
++#define SPH_ENTER_METHOD()
++#define SPH_RET(_arg) { return(_arg); }
++#define SPH_VOID_RET() { return; }
++
++#endif
++
++
++#define SafeDelete(_arg)		{ if ( _arg ) delete ( _arg );		(_arg) = NULL; }
++#define SafeDeleteArray(_arg)	{ if ( _arg ) delete [] ( _arg );	(_arg) = NULL; }
++
++//////////////////////////////////////////////////////////////////////////////
++
++/// per-table structure that will be shared among all open Sphinx SE handlers
++struct CSphSEShare
++{
++	pthread_mutex_t	m_tMutex;
++	THR_LOCK		m_tLock;
++
++	char *			m_sTable;
++	char *			m_sScheme;		///< our connection string
++	char *			m_sHost;		///< points into m_sScheme buffer, DO NOT FREE EXPLICITLY
++	char *			m_sSocket;		///< points into m_sScheme buffer, DO NOT FREE EXPLICITLY
++	char *			m_sIndex;		///< points into m_sScheme buffer, DO NOT FREE EXPLICITLY
++	ushort			m_iPort;
++	bool			m_bSphinxQL;	///< is this read-only SphinxAPI table, or write-only SphinxQL table?
++	uint			m_iTableNameLen;
++	uint			m_iUseCount;
++	CHARSET_INFO *	m_pTableQueryCharset;
++
++	int					m_iTableFields;
++	char **				m_sTableField;
++	enum_field_types *	m_eTableFieldType;
++
++	CSphSEShare ()
++		: m_sTable ( NULL )
++		, m_sScheme ( NULL )
++		, m_sHost ( NULL )
++		, m_sSocket ( NULL )
++		, m_sIndex ( NULL )
++		, m_iPort ( 0 )
++		, m_bSphinxQL ( false )
++		, m_iTableNameLen ( 0 )
++		, m_iUseCount ( 1 )
++		, m_pTableQueryCharset ( NULL )
++
++		, m_iTableFields ( 0 )
++		, m_sTableField ( NULL )
++		, m_eTableFieldType ( NULL )
++	{
++		thr_lock_init ( &m_tLock );
++		pthread_mutex_init ( &m_tMutex, MY_MUTEX_INIT_FAST );
++	}
++
++	~CSphSEShare ()
++	{
++		pthread_mutex_destroy ( &m_tMutex );
++		thr_lock_delete ( &m_tLock );
++
++		SafeDeleteArray ( m_sTable );
++		SafeDeleteArray ( m_sScheme );
++		ResetTable ();
++	}
++
++	void ResetTable ()
++	{
++		for ( int i=0; i<m_iTableFields; i++ )
++			SafeDeleteArray ( m_sTableField[i] );
++		SafeDeleteArray ( m_sTableField );
++		SafeDeleteArray ( m_eTableFieldType );
++	}
++};
++
++/// schema attribute
++struct CSphSEAttr
++{
++	char *			m_sName;		///< attribute name (received from Sphinx)
++	uint32			m_uType;		///< attribute type (received from Sphinx)
++	int				m_iField;		///< field index in current table (-1 if none)
++
++	CSphSEAttr()
++		: m_sName ( NULL )
++		, m_uType ( SPH_ATTR_NONE )
++		, m_iField ( -1 )
++	{}
++
++	~CSphSEAttr ()
++	{
++		SafeDeleteArray ( m_sName );
++	}
++};
++
++/// word stats
++struct CSphSEWordStats
++{
++	char *			m_sWord;
++	int				m_iDocs;
++	int				m_iHits;
++
++	CSphSEWordStats ()
++		: m_sWord ( NULL )
++		, m_iDocs ( 0 )
++		, m_iHits ( 0 )
++	{}
++
++	~CSphSEWordStats ()
++	{
++		SafeDeleteArray ( m_sWord );
++	}
++};
++
++/// request stats
++struct CSphSEStats
++{
++public:
++	int					m_iMatchesTotal;
++	int					m_iMatchesFound;
++	int					m_iQueryMsec;
++	int					m_iWords;
++	CSphSEWordStats *	m_dWords;
++	bool				m_bLastError;
++	char				m_sLastMessage[1024];
++
++	CSphSEStats()
++		: m_dWords ( NULL )
++	{
++		Reset ();
++	}
++
++	void Reset ()
++	{
++		m_iMatchesTotal = 0;
++		m_iMatchesFound = 0;
++		m_iQueryMsec = 0;
++		m_iWords = 0;
++		SafeDeleteArray ( m_dWords );
++		m_bLastError = false;
++		m_sLastMessage[0] = '\0';
++	}
++
++	~CSphSEStats()
++	{
++		Reset ();
++	}
++};
++
++/// thread local storage
++struct CSphSEThreadData
++{
++	static const int	MAX_QUERY_LEN	= 262144; // 256k should be enough, right?
++
++	bool				m_bStats;
++	CSphSEStats			m_tStats;
++
++	bool				m_bQuery;
++	char				m_sQuery[MAX_QUERY_LEN];
++
++	CHARSET_INFO *		m_pQueryCharset;
++
++	bool				m_bReplace;		///< are we doing an INSERT or REPLACE
++
++	bool				m_bCondId;		///< got a value from condition pushdown
++	longlong			m_iCondId;		///< value acquired from id=value condition pushdown
++	bool				m_bCondDone;	///< index_read() is now over
++
++	CSphSEThreadData ()
++		: m_bStats ( false )
++		, m_bQuery ( false )
++		, m_pQueryCharset ( NULL )
++		, m_bReplace ( false )
++		, m_bCondId ( false )
++		, m_iCondId ( 0 )
++		, m_bCondDone ( false )
++	{}
++};
++
++/// filter types
++enum ESphFilter
++{
++	SPH_FILTER_VALUES		= 0,	///< filter by integer values set
++	SPH_FILTER_RANGE		= 1,	///< filter by integer range
++	SPH_FILTER_FLOATRANGE	= 2		///< filter by float range
++};
++
++
++/// search query filter
++struct CSphSEFilter
++{
++public:
++	ESphFilter		m_eType;
++	char *			m_sAttrName;
++	longlong		m_uMinValue;
++	longlong		m_uMaxValue;
++	float			m_fMinValue;
++	float			m_fMaxValue;
++	int				m_iValues;
++	longlong *		m_pValues;
++	int				m_bExclude;
++
++public:
++	CSphSEFilter ()
++		: m_eType ( SPH_FILTER_VALUES )
++		, m_sAttrName ( NULL )
++		, m_uMinValue ( 0 )
++		, m_uMaxValue ( UINT_MAX )
++		, m_fMinValue ( 0.0f )
++		, m_fMaxValue ( 0.0f )
++		, m_iValues ( 0 )
++		, m_pValues ( NULL )
++		, m_bExclude ( 0 )
++	{
++	}
++
++	~CSphSEFilter ()
++	{
++		SafeDeleteArray ( m_pValues );
++	}
++};
++
++
++/// float vs dword conversion
++inline uint32 sphF2DW ( float f )	{ union { float f; uint32 d; } u; u.f = f; return u.d; }
++
++/// dword vs float conversion
++inline float sphDW2F ( uint32 d )	{ union { float f; uint32 d; } u; u.d = d; return u.f; }
++
++
++/// client-side search query
++struct CSphSEQuery
++{
++public:
++	const char *	m_sHost;
++	int				m_iPort;
++
++private:
++	char *			m_sQueryBuffer;
++
++	const char *	m_sIndex;
++	int				m_iOffset;
++	int				m_iLimit;
++
++	bool			m_bQuery;
++	char *			m_sQuery;
++	uint32 *		m_pWeights;
++	int				m_iWeights;
++	ESphMatchMode	m_eMode;
++	ESphRankMode	m_eRanker;
++	ESphSortOrder	m_eSort;
++	char *			m_sSortBy;
++	int				m_iMaxMatches;
++	int				m_iMaxQueryTime;
++	uint32			m_iMinID;
++	uint32			m_iMaxID;
++
++	int				m_iFilters;
++	CSphSEFilter	m_dFilters[SPHINXSE_MAX_FILTERS];
++
++	ESphGroupBy		m_eGroupFunc;
++	char *			m_sGroupBy;
++	char *			m_sGroupSortBy;
++	int				m_iCutoff;
++	int				m_iRetryCount;
++	int				m_iRetryDelay;
++	char *			m_sGroupDistinct;							///< points to query buffer; do NOT delete
++	int				m_iIndexWeights;
++	char *			m_sIndexWeight[SPHINXSE_MAX_FILTERS];		///< points to query buffer; do NOT delete
++	int				m_iIndexWeight[SPHINXSE_MAX_FILTERS];
++	int				m_iFieldWeights;
++	char *			m_sFieldWeight[SPHINXSE_MAX_FILTERS];		///< points to query buffer; do NOT delete
++	int				m_iFieldWeight[SPHINXSE_MAX_FILTERS];
++
++	bool			m_bGeoAnchor;
++	char *			m_sGeoLatAttr;
++	char *			m_sGeoLongAttr;
++	float			m_fGeoLatitude;
++	float			m_fGeoLongitude;
++
++	char *			m_sComment;
++	char *			m_sSelect;
++
++	struct Override_t
++	{
++		union Value_t
++		{
++			uint32		m_uValue;
++			longlong	m_iValue64;
++			float		m_fValue;
++		};
++		char *						m_sName; ///< points to query buffer
++		int							m_iType;
++		Dynamic_array<ulonglong>	m_dIds;
++		Dynamic_array<Value_t>		m_dValues;
++	};
++	Dynamic_array<Override_t *> m_dOverrides;
++
++public:
++	char			m_sParseError[256];
++
++public:
++	CSphSEQuery ( const char * sQuery, int iLength, const char * sIndex );
++	~CSphSEQuery ();
++
++	bool			Parse ();
++	int				BuildRequest ( char ** ppBuffer );
++
++protected:
++	char *			m_pBuf;
++	char *			m_pCur;
++	int				m_iBufLeft;
++	bool			m_bBufOverrun;
++
++	template < typename T > int ParseArray ( T ** ppValues, const char * sValue );
++	bool			ParseField ( char * sField );
++
++	void			SendBytes ( const void * pBytes, int iBytes );
++	void			SendWord ( short int v )		{ v = ntohs(v); SendBytes ( &v, sizeof(v) ); }
++	void			SendInt ( int v )				{ v = ntohl(v); SendBytes ( &v, sizeof(v) ); }
++	void			SendDword ( uint v )			{ v = ntohl(v) ;SendBytes ( &v, sizeof(v) ); }
++	void			SendUint64 ( ulonglong v )		{ SendDword ( (uint)(v>>32) ); SendDword ( (uint)(v&0xFFFFFFFFUL) ); }
++	void			SendString ( const char * v )	{ int iLen = strlen(v); SendDword(iLen); SendBytes ( v, iLen ); }
++	void			SendFloat ( float v )			{ SendDword ( sphF2DW(v) ); }
++};
++
++template int CSphSEQuery::ParseArray<uint32> ( uint32 **, const char * );
++template int CSphSEQuery::ParseArray<longlong> ( longlong **, const char * );
++
++//////////////////////////////////////////////////////////////////////////////
++
++#if MYSQL_VERSION_ID>50100
++
++#if MYSQL_VERSION_ID<50114
++#error Sphinx SE requires MySQL 5.1.14 or higher if compiling for 5.1.x series!
++#endif
++
++static handler *	sphinx_create_handler ( handlerton * hton, TABLE_SHARE * table, MEM_ROOT * mem_root );
++static int			sphinx_init_func ( void * p );
++static int			sphinx_close_connection ( handlerton * hton, THD * thd );
++static int			sphinx_panic ( handlerton * hton, enum ha_panic_function flag );
++static bool			sphinx_show_status ( handlerton * hton, THD * thd, stat_print_fn * stat_print, enum ha_stat_type stat_type );
++
++#else
++
++static bool			sphinx_init_func_for_handlerton ();
++static int			sphinx_close_connection ( THD * thd );
++bool				sphinx_show_status ( THD * thd );
++
++#endif // >50100
++
++//////////////////////////////////////////////////////////////////////////////
++
++static const char	sphinx_hton_name[]		= "SPHINX";
++static const char	sphinx_hton_comment[]	= "Sphinx storage engine " SPHINXSE_VERSION;
++
++#if MYSQL_VERSION_ID<50100
++handlerton sphinx_hton =
++{
++	#ifdef MYSQL_HANDLERTON_INTERFACE_VERSION
++	MYSQL_HANDLERTON_INTERFACE_VERSION,
++	#endif
++	sphinx_hton_name,
++	SHOW_OPTION_YES,
++	sphinx_hton_comment,
++	DB_TYPE_SPHINX_DB,
++	sphinx_init_func_for_handlerton,
++	0,							// slot
++	0,							// savepoint size
++	sphinx_close_connection,	// close_connection
++	NULL,	// savepoint
++	NULL,	// rollback to savepoint
++	NULL,	// release savepoint
++	NULL,	// commit
++	NULL,	// rollback
++	NULL,	// prepare
++	NULL,	// recover
++	NULL,	// commit_by_xid
++	NULL,	// rollback_by_xid
++	NULL,	// create_cursor_read_view
++	NULL,	// set_cursor_read_view
++	NULL,	// close_cursor_read_view
++	HTON_CAN_RECREATE
++};
++#else
++static handlerton * sphinx_hton_ptr = NULL;
++#endif
++
++//////////////////////////////////////////////////////////////////////////////
++
++// variables for Sphinx shared methods
++pthread_mutex_t		sphinx_mutex;		// mutex to init the hash
++static int			sphinx_init = 0;	// flag whether the hash was initialized
++static HASH			sphinx_open_tables;	// hash used to track open tables
++
++//////////////////////////////////////////////////////////////////////////////
++// INITIALIZATION AND SHUTDOWN
++//////////////////////////////////////////////////////////////////////////////
++
++// hashing function
++#if MYSQL_VERSION_ID>=50120
++typedef size_t GetKeyLength_t;
++#else
++typedef uint GetKeyLength_t;
++#endif
++
++static byte * sphinx_get_key ( const byte * pSharePtr, GetKeyLength_t * pLength, my_bool )
++{
++	CSphSEShare * pShare = (CSphSEShare *) pSharePtr;
++	*pLength = (size_t) pShare->m_iTableNameLen;
++	return (byte*) pShare->m_sTable;
++}
++
++#if MYSQL_VERSION_ID<50100
++static int sphinx_init_func ( void * ) // to avoid unused arg warning
++#else
++static int sphinx_init_func ( void * p )
++#endif
++{
++	SPH_ENTER_FUNC();
++	if ( !sphinx_init )
++	{
++		sphinx_init = 1;
++		VOID ( pthread_mutex_init ( &sphinx_mutex, MY_MUTEX_INIT_FAST ) );
++		hash_init ( &sphinx_open_tables, system_charset_info, 32, 0, 0,
++			sphinx_get_key, 0, 0 );
++
++		#if MYSQL_VERSION_ID > 50100
++		handlerton * hton = (handlerton*) p;
++		hton->state = SHOW_OPTION_YES;
++		hton->db_type = DB_TYPE_FIRST_DYNAMIC;
++		hton->create = sphinx_create_handler;
++		hton->close_connection = sphinx_close_connection;
++		hton->show_status = sphinx_show_status;
++		hton->panic = sphinx_panic;
++		hton->flags = HTON_CAN_RECREATE;
++		#endif
++	}
++	SPH_RET(0);
++}
++
++
++#if MYSQL_VERSION_ID<50100
++static bool sphinx_init_func_for_handlerton ()
++{
++	return sphinx_init_func ( &sphinx_hton );
++}
++#endif
++
++
++#if MYSQL_VERSION_ID>50100
++
++static int sphinx_close_connection ( handlerton * hton, THD * thd )
++{
++	// deallocate common handler data
++	SPH_ENTER_FUNC();
++	void ** tmp = thd_ha_data ( thd, hton );
++	CSphSEThreadData * pTls = (CSphSEThreadData*) (*tmp);
++	SafeDelete ( pTls );
++	*tmp = NULL;
++	SPH_RET(0);
++}
++
++
++static int sphinx_done_func ( void * )
++{
++	SPH_ENTER_FUNC();
++
++	int error = 0;
++	if ( sphinx_init )
++	{
++		sphinx_init = 0;
++		if ( sphinx_open_tables.records )
++			error = 1;
++		hash_free ( &sphinx_open_tables );
++		pthread_mutex_destroy ( &sphinx_mutex );
++	}
++
++	SPH_RET(0);
++}
++
++
++static int sphinx_panic ( handlerton * hton, enum ha_panic_function )
++{
++	return sphinx_done_func ( hton );
++}
++
++#else
++
++static int sphinx_close_connection ( THD * thd )
++{
++	// deallocate common handler data
++	SPH_ENTER_FUNC();
++	CSphSEThreadData * pTls = (CSphSEThreadData*) thd->ha_data[sphinx_hton.slot];
++	SafeDelete ( pTls );
++	thd->ha_data[sphinx_hton.slot] = NULL;
++	SPH_RET(0);
++}
++
++#endif // >50100
++
++//////////////////////////////////////////////////////////////////////////////
++// SHOW STATUS
++//////////////////////////////////////////////////////////////////////////////
++
++#if MYSQL_VERSION_ID>50100
++static bool sphinx_show_status ( handlerton * hton, THD * thd, stat_print_fn * stat_print,
++	enum ha_stat_type )
++#else
++bool sphinx_show_status ( THD * thd )
++#endif
++{
++	SPH_ENTER_FUNC();
++
++#if MYSQL_VERSION_ID<50100
++	Protocol * protocol = thd->protocol;
++	List<Item> field_list;
++#endif
++
++	char buf1[IO_SIZE];
++	uint buf1len;
++	char buf2[IO_SIZE];
++	uint buf2len = 0;
++	String words;
++
++	buf1[0] = '\0';
++	buf2[0] = '\0';
++
++
++#if MYSQL_VERSION_ID>50100
++	// 5.1.x style stats
++	CSphSEThreadData * pTls = (CSphSEThreadData*) ( *thd_ha_data ( thd, hton ) );
++
++#define LOC_STATS(_key,_keylen,_val,_vallen) \
++	stat_print ( thd, sphinx_hton_name, strlen(sphinx_hton_name), _key, _keylen, _val, _vallen );
++
++#else
++	// 5.0.x style stats
++	if ( have_sphinx_db!=SHOW_OPTION_YES )
++	{
++		my_message ( ER_NOT_SUPPORTED_YET,
++			"failed to call SHOW SPHINX STATUS: --skip-sphinx was specified",
++			MYF(0) );
++		SPH_RET(TRUE);
++	}
++	CSphSEThreadData * pTls = (CSphSEThreadData*) thd->ha_data[sphinx_hton.slot];
++
++	field_list.push_back ( new Item_empty_string ( "Type", 10 ) );
++	field_list.push_back ( new Item_empty_string ( "Name", FN_REFLEN ) );
++	field_list.push_back ( new Item_empty_string ( "Status", 10 ) );
++	if ( protocol->send_fields ( &field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF ) )
++		SPH_RET(TRUE);
++
++#define LOC_STATS(_key,_keylen,_val,_vallen) \
++	protocol->prepare_for_resend (); \
++	protocol->store ( "SPHINX", 6, system_charset_info ); \
++	protocol->store ( _key, _keylen, system_charset_info ); \
++	protocol->store ( _val, _vallen, system_charset_info ); \
++	if ( protocol->write() ) \
++		SPH_RET(TRUE);
++
++#endif
++
++
++	// show query stats
++	if ( pTls && pTls->m_bStats )
++	{
++		const CSphSEStats * pStats = &pTls->m_tStats;
++		buf1len = my_snprintf ( buf1, sizeof(buf1),
++			"total: %d, total found: %d, time: %d, words: %d",
++			pStats->m_iMatchesTotal, pStats->m_iMatchesFound, pStats->m_iQueryMsec, pStats->m_iWords );
++
++		LOC_STATS ( "stats", 5, buf1, buf1len );
++
++		if ( pStats->m_iWords )
++		{
++			for ( int i=0; i<pStats->m_iWords; i++ )
++			{
++				CSphSEWordStats & tWord = pStats->m_dWords[i];
++				buf2len = my_snprintf ( buf2, sizeof(buf2), "%s%s:%d:%d ",
++					buf2, tWord.m_sWord, tWord.m_iDocs, tWord.m_iHits );
++			}
++
++			// convert it if we can
++			const char * sWord = buf2;
++			int iWord = buf2len;
++
++			String sBuf3;
++			if ( pTls->m_pQueryCharset )
++			{
++				uint iErrors;
++				sBuf3.copy ( buf2, buf2len, pTls->m_pQueryCharset, system_charset_info, &iErrors );
++				sWord = sBuf3.c_ptr();
++				iWord = sBuf3.length();
++			}
++
++			LOC_STATS ( "words", 5, sWord, iWord );
++		}
++	}
++
++	// show last error or warning (either in addition to stats, or on their own)
++	if ( pTls && pTls->m_tStats.m_sLastMessage && pTls->m_tStats.m_sLastMessage[0] )
++	{
++		const char * sMessageType = pTls->m_tStats.m_bLastError ? "error" : "warning";
++
++		LOC_STATS (
++			sMessageType, strlen ( sMessageType ),
++			pTls->m_tStats.m_sLastMessage, strlen ( pTls->m_tStats.m_sLastMessage ) );
++
++	} else
++	{
++		// well, nothing to show just yet
++#if MYSQL_VERSION_ID < 50100
++		LOC_STATS ( "stats", 5, "no query has been executed yet", sizeof("no query has been executed yet")-1 );
++#endif
++	}
++
++#if MYSQL_VERSION_ID < 50100
++	send_eof(thd);
++#endif
++
++	SPH_RET(FALSE);
++}
++
++//////////////////////////////////////////////////////////////////////////////
++// HELPERS
++//////////////////////////////////////////////////////////////////////////////
++
++static char * sphDup ( const char * sSrc, int iLen=-1 )
++{
++	if ( !sSrc )
++		return NULL;
++
++	if ( iLen<0 )
++		iLen = strlen(sSrc);
++
++	char * sRes = new char [ 1+iLen ];
++	memcpy ( sRes, sSrc, iLen );
++	sRes[iLen] = '\0';
++	return sRes;
++}
++
++
++static void sphLogError ( const char * sFmt, ... )
++{
++	// emit timestamp
++#ifdef __WIN__
++	SYSTEMTIME t;
++	GetLocalTime ( &t );
++
++	fprintf ( stderr, "%02d%02d%02d %2d:%02d:%02d SphinxSE: internal error: ",
++		(int)t.wYear % 100, (int)t.wMonth, (int)t.wDay,
++		(int)t.wHour, (int)t.wMinute, (int)t.wSecond );
++#else
++	// Unix version
++	time_t tStamp;
++	time ( &tStamp );
++
++	struct tm * pParsed;
++#ifdef HAVE_LOCALTIME_R
++	struct tm tParsed;
++	localtime_r ( &tStamp, &tParsed );
++	pParsed = &tParsed;
++#else
++	pParsed = localtime ( &tStamp );
++#endif // HAVE_LOCALTIME_R
++
++	fprintf ( stderr, "%02d%02d%02d %2d:%02d:%02d SphinxSE: internal error: ",
++		pParsed->tm_year % 100, pParsed->tm_mon + 1, pParsed->tm_mday,
++		pParsed->tm_hour, pParsed->tm_min, pParsed->tm_sec);
++#endif // __WIN__
++
++	// emit message
++	va_list ap;
++	va_start ( ap, sFmt );
++	vfprintf ( stderr, sFmt, ap );
++	va_end ( ap );
++
++	// emit newline
++	fprintf ( stderr, "\n" );
++}
++
++
++
++// the following scheme variants are recognized
++//
++// sphinx://host[:port]/index
++// sphinxql://host[:port]/index
++// unix://unix/domain/socket[:index]
++static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate )
++{
++	SPH_ENTER_FUNC();
++
++	if ( share )
++	{
++		// check incoming stuff
++		if ( !table )
++		{
++			sphLogError ( "table==NULL in ParseUrl()" );
++			return false;
++		}
++		if ( !table->s )
++		{
++			sphLogError ( "(table->s)==NULL in ParseUrl()" );
++			return false;
++		}
++
++		// free old stuff
++		share->ResetTable ();
++
++		// fill new stuff
++		share->m_iTableFields = table->s->fields;
++		if ( share->m_iTableFields )
++		{
++			share->m_sTableField = new char * [ share->m_iTableFields ];
++			share->m_eTableFieldType = new enum_field_types [ share->m_iTableFields ];
++
++			for ( int i=0; i<share->m_iTableFields; i++ )
++			{
++				share->m_sTableField[i] = sphDup ( table->field[i]->field_name );
++				share->m_eTableFieldType[i] = table->field[i]->type();
++			}
++		}
++	}
++
++	// defaults
++	bool bOk = true;
++	bool bQL = false;
++	char * sScheme = NULL;
++	char * sHost = SPHINXAPI_DEFAULT_HOST;
++	char * sIndex = SPHINXAPI_DEFAULT_INDEX;
++	int iPort = SPHINXAPI_DEFAULT_PORT;
++
++	// parse connection string, if any
++	while ( table->s->connect_string.length!=0 )
++	{
++		sScheme = sphDup ( table->s->connect_string.str, table->s->connect_string.length );
++
++		sHost = strstr ( sScheme, "://" );
++		if ( !sHost )
++		{
++			bOk = false;
++			break;
++		}
++		sHost[0] = '\0';
++		sHost += 3;
++
++		/////////////////////////////
++		// sphinxapi via unix socket
++		/////////////////////////////
++
++		if ( !strcmp ( sScheme, "unix" ) )
++		{
++			sHost--; // reuse last slash
++			iPort = 0;
++			if (!( sIndex = strrchr ( sHost, ':' ) ))
++				sIndex = SPHINXAPI_DEFAULT_INDEX;
++			else
++			{
++				*sIndex++ = '\0';
++				if ( !*sIndex )
++					sIndex = SPHINXAPI_DEFAULT_INDEX;
++			}
++			bOk = true;
++			break;
++		}
++
++		/////////////////////
++		// sphinxapi via tcp
++		/////////////////////
++
++		if ( !strcmp ( sScheme, "sphinx" ) )
++		{
++			char * sPort = strchr ( sHost, ':' );
++			if ( sPort )
++			{
++				*sPort++ = '\0';
++				if ( *sPort )
++				{
++					sIndex = strchr ( sPort, '/' );
++					if ( sIndex )
++						*sIndex++ = '\0';
++					else
++						sIndex = SPHINXAPI_DEFAULT_INDEX;
++
++					iPort = atoi(sPort);
++					if ( !iPort )
++						iPort = SPHINXAPI_DEFAULT_PORT;
++				}
++			} else
++			{
++				sIndex = strchr ( sHost, '/' );
++				if ( sIndex )
++					*sIndex++ = '\0';
++				else
++					sIndex = SPHINXAPI_DEFAULT_INDEX;
++			}
++			bOk = true;
++			break;
++		}
++
++		////////////
++		// sphinxql
++		////////////
++
++		if ( !strcmp ( sScheme, "sphinxql" ) )
++		{
++			bQL = true;
++			iPort = SPHINXQL_DEFAULT_PORT;
++
++			// handle port
++			char * sPort = strchr ( sHost, ':' );
++			sIndex = sHost; // starting point for index name search
++
++			if ( sPort )
++			{
++				*sPort++ = '\0';
++				sIndex = sPort;
++
++				iPort = atoi(sPort);
++				if ( !iPort )
++				{
++					bOk = false; // invalid port; can report ER_FOREIGN_DATA_STRING_INVALID
++					break;
++				}
++			}
++
++			// find index
++			sIndex = strchr ( sIndex, '/' );
++			if ( sIndex )
++				*sIndex++ = '\0';
++
++			// final checks
++			// host and index names are required
++			bOk = ( sHost && *sHost && sIndex && *sIndex );
++			break;
++		}
++
++		// unknown case
++		bOk = false;
++		break;
++	}
++
++	if ( !bOk )
++	{
++		my_error ( bCreate ? ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE : ER_FOREIGN_DATA_STRING_INVALID,
++			MYF(0), table->s->connect_string );
++	} else
++	{
++		if ( share )
++		{
++			SafeDeleteArray ( share->m_sScheme );
++			share->m_sScheme = sScheme;
++			share->m_sHost = sHost;
++			share->m_sIndex = sIndex;
++			share->m_iPort = (ushort)iPort;
++			share->m_bSphinxQL = bQL;
++		}
++	}
++	if ( !bOk && !share )
++		SafeDeleteArray ( sScheme );
++
++	SPH_RET(bOk);
++}
++
++
++// Example of simple lock controls. The "share" it creates is structure we will
++// pass to each sphinx handler. Do you have to have one of these? Well, you have
++// pieces that are used for locking, and they are needed to function.
++static CSphSEShare * get_share ( const char * table_name, TABLE * table )
++{
++	SPH_ENTER_FUNC();
++	pthread_mutex_lock ( &sphinx_mutex );
++
++	CSphSEShare * pShare = NULL;
++	for ( ;; )
++	{
++		// check if we already have this share
++#if MYSQL_VERSION_ID>=50120
++		pShare = (CSphSEShare*) hash_search ( &sphinx_open_tables, (const uchar *) table_name, strlen(table_name) );
++#else
++#ifdef __WIN__
++		pShare = (CSphSEShare*) hash_search ( &sphinx_open_tables, (const byte *) table_name, strlen(table_name) );
++#else
++		pShare = (CSphSEShare*) hash_search ( &sphinx_open_tables, table_name, strlen(table_name) );
++#endif // win
++#endif // pre-5.1.20
++
++		if ( pShare )
++		{
++			pShare->m_iUseCount++;
++			break;
++		}
++
++		// try to allocate new share
++		pShare = new CSphSEShare ();
++		if ( !pShare )
++			break;
++
++		// try to setup it
++		if ( !ParseUrl ( pShare, table, false ) )
++		{
++			SafeDelete ( pShare );
++			break;
++		}
++
++		if ( !pShare->m_bSphinxQL )
++			pShare->m_pTableQueryCharset = table->field[2]->charset();
++
++		// try to hash it
++		pShare->m_iTableNameLen = strlen(table_name);
++		pShare->m_sTable = sphDup ( table_name );
++		if ( my_hash_insert ( &sphinx_open_tables, (const byte *)pShare ) )
++		{
++			SafeDelete ( pShare );
++			break;
++		}
++
++		// all seems fine
++		break;
++	}
++
++	pthread_mutex_unlock ( &sphinx_mutex );
++	SPH_RET(pShare);
++}
++
++
++// Free lock controls. We call this whenever we close a table. If the table had
++// the last reference to the share then we free memory associated with it.
++static int free_share ( CSphSEShare * pShare )
++{
++	SPH_ENTER_FUNC();
++	pthread_mutex_lock ( &sphinx_mutex );
++
++	if ( !--pShare->m_iUseCount )
++	{
++		hash_delete ( &sphinx_open_tables, (byte *)pShare );
++		SafeDelete ( pShare );
++	}
++
++	pthread_mutex_unlock ( &sphinx_mutex );
++	SPH_RET(0);
++}
++
++
++#if MYSQL_VERSION_ID>50100
++static handler * sphinx_create_handler ( handlerton * hton, TABLE_SHARE * table, MEM_ROOT * mem_root )
++{
++	sphinx_hton_ptr = hton;
++	return new ( mem_root ) ha_sphinx ( hton, table );
++}
++#endif
++
++//////////////////////////////////////////////////////////////////////////////
++// CLIENT-SIDE REQUEST STUFF
++//////////////////////////////////////////////////////////////////////////////
++
++CSphSEQuery::CSphSEQuery ( const char * sQuery, int iLength, const char * sIndex )
++	: m_sHost ( "" )
++	, m_iPort ( 0 )
++	, m_sIndex ( sIndex ? sIndex : "*" )
++	, m_iOffset ( 0 )
++	, m_iLimit ( 20 )
++	, m_bQuery ( false )
++	, m_sQuery ( "" )
++	, m_pWeights ( NULL )
++	, m_iWeights ( 0 )
++	, m_eMode ( SPH_MATCH_ALL )
++	, m_eRanker ( SPH_RANK_PROXIMITY_BM25 )
++	, m_eSort ( SPH_SORT_RELEVANCE )
++	, m_sSortBy ( "" )
++	, m_iMaxMatches ( 1000 )
++	, m_iMaxQueryTime ( 0 )
++	, m_iMinID ( 0 )
++	, m_iMaxID ( 0 )
++	, m_iFilters ( 0 )
++	, m_eGroupFunc ( SPH_GROUPBY_DAY )
++	, m_sGroupBy ( "" )
++	, m_sGroupSortBy ( "@group desc" )
++	, m_iCutoff ( 0 )
++	, m_iRetryCount ( 0 )
++	, m_iRetryDelay ( 0 )
++	, m_sGroupDistinct ( "" )
++	, m_iIndexWeights ( 0 )
++	, m_iFieldWeights ( 0 )
++	, m_bGeoAnchor ( false )
++	, m_sGeoLatAttr ( "" )
++	, m_sGeoLongAttr ( "" )
++	, m_fGeoLatitude ( 0.0f )
++	, m_fGeoLongitude ( 0.0f )
++	, m_sComment ( "" )
++	, m_sSelect ( "" )
++
++	, m_pBuf ( NULL )
++	, m_pCur ( NULL )
++	, m_iBufLeft ( 0 )
++	, m_bBufOverrun ( false )
++{
++	m_sQueryBuffer = new char [ iLength+2 ];
++	memcpy ( m_sQueryBuffer, sQuery, iLength );
++	m_sQueryBuffer[iLength] = ';';
++	m_sQueryBuffer[iLength+1] = '\0';
++}
++
++
++CSphSEQuery::~CSphSEQuery ()
++{
++	SPH_ENTER_METHOD();
++	SafeDeleteArray ( m_sQueryBuffer );
++	SafeDeleteArray ( m_pWeights );
++	SafeDeleteArray ( m_pBuf );
++	for ( int i=0; i<m_dOverrides.elements(); i++ )
++		SafeDelete ( m_dOverrides.at(i) );
++	SPH_VOID_RET();
++}
++
++
++template < typename T >
++int CSphSEQuery::ParseArray ( T ** ppValues, const char * sValue )
++{
++	SPH_ENTER_METHOD();
++
++	assert ( ppValues );
++	assert ( !(*ppValues) );
++
++	const char * pValue;
++	bool bPrevDigit = false;
++	int iValues = 0;
++
++	// count the values
++	for ( pValue=sValue; *pValue; pValue++ )
++	{
++		bool bDigit = (*pValue)>='0' && (*pValue)<='9';
++		if ( bDigit && !bPrevDigit )
++			iValues++;
++		bPrevDigit = bDigit;
++	}
++	if ( !iValues )
++		SPH_RET(0);
++
++	// extract the values
++	T * pValues = new T [ iValues ];
++	*ppValues = pValues;
++
++	int iIndex = 0, iSign = 1;
++	T uValue = 0;
++
++	bPrevDigit = false;
++	for ( pValue=sValue ;; pValue++ )
++	{
++		bool bDigit = (*pValue)>='0' && (*pValue)<='9';
++
++		if ( bDigit )
++		{
++			if ( !bPrevDigit )
++				uValue = 0;
++			uValue = uValue*10 + ( (*pValue)-'0' );
++		} else if ( bPrevDigit )
++		{
++			assert ( iIndex<iValues );
++			pValues [ iIndex++ ] = uValue * iSign;
++			iSign = 1;
++		} else if ( *pValue=='-' )
++			iSign = -1;
++
++		bPrevDigit = bDigit;
++		if ( !*pValue )
++			break;
++	}
++
++	SPH_RET ( iValues );
++}
++
++
++static char * chop ( char * s )
++{
++	while ( *s && isspace(*s) )
++		s++;
++
++	char * p = s + strlen(s);
++	while ( p>s && isspace ( p[-1] ) )
++		p--;
++	*p = '\0';
++
++	return s;
++}
++
++
++static bool myisattr ( char c )
++{
++	return
++		( c>='0' && c<='9' ) ||
++		( c>='a' && c<='z' ) ||
++		( c>='A' && c<='Z' ) ||
++		c=='_';
++}
++
++
++bool CSphSEQuery::ParseField ( char * sField )
++{
++	SPH_ENTER_METHOD();
++
++	// look for option name/value separator
++	char * sValue = strchr ( sField, '=' );
++	if ( !sValue || sValue==sField || sValue[-1]=='\\' )
++	{
++		// by default let's assume it's just query
++		if ( sField[0] )
++		{
++			if ( m_bQuery )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "search query already specified; '%s' is redundant", sField );
++				SPH_RET(false);
++			} else
++			{
++				m_sQuery = sField;
++				m_bQuery = true;
++
++				// unescape only 1st one
++				char *s = sField, *d = sField;
++				int iSlashes = 0;
++				while ( *s )
++				{
++					iSlashes = ( *s=='\\' ) ? iSlashes+1 : 0;
++					if ( ( iSlashes%2 )==0 ) *d++ = *s;
++					s++;
++				}
++				*d = '\0';
++			}
++		}
++		SPH_RET(true);
++	}
++
++	// split
++	*sValue++ = '\0';
++	sValue = chop ( sValue );
++	int iValue = atoi ( sValue );
++
++	// handle options
++	char * sName = chop ( sField );
++
++	if ( !strcmp ( sName, "query" ) )			m_sQuery = sValue;
++	else if ( !strcmp ( sName, "host" ) )		m_sHost = sValue;
++	else if ( !strcmp ( sName, "port" ) )		m_iPort = iValue;
++	else if ( !strcmp ( sName, "index" ) )		m_sIndex = sValue;
++	else if ( !strcmp ( sName, "offset" ) )		m_iOffset = iValue;
++	else if ( !strcmp ( sName, "limit" ) )		m_iLimit = iValue;
++	else if ( !strcmp ( sName, "weights" ) )	m_iWeights = ParseArray<uint32> ( &m_pWeights, sValue );
++	else if ( !strcmp ( sName, "minid" ) )		m_iMinID = iValue;
++	else if ( !strcmp ( sName, "maxid" ) )		m_iMaxID = iValue;
++	else if ( !strcmp ( sName, "maxmatches" ) )	m_iMaxMatches = iValue;
++	else if ( !strcmp ( sName, "maxquerytime" ) )	m_iMaxQueryTime = iValue;
++	else if ( !strcmp ( sName, "groupsort" ) )	m_sGroupSortBy = sValue;
++	else if ( !strcmp ( sName, "distinct" ) )	m_sGroupDistinct = sValue;
++	else if ( !strcmp ( sName, "cutoff" ) )		m_iCutoff = iValue;
++	else if ( !strcmp ( sName, "comment" ) )	m_sComment = sValue;
++	else if ( !strcmp ( sName, "select" ) )		m_sSelect = sValue;
++
++	else if ( !strcmp ( sName, "mode" ) )
++	{
++		m_eMode = SPH_MATCH_ALL;
++		if ( !strcmp ( sValue, "any" ) )			m_eMode = SPH_MATCH_ANY;
++		else if ( !strcmp ( sValue, "phrase" ) )	m_eMode = SPH_MATCH_PHRASE;
++		else if ( !strcmp ( sValue, "boolean" ) )	m_eMode = SPH_MATCH_BOOLEAN;
++		else if ( !strcmp ( sValue, "ext" ) )		m_eMode = SPH_MATCH_EXTENDED;
++		else if ( !strcmp ( sValue, "extended" ) )	m_eMode = SPH_MATCH_EXTENDED;
++		else if ( !strcmp ( sValue, "ext2" ) )		m_eMode = SPH_MATCH_EXTENDED2;
++		else if ( !strcmp ( sValue, "extended2" ) )	m_eMode = SPH_MATCH_EXTENDED2;
++		else if ( !strcmp ( sValue, "all" ) )		m_eMode = SPH_MATCH_ALL;
++		else if ( !strcmp ( sValue, "fullscan" ) )	m_eMode = SPH_MATCH_FULLSCAN;
++		else
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "unknown matching mode '%s'", sValue );
++			SPH_RET(false);
++		}
++	} else if ( !strcmp ( sName, "ranker" ) )
++	{
++		m_eRanker = SPH_RANK_PROXIMITY_BM25;
++		if ( !strcmp ( sValue, "proximity_bm25" ) )	m_eRanker = SPH_RANK_PROXIMITY_BM25;
++		else if ( !strcmp ( sValue, "bm25" ) )		m_eRanker = SPH_RANK_BM25;
++		else if ( !strcmp ( sValue, "none" ) )		m_eRanker = SPH_RANK_NONE;
++		else if ( !strcmp ( sValue, "wordcount" ) )	m_eRanker = SPH_RANK_WORDCOUNT;
++		else if ( !strcmp ( sValue, "proximity" ) )	m_eRanker = SPH_RANK_PROXIMITY;
++		else if ( !strcmp ( sValue, "matchany" ) )	m_eRanker = SPH_RANK_MATCHANY;
++		else if ( !strcmp ( sValue, "fieldmask" ) )	m_eRanker = SPH_RANK_FIELDMASK;
++		else if ( !strcmp ( sValue, "sph04" ) )		m_eRanker = SPH_RANK_SPH04;
++		else
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "unknown ranking mode '%s'", sValue );
++			SPH_RET(false);
++		}
++	} else if ( !strcmp ( sName, "sort" ) )
++	{
++		static const struct
++		{
++			const char *	m_sName;
++			ESphSortOrder	m_eSort;
++		} dSortModes[] =
++		{
++			{ "relevance",		SPH_SORT_RELEVANCE },
++			{ "attr_desc:",		SPH_SORT_ATTR_DESC },
++			{ "attr_asc:",		SPH_SORT_ATTR_ASC },
++			{ "time_segments:",	SPH_SORT_TIME_SEGMENTS },
++			{ "extended:",		SPH_SORT_EXTENDED },
++			{ "expr:",			SPH_SORT_EXPR }
++		};
++
++		int i;
++		const int nModes = sizeof(dSortModes)/sizeof(dSortModes[0]);
++		for ( i=0; i<nModes; i++ )
++			if ( !strncmp ( sValue, dSortModes[i].m_sName, strlen ( dSortModes[i].m_sName ) ) )
++		{
++			m_eSort = dSortModes[i].m_eSort;
++			m_sSortBy = sValue + strlen ( dSortModes[i].m_sName );
++			break;
++		}
++		if ( i==nModes )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "unknown sorting mode '%s'", sValue );
++			SPH_RET(false);
++		}
++
++	} else if ( !strcmp ( sName, "groupby" ) )
++	{
++		static const struct
++		{
++			const char *	m_sName;
++			ESphGroupBy		m_eFunc;
++		} dGroupModes[] =
++		{
++			{ "day:",	SPH_GROUPBY_DAY },
++			{ "week:",	SPH_GROUPBY_WEEK },
++			{ "month:",	SPH_GROUPBY_MONTH },
++			{ "year:",	SPH_GROUPBY_YEAR },
++			{ "attr:",	SPH_GROUPBY_ATTR },
++		};
++
++		int i;
++		const int nModes = sizeof(dGroupModes)/sizeof(dGroupModes[0]);
++		for ( i=0; i<nModes; i++ )
++			if ( !strncmp ( sValue, dGroupModes[i].m_sName, strlen ( dGroupModes[i].m_sName ) ) )
++		{
++			m_eGroupFunc = dGroupModes[i].m_eFunc;
++			m_sGroupBy = sValue + strlen ( dGroupModes[i].m_sName );
++			break;
++		}
++		if ( i==nModes )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "unknown groupby mode '%s'", sValue );
++			SPH_RET(false);
++		}
++
++	} else if ( m_iFilters<SPHINXSE_MAX_FILTERS &&
++		( !strcmp ( sName, "range" ) || !strcmp ( sName, "!range" ) || !strcmp ( sName, "floatrange" ) || !strcmp ( sName, "!floatrange" ) ) )
++	{
++		for ( ;; )
++		{
++			char * p = sName;
++			CSphSEFilter & tFilter = m_dFilters [ m_iFilters ];
++			tFilter.m_bExclude = ( *p=='!' ); if ( tFilter.m_bExclude ) p++;
++			tFilter.m_eType = ( *p=='f' ) ? SPH_FILTER_FLOATRANGE : SPH_FILTER_RANGE;
++
++			if (!( p = strchr ( sValue, ',' ) ))
++				break;
++			*p++ = '\0';
++
++			tFilter.m_sAttrName = chop ( sValue );
++			sValue = p;
++
++			if (!( p = strchr ( sValue, ',' ) ))
++				break;
++			*p++ = '\0';
++
++			if ( tFilter.m_eType==SPH_FILTER_RANGE )
++			{
++				tFilter.m_uMinValue = strtoll ( sValue, NULL, 0 );
++				tFilter.m_uMaxValue = strtoll ( p, NULL, 0 );
++			} else
++			{
++				tFilter.m_fMinValue = (float)atof(sValue);
++				tFilter.m_fMaxValue = (float)atof(p);
++			}
++
++			// all ok
++			m_iFilters++;
++			break;
++		}
++
++	} else if ( m_iFilters<SPHINXSE_MAX_FILTERS &&
++		( !strcmp ( sName, "filter" ) || !strcmp ( sName, "!filter" ) ) )
++	{
++		for ( ;; )
++		{
++			CSphSEFilter & tFilter = m_dFilters [ m_iFilters ];
++			tFilter.m_eType = SPH_FILTER_VALUES;
++			tFilter.m_bExclude = ( strcmp ( sName, "!filter" )==0 );
++
++			// get the attr name
++			while ( (*sValue) && !myisattr(*sValue) )
++				sValue++;
++			if ( !*sValue )
++				break;
++
++			tFilter.m_sAttrName = sValue;
++			while ( (*sValue) && myisattr(*sValue) )
++				sValue++;
++			if ( !*sValue )
++				break;
++			*sValue++ = '\0';
++
++			// get the values
++			tFilter.m_iValues = ParseArray<longlong> ( &tFilter.m_pValues, sValue );
++			if ( !tFilter.m_iValues )
++			{
++				assert ( !tFilter.m_pValues );
++				break;
++			}
++
++			// all ok
++			m_iFilters++;
++			break;
++		}
++
++	} else if ( !strcmp ( sName, "indexweights" ) || !strcmp ( sName, "fieldweights" ) )
++	{
++		bool bIndex = !strcmp ( sName, "indexweights" );
++		int * pCount = bIndex ? &m_iIndexWeights : &m_iFieldWeights;
++		char ** pNames = bIndex ? &m_sIndexWeight[0] : &m_sFieldWeight[0];
++		int * pWeights = bIndex ? &m_iIndexWeight[0] : &m_iFieldWeight[0];
++
++		*pCount = 0;
++
++		char * p = sValue;
++		while ( *p && *pCount<SPHINXSE_MAX_FILTERS )
++		{
++			// extract attr name
++			if ( !myisattr(*p) )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "%s: index name expected near '%s'", sName, p );
++				SPH_RET(false);
++			}
++
++			pNames[*pCount] = p;
++			while ( myisattr(*p) ) p++;
++
++			if ( *p!=',' )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "%s: comma expected near '%s'", sName, p );
++				SPH_RET(false);
++			}
++			*p++ = '\0';
++
++			// extract attr value
++			char * sVal = p;
++			while ( isdigit(*p) ) p++;
++			if ( p==sVal )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "%s: integer weight expected near '%s'", sName, sVal );
++				SPH_RET(false);
++			}
++			pWeights[*pCount] = atoi(sVal);
++			(*pCount)++;
++
++			if ( !*p )
++				break;
++			if ( *p!=',' )
++			{
++				snprintf ( m_sParseError, sizeof(m_sParseError), "%s: comma expected near '%s'", sName, p );
++				SPH_RET(false);
++			}
++			p++;
++		}
++
++	} else if ( !strcmp ( sName, "geoanchor" ) )
++	{
++		m_bGeoAnchor = false;
++		for ( ;; )
++		{
++			char * sLat = sValue;
++			char * p = sValue;
++
++			if (!( p = strchr ( p, ',' ) )) break; *p++ = '\0';
++			char * sLong = p;
++
++			if (!( p = strchr ( p, ',' ) )) break; *p++ = '\0';
++			char * sLatVal = p;
++
++			if (!( p = strchr ( p, ',' ) )) break; *p++ = '\0';
++			char * sLongVal = p;
++
++			m_sGeoLatAttr = chop(sLat);
++			m_sGeoLongAttr = chop(sLong);
++			m_fGeoLatitude = (float)atof ( sLatVal );
++			m_fGeoLongitude = (float)atof ( sLongVal );
++			m_bGeoAnchor = true;
++			break;
++		}
++		if ( !m_bGeoAnchor )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "geoanchor: parse error, not enough comma-separated arguments" );
++			SPH_RET(false);
++		}
++	} else if ( !strcmp ( sName, "override" ) ) // name,type,id:value,id:value,...
++	{
++		char * sName = NULL;
++		int iType = 0;
++		CSphSEQuery::Override_t * pOverride = NULL;
++
++		// get name and type
++		char * sRest = sValue;
++		for ( ;; )
++		{
++			sName = sRest;
++			if ( !*sName )
++				break;
++			if (!( sRest = strchr ( sRest, ',' ) ))
++				break;
++			*sRest++ = '\0';
++			char * sType = sRest;
++			if (!( sRest = strchr ( sRest, ',' ) ))
++				break;
++
++			static const struct
++			{
++				const char *	m_sName;
++				int				m_iType;
++			}
++			dAttrTypes[] =
++			{
++				{ "int",		SPH_ATTR_INTEGER },
++				{ "timestamp",	SPH_ATTR_TIMESTAMP },
++				{ "bool",		SPH_ATTR_BOOL },
++				{ "float",		SPH_ATTR_FLOAT },
++				{ "bigint",		SPH_ATTR_BIGINT }
++			};
++			for ( int i=0; i<sizeof(dAttrTypes)/sizeof(*dAttrTypes); i++ )
++				if ( !strncmp ( sType, dAttrTypes[i].m_sName, sRest - sType ) )
++			{
++				iType = dAttrTypes[i].m_iType;
++				break;
++			}
++			break;
++		}
++
++		// fail
++		if ( !sName || !*sName || !iType )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "override: malformed query" );
++			SPH_RET(false);
++		}
++
++		// grab id:value pairs
++		sRest++;
++		while ( sRest )
++		{
++			char * sId = sRest;
++			if (!( sRest = strchr ( sRest, ':' ) )) break; *sRest++ = '\0';
++			if (!( sRest - sId )) break;
++
++			char * sValue = sRest;
++			if ( ( sRest = strchr ( sRest, ',' ) )!=NULL )
++				*sRest++ = '\0';
++			if ( !*sValue )
++				break;
++
++			if ( !pOverride )
++			{
++				pOverride = new CSphSEQuery::Override_t;
++				pOverride->m_sName = chop(sName);
++				pOverride->m_iType = iType;
++				m_dOverrides.append ( pOverride );
++			}
++
++			ulonglong uId = strtoull ( sId, NULL, 10 );
++			CSphSEQuery::Override_t::Value_t tValue;
++			if ( iType==SPH_ATTR_FLOAT )
++				tValue.m_fValue = (float)atof(sValue);
++			else if ( iType==SPH_ATTR_BIGINT )
++				tValue.m_iValue64 = strtoll ( sValue, NULL, 10 );
++			else
++				tValue.m_uValue = (uint32)strtoul ( sValue, NULL, 10 );
++
++			pOverride->m_dIds.append ( uId );
++			pOverride->m_dValues.append ( tValue );
++		}
++
++		if ( !pOverride )
++		{
++			snprintf ( m_sParseError, sizeof(m_sParseError), "override: id:value mapping expected" );
++			SPH_RET(false);
++		}
++		SPH_RET(true);
++	} else
++	{
++		snprintf ( m_sParseError, sizeof(m_sParseError), "unknown parameter '%s'", sName );
++		SPH_RET(false);
++	}
++
++	// !COMMIT handle syntax errors
++
++	SPH_RET(true);
++}
++
++
++bool CSphSEQuery::Parse ()
++{
++	SPH_ENTER_METHOD();
++	SPH_DEBUG ( "query [[ %s ]]", m_sQueryBuffer );
++
++	m_bQuery = false;
++	char * pCur = m_sQueryBuffer;
++	char * pNext = pCur;
++
++	while ( ( pNext = strchr ( pNext, ';' ) )!=NULL )
++	{
++		// handle escaped semicolons
++		if ( pNext>m_sQueryBuffer && pNext[-1]=='\\' && pNext[1]!='\0' )
++		{
++			pNext++;
++			continue;
++		}
++
++		// handle semicolon-separated clauses
++		*pNext++ = '\0';
++		if ( !ParseField ( pCur ) )
++			SPH_RET(false);
++		pCur = pNext;
++	}
++
++	SPH_DEBUG ( "q [[ %s ]]", m_sQuery );
++
++	SPH_RET(true);
++}
++
++
++void CSphSEQuery::SendBytes ( const void * pBytes, int iBytes )
++{
++	SPH_ENTER_METHOD();
++	if ( m_iBufLeft<iBytes )
++	{
++		m_bBufOverrun = true;
++		SPH_VOID_RET();
++	}
++
++	memcpy ( m_pCur, pBytes, iBytes );
++
++	m_pCur += iBytes;
++	m_iBufLeft -= iBytes;
++	SPH_VOID_RET();
++}
++
++
++int CSphSEQuery::BuildRequest ( char ** ppBuffer )
++{
++	SPH_ENTER_METHOD();
++
++	// calc request length
++	int iReqSize = 124 + 4*m_iWeights
++		+ strlen ( m_sSortBy )
++		+ strlen ( m_sQuery )
++		+ strlen ( m_sIndex )
++		+ strlen ( m_sGroupBy )
++		+ strlen ( m_sGroupSortBy )
++		+ strlen ( m_sGroupDistinct )
++		+ strlen ( m_sComment )
++		+ strlen ( m_sSelect );
++	for ( int i=0; i<m_iFilters; i++ )
++	{
++		const CSphSEFilter & tFilter = m_dFilters[i];
++		iReqSize += 12 + strlen ( tFilter.m_sAttrName ); // string attr-name; int type; int exclude-flag
++		switch ( tFilter.m_eType )
++		{
++			case SPH_FILTER_VALUES:		iReqSize += 4 + 8*tFilter.m_iValues; break;
++			case SPH_FILTER_RANGE:		iReqSize += 16; break;
++			case SPH_FILTER_FLOATRANGE:	iReqSize += 8; break;
++		}
++	}
++	if ( m_bGeoAnchor ) // 1.14+
++		iReqSize += 16 + strlen ( m_sGeoLatAttr ) + strlen ( m_sGeoLongAttr );
++	for ( int i=0; i<m_iIndexWeights; i++ ) // 1.15+
++		iReqSize += 8 + strlen(m_sIndexWeight[i] );
++	for ( int i=0; i<m_iFieldWeights; i++ ) // 1.18+
++		iReqSize += 8 + strlen(m_sFieldWeight[i] );
++	// overrides
++	iReqSize += 4;
++	for ( int i=0; i<m_dOverrides.elements(); i++ )
++	{
++		CSphSEQuery::Override_t * pOverride = m_dOverrides.at(i);
++		const uint32 uSize = pOverride->m_iType==SPH_ATTR_BIGINT ? 16 : 12; // id64 + value
++		iReqSize += strlen ( pOverride->m_sName ) + 12 + uSize*pOverride->m_dIds.elements();
++	}
++	// select
++	iReqSize += 4;
++
++	m_iBufLeft = 0;
++	SafeDeleteArray ( m_pBuf );
++
++	m_pBuf = new char [ iReqSize ];
++	if ( !m_pBuf )
++		SPH_RET(-1);
++
++	m_pCur = m_pBuf;
++	m_iBufLeft = iReqSize;
++	m_bBufOverrun = false;
++	(*ppBuffer) = m_pBuf;
++
++	// build request
++	SendWord ( SEARCHD_COMMAND_SEARCH ); // command id
++	SendWord ( VER_COMMAND_SEARCH ); // command version
++	SendInt ( iReqSize-8 ); // packet body length
++
++	SendInt ( 1 ); // number of queries
++	SendInt ( m_iOffset );
++	SendInt ( m_iLimit );
++	SendInt ( m_eMode );
++	SendInt ( m_eRanker ); // 1.16+
++	SendInt ( m_eSort );
++	SendString ( m_sSortBy ); // sort attr
++	SendString ( m_sQuery ); // query
++	SendInt ( m_iWeights );
++	for ( int j=0; j<m_iWeights; j++ )
++		SendInt ( m_pWeights[j] ); // weights
++	SendString ( m_sIndex ); // indexes
++	SendInt ( 1 ); // id64 range follows
++	SendUint64 ( m_iMinID ); // id/ts ranges
++	SendUint64 ( m_iMaxID );
++
++	SendInt ( m_iFilters );
++	for ( int j=0; j<m_iFilters; j++ )
++	{
++		const CSphSEFilter & tFilter = m_dFilters[j];
++		SendString ( tFilter.m_sAttrName );
++		SendInt ( tFilter.m_eType );
++
++		switch ( tFilter.m_eType )
++		{
++			case SPH_FILTER_VALUES:
++				SendInt ( tFilter.m_iValues );
++				for ( int k=0; k<tFilter.m_iValues; k++ )
++					SendUint64 ( tFilter.m_pValues[k] );
++				break;
++
++			case SPH_FILTER_RANGE:
++				SendUint64 ( tFilter.m_uMinValue );
++				SendUint64 ( tFilter.m_uMaxValue );
++				break;
++
++			case SPH_FILTER_FLOATRANGE:
++				SendFloat ( tFilter.m_fMinValue );
++				SendFloat ( tFilter.m_fMaxValue );
++				break;
++		}
++
++		SendInt ( tFilter.m_bExclude );
++	}
++
++	SendInt ( m_eGroupFunc );
++	SendString ( m_sGroupBy );
++	SendInt ( m_iMaxMatches );
++	SendString ( m_sGroupSortBy );
++	SendInt ( m_iCutoff ); // 1.9+
++	SendInt ( m_iRetryCount ); // 1.10+
++	SendInt ( m_iRetryDelay );
++	SendString ( m_sGroupDistinct ); // 1.11+
++	SendInt ( m_bGeoAnchor ); // 1.14+
++	if ( m_bGeoAnchor )
++	{
++		SendString ( m_sGeoLatAttr );
++		SendString ( m_sGeoLongAttr );
++		SendFloat ( m_fGeoLatitude );
++		SendFloat ( m_fGeoLongitude );
++	}
++	SendInt ( m_iIndexWeights ); // 1.15+
++	for ( int i=0; i<m_iIndexWeights; i++ )
++	{
++		SendString ( m_sIndexWeight[i] );
++		SendInt ( m_iIndexWeight[i] );
++	}
++	SendInt ( m_iMaxQueryTime ); // 1.17+
++	SendInt ( m_iFieldWeights ); // 1.18+
++	for ( int i=0; i<m_iFieldWeights; i++ )
++	{
++		SendString ( m_sFieldWeight[i] );
++		SendInt ( m_iFieldWeight[i] );
++	}
++	SendString ( m_sComment );
++
++	// overrides
++	SendInt ( m_dOverrides.elements() );
++	for ( int i=0; i<m_dOverrides.elements(); i++ )
++	{
++		CSphSEQuery::Override_t * pOverride = m_dOverrides.at(i);
++		SendString ( pOverride->m_sName );
++		SendDword ( pOverride->m_iType );
++		SendInt ( pOverride->m_dIds.elements() );
++		for ( int j=0; j<pOverride->m_dIds.elements(); j++ )
++		{
++			SendUint64 ( pOverride->m_dIds.at(j) );
++			if ( pOverride->m_iType==SPH_ATTR_FLOAT )
++				SendFloat ( pOverride->m_dValues.at(j).m_fValue );
++			else if ( pOverride->m_iType==SPH_ATTR_BIGINT )
++				SendUint64 ( pOverride->m_dValues.at(j).m_iValue64 );
++			else
++				SendDword ( pOverride->m_dValues.at(j).m_uValue );
++		}
++	}
++
++	// select
++	SendString ( m_sSelect );
++
++	// detect buffer overruns and underruns, and report internal error
++	if ( m_bBufOverrun || m_iBufLeft!=0 || m_pCur-m_pBuf!=iReqSize )
++		SPH_RET(-1);
++
++	// all fine
++	SPH_RET ( iReqSize );
++}
++
++//////////////////////////////////////////////////////////////////////////////
++// SPHINX HANDLER
++//////////////////////////////////////////////////////////////////////////////
++
++static const char * ha_sphinx_exts[] = { NullS };
++
++
++#if MYSQL_VERSION_ID<50100
++ha_sphinx::ha_sphinx ( TABLE_ARG * table )
++	: handler ( &sphinx_hton, table )
++#else
++ha_sphinx::ha_sphinx ( handlerton * hton, TABLE_ARG * table )
++	: handler ( hton, table )
++#endif
++	, m_pShare ( NULL )
++	, m_iMatchesTotal ( 0 )
++	, m_iCurrentPos ( 0 )
++	, m_pCurrentKey ( NULL )
++	, m_iCurrentKeyLen ( 0 )
++	, m_pResponse ( NULL )
++	, m_pResponseEnd ( NULL )
++	, m_pCur ( NULL )
++	, m_bUnpackError ( false )
++	, m_iFields ( 0 )
++	, m_dFields ( NULL )
++	, m_iAttrs ( 0 )
++	, m_dAttrs ( NULL )
++	, m_bId64 ( 0 )
++	, m_dUnboundFields ( NULL )
++{
++	SPH_ENTER_METHOD();
++	if ( current_thd )
++		current_thd->variables.engine_condition_pushdown = true;
++	SPH_VOID_RET();
++}
++
++
++// If frm_error() is called then we will use this to find out what file extensions
++// exist for the storage engine. This is also used by the default rename_table and
++// delete_table method in handler.cc.
++const char ** ha_sphinx::bas_ext() const
++{
++	return ha_sphinx_exts;
++}
++
++
++// Used for opening tables. The name will be the name of the file.
++// A table is opened when it needs to be opened. For instance
++// when a request comes in for a select on the table (tables are not
++// open and closed for each request, they are cached).
++//
++// Called from handler.cc by handler::ha_open(). The server opens all tables by
++// calling ha_open() which then calls the handler specific open().
++int ha_sphinx::open ( const char * name, int, uint )
++{
++	SPH_ENTER_METHOD();
++	m_pShare = get_share ( name, table );
++	if ( !m_pShare )
++		SPH_RET(1);
++
++	thr_lock_data_init ( &m_pShare->m_tLock, &m_tLock, NULL );
++
++	#if MYSQL_VERSION_ID>50100
++	*thd_ha_data ( table->in_use, ht ) = NULL;
++	#else
++	table->in_use->ha_data [ sphinx_hton.slot ] = NULL;
++	#endif
++
++	SPH_RET(0);
++}
++
++
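++// Low-level connect helper. Opens a TCP connection to sHost:uPort or, when uPort is 0,
++// a local UNIX socket at path sHost (not available on Windows). Returns the socket fd, or -1 on error.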
++int ha_sphinx::Connect ( const char * sHost, ushort uPort )
++{
++	struct sockaddr_in sin;
++#ifndef __WIN__
++	struct sockaddr_un saun;
++#endif
++
++	int iDomain = 0;
++	int iSockaddrSize = 0;
++	struct sockaddr * pSockaddr = NULL;
++
++	in_addr_t ip_addr;
++
++	if ( uPort )
++	{
++		iDomain = AF_INET;
++		iSockaddrSize = sizeof(sin);
++		pSockaddr = (struct sockaddr *) &sin;
++
++		memset ( &sin, 0, sizeof(sin) );
++		sin.sin_family = AF_INET;
++		sin.sin_port = htons(uPort);
++
++		// prepare host address
++		if ( (int)( ip_addr = inet_addr(sHost) )!=(int)INADDR_NONE )
++		{
++			memcpy ( &sin.sin_addr, &ip_addr, sizeof(ip_addr) );
++		} else
++		{
++			int tmp_errno;
++			struct hostent tmp_hostent, *hp;
++			char buff2 [ GETHOSTBYNAME_BUFF_SIZE ];
++
++			hp = my_gethostbyname_r ( sHost, &tmp_hostent,
++				buff2, sizeof(buff2), &tmp_errno );
++			if ( !hp )
++			{
++				my_gethostbyname_r_free();
++
++				char sError[256];
++				my_snprintf ( sError, sizeof(sError), "failed to resolve searchd host (name=%s)", sHost );
++
++				my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++				SPH_RET(-1);
++			}
++
++			memcpy ( &sin.sin_addr, hp->h_addr,
++				Min ( sizeof(sin.sin_addr), (size_t)hp->h_length ) );
++			my_gethostbyname_r_free();
++		}
++	} else
++	{
++#ifndef __WIN__
++		iDomain = AF_UNIX;
++		iSockaddrSize = sizeof(saun);
++		pSockaddr = (struct sockaddr *) &saun;
++
++		memset ( &saun, 0, sizeof(saun) );
++		saun.sun_family = AF_UNIX;
++		strncpy ( saun.sun_path, sHost, sizeof(saun.sun_path)-1 );
++#else
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), "UNIX sockets are not supported on Windows" );
++		SPH_RET(-1);
++#endif
++	}
++
++	char sError[512];
++	int iSocket = socket ( iDomain, SOCK_STREAM, 0 );
++
++	if ( iSocket<0 )
++	{
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), "failed to create client socket" );
++		SPH_RET(-1);
++	}
++
++	if ( connect ( iSocket, pSockaddr, iSockaddrSize )<0 )
++	{
++		sphSockClose ( iSocket );
++		my_snprintf ( sError, sizeof(sError), "failed to connect to searchd (host=%s, errno=%d, port=%d)",
++			sHost, errno, (int)uPort );
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET(-1);
++	}
++
++	return iSocket;
++}
++
++
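++// Connect to searchd (per-query host/port override the table settings) and perform the
++// SphinxAPI handshake: read the server protocol version, then send ours. Returns the socket fd, or -1 on error.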
++int ha_sphinx::ConnectAPI ( const char * sQueryHost, int iQueryPort )
++{
++	SPH_ENTER_METHOD();
++
++	const char * sHost = ( sQueryHost && *sQueryHost ) ? sQueryHost : m_pShare->m_sHost;
++	ushort uPort = iQueryPort ? (ushort)iQueryPort : m_pShare->m_iPort;
++
++	int iSocket = Connect ( sHost, uPort );
++	if ( iSocket<0 )
++		SPH_RET ( iSocket );
++
++	char sError[512];
++
++	int version;
++	if ( ::recv ( iSocket, (char *)&version, sizeof(version), 0 )!=sizeof(version) )
++	{
++		sphSockClose ( iSocket );
++		my_snprintf ( sError, sizeof(sError), "failed to receive searchd version (host=%s, port=%d)",
++			sHost, (int)uPort );
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET(-1);
++	}
++
++	uint uClientVersion = htonl ( SPHINX_SEARCHD_PROTO );
++	if ( ::send ( iSocket, (char*)&uClientVersion, sizeof(uClientVersion), 0 )!=sizeof(uClientVersion) )
++	{
++		sphSockClose ( iSocket );
++		my_snprintf ( sError, sizeof(sError), "failed to send client version (host=%s, port=%d)",
++			sHost, (int)uPort );
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET(-1);
++	}
++
++	SPH_RET ( iSocket );
++}
++
++
++// Closes a table. We call the free_share() function to free any resources
++// that we have allocated in the "shared" structure.
++//
++// Called from sql_base.cc, sql_select.cc, and table.cc.
++// In sql_select.cc it is only used to close up temporary tables or during
++// the process where a temporary table is converted over to being a
++// myisam table.
++// For sql_base.cc look at close_data_tables().
++int ha_sphinx::close()
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( free_share ( m_pShare ) );
++}
++
++
++int ha_sphinx::HandleMysqlError ( MYSQL * pConn, int iErrCode )
++{
++	CSphSEThreadData * pTls = GetTls ();
++	if ( pTls )
++	{
++		strncpy ( pTls->m_tStats.m_sLastMessage, mysql_error ( pConn ), sizeof ( pTls->m_tStats.m_sLastMessage ) );
++		pTls->m_tStats.m_bLastError = true;
++	}
++
++	mysql_close ( pConn );
++
++	my_error ( iErrCode, MYF(0), pTls ? pTls->m_tStats.m_sLastMessage : "MySQL protocol error" );
++	return -1;
++}
++
++
++int ha_sphinx::extra ( enum ha_extra_function op )
++{
++	CSphSEThreadData * pTls = GetTls();
++	if ( pTls )
++	{
++		if ( op==HA_EXTRA_WRITE_CAN_REPLACE )
++			pTls->m_bReplace = true;
++		else if ( op==HA_EXTRA_WRITE_CANNOT_REPLACE )
++			pTls->m_bReplace = false;
++	}
++	return 0;
++}
++
++
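++// SphinxQL tables only: build an INSERT (or REPLACE, depending on the flag set by extra())
++// statement from the row values and send it to searchd over the MySQL protocol.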
++int ha_sphinx::write_row ( byte * )
++{
++	SPH_ENTER_METHOD();
++	if ( !m_pShare || !m_pShare->m_bSphinxQL )
++		SPH_RET ( HA_ERR_WRONG_COMMAND );
++
++	// SphinxQL inserts only, pretty much similar to abandoned federated
++	char sQueryBuf[1024];
++	char sValueBuf[1024];
++
++	String sQuery ( sQueryBuf, sizeof(sQueryBuf), &my_charset_bin );
++	String sValue ( sValueBuf, sizeof(sValueBuf), &my_charset_bin );
++	sQuery.length ( 0 );
++	sValue.length ( 0 );
++
++	CSphSEThreadData * pTls = GetTls ();
++	sQuery.append ( pTls && pTls->m_bReplace ? "REPLACE INTO " : "INSERT INTO " );
++	sQuery.append ( m_pShare->m_sIndex );
++	sQuery.append ( " (" );
++
++	for ( Field ** ppField = table->field; *ppField; ppField++ )
++	{
++		sQuery.append ( (*ppField)->field_name );
++		if ( ppField[1] )
++			sQuery.append ( ", " );
++	}
++	sQuery.append ( ") VALUES (" );
++
++	for ( Field ** ppField = table->field; *ppField; ppField++ )
++	{
++		if ( (*ppField)->is_null() )
++		{
++			sQuery.append ( "''" );
++
++		} else
++		{
++			if ( (*ppField)->type()==MYSQL_TYPE_TIMESTAMP )
++			{
++				Item_field * pWrap = new Item_field ( *ppField ); // autofreed by query arena, I assume
++				Item_func_unix_timestamp * pConv = new Item_func_unix_timestamp ( pWrap );
++				pConv->quick_fix_field();
++				unsigned int uTs = (unsigned int) pConv->val_int();
++
++				snprintf ( sValueBuf, sizeof(sValueBuf), "'%u'", uTs );
++				sQuery.append ( sValueBuf );
++
++			} else
++			{
++				(*ppField)->val_str ( &sValue );
++				sQuery.append ( "'" );
++				sValue.print ( &sQuery );
++				sQuery.append ( "'" );
++				sValue.length(0);
++			}
++		}
++
++		if ( ppField[1] )
++			sQuery.append ( ", " );
++	}
++	sQuery.append ( ")" );
++
++	// FIXME? pretty inefficient to reconnect every time under high load,
++	// but this was intentionally written for a low load scenario..
++	MYSQL * pConn = mysql_init ( NULL );
++	if ( !pConn )
++		SPH_RET ( ER_OUT_OF_RESOURCES );
++
++	unsigned int uTimeout = 1;
++	mysql_options ( pConn, MYSQL_OPT_CONNECT_TIMEOUT, (const char*)&uTimeout );
++
++	if ( !mysql_real_connect ( pConn, m_pShare->m_sHost, "root", "", "", m_pShare->m_iPort, m_pShare->m_sSocket, 0 ) )
++		SPH_RET ( HandleMysqlError ( pConn, ER_CONNECT_TO_FOREIGN_DATA_SOURCE ) );
++
++	if ( mysql_real_query ( pConn, sQuery.ptr(), sQuery.length() ) )
++		SPH_RET ( HandleMysqlError ( pConn, ER_QUERY_ON_FOREIGN_DATA_SOURCE ) );
++
++	// all ok!
++	mysql_close ( pConn );
++	SPH_RET(0);
++}
++
++
++static inline bool IsIntegerFieldType ( enum_field_types eType )
++{
++	return eType==MYSQL_TYPE_LONG || eType==MYSQL_TYPE_LONGLONG;
++}
++
++
++static inline bool IsIDField ( Field * pField )
++{
++	enum_field_types eType = pField->type();
++
++	if ( eType==MYSQL_TYPE_LONGLONG )
++		return true;
++
++	if ( eType==MYSQL_TYPE_LONG && ((Field_num*)pField)->unsigned_flag )
++		return true;
++
++	return false;
++}
++
++
++int ha_sphinx::delete_row ( const byte * )
++{
++	SPH_ENTER_METHOD();
++	if ( !m_pShare || !m_pShare->m_bSphinxQL )
++		SPH_RET ( HA_ERR_WRONG_COMMAND );
++
++	char sQueryBuf[1024];
++	String sQuery ( sQueryBuf, sizeof(sQueryBuf), &my_charset_bin );
++	sQuery.length ( 0 );
++
++	sQuery.append ( "DELETE FROM " );
++	sQuery.append ( m_pShare->m_sIndex );
++	sQuery.append ( " WHERE id=" );
++
++	char sValue[32];
++	snprintf ( sValue, sizeof(sValue), "%lld", table->field[0]->val_int() );
++	sQuery.append ( sValue );
++
++	// FIXME? pretty inefficient to reconnect every time under high load,
++	// but this was intentionally written for a low load scenario..
++	MYSQL * pConn = mysql_init ( NULL );
++	if ( !pConn )
++		SPH_RET ( ER_OUT_OF_RESOURCES );
++
++	unsigned int uTimeout = 1;
++	mysql_options ( pConn, MYSQL_OPT_CONNECT_TIMEOUT, (const char*)&uTimeout );
++
++	if ( !mysql_real_connect ( pConn, m_pShare->m_sHost, "root", "", "", m_pShare->m_iPort, m_pShare->m_sSocket, 0 ) )
++		SPH_RET ( HandleMysqlError ( pConn, ER_CONNECT_TO_FOREIGN_DATA_SOURCE ) );
++
++	if ( mysql_real_query ( pConn, sQuery.ptr(), sQuery.length() ) )
++		SPH_RET ( HandleMysqlError ( pConn, ER_QUERY_ON_FOREIGN_DATA_SOURCE ) );
++
++	// all ok!
++	mysql_close ( pConn );
++	SPH_RET(0);
++}
++
++
++int ha_sphinx::update_row ( const byte *, byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++// keynr is key (index) number
++// sorted is 1 if result MUST be sorted according to index
++int ha_sphinx::index_init ( uint keynr, bool )
++{
++	SPH_ENTER_METHOD();
++	active_index = keynr;
++
++	CSphSEThreadData * pTls = GetTls();
++	if ( pTls )
++		pTls->m_bCondDone = false;
++
++	SPH_RET(0);
++}
++
++
++int ha_sphinx::index_end()
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++uint32 ha_sphinx::UnpackDword ()
++{
++	if ( m_pCur+sizeof(uint32)>m_pResponseEnd ) // NOLINT
++	{
++		m_pCur = m_pResponseEnd;
++		m_bUnpackError = true;
++		return 0;
++	}
++
++	uint32 uRes = ntohl ( sphUnalignedRead ( *(uint32*)m_pCur ) );
++	m_pCur += sizeof(uint32); // NOLINT
++	return uRes;
++}
++
++
++char * ha_sphinx::UnpackString ()
++{
++	uint32 iLen = UnpackDword ();
++	if ( !iLen )
++		return NULL;
++
++	if ( m_pCur+iLen>m_pResponseEnd )
++	{
++		m_pCur = m_pResponseEnd;
++		m_bUnpackError = true;
++		return NULL;
++	}
++
++	char * sRes = new char [ 1+iLen ];
++	memcpy ( sRes, m_pCur, iLen );
++	sRes[iLen] = '\0';
++	m_pCur += iLen;
++	return sRes;
++}
++
++
++static inline const char * FixNull ( const char * s )
++{
++	return s ? s : "(null)";
++}
++
++
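++// Parse the result header of the searchd response: status (plus any warning/error message),
++// field list, attribute list, total match count and the id64 flag; map attributes onto table
++// columns and record the unmatched columns in m_dUnboundFields.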
++bool ha_sphinx::UnpackSchema ()
++{
++	SPH_ENTER_METHOD();
++
++	// cleanup
++	if ( m_dFields )
++		for ( int i=0; i<(int)m_iFields; i++ )
++			SafeDeleteArray ( m_dFields[i] );
++	SafeDeleteArray ( m_dFields );
++
++	// unpack network packet
++	uint32 uStatus = UnpackDword ();
++	char * sMessage = NULL;
++
++	if ( uStatus!=SEARCHD_OK )
++	{
++		sMessage = UnpackString ();
++		CSphSEThreadData * pTls = GetTls ();
++		if ( pTls )
++		{
++			strncpy ( pTls->m_tStats.m_sLastMessage, sMessage, sizeof(pTls->m_tStats.m_sLastMessage) );
++			pTls->m_tStats.m_bLastError = ( uStatus==SEARCHD_ERROR );
++		}
++
++		if ( uStatus==SEARCHD_ERROR )
++		{
++			char sError[1024];
++			my_snprintf ( sError, sizeof(sError), "searchd error: %s", sMessage );
++			my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError );
++			SafeDeleteArray ( sMessage );
++			SPH_RET ( false );
++		}
++	}
++
++	m_iFields = UnpackDword ();
++	m_dFields = new char * [ m_iFields ];
++	if ( !m_dFields )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: UnpackSchema() failed (fields alloc error)" );
++		SPH_RET(false);
++	}
++
++	for ( uint32 i=0; i<m_iFields; i++ )
++		m_dFields[i] = UnpackString ();
++
++	SafeDeleteArray ( m_dAttrs );
++	m_iAttrs = UnpackDword ();
++	m_dAttrs = new CSphSEAttr [ m_iAttrs ];
++	if ( !m_dAttrs )
++	{
++		for ( int i=0; i<(int)m_iFields; i++ )
++			SafeDeleteArray ( m_dFields[i] );
++		SafeDeleteArray ( m_dFields );
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: UnpackSchema() failed (attrs alloc error)" );
++		SPH_RET(false);
++	}
++
++	for ( uint32 i=0; i<m_iAttrs; i++ )
++	{
++		m_dAttrs[i].m_sName = UnpackString ();
++		m_dAttrs[i].m_uType = UnpackDword ();
++		if ( m_bUnpackError ) // m_sName may be null
++			break;
++
++		m_dAttrs[i].m_iField = -1;
++		for ( int j=SPHINXSE_SYSTEM_COLUMNS; j<m_pShare->m_iTableFields; j++ )
++		{
++			const char * sTableField = m_pShare->m_sTableField[j];
++			const char * sAttrField = m_dAttrs[i].m_sName;
++			if ( m_dAttrs[i].m_sName[0]=='@' )
++			{
++				const char * sAtPrefix = "_sph_";
++				if ( strncmp ( sTableField, sAtPrefix, strlen(sAtPrefix) ) )
++					continue;
++				sTableField += strlen(sAtPrefix);
++				sAttrField++;
++			}
++
++			if ( !strcasecmp ( sAttrField, sTableField ) )
++			{
++				// we're almost good, but
++				// let's enforce that timestamp columns can only receive timestamp attributes
++				if ( m_pShare->m_eTableFieldType[j]!=MYSQL_TYPE_TIMESTAMP || m_dAttrs[i].m_uType==SPH_ATTR_TIMESTAMP )
++					m_dAttrs[i].m_iField = j;
++				break;
++			}
++		}
++	}
++
++	m_iMatchesTotal = UnpackDword ();
++
++	m_bId64 = UnpackDword ();
++	if ( m_bId64 && m_pShare->m_eTableFieldType[0]!=MYSQL_TYPE_LONGLONG )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: 1st column must be bigint to accept 64-bit DOCIDs" );
++		SPH_RET(false);
++	}
++
++	// network packet unpacked; build unbound fields map
++	SafeDeleteArray ( m_dUnboundFields );
++	m_dUnboundFields = new int [ m_pShare->m_iTableFields ];
++
++	for ( int i=0; i<m_pShare->m_iTableFields; i++ )
++	{
++		if ( i<SPHINXSE_SYSTEM_COLUMNS )
++			m_dUnboundFields[i] = SPH_ATTR_NONE;
++
++		else if ( m_pShare->m_eTableFieldType[i]==MYSQL_TYPE_TIMESTAMP )
++			m_dUnboundFields[i] = SPH_ATTR_TIMESTAMP;
++
++		else
++			m_dUnboundFields[i] = SPH_ATTR_INTEGER;
++	}
++
++	for ( uint32 i=0; i<m_iAttrs; i++ )
++		if ( m_dAttrs[i].m_iField>=0 )
++			m_dUnboundFields [ m_dAttrs[i].m_iField ] = SPH_ATTR_NONE;
++
++	if ( m_bUnpackError )
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: UnpackSchema() failed (unpack error)" );
++
++	SPH_RET ( !m_bUnpackError );
++}
++
++
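++// Skip over all matches in the response to reach the trailing stats block (total, found,
++// query time, per-word stats), fill pStats, then restore the read cursor.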
++bool ha_sphinx::UnpackStats ( CSphSEStats * pStats )
++{
++	assert ( pStats );
++
++	char * pCurSave = m_pCur;
++	for ( uint i=0; i<m_iMatchesTotal && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT
++	{
++		m_pCur += m_bId64 ? 12 : 8; // skip id+weight
++		for ( uint32 j=0; j<m_iAttrs && m_pCur<m_pResponseEnd-sizeof(uint32); j++ ) // NOLINT
++		{
++			if ( m_dAttrs[j].m_uType & SPH_ATTR_MULTI )
++			{
++				// skip MVA list
++				uint32 uCount = UnpackDword ();
++				m_pCur += uCount*4;
++			} else // skip normal value
++				m_pCur += m_dAttrs[j].m_uType==SPH_ATTR_BIGINT ? 8 : 4;
++		}
++	}
++
++	pStats->m_iMatchesTotal = UnpackDword ();
++	pStats->m_iMatchesFound = UnpackDword ();
++	pStats->m_iQueryMsec = UnpackDword ();
++	pStats->m_iWords = UnpackDword ();
++
++	if ( m_bUnpackError )
++		return false;
++
++	SafeDeleteArray ( pStats->m_dWords );
++	if ( pStats->m_iWords<0 || pStats->m_iWords>=SPHINXSE_MAX_KEYWORDSTATS )
++		return false;
++	pStats->m_dWords = new CSphSEWordStats [ pStats->m_iWords ];
++	if ( !pStats->m_dWords )
++		return false;
++
++	for ( int i=0; i<pStats->m_iWords; i++ )
++	{
++		CSphSEWordStats & tWord = pStats->m_dWords[i];
++		tWord.m_sWord = UnpackString ();
++		tWord.m_iDocs = UnpackDword ();
++		tWord.m_iHits = UnpackDword ();
++	}
++
++	if ( m_bUnpackError )
++		return false;
++
++	m_pCur = pCurSave;
++	return true;
++}
++
++
++/// condition pushdown implementation, to properly intercept WHERE clauses on my columns
++const COND * ha_sphinx::cond_push ( const COND * cond )
++{
++	// catch the simplest case: query_column="some text"
++	for ( ;; )
++	{
++		if ( cond->type()!=COND::FUNC_ITEM )
++			break;
++
++		Item_func * condf = (Item_func *)cond;
++		if ( condf->functype()!=Item_func::EQ_FUNC || condf->argument_count()!=2 )
++			break;
++
++		// get my tls
++		CSphSEThreadData * pTls = GetTls ();
++		if ( !pTls )
++			break;
++
++		Item ** args = condf->arguments();
++		if ( !m_pShare->m_bSphinxQL )
++		{
++			// on non-QL tables, intercept query=value condition for SELECT
++			if (!( args[0]->type()==COND::FIELD_ITEM && args[1]->type()==COND::STRING_ITEM ))
++				break;
++
++			Item_field * pField = (Item_field *) args[0];
++			if ( pField->field->field_index!=2 ) // FIXME! magic key index
++				break;
++
++			// copy the query, and let know that we intercepted this condition
++			Item_string * pString = (Item_string *) args[1];
++			pTls->m_bQuery = true;
++			strncpy ( pTls->m_sQuery, pString->str_value.c_ptr(), sizeof(pTls->m_sQuery) );
++			pTls->m_sQuery[sizeof(pTls->m_sQuery)-1] = '\0';
++			pTls->m_pQueryCharset = pString->str_value.charset();
++
++		} else
++		{
++			if (!( args[0]->type()==COND::FIELD_ITEM && args[1]->type()==COND::INT_ITEM ))
++				break;
++
++			// on QL tables, intercept id=value condition for DELETE
++			Item_field * pField = (Item_field *) args[0];
++			if ( pField->field->field_index!=0 ) // FIXME! magic key index
++				break;
++
++			Item_int * pVal = (Item_int *) args[1];
++			pTls->m_iCondId = pVal->val_int();
++			pTls->m_bCondId = true;
++		}
++
++		// we intercepted this condition
++		return NULL;
++	}
++
++	// don't change anything
++	return cond;
++}
++
++
++/// condition popup
++void ha_sphinx::cond_pop ()
++{
++	CSphSEThreadData * pTls = GetTls ();
++	if ( pTls )
++		pTls->m_bQuery = false;
++}
++
++
++/// get TLS (maybe allocate it, too)
++CSphSEThreadData * ha_sphinx::GetTls()
++{
++	// where do we store that pointer in today's version?
++	CSphSEThreadData ** ppTls;
++#if MYSQL_VERSION_ID>50100
++	ppTls = (CSphSEThreadData**) thd_ha_data ( table->in_use, ht );
++#else
++	ppTls = (CSphSEThreadData**) &current_thd->ha_data[sphinx_hton.slot];
++#endif // >50100
++
++	// allocate if needed
++	if ( !*ppTls )
++		*ppTls = new CSphSEThreadData ();
++
++	// errors will be handled by caller
++	return *ppTls;
++}
++
++
++// Positions an index cursor to the index specified in the handle. Fetches the
++// row if available. If the key value is null, begin at the first key of the
++// index.
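++// For SphinxAPI tables this is where the search actually runs: the query key (or pushed-down
++// condition) is parsed, a request is built and sent to searchd, and the response is unpacked.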
++int ha_sphinx::index_read ( byte * buf, const byte * key, uint key_len, enum ha_rkey_function )
++{
++	SPH_ENTER_METHOD();
++	char sError[256];
++
++	// set new data for thd->ha_data, it is used in show_status
++	CSphSEThreadData * pTls = GetTls();
++	if ( !pTls )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: TLS malloc() failed" );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++	pTls->m_tStats.Reset ();
++
++	// sphinxql table, just return the key once
++	if ( m_pShare->m_bSphinxQL )
++	{
++		// over and out
++		if ( pTls->m_bCondDone )
++			SPH_RET ( HA_ERR_END_OF_FILE );
++
++		// return a value from pushdown, if any
++		if ( pTls->m_bCondId )
++		{
++			table->field[0]->store ( pTls->m_iCondId, 1 );
++			pTls->m_bCondDone = true;
++			SPH_RET(0);
++		}
++
++		// return a value from key
++		longlong iRef = 0;
++		if ( key_len==4 )
++			iRef = uint4korr ( key );
++		else if ( key_len==8 )
++			iRef = uint8korr ( key );
++		else
++		{
++			my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: unexpected key length" );
++			SPH_RET ( HA_ERR_END_OF_FILE );
++		}
++
++		table->field[0]->store ( iRef, 1 );
++		pTls->m_bCondDone = true;
++		SPH_RET(0);
++	}
++
++	// parse query
++	if ( pTls->m_bQuery )
++	{
++		// we have a query from condition pushdown
++		m_pCurrentKey = (const byte *) pTls->m_sQuery;
++		m_iCurrentKeyLen = strlen(pTls->m_sQuery);
++	} else
++	{
++		// just use the key (might be truncated)
++		m_pCurrentKey = key+HA_KEY_BLOB_LENGTH;
++		m_iCurrentKeyLen = uint2korr(key); // or maybe key_len?
++		pTls->m_pQueryCharset = m_pShare ? m_pShare->m_pTableQueryCharset : NULL;
++	}
++
++	CSphSEQuery q ( (const char*)m_pCurrentKey, m_iCurrentKeyLen, m_pShare->m_sIndex );
++	if ( !q.Parse () )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), q.m_sParseError );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	// do connect
++	int iSocket = ConnectAPI ( q.m_sHost, q.m_iPort );
++	if ( iSocket<0 )
++		SPH_RET ( HA_ERR_END_OF_FILE );
++
++	// my buffer
++	char * pBuffer; // will be freed by CSphSEQuery dtor; do NOT free manually
++	int iReqLen = q.BuildRequest ( &pBuffer );
++
++	if ( iReqLen<=0 )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: q.BuildRequest() failed" );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	// send request
++	::send ( iSocket, pBuffer, iReqLen, 0 );
++
++	// receive reply
++	char sHeader[8];
++	int iGot = ::recv ( iSocket, sHeader, sizeof(sHeader), RECV_FLAGS );
++	if ( iGot!=sizeof(sHeader) )
++	{
++		sphSockClose ( iSocket ); // don't leak the socket on error
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "failed to receive response header (searchd went away?)" );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	short int uRespStatus = ntohs ( sphUnalignedRead ( *(short int*)( &sHeader[0] ) ) );
++	short int uRespVersion = ntohs ( sphUnalignedRead ( *(short int*)( &sHeader[2] ) ) );
++	uint uRespLength = ntohl ( sphUnalignedRead ( *(uint *)( &sHeader[4] ) ) );
++	SPH_DEBUG ( "got response header (status=%d version=%d length=%d)",
++		uRespStatus, uRespVersion, uRespLength );
++
++	SafeDeleteArray ( m_pResponse );
++	if ( uRespLength<=SPHINXSE_MAX_ALLOC )
++		m_pResponse = new char [ uRespLength+1 ];
++
++	if ( !m_pResponse )
++	{
++		my_snprintf ( sError, sizeof(sError), "bad searchd response length (length=%u)", uRespLength );
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	int iRecvLength = 0;
++	while ( iRecvLength<(int)uRespLength )
++	{
++		int iRecv = ::recv ( iSocket, m_pResponse+iRecvLength, uRespLength-iRecvLength, RECV_FLAGS );
++		if ( iRecv<0 )
++			break;
++		iRecvLength += iRecv;
++	}
++
++	::closesocket ( iSocket );
++	iSocket = -1;
++
++	if ( iRecvLength!=(int)uRespLength )
++	{
++		my_snprintf ( sError, sizeof(sError), "net read error (expected=%d, got=%d)", uRespLength, iRecvLength );
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	// we'll have a message, at least
++	pTls->m_bStats = true;
++
++	// parse reply
++	m_iCurrentPos = 0;
++	m_pCur = m_pResponse;
++	m_pResponseEnd = m_pResponse + uRespLength;
++	m_bUnpackError = false;
++
++	if ( uRespStatus!=SEARCHD_OK )
++	{
++		char * sMessage = UnpackString ();
++		if ( !sMessage )
++		{
++			my_snprintf ( sError, sizeof(sError), "no valid response from searchd (status=%d, resplen=%d)",
++				uRespStatus, uRespLength );
++			my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError );
++			SPH_RET ( HA_ERR_END_OF_FILE );
++		}
++
++		strncpy ( pTls->m_tStats.m_sLastMessage, sMessage, sizeof(pTls->m_tStats.m_sLastMessage) );
++		SafeDeleteArray ( sMessage );
++
++		if ( uRespStatus!=SEARCHD_WARNING )
++		{
++			my_snprintf ( sError, sizeof(sError), "searchd error: %s", pTls->m_tStats.m_sLastMessage );
++			my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError );
++
++			pTls->m_tStats.m_bLastError = true;
++			SPH_RET ( HA_ERR_END_OF_FILE );
++		}
++	}
++
++	if ( !UnpackSchema () )
++		SPH_RET ( HA_ERR_END_OF_FILE );
++
++	if ( !UnpackStats ( &pTls->m_tStats ) )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: UnpackStats() failed" );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	SPH_RET ( get_rec ( buf, key, key_len ) );
++}
++
++
++// Positions an index cursor to the index specified in key. Fetches the
++// row if any. This is only used to read whole keys.
++int ha_sphinx::index_read_idx ( byte *, uint, const byte *, uint, enum ha_rkey_function )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++// Used to read forward through the index.
++int ha_sphinx::index_next ( byte * buf )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( get_rec ( buf, m_pCurrentKey, m_iCurrentKeyLen ) );
++}
++
++
++int ha_sphinx::index_next_same ( byte * buf, const byte * key, uint keylen )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( get_rec ( buf, key, keylen ) );
++}
++
++
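++// Unpack the next match (document id, weight, attributes) from the stored searchd response
++// into the row buffer; returns HA_ERR_END_OF_FILE once all matches have been consumed.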
++int ha_sphinx::get_rec ( byte * buf, const byte *, uint )
++{
++	SPH_ENTER_METHOD();
++
++	if ( m_iCurrentPos>=m_iMatchesTotal )
++	{
++		SafeDeleteArray ( m_pResponse );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	#if MYSQL_VERSION_ID>50100
++	my_bitmap_map * org_bitmap = dbug_tmp_use_all_columns ( table, table->write_set );
++	#endif
++	Field ** field = table->field;
++
++	// unpack and return the match
++	longlong uMatchID = UnpackDword ();
++	if ( m_bId64 )
++		uMatchID = ( uMatchID<<32 ) + UnpackDword();
++	uint32 uMatchWeight = UnpackDword ();
++
++	field[0]->store ( uMatchID, 1 );
++	field[1]->store ( uMatchWeight, 1 );
++	field[2]->store ( (const char*)m_pCurrentKey, m_iCurrentKeyLen, &my_charset_bin );
++
++	for ( uint32 i=0; i<m_iAttrs; i++ )
++	{
++		longlong iValue64 = 0;
++		uint32 uValue = UnpackDword ();
++		if ( m_dAttrs[i].m_uType==SPH_ATTR_BIGINT )
++			iValue64 = ( (longlong)uValue<<32 ) | UnpackDword();
++		if ( m_dAttrs[i].m_iField<0 )
++		{
++			// skip MVA
++			if ( m_dAttrs[i].m_uType & SPH_ATTR_MULTI )
++				for ( ; uValue>0 && !m_bUnpackError; uValue-- )
++					UnpackDword();
++			continue;
++		}
++
++		Field * af = field [ m_dAttrs[i].m_iField ];
++		switch ( m_dAttrs[i].m_uType )
++		{
++			case SPH_ATTR_INTEGER:
++			case SPH_ATTR_ORDINAL:
++			case SPH_ATTR_BOOL:
++				af->store ( uValue, 1 );
++				break;
++
++			case SPH_ATTR_FLOAT:
++				af->store ( sphDW2F(uValue) );
++				break;
++
++			case SPH_ATTR_TIMESTAMP:
++				if ( af->type()==MYSQL_TYPE_TIMESTAMP )
++					longstore ( af->ptr, uValue ); // because store() does not accept timestamps
++				else
++					af->store ( uValue, 1 );
++				break;
++
++			case SPH_ATTR_BIGINT:
++				af->store ( iValue64, 0 );
++				break;
++
++			case ( SPH_ATTR_MULTI | SPH_ATTR_INTEGER ):
++				if ( uValue<=0 )
++				{
++					// shortcut, empty MVA set
++					af->store ( "", 0, &my_charset_bin );
++
++				} else
++				{
++					// convert MVA set to comma-separated string
++					char sBuf[1024]; // FIXME! magic size
++					char * pCur = sBuf;
++
++					for ( ; uValue>0 && !m_bUnpackError; uValue-- )
++					{
++						uint32 uEntry = UnpackDword ();
++						if ( pCur < sBuf+sizeof(sBuf)-16 ) // 10 chars per 32bit value plus some safety bytes
++						{
++							snprintf ( pCur, sBuf+sizeof(sBuf)-pCur, "%u", uEntry );
++							while ( *pCur ) pCur++;
++							if ( uValue>1 )
++								*pCur++ = ','; // non-trailing commas
++						}
++					}
++
++					af->store ( sBuf, pCur-sBuf, &my_charset_bin );
++				}
++				break;
++
++			default:
++				my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: unhandled attr type" );
++				SafeDeleteArray ( m_pResponse );
++				SPH_RET ( HA_ERR_END_OF_FILE );
++		}
++	}
++
++	if ( m_bUnpackError )
++	{
++		my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: response unpacker failed" );
++		SafeDeleteArray ( m_pResponse );
++		SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	// zero out unmapped fields
++	for ( int i=SPHINXSE_SYSTEM_COLUMNS; i<(int)table->s->fields; i++ )
++		if ( m_dUnboundFields[i]!=SPH_ATTR_NONE )
++			switch ( m_dUnboundFields[i] )
++	{
++		case SPH_ATTR_INTEGER:		table->field[i]->store ( 0, 1 ); break;
++		case SPH_ATTR_TIMESTAMP:	longstore ( table->field[i]->ptr, 0 ); break;
++		default:
++			my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0),
++				"INTERNAL ERROR: unhandled unbound field type %d", m_dUnboundFields[i] );
++			SafeDeleteArray ( m_pResponse );
++			SPH_RET ( HA_ERR_END_OF_FILE );
++	}
++
++	memset ( buf, 0, table->s->null_bytes );
++	m_iCurrentPos++;
++
++	#if MYSQL_VERSION_ID > 50100
++	dbug_tmp_restore_column_map ( table->write_set, org_bitmap );
++	#endif
++
++	SPH_RET(0);
++}
++
++
++// Used to read backwards through the index.
++int ha_sphinx::index_prev ( byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++// index_first() asks for the first key in the index.
++//
++// Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
++// and sql_select.cc.
++int ha_sphinx::index_first ( byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_END_OF_FILE );
++}
++
++// index_last() asks for the last key in the index.
++//
++// Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
++// and sql_select.cc.
++int ha_sphinx::index_last ( byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++int ha_sphinx::rnd_init ( bool )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++int ha_sphinx::rnd_end()
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++int ha_sphinx::rnd_next ( byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_END_OF_FILE );
++}
++
++
++void ha_sphinx::position ( const byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_VOID_RET();
++}
++
++
++// This is like rnd_next, but you are given a position to use
++// to determine the row. The position will be of the type that you stored in
++// ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key
++// or position you saved when position() was called.
++// Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc.
++int ha_sphinx::rnd_pos ( byte *, byte * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++#if MYSQL_VERSION_ID>=50030
++int ha_sphinx::info ( uint )
++#else
++void ha_sphinx::info ( uint )
++#endif
++{
++	SPH_ENTER_METHOD();
++
++	if ( table->s->keys>0 )
++		table->key_info[0].rec_per_key[0] = 1;
++
++	#if MYSQL_VERSION_ID>50100
++	stats.records = 20;
++	#else
++	records = 20;
++	#endif
++
++#if MYSQL_VERSION_ID>=50030
++	SPH_RET(0);
++#else
++	SPH_VOID_RET();
++#endif
++}
++
++
++int ha_sphinx::reset ()
++{
++	SPH_ENTER_METHOD();
++	CSphSEThreadData * pTls = GetTls ();
++	if ( pTls )
++		pTls->m_bQuery = false;
++	SPH_RET(0);
++}
++
++
++int ha_sphinx::delete_all_rows()
++{
++	SPH_ENTER_METHOD();
++	SPH_RET ( HA_ERR_WRONG_COMMAND );
++}
++
++
++// First you should go read the section "locking functions for mysql" in
++// lock.cc to understand this.
++// This creates a lock on the table. If you are implementing a storage engine
++// that can handle transactions, look at ha_berkeley.cc to see how you will
++// want to go about doing this. Otherwise you should consider calling flock()
++// here.
++//
++// Called from lock.cc by lock_external() and unlock_external(). Also called
++// from sql_table.cc by copy_data_between_tables().
++int ha_sphinx::external_lock ( THD *, int )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++THR_LOCK_DATA ** ha_sphinx::store_lock ( THD *, THR_LOCK_DATA ** to,
++	enum thr_lock_type lock_type )
++{
++	SPH_ENTER_METHOD();
++
++	if ( lock_type!=TL_IGNORE && m_tLock.type==TL_UNLOCK )
++		m_tLock.type = lock_type;
++
++	*to++ = &m_tLock;
++	SPH_RET(to);
++}
++
++
++int ha_sphinx::delete_table ( const char * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++// Renames a table from one name to another from alter table call.
++//
++// If you do not implement this, the default rename_table() is called from
++// handler.cc and it will delete all files with the file extensions returned
++// by bas_ext().
++//
++// Called from sql_table.cc by mysql_rename_table().
++int ha_sphinx::rename_table ( const char *, const char * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(0);
++}
++
++
++// Given a starting key and an ending key, estimate the number of rows that
++// will exist between the two. end_key may be empty, in which case determine
++// whether start_key matches any rows.
++//
++// Called from opt_range.cc by check_quick_keys().
++ha_rows ha_sphinx::records_in_range ( uint, key_range *, key_range * )
++{
++	SPH_ENTER_METHOD();
++	SPH_RET(3); // low number to force index usage
++}
++
++
++// create() is called to create a table. The variable name will have the name
++// of the table. When create() is called you do not need to worry about opening
++// the table. Also, the FRM file will have already been created so adjusting
++// create_info will not do you any good. You can overwrite the frm file at this
++// point if you wish to change the table definition, but there are no methods
++// currently provided for doing that.
++//
++// Called from handler.cc by ha_create_table().
++int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
++{
++	SPH_ENTER_METHOD();
++	char sError[256];
++
++	CSphSEShare tInfo;
++	if ( !ParseUrl ( &tInfo, table, true ) )
++		SPH_RET(-1);
++
++	// check SphinxAPI table
++	for ( ; !tInfo.m_bSphinxQL; )
++	{
++		// check system fields (count and types)
++		if ( table->s->fields<SPHINXSE_SYSTEM_COLUMNS )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: there MUST be at least %d columns",
++				name, SPHINXSE_SYSTEM_COLUMNS );
++			break;
++		}
++
++		if ( !IsIDField ( table->field[0] ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 1st column (docid) MUST be unsigned integer or bigint", name );
++			break;
++		}
++
++		if ( !IsIntegerFieldType ( table->field[1]->type() ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 2nd column (weight) MUST be integer or bigint", name );
++			break;
++		}
++
++		enum_field_types f2 = table->field[2]->type();
++		if ( f2!=MYSQL_TYPE_VARCHAR
++			&& f2!=MYSQL_TYPE_BLOB && f2!=MYSQL_TYPE_MEDIUM_BLOB && f2!=MYSQL_TYPE_LONG_BLOB && f2!=MYSQL_TYPE_TINY_BLOB )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 3rd column (search query) MUST be varchar or text", name );
++			break;
++		}
++
++		// check attributes
++		int i;
++		for ( i=3; i<(int)table->s->fields; i++ )
++		{
++			enum_field_types eType = table->field[i]->type();
++			if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
++			{
++				my_snprintf ( sError, sizeof(sError), "%s: %dth column (attribute %s) MUST be integer, bigint, timestamp, varchar, or float",
++					name, i+1, table->field[i]->field_name );
++				break;
++			}
++		}
++
++		if ( i!=(int)table->s->fields )
++			break;
++
++		// check index
++		if (
++			table->s->keys!=1 ||
++			table->key_info[0].key_parts!=1 ||
++			strcasecmp ( table->key_info[0].key_part[0].field->field_name, table->field[2]->field_name ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: there must be an index on '%s' column",
++				name, table->field[2]->field_name );
++			break;
++		}
++
++		// all good
++		sError[0] = '\0';
++		break;
++	}
++
++	// check SphinxQL table
++	for ( ; tInfo.m_bSphinxQL; )
++	{
++		sError[0] = '\0';
++
++		// check that 1st column is id, is of int type, and has an index
++		if ( strcmp ( table->field[0]->field_name, "id" ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 1st column must be called 'id'", name );
++			break;
++		}
++
++		if ( !IsIDField ( table->field[0] ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be INT UNSIGNED or BIGINT", name );
++			break;
++		}
++
++		// check index
++		if (
++			table->s->keys!=1 ||
++			table->key_info[0].key_parts!=1 ||
++			strcasecmp ( table->key_info[0].key_part[0].field->field_name, "id" ) )
++		{
++			my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be indexed", name );
++			break;
++		}
++
++		// check column types
++		for ( int i=1; i<(int)table->s->fields; i++ )
++		{
++			enum_field_types eType = table->field[i]->type();
++			if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
++			{
++				my_snprintf ( sError, sizeof(sError), "%s: column %s is of unsupported type (use int/bigint/timestamp/varchar/float)",
++					name, i+1, table->field[i]->field_name );
++				break;
++			}
++		}
++		if ( sError[0] )
++			break;
++
++		// all good
++		break;
++	}
++
++	// report and bail
++	if ( sError[0] )
++	{
++		my_error ( ER_CANT_CREATE_TABLE, MYF(0), sError, -1 );
++		SPH_RET(-1);
++	}
++
++	SPH_RET(0);
++}
++
++// show functions
++
++#if MYSQL_VERSION_ID<50100
++#define SHOW_VAR_FUNC_BUFF_SIZE 1024
++#endif
++
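++// Helper for the show functions below: return the per-thread query stats kept in TLS,
++// or NULL (after filling in an empty SHOW_CHAR value) when no stats are available.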
++CSphSEStats * sphinx_get_stats ( THD * thd, SHOW_VAR * out )
++{
++#if MYSQL_VERSION_ID>50100
++	if ( sphinx_hton_ptr )
++	{
++		CSphSEThreadData *pTls = (CSphSEThreadData *) *thd_ha_data ( thd, sphinx_hton_ptr );
++
++		if ( pTls && pTls->m_bStats )
++			return &pTls->m_tStats;
++	}
++#else
++	CSphSEThreadData *pTls = (CSphSEThreadData *) thd->ha_data[sphinx_hton.slot];
++	if ( pTls && pTls->m_bStats )
++		return &pTls->m_tStats;
++#endif
++
++	out->type = SHOW_CHAR;
++	out->value = "";
++	return 0;
++}
++
++int sphinx_showfunc_total ( THD * thd, SHOW_VAR * out, char * )
++{
++	CSphSEStats * pStats = sphinx_get_stats ( thd, out );
++	if ( pStats )
++	{
++		out->type = SHOW_INT;
++		out->value = (char *) &pStats->m_iMatchesTotal;
++	}
++	return 0;
++}
++
++int sphinx_showfunc_total_found ( THD * thd, SHOW_VAR * out, char * )
++{
++	CSphSEStats * pStats = sphinx_get_stats ( thd, out );
++	if ( pStats )
++	{
++		out->type = SHOW_INT;
++		out->value = (char *) &pStats->m_iMatchesFound;
++	}
++	return 0;
++}
++
++int sphinx_showfunc_time ( THD * thd, SHOW_VAR * out, char * )
++{
++	CSphSEStats * pStats = sphinx_get_stats ( thd, out );
++	if ( pStats )
++	{
++		out->type = SHOW_INT;
++		out->value = (char *) &pStats->m_iQueryMsec;
++	}
++	return 0;
++}
++
++int sphinx_showfunc_word_count ( THD * thd, SHOW_VAR * out, char * )
++{
++	CSphSEStats * pStats = sphinx_get_stats ( thd, out );
++	if ( pStats )
++	{
++		out->type = SHOW_INT;
++		out->value = (char *) &pStats->m_iWords;
++	}
++	return 0;
++}
++
++int sphinx_showfunc_words ( THD * thd, SHOW_VAR * out, char * sBuffer )
++{
++#if MYSQL_VERSION_ID>50100
++	if ( sphinx_hton_ptr )
++	{
++		CSphSEThreadData * pTls = (CSphSEThreadData *) *thd_ha_data ( thd, sphinx_hton_ptr );
++#else
++	{
++		CSphSEThreadData * pTls = (CSphSEThreadData *) thd->ha_data[sphinx_hton.slot];
++#endif
++		if ( pTls && pTls->m_bStats )
++		{
++			CSphSEStats * pStats = &pTls->m_tStats;
++			if ( pStats && pStats->m_iWords )
++			{
++				uint uBuffLen = 0;
++
++				out->type = SHOW_CHAR;
++				out->value = sBuffer;
++
++				// the following is partially based on code in sphinx_show_status()
++				sBuffer[0] = 0;
++				for ( int i=0; i<pStats->m_iWords; i++ )
++				{
++					CSphSEWordStats & tWord = pStats->m_dWords[i];
++					uBuffLen = my_snprintf ( sBuffer, SHOW_VAR_FUNC_BUFF_SIZE, "%s%s:%d:%d ", sBuffer,
++						tWord.m_sWord, tWord.m_iDocs, tWord.m_iHits );
++				}
++
++				if ( uBuffLen > 0 )
++				{
++					// trim last space
++					sBuffer [ --uBuffLen ] = 0;
++
++					if ( pTls->m_pQueryCharset )
++					{
++						// String::c_ptr() will nul-terminate the buffer.
++						//
++						// NOTE: It's not entirely clear whether this conversion is necessary at all.
++
++						String sConvert;
++						uint iErrors;
++						sConvert.copy ( sBuffer, uBuffLen, pTls->m_pQueryCharset, system_charset_info, &iErrors );
++						memcpy ( sBuffer, sConvert.c_ptr(), sConvert.length() + 1 );
++					}
++				}
++
++				return 0;
++			}
++		}
++	}
++
++	out->type = SHOW_CHAR;
++	out->value = "";
++	return 0;
++}
++
++int sphinx_showfunc_error ( THD * thd, SHOW_VAR * out, char * )
++{
++	CSphSEStats * pStats = sphinx_get_stats ( thd, out );
++	if ( pStats && pStats->m_bLastError )
++	{
++		out->type = SHOW_CHAR;
++		out->value = pStats->m_sLastMessage;
++	}
++	return 0;
++}
++
++#if MYSQL_VERSION_ID>50100
++struct st_mysql_storage_engine sphinx_storage_engine =
++{
++	MYSQL_HANDLERTON_INTERFACE_VERSION
++};
++
++struct st_mysql_show_var sphinx_status_vars[] =
++{
++	{"sphinx_total",		(char *)sphinx_showfunc_total,			SHOW_FUNC},
++	{"sphinx_total_found",	(char *)sphinx_showfunc_total_found,	SHOW_FUNC},
++	{"sphinx_time",			(char *)sphinx_showfunc_time,			SHOW_FUNC},
++	{"sphinx_word_count",	(char *)sphinx_showfunc_word_count,		SHOW_FUNC},
++	{"sphinx_words",		(char *)sphinx_showfunc_words,			SHOW_FUNC},
++	{"sphinx_error",		(char *)sphinx_showfunc_error,			SHOW_FUNC},
++	{0, 0, (enum_mysql_show_type)0}
++};
++
++
++mysql_declare_plugin(sphinx)
++{
++	MYSQL_STORAGE_ENGINE_PLUGIN,
++	&sphinx_storage_engine,
++	sphinx_hton_name,
++	"Sphinx developers",
++	sphinx_hton_comment,
++	PLUGIN_LICENSE_GPL,
++	sphinx_init_func, // Plugin Init
++	sphinx_done_func, // Plugin Deinit
++	0x0001, // 0.1
++	sphinx_status_vars,
++	NULL,
++	NULL
++}
++mysql_declare_plugin_end;
++
++#endif // >50100
++
++//
++// $Id: ha_sphinx.cc 2752 2011-03-29 08:21:05Z tomat $
++//
+diff -uNr storage/ha_sphinx.h storage/sphinx//ha_sphinx.h
+--- storage/sphinx/ha_sphinx.h	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/ha_sphinx.h	2010-08-03 13:38:09.000000000 +0200
+@@ -0,0 +1,167 @@
++//
++// $Id: ha_sphinx.h 2428 2010-08-03 11:38:09Z shodan $
++//
++
++#ifdef USE_PRAGMA_INTERFACE
++#pragma interface // gcc class implementation
++#endif
++
++
++#if MYSQL_VERSION_ID>50100
++#define TABLE_ARG	st_table_share
++#else
++#define TABLE_ARG	st_table
++#endif
++
++
++#if MYSQL_VERSION_ID>=50120
++typedef uchar byte;
++#endif
++
++
++/// forward decls
++class THD;
++struct CSphReqQuery;
++struct CSphSEShare;
++struct CSphSEAttr;
++struct CSphSEStats;
++struct CSphSEThreadData;
++
++/// Sphinx SE handler class
++class ha_sphinx : public handler
++{
++protected:
++	THR_LOCK_DATA	m_tLock;				///< MySQL lock
++
++	CSphSEShare *	m_pShare;				///< shared lock info
++
++	uint			m_iMatchesTotal;
++	uint			m_iCurrentPos;
++	const byte *	m_pCurrentKey;
++	uint			m_iCurrentKeyLen;
++
++	char *			m_pResponse;			///< searchd response storage
++	char *			m_pResponseEnd;			///< searchd response storage end (points to wilderness!)
++	char *			m_pCur;					///< current position into response
++	bool			m_bUnpackError;			///< any errors while unpacking response
++
++public:
++#if MYSQL_VERSION_ID<50100
++					ha_sphinx ( TABLE_ARG * table_arg );
++#else
++					ha_sphinx ( handlerton * hton, TABLE_ARG * table_arg );
++#endif
++					~ha_sphinx () {}
++
++	const char *	table_type () const		{ return "SPHINX"; }	///< SE name for display purposes
++	const char *	index_type ( uint )		{ return "HASH"; }		///< index type name for display purposes
++	const char **	bas_ext () const;								///< my file extensions
++
++	#if MYSQL_VERSION_ID>50100
++	ulonglong		table_flags () const	{ return HA_CAN_INDEX_BLOBS; }			///< bitmap of implemented flags (see handler.h for more info)
++	#else
++	ulong			table_flags () const	{ return HA_CAN_INDEX_BLOBS; }			///< bitmap of implemented flags (see handler.h for more info)
++	#endif
++
++	ulong			index_flags ( uint, uint, bool ) const	{ return 0; }	///< bitmap of flags that says how SE implements indexes
++	uint			max_supported_record_length () const	{ return HA_MAX_REC_LENGTH; }
++	uint			max_supported_keys () const				{ return 1; }
++	uint			max_supported_key_parts () const		{ return 1; }
++	uint			max_supported_key_length () const		{ return MAX_KEY_LENGTH; }
++	uint			max_supported_key_part_length () const	{ return MAX_KEY_LENGTH; }
++
++	#if MYSQL_VERSION_ID>50100
++	virtual double	scan_time ()	{ return (double)( stats.records+stats.deleted )/20.0 + 10; }	///< called in test_quick_select to determine if indexes should be used
++	#else
++	virtual double	scan_time ()	{ return (double)( records+deleted )/20.0 + 10; }				///< called in test_quick_select to determine if indexes should be used
++	#endif
++
++	virtual double	read_time ( ha_rows rows )	{ return (double)rows/20.0 + 1; }					///< index read time estimate
++
++public:
++	int				open ( const char * name, int mode, uint test_if_locked );
++	int				close ();
++
++	int				write_row ( byte * buf );
++	int				update_row ( const byte * old_data, byte * new_data );
++	int				delete_row ( const byte * buf );
++	int				extra ( enum ha_extra_function op );
++
++	int				index_init ( uint keynr, bool sorted ); // 5.1.x
++	int				index_init ( uint keynr ) { return index_init ( keynr, false ); } // 5.0.x
++
++	int				index_end (); 
++	int				index_read ( byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag );
++	int				index_read_idx ( byte * buf, uint idx, const byte * key, uint key_len, enum ha_rkey_function find_flag );
++	int				index_next ( byte * buf );
++	int				index_next_same ( byte * buf, const byte * key, uint keylen );
++	int				index_prev ( byte * buf );
++	int				index_first ( byte * buf );
++	int				index_last ( byte * buf );
++
++	int				get_rec ( byte * buf, const byte * key, uint keylen );
++
++	int				rnd_init ( bool scan );
++	int				rnd_end ();
++	int				rnd_next ( byte * buf );
++	int				rnd_pos ( byte * buf, byte * pos );
++	void			position ( const byte * record );
++
++#if MYSQL_VERSION_ID>=50030
++	int				info ( uint );
++#else
++	void			info ( uint );
++#endif
++
++	int				reset();
++	int				external_lock ( THD * thd, int lock_type );
++	int				delete_all_rows ();
++	ha_rows			records_in_range ( uint inx, key_range * min_key, key_range * max_key );
++
++	int				delete_table ( const char * from );
++	int				rename_table ( const char * from, const char * to );
++	int				create ( const char * name, TABLE * form, HA_CREATE_INFO * create_info );
++
++	THR_LOCK_DATA **		store_lock ( THD * thd, THR_LOCK_DATA ** to, enum thr_lock_type lock_type );
++
++public:
++	virtual const COND *	cond_push ( const COND *cond );
++	virtual void			cond_pop ();
++
++private:
++	uint32			m_iFields;
++	char **			m_dFields;
++
++	uint32			m_iAttrs;
++	CSphSEAttr *	m_dAttrs;
++	int				m_bId64;
++
++	int *			m_dUnboundFields;
++
++private:
++	int				Connect ( const char * sQueryHost, ushort uPort );
++	int				ConnectAPI ( const char * sQueryHost, int iQueryPort );
++	int				HandleMysqlError ( struct st_mysql * pConn, int iErrCode );
++
++	uint32			UnpackDword ();
++	char *			UnpackString ();
++	bool			UnpackSchema ();
++	bool			UnpackStats ( CSphSEStats * pStats );
++
++	CSphSEThreadData *	GetTls ();
++};
++
++
++#if MYSQL_VERSION_ID < 50100
++bool sphinx_show_status ( THD * thd );
++#endif
++
++int sphinx_showfunc_total_found ( THD *, SHOW_VAR *, char * );
++int sphinx_showfunc_total ( THD *, SHOW_VAR *, char * );
++int sphinx_showfunc_time ( THD *, SHOW_VAR *, char * );
++int sphinx_showfunc_word_count ( THD *, SHOW_VAR *, char * );
++int sphinx_showfunc_words ( THD *, SHOW_VAR *, char * );
++
++//
++// $Id: ha_sphinx.h 2428 2010-08-03 11:38:09Z shodan $
++//
+diff -uNr storage/INSTALL storage/sphinx//INSTALL
+--- storage/sphinx/INSTALL	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/INSTALL	2010-07-07 18:12:02.000000000 +0200
+@@ -0,0 +1,48 @@
++Building MySQL with SphinxSE
++=============================
++
++Note: the BUILD/autorun.sh step on Linux might malfunction with some
++versions of automake; autorun.sh will not fail, but the subsequent build will.
++automake 1.9.6 is known to work.
++
++
++
++MySQL 5.0.x on Linux
++---------------------
++
++tar zxvf mysql-5.0.91.tar.gz
++cp -R mysqlse mysql-5.0.91/sql/sphinx
++cd mysql-5.0.91
++
++patch -p1 -i sql/sphinx/sphinx.5.0.91.diff
++sh BUILD/autorun.sh
++./configure --with-sphinx-storage-engine
++make
++
++
++
++MySQL 5.1.x on Linux
++---------------------
++
++tar zxvf mysql-5.1.47.tar.gz
++cp -R -p mysqlse mysql-5.1.47/storage/sphinx
++cd mysql-5.1.47
++
++sh BUILD/autorun.sh
++./configure --with-plugins=sphinx
++make
++
++
++
++MySQL 5.0.x on Windows
++-----------------------
++
++tar zxvf mysql-5.0.91.tar.gz
++cp -R mysqlse mysql-5.0.91/sql/sphinx
++cd mysql-5.0.91
++
++patch -p1 -i sql/sphinx/sphinx.5.0.91.diff
++win/configure.js WITH_SPHINX_STORAGE_ENGINE
++win/build-vs8
++
++--eof--
+diff -uNr storage/Makefile.am storage/sphinx//Makefile.am
+--- storage/sphinx/Makefile.am	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/Makefile.am	2009-02-13 22:26:46.000000000 +0100
+@@ -0,0 +1,59 @@
++# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
++# 
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the License, or
++# (at your option) any later version.
++# 
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++# 
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software
++# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++
++#called from the top level Makefile
++
++MYSQLDATAdir =          $(localstatedir)
++MYSQLSHAREdir =         $(pkgdatadir)
++MYSQLBASEdir=           $(prefix)
++MYSQLLIBdir=            $(pkglibdir)
++pkgplugindir =          $(pkglibdir)/plugin
++INCLUDES =              -I$(top_srcdir)/include -I$(top_builddir)/include \
++			-I$(top_srcdir)/regex \
++			-I$(top_srcdir)/sql \
++                        -I$(srcdir)
++SUBDIRS =				../../include ../../mysys ../../strings ../../dbug ../../extra
++WRAPLIBS=
++
++LDADD =
++
++DEFS= @DEFS@ \
++      -D_REENTRANT -D_PTHREADS -DENGINE -DSTORAGE_ENGINE -DMYSQL_SERVER
++
++noinst_HEADERS =	ha_sphinx.h
++
++EXTRA_LTLIBRARIES =	ha_sphinx.la
++pkgplugin_LTLIBRARIES = @plugin_sphinx_shared_target@ sphinx.la
++
++ha_sphinx_la_LDFLAGS =	-module -rpath $(MYSQLLIBdir)
++ha_sphinx_la_CXXFLAGS=	$(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
++ha_sphinx_la_CFLAGS =	$(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
++ha_sphinx_la_SOURCES =	ha_sphinx.cc
++
++sphinx_la_LDFLAGS = -module
++sphinx_la_CXXFLAGS = $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
++sphinx_la_CFLAGS = $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
++sphinx_la_SOURCES = snippets_udf.cc
++
++EXTRA_LIBRARIES =	libsphinx.a
++noinst_LIBRARIES =	@plugin_sphinx_static_target@
++libsphinx_a_CXXFLAGS =	$(AM_CFLAGS)
++libsphinx_a_CFLAGS =	$(AM_CFLAGS)
++libsphinx_a_SOURCES=	ha_sphinx.cc
++
++EXTRA_DIST =		cmakelists.txt
++# Don't update the files from bitkeeper
++%::SCCS/s.%
+diff -uNr storage/make-patch.sh storage/sphinx//make-patch.sh
+--- storage/sphinx/make-patch.sh	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/make-patch.sh	2008-09-05 20:06:30.000000000 +0200
+@@ -0,0 +1,36 @@
++#!/bin/sh
++
++OUT=$1
++ORIG=$2
++NEW=$3
++
++if [ ! \( "$1" -a "$2" -a "$3" \) ]; then
++	echo "$0 <patch> <original> <new>"
++	exit 1
++fi
++
++FILES='
++/config/ac-macros/ha_sphinx.m4
++/configure.in
++/libmysqld/Makefile.am
++/sql/handler.cc
++/sql/handler.h
++/sql/Makefile.am
++/sql/mysqld.cc
++/sql/mysql_priv.h
++/sql/set_var.cc
++/sql/sql_lex.h
++/sql/sql_parse.cc
++/sql/sql_yacc.yy
++/sql/structs.h
++/sql/sql_show.cc
++'
++
++rm -f $OUT
++if [ -e $OUT ]; then
++	exit 1
++fi
++
++for name in $FILES; do
++	diff -BNru "$ORIG$name" "$NEW$name" >> $OUT
++done
+diff -uNr storage/plug.in storage/sphinx//plug.in
+--- storage/sphinx/plug.in	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/plug.in	2006-06-07 09:28:43.000000000 +0200
+@@ -0,0 +1,5 @@
++MYSQL_STORAGE_ENGINE(sphinx,,  [Sphinx Storage Engine],
++        [Sphinx Storage Engines], [max,max-no-ndb])
++MYSQL_PLUGIN_DIRECTORY(sphinx, [storage/sphinx])
++MYSQL_PLUGIN_STATIC(sphinx,    [libsphinx.a])
++MYSQL_PLUGIN_DYNAMIC(sphinx,   [ha_sphinx.la])
+diff -uNr storage/snippets_udf.cc storage/sphinx//snippets_udf.cc
+--- storage/sphinx/snippets_udf.cc	1970-01-01 01:00:00.000000000 +0100
++++ storage/sphinx/snippets_udf.cc	2011-01-01 03:33:06.000000000 +0100
+@@ -0,0 +1,768 @@
++//
++// $Id: snippets_udf.cc 2616 2011-01-01 02:33:06Z shodan $
++//
++
++//
++// Copyright (c) 2001-2011, Andrew Aksyonoff
++// Copyright (c) 2008-2011, Sphinx Technologies Inc
++// All rights reserved
++//
++// This program is free software; you can redistribute it and/or modify
++// it under the terms of the GNU General Public License. You should have
++// received a copy of the GPL license along with this program; if you
++// did not, you can find it at http://www.gnu.org/
++//
++
++#include <stdio.h>
++#include <string.h>
++#include <assert.h>
++
++#include <sys/un.h>
++#include <netdb.h>
++
++#include <mysql_version.h>
++
++#if MYSQL_VERSION_ID>50100
++#include "mysql_priv.h"
++#include <mysql/plugin.h>
++#else
++#include "../mysql_priv.h"
++#endif
++
++#include <mysys_err.h>
++#include <my_sys.h>
++
++#if MYSQL_VERSION_ID>=50120
++typedef uchar byte;
++#endif
++
++/// partially copy-pasted stuff that should be moved elsewhere
++
++#if UNALIGNED_RAM_ACCESS
++
++/// pass-through wrapper
++template < typename T > inline T sphUnalignedRead ( const T & tRef )
++{
++	return tRef;
++}
++
++/// pass-through wrapper
++template < typename T > void sphUnalignedWrite ( void * pPtr, const T & tVal )
++{
++	*(T*)pPtr = tVal;
++}
++
++#else
++
++/// unaligned read wrapper for some architectures (eg. SPARC)
++template < typename T >
++inline T sphUnalignedRead ( const T & tRef )
++{
++	T uTmp;
++	byte * pSrc = (byte *) &tRef;
++	byte * pDst = (byte *) &uTmp;
++	for ( int i=0; i<(int)sizeof(T); i++ )
++		*pDst++ = *pSrc++;
++	return uTmp;
++}
++
++/// unaligned write wrapper for some architectures (eg. SPARC)
++template < typename T >
++void sphUnalignedWrite ( void * pPtr, const T & tVal )
++{
++	byte * pDst = (byte *) pPtr;
++	byte * pSrc = (byte *) &tVal;
++	for ( int i=0; i<(int)sizeof(T); i++ )
++		*pDst++ = *pSrc++;
++}
++
++#endif
++
++#define SPHINXSE_MAX_ALLOC			(16*1024*1024)
++
++#define SafeDelete(_arg)		{ if ( _arg ) delete ( _arg );		(_arg) = NULL; }
++#define SafeDeleteArray(_arg)	{ if ( _arg ) delete [] ( _arg );	(_arg) = NULL; }
++
++#define Min(a,b) ((a)<(b)?(a):(b))
++
++typedef unsigned int DWORD;
++
++inline DWORD sphF2DW ( float f ) { union { float f; uint32 d; } u; u.f = f; return u.d; }
++
++static char * sphDup ( const char * sSrc, int iLen=-1 )
++{
++	if ( !sSrc )
++		return NULL;
++
++	if ( iLen<0 )
++		iLen = strlen(sSrc);
++
++	char * sRes = new char [ 1+iLen ];
++	memcpy ( sRes, sSrc, iLen );
++	sRes[iLen] = '\0';
++	return sRes;
++}
++
++static inline void sphShowErrno ( const char * sCall )
++{
++	char sError[256];
++	snprintf ( sError, sizeof(sError), "%s() failed: [%d] %s", sCall, errno, strerror(errno) );
++	my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sError );
++}
++
++static const bool sphReportErrors = true;
++
++static bool sphSend ( int iFd, const char * pBuffer, int iSize, bool bReportErrors = false )
++{
++	assert ( pBuffer );
++	assert ( iSize > 0 );
++
++	const int iResult = send ( iFd, pBuffer, iSize, 0 );
++	if ( iResult != iSize )
++	{
++		if ( bReportErrors ) sphShowErrno("send");
++		return false;
++	}
++	return true;
++}
++
++static bool sphRecv ( int iFd, char * pBuffer, int iSize, bool bReportErrors = false )
++{
++	assert ( pBuffer );
++	assert ( iSize > 0 );
++	
++	while ( iSize )
++	{
++		const int iResult = recv ( iFd, pBuffer, iSize, 0 );
++		if ( iResult > 0 )
++		{
++			iSize -= iResult;
++			pBuffer += iSize;
++		}
++		else if ( iResult == 0 )
++		{
++			if ( bReportErrors )
++				my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), "recv() failed: disconnected" );
++			return false;
++		}
++		else
++		{
++			if ( bReportErrors ) sphShowErrno("recv");
++			return false;
++		}
++	}
++	return true;
++}
++
++enum
++{
++	SPHINX_SEARCHD_PROTO		= 1,
++
++	SEARCHD_COMMAND_SEARCH		= 0,
++	SEARCHD_COMMAND_EXCERPT		= 1,
++
++	VER_COMMAND_SEARCH		= 0x116,
++	VER_COMMAND_EXCERPT		= 0x100,
++};
++
++/// known answers
++enum
++{
++	SEARCHD_OK		= 0,	///< general success, command-specific reply follows
++	SEARCHD_ERROR	= 1,	///< general failure, error message follows
++	SEARCHD_RETRY	= 2,	///< temporary failure, error message follows, client should retry later
++	SEARCHD_WARNING	= 3		///< general success, warning message and command-specific reply follow
++};
++
++#define SPHINXSE_DEFAULT_SCHEME		"sphinx"
++#define SPHINXSE_DEFAULT_HOST		"127.0.0.1"
++#define SPHINXSE_DEFAULT_PORT		9312
++#define SPHINXSE_DEFAULT_INDEX		"*"
++
++class CSphBuffer
++{
++private:
++	bool m_bOverrun;
++	int m_iSize;
++	int m_iLeft;
++	char * m_pBuffer;
++	char * m_pCurrent;
++
++public:
++	CSphBuffer ( const int iSize )
++		: m_bOverrun ( false )
++		, m_iSize ( iSize )
++		, m_iLeft ( iSize )
++	{
++		assert ( iSize > 0 );
++		m_pBuffer = new char[iSize];
++		m_pCurrent = m_pBuffer;
++	}
++
++	~CSphBuffer ()
++	{
++		SafeDelete ( m_pBuffer );
++	}
++
++	const char * Ptr() const { return m_pBuffer; }
++
++	bool Finalize()
++	{
++		return !( m_bOverrun || m_iLeft != 0 || m_pCurrent - m_pBuffer != m_iSize );
++	}
++	
++	void SendBytes ( const void * pBytes, int iBytes );
++	
++	void SendWord ( short int v )					{ v = ntohs(v); SendBytes ( &v, sizeof(v) ); }
++	void SendInt ( int v )							{ v = ntohl(v); SendBytes ( &v, sizeof(v) ); }
++	void SendDword ( DWORD v )						{ v = ntohl(v) ;SendBytes ( &v, sizeof(v) ); }
++	void SendUint64 ( ulonglong v )					{ SendDword ( uint(v>>32) ); SendDword ( uint(v&0xFFFFFFFFUL) ); }
++	void SendString ( const char * v )				{ SendString ( v, strlen(v) ); }
++	void SendString ( const char * v, int iLen )	{ SendDword(iLen); SendBytes ( v, iLen ); }
++	void SendFloat ( float v )						{ SendDword ( sphF2DW(v) ); }
++};
++
++void CSphBuffer::SendBytes ( const void * pBytes, int iBytes )
++{
++	if ( m_iLeft < iBytes )
++	{
++		m_bOverrun = true;
++		return;
++	}
++
++	memcpy ( m_pCurrent, pBytes, iBytes );
++
++	m_pCurrent += iBytes;
++	m_iLeft -= iBytes;
++}
++
++struct CSphUrl
++{
++	char * m_sBuffer;
++	char * m_sFormatted;
++	
++	char * m_sScheme;
++	char * m_sHost;
++	char * m_sIndex;
++	
++	int m_iPort;
++	
++	CSphUrl()
++		: m_sBuffer ( NULL )
++		, m_sFormatted ( NULL )
++		, m_sScheme ( SPHINXSE_DEFAULT_SCHEME )
++		, m_sHost ( SPHINXSE_DEFAULT_HOST )
++		, m_sIndex ( SPHINXSE_DEFAULT_INDEX )
++		, m_iPort ( SPHINXSE_DEFAULT_PORT )
++	{}
++	
++	~CSphUrl()
++	{
++		SafeDeleteArray ( m_sFormatted );
++		SafeDeleteArray ( m_sBuffer );
++	}
++	
++	bool Parse ( const char * sUrl, int iLen );
++	int Connect();
++	const char * Format();
++};
++
++const char * CSphUrl::Format()
++{
++	if ( !m_sFormatted )
++	{
++		int iSize = 15 + strlen(m_sHost) + strlen(m_sIndex);
++		m_sFormatted = new char [ iSize ];
++		if ( m_iPort )
++			snprintf ( m_sFormatted, iSize, "inet://%s:%d/%s", m_sHost, m_iPort, m_sIndex );
++		else
++			snprintf ( m_sFormatted, iSize, "unix://%s/%s", m_sHost, m_sIndex );
++	}
++	return m_sFormatted;
++}
++
++// the following scheme variants are recognized
++//
++// inet://host/index
++// inet://host:port/index
++// unix://unix/domain/socket:index
++// unix://unix/domain/socket
++bool CSphUrl::Parse ( const char * sUrl, int iLen )
++{
++	bool bOk = true;
++	while ( iLen )
++	{
++		bOk = false;
++		
++		m_sBuffer = sphDup ( sUrl, iLen );
++		m_sScheme = m_sBuffer;
++		
++		m_sHost = strstr ( m_sBuffer, "://" );
++		if ( !m_sHost )
++			break;
++		m_sHost[0] = '\0';
++		m_sHost += 2;
++		
++		if ( !strcmp ( m_sScheme, "unix" ) )
++		{
++			// unix-domain socket
++			m_iPort = 0;
++			if (!( m_sIndex = strrchr ( m_sHost, ':' ) ))
++				m_sIndex = SPHINXSE_DEFAULT_INDEX;
++			else
++			{
++				*m_sIndex++ = '\0';
++				if ( !*m_sIndex )
++					m_sIndex = SPHINXSE_DEFAULT_INDEX;
++			}
++			bOk = true;
++			break;
++		}
++		if( strcmp ( m_sScheme, "sphinx" ) != 0 && strcmp ( m_sScheme, "inet" ) != 0 )
++			break;
++
++		// inet
++		m_sHost++;
++		char * sPort = strchr ( m_sHost, ':' );
++		if ( sPort )
++		{
++			*sPort++ = '\0';
++			if ( *sPort )
++			{
++				m_sIndex = strchr ( sPort, '/' );
++				if ( m_sIndex )
++					*m_sIndex++ = '\0'; 
++				else
++					m_sIndex = SPHINXSE_DEFAULT_INDEX;
++				
++				m_iPort = atoi(sPort);
++				if ( !m_iPort )
++					m_iPort = SPHINXSE_DEFAULT_PORT;
++			}
++		} else
++		{
++			m_sIndex = strchr ( m_sHost, '/' );
++			if ( m_sIndex )
++				*m_sIndex++ = '\0';
++			else
++				m_sIndex = SPHINXSE_DEFAULT_INDEX;
++		}
++
++		bOk = true;
++		break;
++	}
++	
++	return bOk;
++}
++
++int CSphUrl::Connect()
++{
++	struct sockaddr_in sin;
++#ifndef __WIN__
++	struct sockaddr_un saun;
++#endif
++
++	int iDomain = 0;
++	int iSockaddrSize = 0;
++	struct sockaddr * pSockaddr = NULL;
++
++	in_addr_t ip_addr;
++
++	if ( m_iPort )
++	{
++		iDomain = AF_INET;
++		iSockaddrSize = sizeof(sin);
++		pSockaddr = (struct sockaddr *) &sin;
++
++		memset ( &sin, 0, sizeof(sin) );
++		sin.sin_family = AF_INET;
++		sin.sin_port = htons(m_iPort);
++		
++		// resolve address
++		if ( (int)( ip_addr=inet_addr(m_sHost) ) != (int)INADDR_NONE )
++			memcpy ( &sin.sin_addr, &ip_addr, sizeof(ip_addr) );
++		else
++		{
++			int tmp_errno;
++			struct hostent tmp_hostent, *hp;
++			char buff2 [ GETHOSTBYNAME_BUFF_SIZE ];
++			
++			hp = my_gethostbyname_r ( m_sHost, &tmp_hostent,
++									  buff2, sizeof(buff2), &tmp_errno );
++			if ( !hp )
++			{ 
++				my_gethostbyname_r_free();
++				
++				char sError[256];
++				snprintf ( sError, sizeof(sError), "failed to resolve searchd host (name=%s)", m_sHost );
++				
++				my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++				return -1;
++			}
++			
++			memcpy ( &sin.sin_addr, hp->h_addr, Min ( sizeof(sin.sin_addr), (size_t)hp->h_length ) );
++			my_gethostbyname_r_free();
++		}
++	}
++	else
++	{
++#ifndef __WIN__
++		iDomain = AF_UNIX;
++		iSockaddrSize = sizeof(saun);
++		pSockaddr = (struct sockaddr *) &saun;
++
++		memset ( &saun, 0, sizeof(saun) );
++		saun.sun_family = AF_UNIX;
++		strncpy ( saun.sun_path, m_sHost, sizeof(saun.sun_path)-1 );
++#else
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), "Unix-domain sockets are not supported on Windows" );
++		return -1;
++#endif
++	}
++
++	// connect to searchd and exchange versions
++	uint uServerVersion;
++	uint uClientVersion = htonl ( SPHINX_SEARCHD_PROTO );
++	int iSocket = -1;
++	char * pError = NULL;
++	do
++	{
++		iSocket = socket ( iDomain, SOCK_STREAM, 0 );
++		if ( iSocket == -1 )
++		{
++			pError = "Failed to create client socket";
++			break;
++		}
++	
++		if ( connect ( iSocket, pSockaddr, iSockaddrSize ) == -1)
++		{
++			pError = "Failed to connect to searchd";
++			break;
++		}
++
++		if ( !sphRecv ( iSocket, (char *)&uServerVersion, sizeof(uServerVersion) ) )
++		{
++			pError = "Failed to receive searchd version";
++			break;
++		}
++		
++		if ( !sphSend ( iSocket, (char *)&uClientVersion, sizeof(uClientVersion) ) )
++		{
++			pError = "Failed to send client version";
++			break;
++		}
++	}
++	while(0);
++
++	// fixme: compare versions?
++
++	if ( pError )
++	{
++		char sError[1024];
++		snprintf ( sError, sizeof(sError), "%s [%d] %s", Format(), errno, strerror(errno) );
++		my_error ( ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), sError );
++
++		if ( iSocket != -1 )
++			close ( iSocket );
++		
++		return -1;
++	}
++
++	return iSocket;
++}
++
++struct CSphResponse
++{
++	char * m_pBuffer;
++	char * m_pBody;
++
++	CSphResponse ()
++		: m_pBuffer ( NULL )
++		, m_pBody ( NULL )
++	{}
++
++	CSphResponse ( DWORD uSize )
++		: m_pBody ( NULL )
++	{
++		m_pBuffer = new char[uSize];
++	}
++
++	~CSphResponse ()
++	{
++		SafeDeleteArray ( m_pBuffer );
++	}
++	
++	static CSphResponse * Read ( int iSocket, int iClientVersion );
++};
++
++CSphResponse *
++CSphResponse::Read ( int iSocket, int iClientVersion )
++{
++	char sHeader[8];
++	if ( !sphRecv ( iSocket, sHeader, sizeof(sHeader) ) )
++		return NULL;
++
++	int iStatus   = ntohs ( sphUnalignedRead ( *(short int *) &sHeader[0] ) );
++	int iVersion  = ntohs ( sphUnalignedRead ( *(short int *) &sHeader[2] ) );
++	DWORD uLength = ntohl ( sphUnalignedRead ( *(DWORD *)     &sHeader[4] ) );
++
++	if ( iVersion < iClientVersion ) // fixme: warn
++		;
++
++	if ( uLength <= SPHINXSE_MAX_ALLOC )
++	{
++		CSphResponse * pResponse = new CSphResponse ( uLength );
++		if ( !sphRecv ( iSocket, pResponse->m_pBuffer, uLength ) )
++		{
++			SafeDelete ( pResponse );
++			return NULL;
++		}
++
++		pResponse->m_pBody = pResponse->m_pBuffer;
++		if ( iStatus != SEARCHD_OK )
++		{
++			DWORD uSize = ntohl ( *(DWORD *)pResponse->m_pBuffer );
++			if ( iStatus == SEARCHD_WARNING )
++				pResponse->m_pBody += uSize; // fixme: report the warning somehow
++			else
++			{
++				char * sMessage = sphDup ( pResponse->m_pBuffer + sizeof(DWORD), uSize );
++				my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), sMessage );
++				SafeDelete ( sMessage );
++				SafeDelete ( pResponse );
++				return NULL;
++			}
++		}
++		return pResponse;
++	}
++	return NULL;
++}
++
++/// udf
++
++extern "C"
++{
++	my_bool sphinx_snippets_init ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sMessage );
++	void sphinx_snippets_deinit ( UDF_INIT * pUDF );
++	char * sphinx_snippets ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sResult, unsigned long * pLength, char * pIsNull, char * sError );
++};
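++
++// Illustrative usage sketch (not part of the upstream source; the shared object
++// name may differ per build): once the plugin is built, the UDF is typically
++// registered and called from SQL roughly like this:
++//   CREATE FUNCTION sphinx_snippets RETURNS STRING SONAME 'ha_sphinx.so';
++//   SELECT sphinx_snippets(content, 'test1', 'query', '<b>' AS before_match, 5 AS around);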
++
++#define MAX_MESSAGE_LENGTH 255
++#define MAX_RESULT_LENGTH 255
++
++struct CSphSnippets
++{
++	CSphUrl m_tUrl;
++	CSphResponse * m_pResponse;
++
++	int m_iBeforeMatch;
++	int m_iAfterMatch;
++	int m_iChunkSeparator;
++	int m_iLimit;
++	int m_iAround;
++	int m_iFlags;
++
++	CSphSnippets()
++		: m_pResponse(NULL)
++		, m_iBeforeMatch(0)
++		, m_iAfterMatch(0)
++		, m_iChunkSeparator(0)
++		  // defaults
++		, m_iLimit(256)
++		, m_iAround(5)
++		, m_iFlags(1)
++	{
++	}
++
++	~CSphSnippets()
++	{
++		SafeDelete ( m_pResponse );
++	}
++};
++
++#define KEYWORD(NAME) else if ( strncmp ( NAME, pArgs->attributes[i], pArgs->attribute_lengths[i] ) == 0 )
++
++#define CHECK_TYPE(TYPE)											\
++	if ( pArgs->arg_type[i] != TYPE )								\
++	{																\
++		snprintf ( sMessage, MAX_MESSAGE_LENGTH,					\
++				   "%.*s argument must be a string",				\
++				   (int)pArgs->attribute_lengths[i],				\
++				   pArgs->attributes[i] );							\
++		bFail = true;												\
++		break;														\
++	}																\
++	if ( TYPE == STRING_RESULT && !pArgs->args[i] )					\
++	{																\
++		snprintf ( sMessage, MAX_MESSAGE_LENGTH,					\
++				   "%.*s argument must be constant (and not NULL)",	\
++				   (int)pArgs->attribute_lengths[i],				\
++				   pArgs->attributes[i] );							\
++		bFail = true;												\
++		break;														\
++	}
++
++#define STRING CHECK_TYPE(STRING_RESULT)
++#define INT CHECK_TYPE(INT_RESULT); int iValue = *(long long *)pArgs->args[i]
++
++my_bool sphinx_snippets_init ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sMessage )
++{
++	if ( pArgs->arg_count < 3 )
++	{
++		strncpy ( sMessage, "insufficient arguments", MAX_MESSAGE_LENGTH );
++		return 1;
++	}
++
++	bool bFail = false;
++	CSphSnippets * pOpts = new CSphSnippets;
++	for ( uint i = 0; i < pArgs->arg_count; i++ )
++	{
++		if ( i < 3 )
++		{
++			if ( pArgs->arg_type[i] != STRING_RESULT )
++			{
++				strncpy ( sMessage, "first three arguments must be of string type", MAX_MESSAGE_LENGTH );
++				bFail = true;
++				break;
++			}
++		}
++		KEYWORD("sphinx")
++		{
++			STRING;
++			if ( !pOpts->m_tUrl.Parse ( pArgs->args[i], pArgs->lengths[i] ) )
++			{
++				strncpy ( sMessage, "failed to parse connection string", MAX_MESSAGE_LENGTH );
++				bFail = true;
++				break;
++			}
++		}
++		KEYWORD("before_match")		{ STRING; pOpts->m_iBeforeMatch = i; }
++		KEYWORD("after_match")		{ STRING; pOpts->m_iAfterMatch = i; }
++		KEYWORD("chunk_separator")	{ STRING; pOpts->m_iChunkSeparator = i; }
++		KEYWORD("limit")			{ INT; pOpts->m_iLimit = iValue; }
++		KEYWORD("around")			{ INT; pOpts->m_iAround = iValue; }
++		KEYWORD("exact_phrase")		{ INT; if ( iValue ) pOpts->m_iFlags |= 2; }
++		KEYWORD("single_passage")	{ INT; if ( iValue ) pOpts->m_iFlags |= 4; }
++		KEYWORD("use_boundaries")	{ INT; if ( iValue ) pOpts->m_iFlags |= 8; }
++		KEYWORD("weight_order")		{ INT; if ( iValue ) pOpts->m_iFlags |= 16; }
++		else
++		{
++			snprintf ( sMessage, MAX_MESSAGE_LENGTH, "unrecognized argument: %.*s",
++					   (int)pArgs->attribute_lengths[i], pArgs->attributes[i] );
++			bFail = true;
++			break;
++		}
++	}
++	
++	if ( bFail )
++	{
++		SafeDelete ( pOpts );
++		return 1;
++	}
++	pUDF->ptr = (char *)pOpts;
++	return 0;
++}
++
++#undef STRING
++#undef INT
++#undef KEYWORD
++#undef CHECK_TYPE
++
++#define ARG(i) pArgs->args[i], pArgs->lengths[i]
++#define ARG_LEN(VAR, LEN) ( VAR ? pArgs->lengths[VAR] : LEN )
++
++#define SEND_STRING(INDEX, DEFAULT)							\
++	if ( INDEX )											\
++		tBuffer.SendString ( ARG(INDEX) );					\
++	else													\
++		tBuffer.SendString ( DEFAULT, sizeof(DEFAULT) - 1 );
++
++
++char * sphinx_snippets ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sResult, unsigned long * pLength, char * pIsNull, char * pError )
++{
++	CSphSnippets * pOpts = (CSphSnippets *)pUDF->ptr;
++	assert ( pOpts );
++
++	if ( !pArgs->args[0] || !pArgs->args[1] || !pArgs->args[2] )
++	{
++		*pIsNull = 1;
++		return sResult;
++	}
++
++	const int iSize =
++		8 + // header
++		8 +
++		4 + pArgs->lengths[1] + // index
++		4 + pArgs->lengths[2] + // words
++		4 + ARG_LEN ( pOpts->m_iBeforeMatch, 3 ) +
++		4 + ARG_LEN ( pOpts->m_iAfterMatch, 4 ) +
++		4 + ARG_LEN ( pOpts->m_iChunkSeparator, 5 ) +
++		12 +
++		4 + pArgs->lengths[0]; // document
++
++	CSphBuffer tBuffer(iSize);
++
++	tBuffer.SendWord ( SEARCHD_COMMAND_EXCERPT );
++	tBuffer.SendWord ( VER_COMMAND_EXCERPT );
++	tBuffer.SendDword ( iSize - 8 );
++
++	tBuffer.SendDword ( 0 );
++	tBuffer.SendDword ( pOpts->m_iFlags );
++
++	tBuffer.SendString ( ARG(1) ); // index
++	tBuffer.SendString ( ARG(2) ); // words
++
++	SEND_STRING ( pOpts->m_iBeforeMatch, "<b>" );
++	SEND_STRING ( pOpts->m_iAfterMatch, "</b>" );
++	SEND_STRING ( pOpts->m_iChunkSeparator, " ... " );
++
++	tBuffer.SendInt ( pOpts->m_iLimit );
++	tBuffer.SendInt ( pOpts->m_iAround );
++
++	// single document
++	tBuffer.SendInt ( 1 );
++	tBuffer.SendString ( ARG(0) );
++
++	int iSocket = -1;
++	do
++	{
++		if ( !tBuffer.Finalize() )
++		{
++			my_error ( ER_QUERY_ON_FOREIGN_DATA_SOURCE, MYF(0), "INTERNAL ERROR: failed to build request" );
++			break;
++		}
++		
++		iSocket = pOpts->m_tUrl.Connect();
++		if ( iSocket == -1 ) break;
++		if ( !sphSend ( iSocket, tBuffer.Ptr(), iSize, sphReportErrors ) ) break;
++
++		CSphResponse * pResponse = CSphResponse::Read ( iSocket, 0x100 );
++		if ( !pResponse ) break;
++
++		close ( iSocket );
++		pOpts->m_pResponse = pResponse;
++		*pLength = ntohl( *(DWORD *)pResponse->m_pBody );
++		return pResponse->m_pBody + sizeof(DWORD);
++	}
++	while(0);
++
++	if ( iSocket != -1 )
++		close ( iSocket );
++
++	*pError = 1;
++	return sResult;
++}
++
++#undef SEND_STRING
++#undef ARG_LEN	
++#undef ARG
++
++void sphinx_snippets_deinit ( UDF_INIT * pUDF )
++{
++	CSphSnippets * pOpts = (CSphSnippets *)pUDF->ptr;
++	SafeDelete ( pOpts );
++}
++
++//
++// $Id: snippets_udf.cc 2616 2011-01-01 02:33:06Z shodan $
++//
diff --git a/component/ncurses/buildout.cfg b/component/ncurses/buildout.cfg
index f9b9ab8a2fb18424822f5f9a29cec5d34b204e9a..3944e63180e01cc713d5e03575518e202f2c6102 100644
--- a/component/ncurses/buildout.cfg
+++ b/component/ncurses/buildout.cfg
@@ -11,11 +11,14 @@ configure-options =
   --with-shared
   --without-normal
   --without-debug
+  --without-gpm
   --enable-rpath
 # tricky way to rerun with --enable-widec
 make-targets =
   install && (for i in curses unctrl eti form menu panel term; do ln -sf ncurses/$i.h ${buildout:parts-directory}/${:_buildout_section_name_}/include/$i.h; done) && ./configure ${:configure-options} --enable-widec && make install
+# pass dummy LDCONFIG to skip needless calling of ldconfig by non-root user
 environment =
   LDFLAGS =-Wl,--as-needed
+  LDCONFIG=/bin/echo
 make-options =
   -j1
diff --git a/component/openssh/buildout.cfg b/component/openssh/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..35539184ee7ec75912d06393138dfe4d013faee7
--- /dev/null
+++ b/component/openssh/buildout.cfg
@@ -0,0 +1,26 @@
+################################################################
+# OpenSSH Portable - a secure shell client and server for *nix #
+#                                                              #
+# http://www.openssh.com/                                      #
+################################################################
+
+[buildout]
+extends = 
+  ../openssl/buildout.cfg
+
+parts =
+  openssh
+
+[openssh]
+recipe = hexagonit.recipe.cmmi
+md5sum = 0541579adf9d55abb15ef927048d372e
+url = http://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-5.8p2.tar.gz
+
+configure-option = 
+  -I${zlib:location}/include
+  -L${zlib:location}/lib
+  -I${openssl:location}/include
+  -L${openssl:location}/lib
+  --with-ssl-dir=${openssl:location}/lib
+  --prefix=${buildout:parts-directory}/${:_buildout_section_name_}
+  --libdir=lib
diff --git a/component/openssl/buildout.cfg b/component/openssl/buildout.cfg
index 257f51a39c0e1b8c9251fe43003ea11ade2c48c6..a5b1b950e09c66a9cd658c2f6e87464f6f82c13e 100644
--- a/component/openssl/buildout.cfg
+++ b/component/openssl/buildout.cfg
@@ -12,8 +12,8 @@ parts =
 
 [openssl]
 recipe = hexagonit.recipe.cmmi
-url = https://www.openssl.org/source/openssl-1.0.0d.tar.gz
-md5sum = 40b6ea380cc8a5bf9734c2f8bf7e701e
+url = https://www.openssl.org/source/openssl-1.0.0e.tar.gz
+md5sum = 7040b89c4c58c7a1016c0dfa6e821c86
 configure-command = ./config
 configure-options =
   -I${zlib:location}/include
diff --git a/component/pdftk/buildout.cfg b/component/pdftk/buildout.cfg
index f636fce05929bc89a504e4ffa85c30e005a14124..53efe53cdd1ed92d36cb5668ca12d73f4c3dcc72 100644
--- a/component/pdftk/buildout.cfg
+++ b/component/pdftk/buildout.cfg
@@ -39,6 +39,6 @@ make-options =
 
 pre-make-hook = ${pdftk-hooks-download:location}/${pdftk-hooks-download:filename}:pre_make_hook
 environment =
-  PATH=${gcc-java:location}/bin:${fastjar:location}/bin:%(PATH)s
-  LDFLAGS=-L${gcc-java:location}/lib -Wl,-rpath=${gcc-java:location}/lib -L${gcc-java:location}/lib64 -Wl,-rpath=${gcc-java:location}/lib64
-  LD_LIBRARY_PATH=${gcc-java:location}/lib:${gcc-java:location}/lib64
+  PATH=${gcc-java-minimal:location}/bin:${fastjar:location}/bin:%(PATH)s
+  LDFLAGS=-L${gcc-java-minimal:location}/lib -Wl,-rpath=${gcc-java-minimal:location}/lib -L${gcc-java-minimal:location}/lib64 -Wl,-rpath=${gcc-java-minimal:location}/lib64
+  LD_LIBRARY_PATH=${gcc-java-minimal:location}/lib:${gcc-java-minimal:location}/lib64
diff --git a/component/poppler/buildout.cfg b/component/poppler/buildout.cfg
index adf79592b440de05cb0aa9f4f12d7603f6899ab4..938aa32a707ba399cc9d46f17368bcb06d1f42c2 100644
--- a/component/poppler/buildout.cfg
+++ b/component/poppler/buildout.cfg
@@ -15,8 +15,8 @@ extends =
 
 [poppler-0.17.1]
 recipe = hexagonit.recipe.cmmi
-md5sum = 8d7276d1943078c76aabe9f2ec52d50b
-url = http://poppler.freedesktop.org/poppler-0.17.1.tar.gz
+md5sum = b566d1fbaa29b9257bf0ecc130e7b2ca
+url = http://poppler.freedesktop.org/poppler-0.17.2.tar.gz
 configure-options =
   --disable-cairo-output
   --disable-cms
@@ -26,6 +26,7 @@ configure-options =
   --disable-poppler-cpp
   --disable-poppler-glib
   --disable-poppler-qt4
+  --disable-static
   --enable-zlib
 environment =
   PATH=${pkgconfig:location}/bin:%(PATH)s
diff --git a/component/pycrypto-python/buildout.cfg b/component/pycrypto-python/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..835f9c884f6380ce57f1962da49c5c957850c685
--- /dev/null
+++ b/component/pycrypto-python/buildout.cfg
@@ -0,0 +1,22 @@
+[buildout]
+extends =
+  ../gmp/buildout.cfg
+
+parts =
+  pycrypto-python
+
+versions = versions
+
+[pycrypto-python]
+recipe = zc.recipe.egg:custom
+egg = pycrypto
+include-dirs =
+  ${gmp:location}/include
+library-dirs =
+  ${gmp:location}/lib
+rpath =
+  ${gmp:location}/lib
+
+[versions]
+# pycrypto 2.4 does not work with setuptools/buildout
+pycrypto = 2.3
diff --git a/component/rpm2cpio/buildout.cfg b/component/rpm2cpio/buildout.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..d1a374706b77e499a7810c31458bd9d5c27728e6
--- /dev/null
+++ b/component/rpm2cpio/buildout.cfg
@@ -0,0 +1,8 @@
+[buildout]
+parts =
+  rpm2cpio
+
+[rpm2cpio]
+recipe = slapos.recipe.build:download
+url = http://ruda.googlecode.com/hg/rpm/rpm2cpio.py
+md5sum = 06001f57616581312f9599e104f2473a
diff --git a/component/slapos/buildout.cfg b/component/slapos/buildout.cfg
index 41718ac269c4c06cdbdf7d88ed852eefb3aed6bc..6b254b3b4c614ab2102e2e37c215b25833a3995d 100644
--- a/component/slapos/buildout.cfg
+++ b/component/slapos/buildout.cfg
@@ -1,11 +1,29 @@
 [buildout]
 extends =
   ../../stack/shacache-client.cfg
+  ../bison/buildout.cfg
+  ../bzip2/buildout.cfg
+  ../gdbm/buildout.cfg
+  ../gettext/buildout.cfg
+  ../glib/buildout.cfg
+  ../libxml2/buildout.cfg
+  ../libxslt/buildout.cfg
   ../lxml-python/buildout.cfg
+  ../m4/buildout.cfg
+  ../ncurses/buildout.cfg
+  ../openssl/buildout.cfg
+  ../pkgconfig/buildout.cfg
+  ../popt/buildout.cfg
   ../python-2.7/buildout.cfg
+  ../readline/buildout.cfg
+  ../sqlite3/buildout.cfg
+  ../swig/buildout.cfg
+  ../zlib/buildout.cfg
 
 parts =
   slapos
+  cfg-environment
+  sh-environment
 
 find-links =
   http://www.nexedi.org/static/packages/source/slapos.buildout/
@@ -26,6 +44,42 @@ include-site-packages = false
 exec-sitecustomize = false
 allowed-eggs-from-site-packages =
 
+[environment]
+# Note: For now the original PATH is appended to the end, as not all tools are
+#       provided by SlapOS
+PATH=${bison:location}/bin:${bzip2:location}/bin:${gettext:location}/bin:${glib:location}/bin:${libxml2:location}/bin:${libxslt:location}/bin:${m4:location}/bin:${ncurses:location}/bin:${openssl:location}/bin:${pkgconfig:location}/bin:${python2.7:location}/bin:${readline:location}/bin:${sqlite3:location}/bin:${swig:location}/bin:${buildout:bin-directory}:$PATH
+CFLAGS=-I${bzip2:location}/include -I${gdbm:location}/include -I${gettext:location}/include -I${glib:location}/include -I${libxml2:location}/include -I${libxslt:location}/include -I${ncurses:location}/include -I${openssl:location}/include -I${popt:location}/include -I${readline:location}/include -I${sqlite3:location}/include -I${zlib:location}/include
+CPPFLAGS=${:CFLAGS}
+LDFLAGS=-L${bzip2:location}/lib -Wl,-rpath=${bzip2:location}/lib -L${gdbm:location}/lib -Wl,-rpath=${gdbm:location}/lib -L${gettext:location}/lib -Wl,-rpath=${gettext:location}/lib -L${glib:location}/lib -Wl,-rpath=${glib:location}/lib -L${libxml2:location}/lib -Wl,-rpath=${libxml2:location}/lib -L${libxslt:location}/lib -Wl,-rpath=${libxslt:location}/lib -L${ncurses:location}/lib -Wl,-rpath=${ncurses:location}/lib -L${openssl:location}/lib -Wl,-rpath=${openssl:location}/lib -L${popt:location}/lib -Wl,-rpath=${popt:location}/lib -L${readline:location}/lib -Wl,-rpath=${readline:location}/lib -L${sqlite3:location}/lib -Wl,-rpath=${sqlite3:location}/lib -L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib
+PKG_CONFIG_PATH=${glib:location}/lib/pkgconfig:${libxml2:location}/lib/pkgconfig:${libxslt:location}/lib/pkgconfig:${openssl:location}/lib/pkgconfig:${popt:location}/lib/pkgconfig:${python2.7:location}/lib/pkgconfig:${sqlite3:location}/lib/pkgconfig
+LD_LIBRARY_PATH=${bzip2:location}/lib:${gdbm:location}/lib:${gettext:location}/lib:${glib:location}/lib:${libxml2:location}/lib:${libxslt:location}/lib:${ncurses:location}/lib:${openssl:location}/lib:${popt:location}/lib:${readline:location}/lib:${sqlite3:location}/lib:${zlib:location}/lib
+
+[cfg-environment]
+# Section exposes SlapOS default environment as configuration file.
+recipe = collective.recipe.template
+input = inline:
+  [environment]
+  PATH=${environment:PATH}
+  CFLAGS=${environment:CFLAGS}
+  CPPFLAGS=${environment:CPPFLAGS}
+  LDFLAGS=${environment:LDFLAGS}
+  PKG_CONFIG_PATH=${environment:PKG_CONFIG_PATH}
+  LD_LIBRARY_PATH=${environment:LD_LIBRARY_PATH}
+output = ${buildout:directory}/environment.cfg
+
+[sh-environment]
+# Section exposes SlapOS default environment as sh file.
+recipe = collective.recipe.template
+input = inline:
+  export PATH="${environment:PATH}"
+  export CFLAGS="${environment:CFLAGS}"
+  export CPPFLAGS="${environment:CPPFLAGS}"
+  export LDFLAGS="${environment:LDFLAGS}"
+  export PKG_CONFIG_PATH="${environment:PKG_CONFIG_PATH}"
+  export LD_LIBRARY_PATH="${environment:LD_LIBRARY_PATH}"
+  export PS1="[SlapOS env Active] $PS1"
+output = ${buildout:directory}/environment.sh
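+# Usage sketch (assumption, not enforced by this part): after buildout has run,
+# activate this environment in a shell with: . environment.sh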
+
 [lxml-python]
 python = python2.7
 
@@ -38,38 +92,58 @@ eggs =
   ${lxml-python:egg}
   slapos.core
 
+initialization =
+  import os
+  os.environ['PATH'] = os.path.expandvars('${environment:PATH}')
+
+# control scripts generation in order to avoid reinstalling bin/buildout
+scripts =
+  bang = slapos.bang:main
+  generate-signature-key = slapos.signature:run
+  slapconsole = slapos.console:run
+  slapos-request = slapos.console:request
+  slapformat = slapos.format:main
+  slapgrid = slapos.grid.slapgrid:run
+  slapgrid-cp = slapos.grid.slapgrid:runComputerPartition
+  slapgrid-sr = slapos.grid.slapgrid:runSoftwareRelease
+  slapgrid-ur = slapos.grid.slapgrid:runUsageReport
+  slapgrid-supervisorctl = slapos.grid.svcbackend:supervisorctl
+  slapgrid-supervisord = slapos.grid.svcbackend:supervisord
+  slapproxy = slapos.proxy:main
+
 [versions]
-zc.buildout = 1.5.3-dev-SlapOS-005
-Jinja2 = 2.5.5
-Werkzeug = 0.6.2
+zc.buildout = 1.6.0-dev-SlapOS-002
+Jinja2 = 2.6
+Werkzeug = 0.8.1
+collective.recipe.template = 1.9
 hexagonit.recipe.cmmi = 1.5.0
-lxml = 2.3
+ipython = 0.11
+lxml = 2.3.1
 meld3 = 0.6.7
-netaddr = 0.7.5
+netaddr = 0.7.6
 setuptools = 0.6c12dev-r88846
-slapos.core = 0.12
-slapos.libnetworkcache = 0.2
+slapos.core = 0.19
+slapos.libnetworkcache = 0.10
 xml-marshaller = 0.9.7
 z3c.recipe.scripts = 1.0.1
 zc.recipe.egg = 1.3.2
 
 # Required by:
-# slapos.core==0.12
-Flask = 0.7.2
+# slapos.core==0.19
+Flask = 0.8
 
 # Required by:
 # hexagonit.recipe.cmmi==1.5.0
 hexagonit.recipe.download = 1.5.0
 
 # Required by:
-# slapos.core==0.12
-netifaces = 0.5
+# slapos.core==0.19
+netifaces = 0.6
 
 # Required by:
-# slapos.core==0.12
+# slapos.core==0.19
 supervisor = 3.0a10
 
 # Required by:
-# slapos.core==0.12
-zope.interface = 3.6.4
-
+# slapos.core==0.19
+zope.interface = 3.8.0
diff --git a/component/sqlite3/buildout.cfg b/component/sqlite3/buildout.cfg
index 2a4d2db354f47d900763d171a92ab7abff77853b..168ca9b5fde2c29861cb07c30e45bc441723ce57 100644
--- a/component/sqlite3/buildout.cfg
+++ b/component/sqlite3/buildout.cfg
@@ -5,8 +5,8 @@ parts =
 
 [sqlite3]
 recipe = hexagonit.recipe.cmmi
-url = http://www.sqlite.org/sqlite-autoconf-3070701.tar.gz
-md5sum = 554026fe7fac47b1cf61c18d5fe43419
+url = http://www.sqlite.org/sqlite-autoconf-3070800.tar.gz
+md5sum = 6bfb46d73caaa1bbbcd2b52184b6c542
 configure-options =
   --disable-static
   --enable-readline
diff --git a/component/stunnel/buildout.cfg b/component/stunnel/buildout.cfg
index 470069bb0b229b84b14b1cc6aa812fb222ecd2ba..6381abbb4e292830ecf6f28cb5dd15a11887c2ef 100644
--- a/component/stunnel/buildout.cfg
+++ b/component/stunnel/buildout.cfg
@@ -17,8 +17,8 @@ filename = stunnel-4-hooks.py
 
 [stunnel-4]
 recipe = hexagonit.recipe.cmmi
-url = ftp://ftp.stunnel.org/stunnel/stunnel-4.39.tar.gz
-md5sum = 853739119a8364daea750154af6d7e79
+url = http://mirror.bit.nl/stunnel/stunnel-4.46.tar.gz
+md5sum = 978030ff42f087ec26eb8a095ab69994
 pre-configure-hook = ${stunnel-4-hook-download:location}/${stunnel-4-hook-download:filename}:pre_configure_hook
 configure-options =
   --enable-ipv6
diff --git a/component/w3m/buildout.cfg b/component/w3m/buildout.cfg
index 1692891e9908be31d0ff662b0d2a783285615a43..21b831aec293607c0c054d45a791b5a1deb50c3d 100644
--- a/component/w3m/buildout.cfg
+++ b/component/w3m/buildout.cfg
@@ -9,11 +9,6 @@ extends =
 parts =
   w3m
 
-versions = versions
-
-find-links =
-    http://www.nexedi.org/static/packages/source/slapos.buildout/
-
 [w3m-w3m.gcc.forward.compat.patch]
 recipe = hexagonit.recipe.download
 url =${:_profile_base_location_}/${:filename}
@@ -50,7 +45,3 @@ environment =
   PKG_CONFIG_PATH=${openssl:location}/lib/pkgconfig
   CPPFLAGS=-I${ncurses:location}/include/ -I${zlib:location}/include/
   LDFLAGS=-Wl,--as-needed -L${garbage-collector:location}/lib -Wl,-rpath=${garbage-collector:location}/lib -L${ncurses:location}/lib -Wl,-rpath=${ncurses:location}/lib -L${openssl:location}/lib -Wl,-rpath=${openssl:location}/lib -L${zlib:location}/lib -Wl,-rpath=${zlib:location}/lib
-
-[versions]
-# Use SlapOS patched zc.buildout
-zc.buildout = 1.5.3-dev-SlapOS-005
diff --git a/component/xorg/700c7896b832d6e4fb0185f0d5382b01f94e7141.patch b/component/xorg/700c7896b832d6e4fb0185f0d5382b01f94e7141.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ebf9384cc8bc6d7479073d6629196e4fdc019f0c
--- /dev/null
+++ b/component/xorg/700c7896b832d6e4fb0185f0d5382b01f94e7141.patch
@@ -0,0 +1,21 @@
+From 700c7896b832d6e4fb0185f0d5382b01f94e7141 Mon Sep 17 00:00:00 2001
+From: Alan Hourihane <alanh@vmware.com>
+Date: Fri, 25 Feb 2011 11:05:27 +0000
+Subject: Add _X_HIDDEN to xgeExtRegister to fix build problems on 64bit
+
+---
+diff --git a/src/Xge.c b/src/Xge.c
+index 0655e00..d28a4f0 100644
+--- a/src/Xge.c
++++ b/src/Xge.c
+@@ -292,7 +292,7 @@ _xgeEventToWire(Display* dpy, XEvent* re, xEvent* event)
+  * Extensions need to register callbacks for their events.
+  */
+ Bool
+-xgeExtRegister(Display* dpy, int offset, XExtensionHooks* callbacks)
++_X_HIDDEN xgeExtRegister(Display* dpy, int offset, XExtensionHooks* callbacks)
+ {
+     XGEExtNode* newExt;
+     XGEData* xge_data;
+--
+cgit v0.8.3-6-g21f6
diff --git a/component/xorg/buildout.cfg b/component/xorg/buildout.cfg
index e1cee1d2e5b0bc0ff326a3b99501d40939bbf777..8807f69e9817de7d1b60454444b9cd405b1bf9a9 100644
--- a/component/xorg/buildout.cfg
+++ b/component/xorg/buildout.cfg
@@ -2,6 +2,7 @@
 [buildout]
 extends =
   ../freetype/buildout.cfg
+  ../libuuid/buildout.cfg
   ../libxml2/buildout.cfg
   ../libxslt/buildout.cfg
   ../pkgconfig/buildout.cfg
@@ -11,7 +12,8 @@ parts =
   libXdmcp
   libXext
   libXau
-  libX11
+  libXinerama
+  libSM
 
 [xorg-aclocal]
 ACLOCAL=${xorg-util-macros:location}/share/aclocal
@@ -56,8 +58,6 @@ configure-options =
   --disable-static
 environment =
   PKG_CONFIG_PATH=${xproto:location}/lib/pkgconfig
-  LD_LIBRARY_PATH=${xproto:location}/lib
-  LD_RUN_PATH=${xproto:location}/lib
   PATH=${pkgconfig:location}/bin:%(PATH)s
 
 [xcbproto]
@@ -82,8 +82,6 @@ configure-options =
   --disable-build-docs
 environment =
   PKG_CONFIG_PATH=${xcbproto:location}/lib/pkgconfig:${libXau:location}/lib/pkgconfig:${xproto:location}/lib/pkgconfig:${xorg-libpthread-stubs:location}/lib/pkgconfig:${libxslt:location}/lib/pkgconfig
-  LD_LIBRARY_PATH=${xcbproto:location}/lib:${libXau:location}/lib:${xorg-libpthread-stubs:location}/lib:${libxslt:location}/lib
-  LD_RUN_PATH=${xcbproto:location}/lib:${libXau:location}/lib:${xorg-libpthread-stubs:location}/lib:${libxslt:location}/lib
   PATH=${pkgconfig:location}/bin:${libxslt:location}/bin:%(PATH)s
   PYTHON=${buildout:executable}
 # Python note: libxcb requires python with ElementTree. In case of appliance
@@ -97,8 +95,6 @@ url = http://www.x.org/releases/X11R7.6/src/lib/libXext-1.2.0.tar.bz2
 md5sum = 9bb236ff0193e9fc1c1fb504dd840331
 environment =
   PKG_CONFIG_PATH=${xcbproto:location}/lib/pkgconfig:${libXau:location}/lib/pkgconfig:${xproto:location}/lib/pkgconfig:${xorg-libpthread-stubs:location}/lib/pkgconfig:${xextproto:location}/lib/pkgconfig:${libX11:location}/lib/pkgconfig:${libxcb:location}/lib/pkgconfig
-  LD_LIBRARY_PATH=${xcbproto:location}/lib:${libXau:location}/lib:${xorg-libpthread-stubs:location}/lib:${xextproto:location}/lib:${libX11:location}/lib:${libxcb:location}/lib
-  LD_RUN_PATH=${xcbproto:location}/lib:${libXau:location}/lib:${xorg-libpthread-stubs:location}/lib:${xextproto:location}/lib:${libX11:location}/lib:${libxcb:location}/lib
   PATH=${pkgconfig:location}/bin:%(PATH)s
 # Warning: do *not* enable -fPIC CFLAGS for this library. Even if it fails and ld asks you to enable it. This will not solve your problem, and create an unexpected (by build chain) setup (all .o will be position-independant code).
 # CFLAGS=-fPIC
@@ -113,8 +109,9 @@ patch-options = -p1
 
 [libXext-patch-link-error]
 # Fixes libXext.la link error
+# http://cgit.freedesktop.org/xorg/lib/libXext/patch/?id=700c7896b832d6e4fb0185f0d5382b01f94e7141
 recipe = hexagonit.recipe.download
-url = http://cgit.freedesktop.org/xorg/lib/libXext/patch/?id=700c7896b832d6e4fb0185f0d5382b01f94e7141
+url = ${:_profile_base_location_}/${:filename}
 download-only = true
 filename = 700c7896b832d6e4fb0185f0d5382b01f94e7141.patch
 md5sum = 52635ef694ee6f1acb642a77ee8eb010
@@ -133,14 +130,9 @@ configure-options =
   --disable-specs
   --without-xmlto
   --without-fop
-
 environment =
   PKG_CONFIG_PATH=${xproto:location}/lib/pkgconfig:${xextproto:location}/lib/pkgconfig:${xtrans:location}/share/pkgconfig:${libxcb:location}/lib/pkgconfig:${xorg-libpthread-stubs:location}/lib/pkgconfig:${libXau:location}/lib/pkgconfig
-  LD_LIBRARY_PATH=${xproto:location}/lib:${xextproto:location}/lib:${libxcb:location}/lib
-  LD_RUN_PATH=${xproto:location}/lib:${xextproto:location}/lib:${libxcb:location}/lib
   PATH=${pkgconfig:location}/bin:%(PATH)s
-  CFLAGS=-I${xproto:location}/include
-  CPPFLAGS=-I${xproto:location}/include
 
 [libXdmcp]
 recipe = hexagonit.recipe.cmmi
@@ -148,10 +140,48 @@ url = http://www.x.org/releases/X11R7.6/src/lib/libXdmcp-1.1.0.tar.bz2
 md5sum = 762b6bbaff7b7d0831ddb4f072f939a5
 environment =
   PKG_CONFIG_PATH=${xorg-util-macros:location}/share/pkgconfig:${xproto:location}/lib/pkgconfig
-  LD_LIBRARY_PATH=${xproto:location}/lib
-  LD_RUN_PATH=${xproto:location}/lib
   PATH=${pkgconfig:location}/bin:%(PATH)s
 configure-options =
   --disable-static
   --without-xmlto
   --without-fop
+
+[xineramaproto]
+recipe = hexagonit.recipe.cmmi
+url = http://www.x.org/releases/X11R7.6/src/proto/xineramaproto-1.2.tar.bz2
+md5sum = a8aadcb281b9c11a91303e24cdea45f5
+
+[libXinerama]
+recipe = hexagonit.recipe.cmmi
+url = http://www.x.org/releases/X11R7.6/src/lib/libXinerama-1.1.1.tar.bz2
+md5sum = ecd4839ad01f6f637c6fb5327207f89b
+environment =
+  PKG_CONFIG_PATH=${libX11:location}/lib/pkgconfig:${libXau:location}/lib/pkgconfig:${libXext:location}/lib/pkgconfig:${libxcb:location}/lib/pkgconfig:${xextproto:location}/lib/pkgconfig:${xineramaproto:location}/lib/pkgconfig:${xorg-libpthread-stubs:location}/lib/pkgconfig:${xproto:location}/lib/pkgconfig
+  PATH=${pkgconfig:location}/bin:%(PATH)s
+configure-options =
+  --disable-static
+
+[libICE]
+recipe = hexagonit.recipe.cmmi
+url = http://www.x.org/releases/X11R7.6/src/lib/libICE-1.0.7.tar.bz2
+md5sum = bb72a732b15e9dc25c3036559387eed5
+configure-options =
+  --disable-static
+  --without-xmlto
+  --without-fop
+environment =
+  PKG_CONFIG_PATH=${xproto:location}/lib/pkgconfig:${xtrans:location}/share/pkgconfig
+  PATH=${pkgconfig:location}/bin:%(PATH)s
+
+[libSM]
+recipe = hexagonit.recipe.cmmi
+url = http://www.x.org/releases/X11R7.6/src/lib/libSM-1.2.0.tar.bz2
+md5sum = e78c447bf1790552b644eca81b542742
+configure-options =
+  --disable-static
+  --without-xmlto
+  --without-fop
+environment =
+  PKG_CONFIG_PATH=${libICE:location}/lib/pkgconfig:${libuuid:location}/lib/pkgconfig:${xproto:location}/lib/pkgconfig:${xtrans:location}/share/pkgconfig
+  PATH=${pkgconfig:location}/bin:%(PATH)s
+  LIBUUID_CFLAGS=-I${libuuid:location}/include
diff --git a/component/xtrabackup/buildout.cfg b/component/xtrabackup/buildout.cfg
index 716ec9ad29cd31f7dd74478506fb44047787db3b..753376d34c0620b1809677a32c637de1e3c12695 100644
--- a/component/xtrabackup/buildout.cfg
+++ b/component/xtrabackup/buildout.cfg
@@ -15,17 +15,12 @@ extends =
 parts =
   xtrabackup
 
-find-links =
-    http://www.nexedi.org/static/packages/source/slapos.buildout/
-
-versions = versions
-
 [xtrabackup-build-patch-download]
 recipe = hexagonit.recipe.download
 url = ${:_profile_base_location_}/${:filename}
-md5sum = e018df8bb3ed672891388556b8e91e35
+md5sum = 95b2c2ef625f88d85bf8876269a19372
 download-only = true
-filename = xtrabackup_build.patch
+filename = xtrabackup-1.6.2_build.patch
 
 [allow_force_ibbackup.patch]
 recipe = hexagonit.recipe.download
@@ -36,8 +31,8 @@ filename = ${:_buildout_section_name_}
 
 [xtrabackup]
 recipe = hexagonit.recipe.cmmi
-url = http://www.percona.com/redir/downloads/XtraBackup/XtraBackup-1.6/source/xtrabackup-1.6.tar.gz
-md5sum = 7c263723312cba36539df4cd7a119744
+url = http://www.percona.com/redir/downloads/XtraBackup/xtrabackup-1.6.2/source/xtrabackup-1.6.2.tar.gz
+md5sum = 933243ae362156c98f1211eb87b3b4ea
 make-binary = true
 patches =
   ${xtrabackup-build-patch-download:location}/${xtrabackup-build-patch-download:filename}
@@ -51,7 +46,3 @@ environment =
   PATH=${autoconf:location}/bin:${automake-1.11:location}/bin:${libtool:location}/bin:${flex:location}/bin:%(PATH)s:${bison:location}/bin
 make-options =
   -j1
-
-[versions]
-# Use SlapOS patched zc.buildout
-zc.buildout = 1.5.3-dev-SlapOS-001
diff --git a/component/xtrabackup/xtrabackup-1.6.2_build.patch b/component/xtrabackup/xtrabackup-1.6.2_build.patch
new file mode 100644
index 0000000000000000000000000000000000000000..41c2188fa9429d8c9db685233aafd83931f0a57c
--- /dev/null
+++ b/component/xtrabackup/xtrabackup-1.6.2_build.patch
@@ -0,0 +1,152 @@
+diff -ur xtrabackup-1.6.2.orig/Makefile xtrabackup-1.6.2/Makefile
+--- xtrabackup-1.6.2.orig/Makefile	2011-07-19 05:16:22.000000000 +0900
++++ xtrabackup-1.6.2/Makefile	2011-07-27 17:58:38.108925111 +0900
+@@ -137,5 +137,5 @@
+ clean:
+ 	rm -f *.o xtrabackup_* 
+ install:
+-	install -m 755 innobackupex-1.5.1 $(BIN_DIR)
++	install -m 755 innobackupex-1.5.1 $(BIN_DIR)/innobackupex
+ 	install -m 755 xtrabackup_*  $(BIN_DIR)
+diff -ur xtrabackup-1.6.2.orig/utils/build.sh xtrabackup-1.6.2/utils/build.sh
+--- xtrabackup-1.6.2.orig/utils/build.sh	2011-07-19 05:16:22.000000000 +0900
++++ xtrabackup-1.6.2/utils/build.sh	2011-07-27 18:01:53.809212142 +0900
+@@ -21,12 +21,14 @@
+ {
+     echo "Build an xtrabackup binary against the specified InnoDB flavor."
+     echo
+-    echo "Usage: `basename $0` CODEBASE"
++    echo "Usage: `basename $0` CODEBASE PREFIX LIBTOOL_LOCATION"
+     echo "where CODEBASE can be one of the following values or aliases:"
+     echo "  innodb51_builtin | 5.1	build against built-in InnoDB in MySQL 5.1"
+     echo "  innodb55         | 5.5	build against InnoDB in MySQL 5.5"
+     echo "  xtradb51         | xtradb   build against Percona Server with XtraDB 5.1"
+     echo "  xtradb55         | xtradb55 build against Percona Server with XtraDB 5.5"
++    echo "where PREFIX is abolute path for install location"
++    echo "where LIBTOOL_LOCATION is abolute path of libtool"
+     exit -1
+ }
+ 
+@@ -79,7 +81,12 @@
+ {
+     echo "Configuring the server"
+     cd $server_dir
+-    BUILD/autorun.sh
++    libtoolize -c -f
++    aclocal -I $libtool_location/share/aclocal -I config/ac-macros
++    autoheader
++    automake -c -a -f
++    autoconf
++    touch sql/sql_yacc.yy
+     eval $configure_cmd
+ 
+     echo "Building the server"
+@@ -92,12 +99,13 @@
+     echo "Building XtraBackup"
+     mkdir $build_dir
+     cp $top_dir/Makefile $top_dir/xtrabackup.c $build_dir
++    cp $top_dir/innobackupex $build_dir/innobackupex-1.5.1
+ 
+     # Read XTRABACKUP_VERSION from the VERSION file
+     . $top_dir/VERSION
+ 
+     cd $build_dir
+-    $MAKE_CMD $xtrabackup_target XTRABACKUP_VERSION=$XTRABACKUP_VERSION
++    $MAKE_CMD $xtrabackup_target XTRABACKUP_VERSION=$XTRABACKUP_VERSION PREFIX=$1
+     cd $top_dir
+ }
+ 
+@@ -106,11 +114,36 @@
+     echo "Building tar4ibd"
+     unpack_and_patch libtar-1.2.11.tar.gz tar4ibd_libtar-1.2.11.patch
+     cd libtar-1.2.11
+-    ./configure
++    ./configure --prefix=$1
+     $MAKE_CMD
+     cd $topdir
+ }
+ 
++function install_server()
++{
++    echo "Installing the server"
++    cd $server_dir
++    $MAKE_CMD install
++    cd $top_dir
++}
++
++function install_xtrabackup()
++{
++    echo "Installing XtraBackup"
++    echo $build_dir
++    cd $build_dir
++    $MAKE_CMD PREFIX=$1 install
++    cd $top_dir
++}
++
++function install_tar4ibd()
++{
++    echo "Installing tar4ibd"
++    cd libtar-1.2.11
++    $MAKE_CMD install
++    cd $topdir
++}
++
+ ################################################################################
+ # Do all steps to build the server, xtrabackup and tar4ibd
+ # Expects the following variables to be set before calling:
+@@ -141,9 +174,15 @@
+ 
+     build_server
+ 
+-    build_xtrabackup
++    build_xtrabackup $1
++
++    build_tar4ibd $1
++
++    install_server
++
++    install_xtrabackup $1
+ 
+-    build_tar4ibd
++    install_tar4ibd
+ }
+ 
+ if ! test -f xtrabackup.c
+@@ -153,6 +192,15 @@
+ fi
+ 
+ type=$1
++prefix=$2
++if [ "x$prefix" == "x" ] ; then
++  usage
++fi
++libtool_location=$3
++if [ "x$libtool_location" == "x" ] ; then
++  usage
++fi
++
+ top_dir=`pwd`
+ 
+ case "$type" in
+@@ -166,9 +214,10 @@
+ 	    --with-plugins=innobase \
+ 	    --with-zlib-dir=bundled \
+ 	    --enable-shared \
+-	    --with-extra-charsets=all"
++	    --with-extra-charsets=all \
++	    --prefix=$2"
+ 
+-	build_all
++	build_all $2
+ 	;;
+ 
+ "innodb55" | "5.5")
+@@ -230,7 +279,7 @@
+ 
+ 	build_server
+ 
+-	build_xtrabackup
++	build_xtrabackup 
+ 
+ 	build_tar4ibd
+ 	;;
diff --git a/slapos/recipe/README.lamp.txt b/slapos/recipe/README.lamp.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7a75a2033a3ed733ff5198bbce92e65b3fcb21ab
--- /dev/null
+++ b/slapos/recipe/README.lamp.txt
@@ -0,0 +1,104 @@
+lamp
+====
+
+The lamp recipe helps you deploy a PHP-based application on SlapOS in a simple way. The recipe is
+able to set up MariaDB, Apache and Apache-PHP for your PHP application, and it can also
+configure your software during installation to ensure full compatibility.
+
+
+How to use?
+-----------
+
+Just add this part to your software.cfg to use the lamp.simple module:
+
+[instance-recipe]
+egg = slapos.cookbook
+module = lamp.simple
+
+You also need to extend lamp.cfg:
+
+extends =
+  http://git.erp5.org/gitweb/slapos.git/blob_plain/refs/tags/slapos-0.50:/stack/lamp.cfg
+
+
+lamp.runner
+===========
+
+When you install some software (such as Prestashop) you may need to remove or rename a folder, but with SlapOS you cannot
+access the www-data directory directly. To do this, you need to tell the lamp recipe to remove and/or rename it when the
+software is instantiated. Some software requires more than renaming or deleting a folder (manually creating a database,
+etc.); in this case you need to write a Python script that the lamp recipe will run when installing your software.
+
+
+
+How to use?
+-----------
+
+CONDITION
+---------
+The action (move, rename, launch script) only starts when the condition is fulfilled.
+In instance.cfg, add
+
+file_token = path_of_file
+
+and the action will begin once path_of_www-data/path_of_file has been created.
+You can also use the database to check the condition. Add
+
+table_name = name_of_table
+constraint = sql_where_condition
+
+name_of_table is the full or partial name (in some cases we cannot know the prefix used to create tables) of a table
+in the MariaDB database, for example table_name = admin. If you use
+table_name = **, the action will begin as soon as the database is ready.
+constraint is the SQL condition used when searching for an entry in name_of_table, for example constraint = `admin_id`=1
+
+You cannot use file_token and table_name at the same time, otherwise file_token takes priority. Beware of
+conditions that will never be satisfied. A hypothetical fragment is sketched below.
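+
+For illustration only (the values are made up, not defaults), the file check could look like
+
+file_token = wp-config.php
+
+and the database check like
+
+table_name = admin
+constraint = `admin_id`=1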
+
+
+
+ACTION
+------
+The action starts when the condition is true.
+1- delete a file or folder
+In instance.cfg, use
+
+delete = file_or_folder1, file_or_folder2, file_or_folder3 ...
+
+for example delete = admin
+
+2- rename a file or folder
+In instance.cfg, use
+
+rename = old_name1 => new_name1, old_name2 => new_name2, ... You can also use
+
+rename = old_name1, old_name2 => new_name2, ... In this case old_name1 will be renamed and its new name will be chosen
+by joining old_name1 and mysql_user, which gives
+rename = old_name1 => old_name1-mysql_user, old_name2 => new_name2, ...
+
+3- launch a Python script
+
+Use script = ${configure-script:location}/${configure-script:filename} in instance.cfg, and add a configure-script part
+to software.cfg:
+
+parts = configure-script
+
+[configure-script]
+recipe = hexagonit.recipe.download
+location = ${buildout:parts-directory}/${:_buildout_section_name_}
+url = url_of_script_name.py
+filename = script_name.py
+download-only = True
+
+script_name.py should provide a main entry point; sys.argv is passed to it. You can write script_name.py like this:
+import sys
+....
+def setup(args):
+    base_url, htdocs, renamed, mysql_user, mysql_password, mysql_database, mysql_host = args
+    .......
+
+if __name__ == '__main__':
+    setup(sys.argv[1:])
+
+base_url: the URL of the PHP software
+htdocs: the path of the www-data directory
+mysql_user, mysql_password, mysql_database, mysql_host: the MariaDB connection parameters
diff --git a/slapos/recipe/README.mkdirectory.txt b/slapos/recipe/README.mkdirectory.txt
new file mode 100644
index 0000000000000000000000000000000000000000..426282a219dd9e230774b64334f8e8ecf8d6bb87
--- /dev/null
+++ b/slapos/recipe/README.mkdirectory.txt
@@ -0,0 +1,13 @@
+mkdirectory
+===========
+
+mkdirectory loops over its options and creates the corresponding directories.
+
+.. Note::
+
+   Use a slash ``/`` as the directory separator. Don't use a system-dependent separator.
+   The slash will be parsed and replaced by the operating system's own separator.
+
+   Only use directories relative to the buildout root directory.
+
+The created directories won't be added to the path list. A hypothetical example part is sketched below.
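+
+For illustration only (the entry point name and directory names are assumptions)::
+
+  [directories]
+  recipe = slapos.cookbook:mkdirectory
+  log = var/log
+  run = var/run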
diff --git a/slapos/recipe/certificate_authority/__init__.py b/slapos/recipe/certificate_authority/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bf43ed728565ac7ff4415e8c04a509fd9938c21
--- /dev/null
+++ b/slapos/recipe/certificate_authority/__init__.py
@@ -0,0 +1,132 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+import os
+import hashlib
+import ConfigParser
+
+from slapos.recipe.librecipe import GenericBaseRecipe
+
+class Recipe(GenericBaseRecipe):
+
+  def setPath(self):
+    self.ca_dir = self.options['ca-dir']
+    self.request_directory = self.options['requests-directory']
+    self.ca_private = self.options['ca-private']
+    self.ca_certs = self.options['ca-certs']
+    self.ca_newcerts = self.options['ca-newcerts']
+    self.ca_crl = self.options['ca-crl']
+    self.ca_key_ext = '.key'
+    self.ca_crt_ext = '.crt'
+
+  def install(self):
+    path_list = []
+
+    # XXX: We gotta find better a way to get these options
+    ca_country_code = 'XX'
+    ca_email = 'xx@example.com'
+    ca_state = 'State'
+    ca_city = 'City'
+    ca_company = 'Company'
+    # XXX: end
+
+    self.setPath()
+
+    config = dict(ca_dir=self.ca_dir, request_dir=self.request_directory)
+
+    for f in ['crlnumber', 'serial']:
+      if not os.path.exists(os.path.join(self.ca_dir, f)):
+        open(os.path.join(self.ca_dir, f), 'w').write('01')
+    if not os.path.exists(os.path.join(self.ca_dir, 'index.txt')):
+      open(os.path.join(self.ca_dir, 'index.txt'), 'w').write('')
+    openssl_configuration = os.path.join(self.ca_dir, 'openssl.cnf')
+    config.update(
+        working_directory=self.ca_dir,
+        country_code=ca_country_code,
+        state=ca_state,
+        city=ca_city,
+        company=ca_company,
+        email_address=ca_email,
+    )
+    self.createFile(openssl_configuration, self.substituteTemplate(
+      self.getTemplateFilename('openssl.cnf.ca.in'), config))
+
+    ca_wrapper = self.createPythonScript(
+      self.options['wrapper'],
+      '%s.certificate_authority.runCertificateAuthority' % __name__,
+      dict(
+        openssl_configuration=openssl_configuration,
+        openssl_binary=self.options['openssl-binary'],
+        certificate=os.path.join(self.ca_dir, 'cacert.pem'),
+        key=os.path.join(self.ca_private, 'cakey.pem'),
+        crl=self.ca_crl,
+        request_dir=self.request_directory
+      )
+    )
+    path_list.append(ca_wrapper)
+
+    return path_list
+
+class Request(Recipe):
+
+  def _options(self, options):
+    if 'name' not in options:
+      options['name'] = self.name
+
+  def install(self):
+    self.setPath()
+
+    key_file = self.options['key-file']
+    cert_file = self.options['cert-file']
+
+    name = self.options['name']
+    hash_ = hashlib.sha512(name).hexdigest()
+    key = os.path.join(self.ca_private, hash_ + self.ca_key_ext)
+    certificate = os.path.join(self.ca_certs, hash_ + self.ca_crt_ext)
+    parser = ConfigParser.RawConfigParser()
+    parser.add_section('certificate')
+    parser.set('certificate', 'name', name)
+    parser.set('certificate', 'key_file', key)
+    parser.set('certificate', 'certificate_file', certificate)
+    parser.write(open(os.path.join(self.request_directory, hash_), 'w'))
+
+    for link in [key_file, cert_file]:
+      if os.path.islink(link):
+        os.unlink(link)
+      elif os.path.exists(link):
+        raise OSError("%r file should be a symbolic link." % link)
+
+    os.symlink(key, key_file)
+    os.symlink(certificate, cert_file)
+
+    wrapper = self.createPythonScript(
+      self.options['wrapper'],
+      'slapos.recipe.librecipe.execute.execute_wait',
+      [ [self.options['executable']],
+        [certificate, key] ],
+    )
+
+    return [key_file, cert_file, wrapper]
diff --git a/slapos/recipe/certificate_authority/certificate_authority.py b/slapos/recipe/certificate_authority/certificate_authority.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4af4ab76df89af085dd779e12b5d1a64af37ff6
--- /dev/null
+++ b/slapos/recipe/certificate_authority/certificate_authority.py
@@ -0,0 +1,117 @@
+import os
+import subprocess
+import time
+import ConfigParser
+
+
+def popenCommunicate(command_list, input=None):
+  subprocess_kw = dict(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+  if input is not None:
+    subprocess_kw.update(stdin=subprocess.PIPE)
+  popen = subprocess.Popen(command_list, **subprocess_kw)
+  result = popen.communicate(input)[0]
+  if popen.returncode is None:
+    popen.kill()
+  if popen.returncode != 0:
+    raise ValueError('Issue during calling %r, result was:\n%s' % (
+      command_list, result))
+  return result
+
+
+class CertificateAuthority:
+  def __init__(self, key, certificate, openssl_binary,
+      openssl_configuration, request_dir):
+    self.key = key
+    self.certificate = certificate
+    self.openssl_binary = openssl_binary
+    self.openssl_configuration = openssl_configuration
+    self.request_dir = request_dir
+
+  def checkAuthority(self):
+    file_list = [ self.key, self.certificate ]
+    ca_ready = True
+    for f in file_list:
+      if not os.path.exists(f):
+        ca_ready = False
+        break
+    if ca_ready:
+      return
+    for f in file_list:
+      if os.path.exists(f):
+        os.unlink(f)
+    try:
+      # no CA, let us create new one
+      popenCommunicate([self.openssl_binary, 'req', '-nodes', '-config',
+          self.openssl_configuration, '-new', '-x509', '-extensions',
+          'v3_ca', '-keyout', self.key, '-out', self.certificate,
+          '-days', '10950'], 'Automatic Certificate Authority\n')
+    except:
+      try:
+        for f in file_list:
+          if os.path.exists(f):
+            os.unlink(f)
+      except:
+        # do not raise during cleanup
+        pass
+      raise
+
+  def _checkCertificate(self, common_name, key, certificate):
+    file_list = [key, certificate]
+    ready = True
+    for f in file_list:
+      if not os.path.exists(f):
+        ready = False
+        break
+    if ready:
+      return False
+    for f in file_list:
+      if os.path.exists(f):
+        os.unlink(f)
+    csr = certificate + '.csr'
+    try:
+      popenCommunicate([self.openssl_binary, 'req', '-config',
+        self.openssl_configuration, '-nodes', '-new', '-keyout',
+        key, '-out', csr, '-days', '3650'],
+        common_name + '\n')
+      try:
+        popenCommunicate([self.openssl_binary, 'ca', '-batch', '-config',
+          self.openssl_configuration, '-out', certificate,
+          '-infiles', csr])
+      finally:
+        if os.path.exists(csr):
+          os.unlink(csr)
+    except:
+      try:
+        for f in file_list:
+          if os.path.exists(f):
+            os.unlink(f)
+      except:
+        # do not raise during cleanup
+        pass
+      raise
+    else:
+      return True
+
+  def checkRequestDir(self):
+    for request_file in os.listdir(self.request_dir):
+      parser = ConfigParser.RawConfigParser()
+      parser.readfp(open(os.path.join(self.request_dir, request_file), 'r'))
+      if self._checkCertificate(parser.get('certificate', 'name'),
+          parser.get('certificate', 'key_file'), parser.get('certificate',
+            'certificate_file')):
+        print 'Created certificate %r' % parser.get('certificate', 'name')
+
+def runCertificateAuthority(ca_conf):
+  ca = CertificateAuthority(ca_conf['key'], ca_conf['certificate'],
+      ca_conf['openssl_binary'], ca_conf['openssl_configuration'],
+      ca_conf['request_dir'])
+  while True:
+    ca.checkAuthority()
+    ca.checkRequestDir()
+    # XXX
+    # Antoine: I really don't like that at all. It wastes useful CPU time.
+    #          I think it would be a greater idea to use pyinotify
+    #          <http://pyinotify.sourceforge.net/>
+    #          Or we could use select() with socket as well.
+    time.sleep(60)
+    # end XXX
diff --git a/slapos/recipe/certificate_authority/template/openssl.cnf.ca.in b/slapos/recipe/certificate_authority/template/openssl.cnf.ca.in
new file mode 100644
index 0000000000000000000000000000000000000000..8a450a68762145e72923635273e06a00e80d34ca
--- /dev/null
+++ b/slapos/recipe/certificate_authority/template/openssl.cnf.ca.in
@@ -0,0 +1,350 @@
+#
+# OpenSSL example configuration file.
+# This is mostly being used for generation of certificate requests.
+#
+
+# This definition stops the following lines choking if HOME isn't
+# defined.
+HOME			= .
+RANDFILE		= $ENV::HOME/.rnd
+
+# Extra OBJECT IDENTIFIER info:
+#oid_file		= $ENV::HOME/.oid
+oid_section		= new_oids
+
+# To use this configuration file with the "-extfile" option of the
+# "openssl x509" utility, name here the section containing the
+# X.509v3 extensions to use:
+# extensions		= 
+# (Alternatively, use a configuration file that has only
+# X.509v3 extensions in its main [= default] section.)
+
+[ new_oids ]
+
+# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.
+# Add a simple OID like this:
+# testoid1=1.2.3.4
+# Or use config file substitution like this:
+# testoid2=${testoid1}.5.6
+
+# Policies used by the TSA examples.
+tsa_policy1 = 1.2.3.4.1
+tsa_policy2 = 1.2.3.4.5.6
+tsa_policy3 = 1.2.3.4.5.7
+
+####################################################################
+[ ca ]
+default_ca	= CA_default		# The default ca section
+
+####################################################################
+[ CA_default ]
+
+dir		= %(working_directory)s		# Where everything is kept
+certs		= $dir/certs		# Where the issued certs are kept
+crl_dir		= $dir/crl		# Where the issued crl are kept
+database	= $dir/index.txt	# database index file.
+#unique_subject	= no			# Set to 'no' to allow creation of
+					# several certificates with same subject.
+new_certs_dir	= $dir/newcerts		# default place for new certs.
+
+certificate	= $dir/cacert.pem 	# The CA certificate
+serial		= $dir/serial 		# The current serial number
+crlnumber	= $dir/crlnumber	# the current crl number
+					# must be commented out to leave a V1 CRL
+crl		= $dir/crl.pem 		# The current CRL
+private_key	= $dir/private/cakey.pem # The private key
+RANDFILE	= $dir/private/.rand	# private random number file
+
+x509_extensions	= usr_cert		# The extensions to add to the cert
+
+# Comment out the following two lines for the "traditional"
+# (and highly broken) format.
+name_opt 	= ca_default		# Subject Name options
+cert_opt 	= ca_default		# Certificate field options
+
+# Extension copying option: use with caution.
+# copy_extensions = copy
+
+# Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs
+# so this is commented out by default to leave a V1 CRL.
+# crlnumber must also be commented out to leave a V1 CRL.
+# crl_extensions	= crl_ext
+
+default_days	= 3650			# how long to certify for
+default_crl_days= 30			# how long before next CRL
+default_md	= default		# use public key default MD
+preserve	= no			# keep passed DN ordering
+
+# A few different ways of specifying how similar the request should look
+# For type CA, the listed attributes must be the same, and the optional
+# and supplied fields are just that :-)
+policy		= policy_match
+
+# For the CA policy
+[ policy_match ]
+countryName		= match
+stateOrProvinceName	= match
+organizationName	= match
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+# For the 'anything' policy
+# At this point in time, you must list all acceptable 'object'
+# types.
+[ policy_anything ]
+countryName		= optional
+stateOrProvinceName	= optional
+localityName		= optional
+organizationName	= optional
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+####################################################################
+[ req ]
+default_bits		= 2048
+default_md		= sha1
+default_keyfile 	= privkey.pem
+distinguished_name	= req_distinguished_name
+#attributes		= req_attributes
+x509_extensions	= v3_ca	# The extensions to add to the self-signed cert
+
+# Passwords for private keys if not present they will be prompted for
+# input_password = secret
+# output_password = secret
+
+# This sets a mask for permitted string types. There are several options. 
+# default: PrintableString, T61String, BMPString.
+# pkix	 : PrintableString, BMPString (PKIX recommendation before 2004)
+# utf8only: only UTF8Strings (PKIX recommendation after 2004).
+# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).
+# MASK:XXXX a literal mask value.
+# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.
+string_mask = utf8only
+
+# req_extensions = v3_req # The extensions to add to a certificate request
+
+[ req_distinguished_name ]
+countryName			= Country Name (2 letter code)
+countryName_value		= %(country_code)s
+countryName_min			= 2
+countryName_max			= 2
+
+stateOrProvinceName		= State or Province Name (full name)
+stateOrProvinceName_value	= %(state)s
+
+localityName			= Locality Name (eg, city)
+localityName_value		= %(city)s
+
+0.organizationName		= Organization Name (eg, company)
+0.organizationName_value	= %(company)s
+
+# we can do this but it is not needed normally :-)
+#1.organizationName		= Second Organization Name (eg, company)
+#1.organizationName_default	= World Wide Web Pty Ltd
+
+commonName			= Common Name (eg, your name or your server\'s hostname)
+commonName_max			= 64
+
+emailAddress			= Email Address
+emailAddress_value = %(email_address)s
+emailAddress_max		= 64
+
+# SET-ex3			= SET extension number 3
+
+#[ req_attributes ]
+#challengePassword		= A challenge password
+#challengePassword_min		= 4
+#challengePassword_max		= 20
+#
+#unstructuredName		= An optional company name
+
+[ usr_cert ]
+
+# These extensions are added when 'ca' signs a request.
+
+# This goes against PKIX guidelines but some CAs do it and some software
+# requires this to avoid interpreting an end user certificate as a CA.
+
+basicConstraints=CA:FALSE
+
+# Here are some examples of the usage of nsCertType. If it is omitted
+# the certificate can be used for anything *except* object signing.
+
+# This is OK for an SSL server.
+# nsCertType			= server
+
+# For an object signing certificate this would be used.
+# nsCertType = objsign
+
+# For normal client use this is typical
+# nsCertType = client, email
+
+# and for everything including object signing:
+# nsCertType = client, email, objsign
+
+# This is typical in keyUsage for a client certificate.
+# keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+
+# This will be displayed in Netscape's comment listbox.
+nsComment			= "OpenSSL Generated Certificate"
+
+# PKIX recommendations harmless if included in all certificates.
+subjectKeyIdentifier=hash
+authorityKeyIdentifier=keyid,issuer
+
+# This stuff is for subjectAltName and issuerAltname.
+# Import the email address.
+# subjectAltName=email:copy
+# An alternative to produce certificates that aren't
+# deprecated according to PKIX.
+# subjectAltName=email:move
+
+# Copy subject details
+# issuerAltName=issuer:copy
+
+#nsCaRevocationUrl		= http://www.domain.dom/ca-crl.pem
+#nsBaseUrl
+#nsRevocationUrl
+#nsRenewalUrl
+#nsCaPolicyUrl
+#nsSslServerName
+
+# This is required for TSA certificates.
+# extendedKeyUsage = critical,timeStamping
+
+[ v3_req ]
+
+# Extensions to add to a certificate request
+
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+
+[ v3_ca ]
+
+
+# Extensions for a typical CA
+
+
+# PKIX recommendation.
+
+subjectKeyIdentifier=hash
+
+authorityKeyIdentifier=keyid:always,issuer
+
+# This is what PKIX recommends but some broken software chokes on critical
+# extensions.
+#basicConstraints = critical,CA:true
+# So we do this instead.
+basicConstraints = CA:true
+
+# Key usage: this is typical for a CA certificate. However since it will
+# prevent it being used as a test self-signed certificate it is best
+# left out by default.
+# keyUsage = cRLSign, keyCertSign
+
+# Some might want this also
+# nsCertType = sslCA, emailCA
+
+# Include email address in subject alt name: another PKIX recommendation
+# subjectAltName=email:copy
+# Copy issuer details
+# issuerAltName=issuer:copy
+
+# DER hex encoding of an extension: beware experts only!
+# obj=DER:02:03
+# Where 'obj' is a standard or added object
+# You can even override a supported extension:
+# basicConstraints= critical, DER:30:03:01:01:FF
+
+[ crl_ext ]
+
+# CRL extensions.
+# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.
+
+# issuerAltName=issuer:copy
+authorityKeyIdentifier=keyid:always
+
+[ proxy_cert_ext ]
+# These extensions should be added when creating a proxy certificate
+
+# This goes against PKIX guidelines but some CAs do it and some software
+# requires this to avoid interpreting an end user certificate as a CA.
+
+basicConstraints=CA:FALSE
+
+# Here are some examples of the usage of nsCertType. If it is omitted
+# the certificate can be used for anything *except* object signing.
+
+# This is OK for an SSL server.
+# nsCertType			= server
+
+# For an object signing certificate this would be used.
+# nsCertType = objsign
+
+# For normal client use this is typical
+# nsCertType = client, email
+
+# and for everything including object signing:
+# nsCertType = client, email, objsign
+
+# This is typical in keyUsage for a client certificate.
+# keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+
+# This will be displayed in Netscape's comment listbox.
+nsComment			= "OpenSSL Generated Certificate"
+
+# PKIX recommendations harmless if included in all certificates.
+subjectKeyIdentifier=hash
+authorityKeyIdentifier=keyid,issuer
+
+# This stuff is for subjectAltName and issuerAltname.
+# Import the email address.
+# subjectAltName=email:copy
+# An alternative to produce certificates that aren't
+# deprecated according to PKIX.
+# subjectAltName=email:move
+
+# Copy subject details
+# issuerAltName=issuer:copy
+
+#nsCaRevocationUrl		= http://www.domain.dom/ca-crl.pem
+#nsBaseUrl
+#nsRevocationUrl
+#nsRenewalUrl
+#nsCaPolicyUrl
+#nsSslServerName
+
+# This really needs to be in place for it to be a proxy certificate.
+proxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo
+
+####################################################################
+[ tsa ]
+
+default_tsa = tsa_config1	# the default TSA section
+
+[ tsa_config1 ]
+
+# These are used by the TSA reply generation only.
+dir		= /etc/pki/tls		# TSA root directory
+serial		= $dir/tsaserial	# The current serial number (mandatory)
+crypto_device	= builtin		# OpenSSL engine to use for signing
+signer_cert	= $dir/tsacert.pem 	# The TSA signing certificate
+					# (optional)
+certs		= $dir/cacert.pem	# Certificate chain to include in reply
+					# (optional)
+signer_key	= $dir/private/tsakey.pem # The TSA private key (optional)
+
+default_policy	= tsa_policy1		# Policy if request did not specify it
+					# (optional)
+other_policies	= tsa_policy2, tsa_policy3	# acceptable policies (optional)
+digests		= md5, sha1		# Acceptable message digests (mandatory)
+accuracy	= secs:1, millisecs:500, microsecs:100	# (optional)
+clock_precision_digits  = 0	# number of digits after dot. (optional)
+ordering		= yes	# Is ordering defined for timestamps?
+				# (optional, default: no)
+tsa_name		= yes	# Must the TSA name be included in the reply?
+				# (optional, default: no)
+ess_cert_id_chain	= no	# Must the ESS cert id chain be included?
+				# (optional, default: no)
diff --git a/slapos/recipe/davstorage/__init__.py b/slapos/recipe/davstorage/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae79dd33f2f063a795b6e3b0929659fc29b8ea98
--- /dev/null
+++ b/slapos/recipe/davstorage/__init__.py
@@ -0,0 +1,125 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+from slapos.recipe.librecipe import BaseSlapRecipe
+import os
+import subprocess
+import pkg_resources
+import zc.buildout
+import zc.recipe.egg
+import sys
+
+class Recipe(BaseSlapRecipe):
+  def getTemplateFilename(self, template_name):
+    return pkg_resources.resource_filename(__name__,
+        'template/%s' % template_name)
+
+  def _install(self):
+    self.path_list = []
+    self.requirements, self.ws = self.egg.working_set()
+    document_root = self.createDataDirectory('www')
+    apache_config = self.installApache(document_root)
+    self.setConnectionUrl(scheme='webdavs',
+                          host=apache_config['ip'],
+                          port=apache_config['port'],
+                          auth=(apache_config['user'],
+                                apache_config['password']))
+    return self.path_list
+
+  def installApache(self, document_root, ip=None, port=None):
+    if ip is None:
+      ip=self.getGlobalIPv6Address()
+    if port is None:
+      port = '9080'
+
+    htpasswd_config = self.createHtpasswd()
+    ssl_config = self.createCertificate(size=2048)
+
+    apache_config = dict(
+      pid_file=os.path.join(self.run_directory, 'httpd.pid'),
+      lock_file=os.path.join(self.run_directory, 'httpd.lock'),
+      davlock_db=os.path.join(self.run_directory, 'davdb.lock'),
+      ip=ip,
+      port=port,
+      error_log=os.path.join(self.log_directory, 'httpd-error.log'),
+      access_log=os.path.join(self.log_directory, 'httpd-access.log'),
+      document_root=document_root,
+      modules_dir=self.options['apache_modules_dir'],
+      mime_types=self.options['apache_mime_file'],
+      server_root=self.work_directory,
+      email_address='admin@vifib.net',
+      htpasswd_file=htpasswd_config['htpasswd_file'],
+      ssl_certificate=ssl_config['certificate'],
+      ssl_key=ssl_config['key'],
+    )
+    httpd_config_file = self.createConfigurationFile('httpd.conf',
+      self.substituteTemplate(self.getTemplateFilename('httpd.conf.in'),
+                              apache_config))
+    self.path_list.append(httpd_config_file)
+    apache_runner = zc.buildout.easy_install.scripts(
+      [('httpd', 'slapos.recipe.librecipe.execute', 'execute')],
+      self.ws, sys.executable, self.wrapper_directory,
+      arguments=[self.options['apache_binary'],
+                 '-f', httpd_config_file,
+                 '-DFOREGROUND',
+                ]
+    )[0]
+    self.path_list.append(apache_runner)
+    return dict(ip=apache_config['ip'],
+                port=apache_config['port'],
+                user=htpasswd_config['user'],
+                password=htpasswd_config['password']
+               )
+
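+  # Create an htpasswd file holding a single generated user/password pair;
+  # htpasswd -b takes the password from the command line and -c (re)creates
+  # the file.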
+  def createHtpasswd(self):
+    htpasswd = self.createConfigurationFile('htpasswd', '')
+    self.path_list.append(htpasswd)
+    password = self.generatePassword()
+    user = 'user'
+    subprocess.check_call([self.options['apache_htpasswd'],
+                           '-bc', htpasswd,
+                           user, password
+                          ])
+    return dict(htpasswd_file=htpasswd,
+                user=user,
+                password=password)
+
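+  # Generate a self-signed certificate (openssl req -x509 -nodes) with an
+  # unencrypted RSA key of the requested size; no -days option is passed,
+  # so openssl's default validity period applies.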
+  def createCertificate(self, size=1024, subject='/C=FR/L=Marcq-en-Baroeul/O=Nexedi'):
+    key_file = os.path.join(self.etc_directory, 'httpd.key')
+    self.path_list.append(key_file)
+
+    certificate_file = os.path.join(self.etc_directory, 'httpd.crt')
+    self.path_list.append(certificate_file)
+
+    subprocess.check_call([self.options['openssl_binary'],
+                           'req', '-x509', '-nodes',
+                           '-newkey', 'rsa:%s' % size,
+                           '-subj', str(subject),
+                           '-out', certificate_file,
+                           '-keyout', key_file
+                          ])
+    return dict(key=key_file,
+                certificate=certificate_file)
diff --git a/slapos/recipe/davstorage/template/httpd.conf.in b/slapos/recipe/davstorage/template/httpd.conf.in
new file mode 100644
index 0000000000000000000000000000000000000000..515593e8cff5e37dbb88bc8ff15b6c78a843a490
--- /dev/null
+++ b/slapos/recipe/davstorage/template/httpd.conf.in
@@ -0,0 +1,82 @@
+ServerRoot "%(server_root)s"
+
+Listen [%(ip)s]:%(port)s
+
+# Needed modules
+LoadModule authn_file_module "%(modules_dir)s/mod_authn_file.so"
+LoadModule authz_host_module "%(modules_dir)s/mod_authz_host.so"
+LoadModule authz_user_module "%(modules_dir)s/mod_authz_user.so"
+LoadModule auth_basic_module "%(modules_dir)s/mod_auth_basic.so"
+LoadModule auth_digest_module "%(modules_dir)s/mod_auth_digest.so"
+LoadModule log_config_module "%(modules_dir)s/mod_log_config.so"
+LoadModule headers_module "%(modules_dir)s/mod_headers.so"
+LoadModule setenvif_module "%(modules_dir)s/mod_setenvif.so"
+LoadModule ssl_module "%(modules_dir)s/mod_ssl.so"
+LoadModule mime_module "%(modules_dir)s/mod_mime.so"
+LoadModule dav_module "%(modules_dir)s/mod_dav.so"
+LoadModule dav_fs_module "%(modules_dir)s/mod_dav_fs.so"
+LoadModule dir_module "%(modules_dir)s/mod_dir.so"
+
+ServerAdmin %(email_address)s
+
+# Quiet Server header (otherwise Apache gives away its full version details)
+# It's safer
+ServerTokens ProductOnly
+
+DocumentRoot "%(document_root)s"
+PidFile "%(pid_file)s"
+LockFile "%(lock_file)s"
+DavLockDB "%(davlock_db)s"
+
+<Directory />
+    Options FollowSymLinks
+    AllowOverride None
+    Order deny,allow
+    Deny from all
+</Directory>
+
+<Directory %(document_root)s>
+    Options Indexes MultiViews
+    AllowOverride None
+    Order allow,deny
+    Allow from all
+
+    Dav On
+    # Security Rules to avoid DDoS Attacks
+    DavDepthInfinity Off
+    LimitXMLRequestBody 0
+
+    # Cross-Origin Resources Sharing
+    Header always set Access-Control-Max-Age "0"
+    Header always set Access-Control-Allow-Origin "*"
+    Header always set Access-Control-Allow-Methods "OPTIONS, GET, HEAD, POST, PUT, DELETE, PROPFIND"
+    Header always set Access-Control-Allow-Headers "Content-Type, X-Requested-With, X-HTTP-Method-Override, Accept, Authorization, Depth"
+    SetEnvIf Origin "(.+)" ORIGIN=$1
+    Header always set Access-Control-Allow-Origin %%{ORIGIN}e
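+    # The SetEnvIf/Header pair above echoes the request's Origin header back
+    # in Access-Control-Allow-Origin, effectively overriding the wildcard
+    # value set earlier in this block.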
+
+    AuthType Basic
+    AuthName "WebDAV Storage"
+    AuthUserFile "%(htpasswd_file)s"
+    <LimitExcept OPTIONS>
+        Require valid-user
+    </LimitExcept>
+
+</Directory>
+
+ErrorLog "%(error_log)s"
+LogLevel warn
+
+LogFormat "%%h %%l %%u %%t \"%%r\" %%>s %%b \"%%{Referer}i\" \"%%{User-Agent}i\"" combined
+LogFormat "%%h %%l %%u %%t \"%%r\" %%>s %%b" common
+CustomLog "%(access_log)s" common
+
+DefaultType text/plain
+TypesConfig "%(mime_types)s"
+AddType application/x-compress .Z
+AddType application/x-gzip .gz .tgz
+
+SSLRandomSeed startup builtin
+SSLRandomSeed connect builtin
+SSLEngine on
+SSLCertificateFile "%(ssl_certificate)s"
+SSLCertificateKeyFile "%(ssl_key)s"
diff --git a/slapos/recipe/dcron.py b/slapos/recipe/dcron.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bb1336c597ca3d61b8760eec020ef66a3fd6e4e
--- /dev/null
+++ b/slapos/recipe/dcron.py
@@ -0,0 +1,75 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+import os
+
+from slapos.recipe.librecipe import GenericBaseRecipe
+
+class Recipe(GenericBaseRecipe):
+
+  def install(self):
+    self.logger.info("Installing dcron...")
+
+    path_list = []
+
+    cronstamps = self.options['cronstamps']
+    cron_d = self.options['cron-entries']
+    crontabs = self.options['crontabs']
+    catcher = self.options['catcher']
+
+    binary = self.options['binary']
+
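+    # Assumption about the dcrond options used below: -s and -c point at the
+    # cron-entries and crontabs directories, -t at the cronstamps directory,
+    # -f keeps the daemon in the foreground, -l sets the log level and -M
+    # designates the mail catcher; check the bundled dcron for the details.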
+    script = self.createPythonScript(binary,
+      'slapos.recipe.librecipe.execute.execute',
+      [self.options['dcrond-binary'].strip(), '-s', cron_d, '-c', crontabs,
+       '-t', cronstamps, '-f', '-l', '5', '-M', catcher]
+      )
+    path_list.append(script)
+    self.logger.debug('Main cron executable created at : %r', script)
+
+    self.logger.info("dcron successfully installed.")
+
+    return path_list
+
+
+
+class Part(GenericBaseRecipe):
+
+  def _options(self, options):
+    if 'name' not in options:
+      options['name'] = self.name
+
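+  # Write a single crontab-style entry ("<frequency> <command>") into the
+  # cron-entries directory, named after the part.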
+  def install(self):
+    cron_d = self.options['cron-entries']
+    filename = os.path.join(cron_d, self.options['name'])
+
+    with open(filename, 'w') as part:
+      part.write('%(frequency)s %(command)s\n' % {
+        'frequency': self.options['frequency'],
+        'command': self.options['command'],
+      })
+
+    return [filename]
diff --git a/slapos/recipe/duplicity.py b/slapos/recipe/duplicity.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e5d5d56887e67e30e7596068ba7040a21c7dcf4
--- /dev/null
+++ b/slapos/recipe/duplicity.py
@@ -0,0 +1,44 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+from slapos.recipe.librecipe import GenericBaseRecipe
+
+class Recipe(GenericBaseRecipe):
+
+  def install(self):
+
+    remote_url = self.options['remote_backup']
+    backup_directory = self.options['directory']
+
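+    # The wrapper simply runs duplicity with encryption disabled
+    # (--no-encryption), backing up backup_directory to remote_url.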
+    wrapper = self.createPythonScript(
+      self.options['wrapper'],
+      'slapos.recipe.librecipe.execute.execute',
+      [self.options['duplicity_binary'], '--no-encryption',
+       backup_directory, remote_url]
+    )
+    return [wrapper]
+
+
diff --git a/slapos/recipe/erp5/__init__.py b/slapos/recipe/erp5/__init__.py
index f9c478a1b632311514dcccbc3531721e6ad46ffc..03c7fb227c56cc4f450e889ef8ecfcb96a2622d2 100644
--- a/slapos/recipe/erp5/__init__.py
+++ b/slapos/recipe/erp5/__init__.py
@@ -74,10 +74,11 @@ class Recipe(BaseSlapRecipe):
 
     if self.parameter_dict.get("slap_software_type", "").lower() == "cluster":
       # Site access is done by HAProxy
-      zope_access, site_access = self.installZopeCluster()
+      zope_access, site_access, key_access = self.installZopeCluster(ca_conf)
     else:
       zope_access = self.installZopeStandalone()
       site_access = zope_access
+      key_access = None
 
     key, certificate = self.requestCertificate('Login Based Access')
     apache_conf = dict(
@@ -96,9 +97,9 @@ class Recipe(BaseSlapRecipe):
              self.requestCertificate(frontend_name)
 
       connection_dict["site_url"] = self.installFrontendZopeApache(
-        ip=self.getGlobalIPv6Address(), port=13001, name=frontend_name,
-        frontend_path='/%s' % self.site_id, backend_path='/%s' % self.site_id,
-        backend_url="http://%s" % site_access, key=frontend_key,
+        ip=self.getGlobalIPv6Address(), port=4443, name=frontend_name,
+        frontend_path='/', backend_path='',
+        backend_url=apache_conf['apache_login'], key=frontend_key,
         certificate=frontend_certificate)
 
     default_bt5_list = []
@@ -107,7 +108,7 @@ class Recipe(BaseSlapRecipe):
 
     self.installERP5Site(user, password, zope_access, mysql_conf,
              conversion_server_conf, memcached_conf, kumo_conf,
-             self.site_id, default_bt5_list)
+             self.site_id, default_bt5_list, ca_conf)
 
     self.installTestRunner(ca_conf, mysql_conf, conversion_server_conf,
                            memcached_conf, kumo_conf)
@@ -120,6 +121,11 @@ class Recipe(BaseSlapRecipe):
       memcached_url=memcached_conf['memcached_url'],
       kumo_url=kumo_conf['kumo_address']
     ))
+    if key_access is not None:
+      connection_dict['key_access'] = key_access
+    if self.options.get('fulltext_search', None) == 'sphinx':
+      sphinx_searchd = self.installSphinxSearchd(ip=self.getLocalIPv4Address())
+      connection_dict.update(**sphinx_searchd)
     self.setConnectionDict(connection_dict)
     return self.path_list
 
@@ -128,19 +134,82 @@ class Recipe(BaseSlapRecipe):
     """
     zodb_dir = os.path.join(self.data_root_directory, 'zodb')
     self._createDirectory(zodb_dir)
-    zodb_root_path = os.path.join(zodb_dir, 'root.fs')
+    zodb_root_path = os.path.join(zodb_dir, 'main.fs')
 
     thread_amount_per_zope = int(self.options.get(
                                  'single_zope_thread_amount', 4))
 
+    zodb_cache_size = int(self.options.get('zodb_cache_size', 5000))
+
     return self.installZope(ip=self.getLocalIPv4Address(),
           port=12000 + 1, name='zope_%s' % 1,
           zodb_configuration_string=self.substituteTemplate(
             self.getTemplateFilename('zope-zodb-snippet.conf.in'),
-            dict(zodb_root_path=zodb_root_path)), with_timerservice=True,
+            dict(zodb_root_path=zodb_root_path,
+                 zodb_cache_size=zodb_cache_size)),
+            with_timerservice=True,
             thread_amount=thread_amount_per_zope)
 
-  def installZopeCluster(self):
+  def installKeyAuthorisationApache(self, ipv6, port, backend, key, certificate,
+      ca_conf, key_auth_path='/'):
+    if ipv6:
+      ip = self.getGlobalIPv6Address()
+    else:
+      ip = self.getLocalIPv4Address()
+    ssl_template = """SSLEngine on
+SSLVerifyClient require
+RequestHeader set REMOTE_USER %%{SSL_CLIENT_S_DN_CN}s
+SSLCertificateFile %(key_auth_certificate)s
+SSLCertificateKeyFile %(key_auth_key)s
+SSLCACertificateFile %(ca_certificate)s
+SSLCARevocationPath %(ca_crl)s"""
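+    # The snippet above enforces certificate-based authentication: clients
+    # must present a certificate signed by the instance CA (SSLVerifyClient
+    # require) and the certificate CN is forwarded to Zope as REMOTE_USER.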
+    apache_conf = self._getApacheConfigurationDict('key_auth_apache', ip, port)
+    apache_conf['ssl_snippet'] = ssl_template % dict(
+        key_auth_certificate=certificate,
+        key_auth_key=key,
+        ca_certificate=ca_conf['ca_certificate'],
+        ca_crl=ca_conf['ca_crl']
+        )
+    prefix = 'ssl_key_auth_apache'
+    rewrite_rule_template = \
+      "RewriteRule (.*) http://%(backend)s%(key_auth_path)s$1 [L,P]"
+    path_template = pkg_resources.resource_string('slapos.recipe.erp5',
+      'template/apache.zope.conf.path.in')
+    path = path_template % dict(path='/')
+    d = dict(
+          path=path,
+          backend=backend,
+          backend_path='/',
+          port=apache_conf['port'],
+          vhname=path.replace('/', ''),
+          key_auth_path=key_auth_path,
+    )
+    rewrite_rule = rewrite_rule_template % d
+    apache_conf.update(**dict(
+      path_enable=path,
+      rewrite_rule=rewrite_rule
+    ))
+    apache_config_file = self.createConfigurationFile(prefix + '.conf',
+        pkg_resources.resource_string('slapos.recipe.erp5',
+          'template/apache.zope.conf.in') % apache_conf)
+    self.path_list.append(apache_config_file)
+    self.path_list.extend(zc.buildout.easy_install.scripts([(
+      'key_auth_apache',
+        'slapos.recipe.erp5.apache', 'runApache')], self.ws,
+          sys.executable, self.wrapper_directory, arguments=[
+            dict(
+              required_path_list=[certificate, key, ca_conf['ca_certificate'],
+                ca_conf['ca_crl']],
+              binary=self.options['httpd_binary'],
+              config=apache_config_file
+            )
+          ]))
+    if ipv6:
+      return 'https://[%(ip)s]:%(port)s' % apache_conf
+    else:
+      return 'https://%(ip)s:%(port)s' % apache_conf
+
+  def installZopeCluster(self, ca_conf=None):
     """ Install ERP5 using ZEO Cluster
     """
     site_check_path = '/%s/getId' % self.site_id
@@ -153,6 +222,9 @@ class Recipe(BaseSlapRecipe):
     user_node_amount = int(self.options.get(
                    "cluster_user_node_amount", 2))
 
+    key_auth_node_amount = int(self.options.get(
+                   "key_auth_node_amount", 0))
+
     ip = self.getLocalIPv4Address()
     storage_dict = self._requestZeoFileStorage('Zeo Server 1', 'main')
 
@@ -161,6 +233,8 @@ class Recipe(BaseSlapRecipe):
 
     # XXX How to define good values for this?
     mount_point = '/'
+    zodb_cache_size = 5000
+    zeo_client_cache_size = '20MB'
     check_path = '/erp5/account_module'
 
     known_tid_storage_identifier_dict = {}
@@ -172,7 +246,8 @@ class Recipe(BaseSlapRecipe):
         self.getTemplateFilename('zope-zeo-snippet.conf.in'), dict(
         storage_name=storage_dict['storage_name'],
         address='%s:%s' % (storage_dict['ip'], storage_dict['port']),
-        mount_point=mount_point
+        mount_point=mount_point, zodb_cache_size=zodb_cache_size,
+        zeo_client_cache_size=zeo_client_cache_size
         ))
 
     zope_port = 12000
@@ -203,11 +278,28 @@ class Recipe(BaseSlapRecipe):
     login_haproxy = self.installHaproxy(ip, 15001, 'login',
                                site_check_path, login_url_list)
 
+    key_access = None
+    if key_auth_node_amount > 0:
+      service_url_list = []
+      for i in range(key_auth_node_amount):
+        zope_port += 1
+        service_url_list.append(self.installZope(ip, zope_port,
+          'zope_service_%s' % i, with_timerservice=False,
+          zodb_configuration_string=zodb_configuration_string,
+          tidstorage_config=tidstorage_config))
+      service_haproxy = self.installHaproxy(ip, 15000, 'service',
+          site_check_path, service_url_list)
+
+      key_auth_key, key_auth_certificate = self.requestCertificate(
+          'Key Based Access')
+      key_access = self.installKeyAuthorisationApache(True, 15500,
+          service_haproxy, key_auth_key, key_auth_certificate, ca_conf)
+
     self.installTidStorage(tidstorage_config['host'],
                            tidstorage_config['port'],
             known_tid_storage_identifier_dict, 'http://' + login_haproxy)
 
-    return login_url_list[-1], login_haproxy
+    return login_url_list[-1], login_haproxy, key_access
 
   def _requestZeoFileStorage(self, server_name, storage_name):
     """Local, slap.request compatible, call to ask for filestorage on Zeo
@@ -342,6 +434,27 @@ class Recipe(BaseSlapRecipe):
         memcached_ip=config['memcached_ip'],
         memcached_port=config['memcached_port'])
 
+  def installSphinxSearchd(self, ip, port=9312, sql_port=9306):
+    data_directory = self.createDataDirectory('sphinx')
+    sphinx_conf_path = self.createConfigurationFile('sphinx.conf',
+      self.substituteTemplate(self.getTemplateFilename('sphinx.conf.in'), dict(
+          ip_address=ip,
+          port=port,
+          sql_port=sql_port,
+          data_directory=data_directory,
+          log_directory=self.log_directory,
+          )))
+    self.path_list.append(sphinx_conf_path)
+    wrapper = zc.buildout.easy_install.scripts([('sphinx_searchd',
+     'slapos.recipe.librecipe.execute', 'execute')], self.ws, sys.executable,
+      self.wrapper_directory, arguments=[
+        self.options['sphinx_searchd_binary'].strip(), '-c', sphinx_conf_path, '--nodetach']
+      )[0]
+    self.path_list.append(wrapper)
+    return dict(sphinx_searchd_ip=ip,
+                sphinx_searchd_port=port,
+                sphinx_searchd_sql_port=sql_port)
+
   def installTestRunner(self, ca_conf, mysql_conf, conversion_server_conf,
                         memcached_conf, kumo_conf):
     """Installs bin/runUnitTest executable to run all tests using
@@ -546,14 +659,24 @@ class Recipe(BaseSlapRecipe):
       }
 
   def installHaproxy(self, ip, port, name, server_check_path, url_list):
-    server_template = """  server %(name)s %(address)s cookie %(name)s check inter 20s rise 2 fall 4"""
+    # inter must be quite short in order to quickly detect an unresponsive node
+    #      and to quickly detect a node which is back
+    # rise must be the minimum possible, 1: a node which is back does not need
+    #      to rest any longer and can be given work immediately
+    # fall should be quite short: with inter at 3 and fall at 2, a node will be
+    #      considered dead after 6 seconds
+    # maxconn should be set to the maximum number of threads we have per zope;
+    #      this way haproxy will manage the request queue and can move a request
+    #      to another node if the initially selected one is dead
+    server_template = """  server %(name)s %(address)s cookie %(name)s check inter 3s rise 1 fall 2 maxconn %(cluster_zope_thread_amount)s"""
     config = dict(name=name, ip=ip, port=port,
         server_check_path=server_check_path,)
     i = 1
     server_list = []
+    cluster_zope_thread_amount = self.options.get('cluster_zope_thread_amount', 1)
     for url in url_list:
       server_list.append(server_template % dict(name='%s_%s' % (name, i),
-        address=url))
+        address=url, cluster_zope_thread_amount=cluster_zope_thread_amount))
       i += 1
     config['server_text'] = '\n'.join(server_list)
     haproxy_conf_path = self.createConfigurationFile('haproxy_%s.cfg' % name,
@@ -640,10 +763,14 @@ class Recipe(BaseSlapRecipe):
     return user, password
 
   def installERP5Site(self, user, password, zope_access, mysql_conf,
-          conversion_server_conf=None, memcached_conf=None, kumo_conf=None,
-          erp5_site_id='erp5', default_bt5_list=[]):
-    """ Create a script controlled by supervisor, which creates a erp5
-    site on current available zope and mysql environment"""
+                      conversion_server_conf=None, memcached_conf=None,
+                      kumo_conf=None,
+                      erp5_site_id='erp5', default_bt5_list=[], ca_conf={},
+                      supervisor_controlled=True):
+    """
+    Create  a script  to  automatically set  up  an erp5  site (controlled  by
+    supervisor by default) on available zope and mysql environments.
+    """
     conversion_server = None
     if conversion_server_conf is not None:
       conversion_server = "%s:%s" % (conversion_server_conf['conversion_server_ip'],
@@ -660,9 +787,12 @@ class Recipe(BaseSlapRecipe):
     bt5_repository_list = self.parameter_dict.get("bt5_repository_list", "").split() \
       or getattr(self, 'bt5_repository_list', [])
 
-    self.path_list.extend(zc.buildout.easy_install.scripts([('erp5_update',
+    erp5_update_directory = supervisor_controlled and self.wrapper_directory or \
+        self.bin_directory
+
+    script = zc.buildout.easy_install.scripts([('erp5_update',
             __name__ + '.erp5', 'updateERP5')], self.ws,
-                  sys.executable, self.wrapper_directory,
+                  sys.executable, erp5_update_directory,
                   arguments=[erp5_site_id,
                              mysql_connection_string,
                              [user, password, zope_access],
@@ -670,7 +800,12 @@ class Recipe(BaseSlapRecipe):
                              conversion_server,
                              kumo_conf.get("kumo_address"),
                              bt5_list,
-                             bt5_repository_list]))
+                             bt5_repository_list,
+                             ca_conf.get('certificate_authority_path'),
+                             self.options.get('openssl_binary')])
+
+    self.path_list.extend(script)
+
     return []
 
   def installZeo(self, ip):
@@ -713,6 +848,26 @@ class Recipe(BaseSlapRecipe):
       self.path_list.append(wrapper)
     return zeo_configuration_dict
 
+  def installRepozo(self, zodb_root_path):
+    """
+    Add only repozo to cron (e.g. without tidstorage) allowing full
+    and incremental backups.
+    """
+    backup_path = self.createBackupDirectory('zodb')
+    repozo_cron_path = os.path.join(self.cron_d, 'repozo')
+    repozo_cron_file = open(repozo_cron_path, 'w')
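+    # Two cron entries: a full backup every Sunday at midnight and an hourly
+    # incremental backup.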
+    try:
+      repozo_cron_file.write('''
+0 0 * * 0 %(repozo_binary)s --backup --full --file="%(zodb_root_path)s" --repository="%(backup_path)s"
+0 * * * * %(repozo_binary)s --backup --file="%(zodb_root_path)s" --repository="%(backup_path)s"
+''' % dict(repozo_binary=self.options['repozo_binary'],
+           zodb_root_path=zodb_root_path,
+           backup_path=backup_path))
+    finally:
+      repozo_cron_file.close()
+
+    self.path_list.append(repozo_cron_path)
+
   def installTidStorage(self, ip, port, known_tid_storage_identifier_dict,
       access_url):
     """Install TidStorage with all required backup tools
@@ -825,10 +980,6 @@ class Recipe(BaseSlapRecipe):
                              self.erp5_directory, 'Products'))
     zope_config['products'] = '\n'.join(prefixed_products)
     zope_config['address'] = '%s:%s' % (ip, port)
-    zope_environment_list = []
-    for envk, envv in zope_environment.iteritems():
-      zope_environment_list.append('%s %s' % (envk, envv))
-    zope_config['environment'] = "\n".join(zope_environment_list)
 
     zope_wrapper_template_location = self.getTemplateFilename('zope.conf.in')
     zope_conf_content = self.substituteTemplate(
@@ -851,10 +1002,11 @@ class Recipe(BaseSlapRecipe):
     self.path_list.append(zope_conf_path)
     # Create init script
     wrapper = zc.buildout.easy_install.scripts([(name,
-     'slapos.recipe.librecipe.execute', 'execute')], self.ws, sys.executable,
+     'slapos.recipe.librecipe.execute', 'executee')], self.ws, sys.executable,
       self.wrapper_directory, arguments=[
-        self.options['runzope_binary'].strip(), '-C', zope_conf_path]
-      )[0]
+        [self.options['runzope_binary'].strip(), '-C', zope_conf_path],
+        zope_environment
+      ])[0]
     self.path_list.append(wrapper)
     return zope_config['address']
 
@@ -915,9 +1067,6 @@ class Recipe(BaseSlapRecipe):
         'template/apache.ssl-snippet.conf.in') % dict(
         login_certificate=certificate, login_key=key)
 
-    rewrite_rule_template = \
-        "RewriteRule ^%(path)s($|/.*) %(backend_url)s/VirtualHostBase/https/%(server_name)s:%(port)s%(backend_path)s/VirtualHostRoot/_vh_%(vhname)s$1 [L,P]\n"
-
     path = pkg_resources.resource_string(__name__,
            'template/apache.zope.conf.path-protected.in') % \
               dict(path='/', access_control_string='none')
@@ -931,14 +1080,24 @@ class Recipe(BaseSlapRecipe):
         'template/apache.zope.conf.path-protected.in')
       path += path_template % dict(path=frontend_path,
           access_control_string=access_control_string)
-    d = dict(
+
+    rewrite_rule_template = \
+        "RewriteRule ^%(path)s($|/.*) %(backend_url)s/VirtualHostBase/https/%(server_name)s:%(port)s%(backend_path)s/VirtualHostRoot/%(vhname)s$1 [L,P]\n"
+
+    if frontend_path not in ["", None, "/"]:
+      vhname = "_vh_%s" % frontend_path.replace('/', '')
+    else:
+      vhname = ""
+      frontend_path = ""
+
+    rewrite_rule = rewrite_rule_template % dict(
           path=frontend_path,
           backend_url=backend_url,
           backend_path=backend_path,
           port=apache_conf['port'],
-          vhname=frontend_path.replace('/', ''),
+          vhname=vhname,
           server_name=name)
-    rewrite_rule = rewrite_rule_template % d
+
     apache_conf.update(**dict(
       path_enable=path,
       rewrite_rule=rewrite_rule
@@ -987,7 +1146,7 @@ class Recipe(BaseSlapRecipe):
   def installMysqlServer(self, ip, port, database='erp5', user='user',
       test_database='test_erp5', test_user='test_user', template_filename=None,
       parallel_test_database_amount=100, mysql_conf=None, with_backup=True,
-      with_maatkit=True):
+      with_percona_toolkit=True):
     if mysql_conf is None:
       mysql_conf = {}
     backup_directory = self.createBackupDirectory('mysql')
@@ -1096,29 +1255,46 @@ class Recipe(BaseSlapRecipe):
       open(mysql_backup_cron, 'w').write('0 0 * * * ' + backup_controller)
       self.path_list.append(mysql_backup_cron)
 
-    if with_maatkit:
+    if with_percona_toolkit:
       # maatkit installation
-      for mk_script_name in (
-          'mk-variable-advisor',
-          'mk-table-usage',
-          'mk-visual-explain',
-          'mk-config-diff',
-          'mk-deadlock-logger',
-          'mk-error-log',
-          'mk-index-usage',
-          'mk-query-advisor',
+      for pt_script_name in (
+          'pt-archiver',
+          'pt-config-diff',
+          'pt-deadlock-logger',
+          'pt-duplicate-key-checker',
+          'pt-fifo-split',
+          'pt-find',
+          'pt-fk-error-logger',
+          'pt-heartbeat',
+          'pt-index-usage',
+          'pt-kill',
+          'pt-log-player',
+          'pt-online-schema-change',
+          'pt-query-advisor',
+          'pt-query-digest',
+          'pt-show-grants',
+          'pt-slave-delay',
+          'pt-slave-find',
+          'pt-slave-restart',
+          'pt-table-checksum',
+          'pt-table-sync',
+          'pt-tcp-model',
+          'pt-trend',
+          'pt-upgrade',
+          'pt-variable-advisor',
+          'pt-visual-explain',
           ):
-        mk_argument_list = [self.options['perl_binary'],
-            self.options['%s_binary' % mk_script_name],
+        pt_argument_list = [self.options['perl_binary'],
+            self.options['%s_binary' % pt_script_name],
             '--defaults-file=%s' % mysql_conf_path,
             '--socket=%s' %mysql_conf['socket'].strip(), '--user=root',
             ]
         environment = dict(PATH='%s' % self.bin_directory)
-        mk_exe = zc.buildout.easy_install.scripts([(
-          mk_script_name,'slapos.recipe.librecipe.execute', 'executee')],
+        pt_exe = zc.buildout.easy_install.scripts([(
+          pt_script_name,'slapos.recipe.librecipe.execute', 'executee')],
           self.ws, sys.executable, self.bin_directory, arguments=[
-            mk_argument_list, environment])[0]
-        self.path_list.append(mk_exe)
+            pt_argument_list, environment])[0]
+        self.path_list.append(pt_exe)
 
     # The return could be more explicit database, user ...
     return mysql_conf
diff --git a/slapos/recipe/erp5/erp5.py b/slapos/recipe/erp5/erp5.py
index bf99ad1150549a9c3ec602d00248b0b71c84d572..c5cadbade50f191ca6927d6aac0dc06a199976f8 100644
--- a/slapos/recipe/erp5/erp5.py
+++ b/slapos/recipe/erp5/erp5.py
@@ -37,12 +37,14 @@ class ERP5Updater(object):
   erp5_catalog_storage = "erp5_mysql_innodb_catalog"
   header_dict = {}
 
-  sleeping_time = 120
+  sleeping_time = 300
+  short_sleeping_time = 60
 
   def __init__(self, user, password, host,
       site_id, mysql_url, memcached_address,
       conversion_server_address, persistent_cache_address,
-      bt5_list, bt5_repository_list):
+      bt5_list, bt5_repository_list, certificate_authority_path,
+      openssl_binary):
 
     authentication_string = '%s:%s' % (user, password)
     base64string = base64.encodestring(authentication_string).strip()
@@ -54,13 +56,17 @@ class ERP5Updater(object):
     self.business_template_repository_list = bt5_repository_list
     self.business_template_list = bt5_list
     self.memcached_address = memcached_address
-    self.persintent_cached_address = persistent_cache_address
+    self.persistent_cached_address = persistent_cache_address
     self.mysql_url = mysql_url
 
     host, port = conversion_server_address.split(":")
     self.conversion_server_address = host
     self.conversion_server_port = int(port)
 
+    # Certificate Authority Tool configuration
+    self.certificate_authority_path = certificate_authority_path
+    self.openssl_binary = openssl_binary
+
   def log(self, level, message):
     date = time.strftime("%a, %d %b %Y %H:%M:%S +0000")
     print "%s - %s : %s" % (date, level, message)
@@ -147,34 +153,25 @@ class ERP5Updater(object):
     return [i for i in self.business_template_repository_list
                     if i not in found_list]
 
-  def getMissingBusinessTemplateList(self):
-    bt5_dict = self.getSystemSignatureDict("business_template_dict", [])
-    found_bt5_list = bt5_dict.keys()
-    return [bt for bt in self.business_template_list\
-                          if bt not in found_bt5_list]
-
-  def isBusinessTemplateUpdated(self):
-    return len(self.getMissingBusinessTemplateList()) == 0
-
-  def isBusinessTemplateRepositoryUpdated(self):
-    return len(self.getMissingBusinessTemplateRepositoryList()) == 0
+  def getMissingBusinessTemplateSet(self):
+    found_dict = self.getSystemSignatureDict("business_template_dict", {})
+    return set(self.business_template_list).difference(found_dict)
 
   def updateBusinessTemplateList(self):
     """ Update Business Template Configuration, including the repositories
     """
-    if not self.isBusinessTemplateUpdated():
-      # Before update the business templates, it is required to make
-      # sure the repositories are updated.
-      if not self.isBusinessTemplateRepositoryUpdated():
-        # Require to update Business template Repository
-        repository_list = self.getSystemSignatureDict(
-           "business_template_repository_list", [])
-        repository_list.extend(self.getMissingBusinessTemplateRepositoryList())
-        self._setRepositoryList(repository_list)
+    missing_business_template_set = self.getMissingBusinessTemplateSet()
+    if missing_business_template_set:
+      # Before updating  the business templates,  it is required to  make sure
+      # the  repositories are  updated,  thus  update them  even  if they  are
+      # already present because there may be new business templates...
+      repository_list = self.getSystemSignatureDict(
+        "business_template_repository_list", [])
+      repository_list.extend(self.getMissingBusinessTemplateRepositoryList())
+      self._setRepositoryList(repository_list)
 
       # Require to update Business template
-      for bt in self.getMissingBusinessTemplateList():
-        self._installBusinessTemplateList([bt])
+      self._installBusinessTemplateList(list(missing_business_template_set))
       return True
 
     return False
@@ -186,18 +183,20 @@ class ERP5Updater(object):
 
   def _installBusinessTemplateList(self, name_list, update_catalog=False):
     """ Install a Business Template on Remote ERP5 setup """
-    set_path = "/%s/portal_templates/installBusinessTemplatesFromRepositories" % self.site_id
+    set_path = "/%s/portal_templates/installBusinessTemplateListFromRepository" % self.site_id
     self.POST(set_path, {"template_list": name_list,
                          "only_newer": 1,
-                         "update_catalog": int(update_catalog)})
+                         "update_catalog": int(update_catalog),
+                         "activate": 1,
+                         "install_dependency": 1})
 
-  def _createActiveSystemPreference(self):
+  def _createActiveSystemPreference(self, edit_kw={}):
     """ Assert that at least one enabled System Preference is present on
         the erp5 instance.
     """
     self.log("INFO", "Try to create New System Preference into ERP5!")
     path = "/%s/portal_preferences/createActiveSystemPreference" % self.site_id
-    status, data = self.POST(path, {})
+    status, data = self.POST(path, edit_kw)
     if status != 200:
       self.log("ERROR", "Unable to create System Preference, an error ocurred %s." % data)
 
@@ -217,18 +216,62 @@ class ERP5Updater(object):
 
     if None in [host_key, port_key]:
       self.log("ERROR", "Unable to find the Active System Preference to Update!")
-      self._createActiveSystemPreference()
+      self._createActiveSystemPreference(
+          {"preferred_ooodoc_server_address" : self.conversion_server_address,
+           "preferred_ooodoc_server_port_number": self.conversion_server_port })
       return True
 
     is_updated = self._assertAndUpdateDocument(host_key, self.conversion_server_address,
          "setPreferredOoodocServerAddress")
 
-    is_updated = is_updated or self._assertAndUpdateDocument(port_key,
+    is_updated = self._assertAndUpdateDocument(port_key,
          self.conversion_server_port,
-         "setPreferredOoodocServerPortNumber")
+         "setPreferredOoodocServerPortNumber") or is_updated
 
     return is_updated
 
+  def updateCertificateAuthority(self):
+    """ Update the certificate authority only if is not configured yet """
+    if self.isCertificateAuthorityAvailable():
+      if self.isCertificateAuthorityConfigured():
+        return True
+
+      path = "/%s/portal_certificate_authority/" \
+             "manage_editCertificateAuthorityTool" % self.site_id
+      self.POST(path, {"certificate_authority_path": self.certificate_authority_path,
+                       "openssl_binary": self.openssl_binary})
+
+
+  def isCertificateAuthorityAvailable(self):
+    """ Check if certificate Authority is available. """
+    external_connection_dict = self.system_signature_dict[
+      'external_connection_dict']
+    if 'portal_certificate_authority/certificate_authority_path' in \
+      external_connection_dict:
+      return True
+    return False
+
+
+  def isCertificateAuthorityConfigured(self):
+    """ Check if certificate Authority is configured correctly. """
+    external_connection_dict = self.system_signature_dict[
+      'external_connection_dict']
+    if self.certificate_authority_path == external_connection_dict.get(
+          'portal_certificate_authority/certificate_authority_path') and \
+       self.openssl_binary == external_connection_dict.get(
+          'portal_certificate_authority/openssl_binary'):
+      return True
+    return False
+
   def updateMemcached(self):
     # Assert Memcached configuration
     self._assertAndUpdateDocument(
@@ -239,7 +282,7 @@ class ERP5Updater(object):
     # Assert Persistent cache configuration (Kumofs)
     self._assertAndUpdateDocument(
       "portal_memcached/persistent_memcached_plugin/getUrlString",
-      self.persintent_cached_address,
+      self.persistent_cached_address,
       "setUrlString")
 
   def _assertAndUpdateDocument(self, key, expected_value, update_method):
@@ -258,24 +301,9 @@ class ERP5Updater(object):
       return True
     return False
 
-  def updateMysql(self):
-    """ This API is not implemented yet, because it is not needed to
-    update Mysql Connection on ERP5 Sites.
-    """
-    pass
-
-  def updatePortalActivities(self):
-    """ This API is not implemented yet, because it is not needed for
-        a single instance configuration. This method should define which
-        instances will handle activities, which one will distribute
-        activities
-    """
-    pass
-
   def updateERP5Site(self):
     if not self.isERP5Present():
-      url = '/manage_addProduct/ERP5/manage_addERP5Site'
-      self.POST(url, {
+      self.POST('/manage_addProduct/ERP5/manage_addERP5Site', {
           "id": self.site_id,
           "erp5_catalog_storage": self.erp5_catalog_storage,
           "erp5_sql_connection_string": self.mysql_url,
@@ -286,54 +314,30 @@ class ERP5Updater(object):
   def _hasActivityPresent(self):
     activity_dict = self.getSystemSignatureDict("activity_dict")
     if activity_dict["total"] > 0:
+      self.log("DEBUG", "Waiting for activities on ERP5...")
       return True
+    return False
 
   def _hasFailureActivity(self):
     activity_dict = self.getSystemSignatureDict("activity_dict")
     if activity_dict["failure"] > 0:
+       self.log("ERROR", "Update progress found Failure activities" +\
+                         "and it will not be able to progress until" +\
+                         " activites issue be solved")
        return True
-
-  def _updatePreRequiredBusinessTemplateList(self):
-    """ Update only the first part of bt5."""
-
-    # This list contains the minimal set of bt5 required to install
-    # portal_introspections. Move portal_introspection to erp5_core
-    # can remove this set.
-    pre_required_business_template_list = [i for i in self.business_template_list\
-                if i.startswith("erp5_full_text") or i == "erp5_base"]
-
-    if len(self.business_template_repository_list) > 0 and \
-         len(pre_required_business_template_list):
-      pre_required_business_template_list.insert(0, "erp5_core_proxy_field_legacy")
-      self._setRepositoryList(self.business_template_repository_list)
-      time.sleep(30)
-      for bt in pre_required_business_template_list:
-        update_catalog = bt.endswith("_catalog")
-        self._installBusinessTemplateList([bt], update_catalog)
-    else:
-      self.log("ERROR", "Unable to install erp5_base, it is not on your " +\
-               "requested business templates list. Once it is installed " +\
-               "setup will continue")
+    return False
 
   def run(self):
     """ Keep running until kill"""
     while 1:
-      time.sleep(30)
+      time.sleep(self.short_sleeping_time)
       if not self.updateERP5Site():
         self.loadSystemSignatureDict()
-        if self.getSystemSignatureDict() is None:
-          self.log("INFO", "The erp5_base is not installed yet, trying to " +\
-                           "install it before continue.")
-          self._updatePreRequiredBusinessTemplateList()
-          time.sleep(60)
+        if self._hasFailureActivity():
+          time.sleep(self.sleeping_time)
           continue
-
+
         if self._hasActivityPresent():
-          self.log("DEBUG", "Waiting for activities on ERP5...")
-          if self._hasFailureActivity():
-            self.log("ERROR", "Update progress found " +\
-                     "Failure activities and it will not progress until " +\
-                     " activites issue be solved")
           continue
 
         if self.updateBusinessTemplateList():
@@ -341,11 +345,8 @@ class ERP5Updater(object):
 
         self.updateMemcached()
         if self.updateConversionServer():
-          # If update Conversion Server adds a bit more delay to continue
-          # To wait for activiies.
-          time.sleep(60)
           continue
-
+        self.updateCertificateAuthority()
         time.sleep(self.sleeping_time)
 
 def updateERP5(argument_list):
@@ -356,6 +357,8 @@ def updateERP5(argument_list):
   conversion_server_address = argument_list[4]
   persistent_cache_provider = argument_list[5]
   bt5_list = argument_list[6]
+  certificate_authority_path = argument_list[8]
+  openssl_binary = argument_list[9]
   bt5_repository_list = []
 
   if len(argument_list) > 7:
@@ -374,6 +377,8 @@ def updateERP5(argument_list):
     conversion_server_address=conversion_server_address,
     persistent_cache_address=persistent_cache_provider,
     bt5_list=bt5_list,
-    bt5_repository_list=bt5_repository_list)
+    bt5_repository_list=bt5_repository_list,
+    certificate_authority_path=certificate_authority_path,
+    openssl_binary=openssl_binary)
 
   erp5_upgrader.run()
diff --git a/slapos/recipe/erp5/template/apache.ssl-snippet.conf.in b/slapos/recipe/erp5/template/apache.ssl-snippet.conf.in
index 0dd6e653d59bd2e9e85f5fd29b140d96814cd8a0..f85a164cb87fee8995a4ee4bfe8f99a18906cc80 100644
--- a/slapos/recipe/erp5/template/apache.ssl-snippet.conf.in
+++ b/slapos/recipe/erp5/template/apache.ssl-snippet.conf.in
@@ -4,3 +4,4 @@ SSLCertificateKeyFile %(login_key)s
 SSLRandomSeed startup builtin
 SSLRandomSeed connect builtin
 
+SSLProxyEngine On
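+# SSLProxyEngine On enables TLS for proxied connections, so that mod_proxy
+# is allowed to forward requests to https:// backends.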
diff --git a/slapos/recipe/erp5/template/apache.zope.conf.in b/slapos/recipe/erp5/template/apache.zope.conf.in
index 131040d535278991f7d2f71b68c90c7224bf3953..f24a5327b68651531f1f7e25832dfd36d85cded3 100644
--- a/slapos/recipe/erp5/template/apache.zope.conf.in
+++ b/slapos/recipe/erp5/template/apache.zope.conf.in
@@ -34,9 +34,9 @@ RequestHeader unset REMOTE_USER
 
 # Log configuration
 ErrorLog "%(error_log)s"
-LogFormat "%%h %%{REMOTE_USER}i %%l %%u %%t \"%%r\" %%>s %%b \"%%{Referer}i\" \"%%{User-Agent}i\"" combined
-LogFormat "%%h %%{REMOTE_USER}i %%l %%u %%t \"%%r\" %%>s %%b" common
-CustomLog "%(access_log)s" common
+# Default apache log format with the request time in microseconds at the end
+LogFormat "%%h %%l %%u %%t \"%%r\" %%>s %%b \"%%{Referer}i\" \"%%{User-Agent}i\" %%D" combined
+CustomLog "%(access_log)s" combined
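+# A matching access log line would look like:
+#   ::1 - - [12/Oct/2011:10:00:00 +0200] "GET / HTTP/1.1" 200 123 "-" "curl/7.21.0" 4567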
 
 # Directory protection
 <Directory />
diff --git a/slapos/recipe/erp5/template/haproxy.cfg.in b/slapos/recipe/erp5/template/haproxy.cfg.in
index 4087c1a4eacdc46a4af9c374417471ed265a7062..aa8f8a865ecb58d166cfebcc911c681780651ae8 100644
--- a/slapos/recipe/erp5/template/haproxy.cfg.in
+++ b/slapos/recipe/erp5/template/haproxy.cfg.in
@@ -9,10 +9,25 @@ defaults
   retries 1
   option redispatch
   maxconn 2000
-  timeout server 3000s
-  timeout queue 5s
-  timeout connect 10s
-  timeout client 3600s
+  # It is useless to have a timeout much bigger than the one of apache.
+  # By default apache uses 300s, so we set slightly more in order to
+  # make sure that apache will close the connection first.
+  timeout server 305s
+  # Stop waiting in queue for a zope to become available.
+  # If no zope can be reached after one minute, consider the request will
+  # never succeed.
+  timeout queue 60s
+  # The connection should be immediate on LAN, so we should not set
+  # more than 5 seconds; even that may already be too much.
+  timeout connect 5s
+  # As recommended by the haproxy documentation, make this
+  # "at least equal to timeout server".
+  timeout client 305s
+  # Use "option httpclose" to not preserve client & server persistent connections
+  # while handling every incoming request individually, dispatching them one after
+  # another to servers, in HTTP close mode. This is really needed when haproxy
+  # is configured with maxconn set to 1; without this option, browsers are unable
+  # to render a page.
+  option httpclose
 
 listen %(name)s %(ip)s:%(port)s
   cookie  SERVERID insert
diff --git a/slapos/recipe/erp5/template/initmysql.sql.in b/slapos/recipe/erp5/template/initmysql.sql.in
index 93256efae53bda3e52a30142bb01cf7bbed66312..981ffbc65a0d3ee40c2f7f565899f0ce25fb5e53 100644
--- a/slapos/recipe/erp5/template/initmysql.sql.in
+++ b/slapos/recipe/erp5/template/initmysql.sql.in
@@ -1,2 +1,3 @@
 CREATE DATABASE IF NOT EXISTS %(mysql_database)s;
 GRANT ALL PRIVILEGES ON %(mysql_database)s.* TO %(mysql_user)s@'%%' IDENTIFIED BY '%(mysql_password)s';
+GRANT ALL PRIVILEGES ON %(mysql_database)s.* TO %(mysql_user)s@'localhost' IDENTIFIED BY '%(mysql_password)s';
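+-- The extra grant for 'localhost' is added because local (socket) connections
+-- may not be matched against the wildcard-host grant above, depending on how
+-- the server sorts its user table entries.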
diff --git a/slapos/recipe/erp5/template/logrotate_entry.in b/slapos/recipe/erp5/template/logrotate_entry.in
index bfa2abf0970af28f8cab98793db9b09db85d1847..14461dccf875d9ae57ef4728bc77525855a20041 100644
--- a/slapos/recipe/erp5/template/logrotate_entry.in
+++ b/slapos/recipe/erp5/template/logrotate_entry.in
@@ -1,7 +1,7 @@
 %(file_list)s {
   daily
   dateext
-  rotate 30
+  rotate 3650
   compress
   notifempty
   sharedscripts
diff --git a/slapos/recipe/erp5/template/sphinx.conf.in b/slapos/recipe/erp5/template/sphinx.conf.in
new file mode 100644
index 0000000000000000000000000000000000000000..984c4b0b605b35009678dc07dfea2ac2f052934e
--- /dev/null
+++ b/slapos/recipe/erp5/template/sphinx.conf.in
@@ -0,0 +1,596 @@
+#############################################################################
+## index definition
+#############################################################################
+
+# realtime index
+#
+# you can run INSERT, REPLACE, and DELETE on this index on the fly
+# using MySQL protocol (see 'listen' directive below)
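+#
+# a minimal SphinxQL sketch (assuming searchd is configured with a mysql41
+# listener, e.g. "listen = 9306:mysql41"; adjust to the recipe's actual
+# 'listen' directive):
+#
+#   INSERT INTO erp5 (id, uid, SearchableText) VALUES (1, 42, 'some text');
+#   SELECT * FROM erp5 WHERE MATCH('some text');
+#   DELETE FROM erp5 WHERE id = 1;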
+index erp5
+{
+	# 'rt' index type must be specified to use RT index
+	type			= rt
+
+	# index files path and file name, without extension
+	# mandatory, path must be writable, extensions will be auto-appended
+	path			= %(data_directory)s/erp5
+
+	# RAM chunk size limit
+	# RT index will keep at most this much data in RAM, then flush to disk
+	# optional, default is 32M
+	#
+	# rt_mem_limit		= 512M
+
+	# full-text field declaration
+	# multi-value, mandatory
+	rt_field		= SearchableText
+
+	# unsigned integer attribute declaration
+	# multi-value (an arbitrary number of attributes is allowed), optional
+	# declares an unsigned 32-bit attribute
+	rt_attr_uint		= uid
+
+	# RT indexes currently support the following attribute types:
+	# uint, bigint, float, timestamp, string
+	#
+	# rt_attr_bigint		= guid
+	# rt_attr_float		= gpa
+	# rt_attr_timestamp	= ts_added
+	# rt_attr_string		= author
+
+	# document attribute values (docinfo) storage mode
+	# optional, default is 'extern'
+	# known values are 'none', 'extern' and 'inline'
+	# docinfo			= extern
+
+	# memory locking for cached data (.spa and .spi), to prevent swapping
+	# optional, default is 0 (do not mlock)
+	# requires searchd to be run from root
+	# mlock			= 0
+
+	# a list of morphology preprocessors to apply
+	# optional, default is empty
+	#
+	# builtin preprocessors are 'none', 'stem_en', 'stem_ru', 'stem_enru',
+	# 'soundex', and 'metaphone'; additional preprocessors available from
+	# libstemmer are 'libstemmer_XXX', where XXX is algorithm code
+	# (see libstemmer_c/libstemmer/modules.txt)
+	#
+	# morphology		= stem_en, stem_ru, soundex
+	# morphology		= libstemmer_german
+	# morphology		= libstemmer_sv
+	morphology		= stem_en
+
+	# minimum word length at which to enable stemming
+	# optional, default is 1 (stem everything)
+	#
+	# min_stemming_len	= 1
+
+	# stopword files list (space separated)
+	# optional, default is empty
+	# contents are plain text, charset_table and stemming are both applied
+	#
+	# stopwords		= %(data_directory)s/erp5/stopwords.txt
+
+	# wordforms file, in "mapfrom > mapto" plain text format
+	# optional, default is empty
+	#
+	# wordforms		= %(data_directory)s/erp5/wordforms.txt
+
+	# tokenizing exceptions file
+	# optional, default is empty
+	#
+	# plain text, case sensitive, space insensitive in map-from part
+	# one "Map Several Words => ToASingleOne" entry per line
+	#
+	# exceptions		= %(data_directory)s/erp5/exceptions.txt
+
+	# minimum indexed word length
+	# default is 1 (index everything)
+	min_word_len		= 1
+
+	# charset encoding type
+	# optional, default is 'sbcs'
+	# known types are 'sbcs' (Single Byte CharSet) and 'utf-8'
+	charset_type		= utf-8
+
+	# charset definition and case folding rules "table"
+	# optional, default value depends on charset_type
+	#
+	# defaults are configured to include English and Russian characters only
+	# you need to change the table to include additional ones
+	# this behavior MAY change in future versions
+	#
+	# 'sbcs' default value is
+	# charset_table		= 0..9, A..Z->a..z, _, a..z, U+A8->U+B8, U+B8, U+C0..U+DF->U+E0..U+FF, U+E0..U+FF
+	#
+	# 'utf-8' default value is
+	# charset_table		= 0..9, A..Z->a..z, _, a..z, U+410..U+42F->U+430..U+44F, U+430..U+44F
+	charset_table		= \
+		U+00C0->a, U+00C1->a, U+00C2->a, U+00C3->a, U+00C4->a, U+00C5->a, U+00E0->a, U+00E1->a, U+00E2->a, U+00E3->a, U+00E4->a, U+00E5->a, U+0100->a, U+0101->a, U+0102->a, U+0103->a, U+010300->a, U+0104->a, U+0105->a, U+01CD->a, U+01CE->a, U+01DE->a, U+01DF->a,  \
+		U+01E0->a, U+01E1->a, U+01FA->a, U+01FB->a, U+0200->a, U+0201->a, U+0202->a, U+0203->a, U+0226->a, U+0227->a, U+023A->a, U+0250->a, U+04D0->a, U+04D1->a, U+1D2C->a, U+1D43->a, U+1D44->a, U+1D8F->a, U+1E00->a, U+1E01->a, U+1E9A->a, U+1EA0->a, U+1EA1->a,  \
+		U+1EA2->a, U+1EA3->a, U+1EA4->a, U+1EA5->a, U+1EA6->a, U+1EA7->a, U+1EA8->a, U+1EA9->a, U+1EAA->a, U+1EAB->a, U+1EAC->a, U+1EAD->a, U+1EAE->a, U+1EAF->a, U+1EB0->a, U+1EB1->a, U+1EB2->a, U+1EB3->a, U+1EB4->a, U+1EB5->a, U+1EB6->a, U+1EB7->a, U+2090->a,  \
+		U+2C65->a, U+0180->b, U+0181->b, U+0182->b, U+0183->b, U+0243->b, U+0253->b, U+0299->b, U+16D2->b, U+1D03->b, U+1D2E->b, U+1D2F->b, U+1D47->b, U+1D6C->b, U+1D80->b, U+1E02->b, U+1E03->b, U+1E04->b, U+1E05->b, U+1E06->b, U+1E07->b, U+00C7->c, U+00E7->c,  \
+		U+0106->c, U+0107->c, U+0108->c, U+0109->c, U+010A->c, U+010B->c, U+010C->c, U+010D->c, U+0187->c, U+0188->c, U+023B->c, U+023C->c, U+0255->c, U+0297->c, U+1D9C->c, U+1D9D->c, U+1E08->c, U+1E09->c, U+212D->c, U+2184->c, U+010E->d, U+010F->d, U+0110->d,  \
+		U+0111->d, U+0189->d, U+018A->d, U+018B->d, U+018C->d, U+01C5->d, U+01F2->d, U+0221->d, U+0256->d, U+0257->d, U+1D05->d, U+1D30->d, U+1D48->d, U+1D6D->d, U+1D81->d, U+1D91->d, U+1E0A->d, U+1E0B->d, U+1E0C->d, U+1E0D->d, U+1E0E->d, U+1E0F->d, U+1E10->d,  \
+		U+1E11->d, U+1E12->d, U+1E13->d, U+00C8->e, U+00C9->e, U+00CA->e, U+00CB->e, U+00E8->e, U+00E9->e, U+00EA->e, U+00EB->e, U+0112->e, U+0113->e, U+0114->e, U+0115->e, U+0116->e, U+0117->e, U+0118->e, U+0119->e, U+011A->e, U+011B->e, U+018E->e, U+0190->e,  \
+		U+01DD->e, U+0204->e, U+0205->e, U+0206->e, U+0207->e, U+0228->e, U+0229->e, U+0246->e, U+0247->e, U+0258->e, U+025B->e, U+025C->e, U+025D->e, U+025E->e, U+029A->e, U+1D07->e, U+1D08->e, U+1D31->e, U+1D32->e, U+1D49->e, U+1D4B->e, U+1D4C->e, U+1D92->e,  \
+		U+1D93->e, U+1D94->e, U+1D9F->e, U+1E14->e, U+1E15->e, U+1E16->e, U+1E17->e, U+1E18->e, U+1E19->e, U+1E1A->e, U+1E1B->e, U+1E1C->e, U+1E1D->e, U+1EB8->e, U+1EB9->e, U+1EBA->e, U+1EBB->e, U+1EBC->e, U+1EBD->e, U+1EBE->e, U+1EBF->e, U+1EC0->e, U+1EC1->e,  \
+		U+1EC2->e, U+1EC3->e, U+1EC4->e, U+1EC5->e, U+1EC6->e, U+1EC7->e, U+2091->e, U+0191->f, U+0192->f, U+1D6E->f, U+1D82->f, U+1DA0->f, U+1E1E->f, U+1E1F->f, U+011C->g, U+011D->g, U+011E->g, U+011F->g, U+0120->g, U+0121->g, U+0122->g, U+0123->g, U+0193->g,  \
+		U+01E4->g, U+01E5->g, U+01E6->g, U+01E7->g, U+01F4->g, U+01F5->g, U+0260->g, U+0261->g, U+0262->g, U+029B->g, U+1D33->g, U+1D4D->g, U+1D77->g, U+1D79->g, U+1D83->g, U+1DA2->g, U+1E20->g, U+1E21->g, U+0124->h, U+0125->h, U+0126->h, U+0127->h, U+021E->h,  \
+		U+021F->h, U+0265->h, U+0266->h, U+029C->h, U+02AE->h, U+02AF->h, U+02B0->h, U+02B1->h, U+1D34->h, U+1DA3->h, U+1E22->h, U+1E23->h, U+1E24->h, U+1E25->h, U+1E26->h, U+1E27->h, U+1E28->h, U+1E29->h, U+1E2A->h, U+1E2B->h, U+1E96->h, U+210C->h, U+2C67->h,  \
+		U+2C68->h, U+2C75->h, U+2C76->h, U+00CC->i, U+00CD->i, U+00CE->i, U+00CF->i, U+00EC->i, U+00ED->i, U+00EE->i, U+00EF->i, U+010309->i, U+0128->i, U+0129->i, U+012A->i, U+012B->i, U+012C->i, U+012D->i, U+012E->i, U+012F->i, U+0130->i, U+0131->i, U+0197->i,  \
+		U+01CF->i, U+01D0->i, U+0208->i, U+0209->i, U+020A->i, U+020B->i, U+0268->i, U+026A->i, U+040D->i, U+0418->i, U+0419->i, U+0438->i, U+0439->i, U+0456->i, U+1D09->i, U+1D35->i, U+1D4E->i, U+1D62->i, U+1D7B->i, U+1D96->i, U+1DA4->i, U+1DA6->i, U+1DA7->i,  \
+		U+1E2C->i, U+1E2D->i, U+1E2E->i, U+1E2F->i, U+1EC8->i, U+1EC9->i, U+1ECA->i, U+1ECB->i, U+2071->i, U+2111->i, U+0134->j, U+0135->j, U+01C8->j, U+01CB->j, U+01F0->j, U+0237->j, U+0248->j, U+0249->j, U+025F->j, U+0284->j, U+029D->j, U+02B2->j, U+1D0A->j,  \
+		U+1D36->j, U+1DA1->j, U+1DA8->j, U+0136->k, U+0137->k, U+0198->k, U+0199->k, U+01E8->k, U+01E9->k, U+029E->k, U+1D0B->k, U+1D37->k, U+1D4F->k, U+1D84->k, U+1E30->k, U+1E31->k, U+1E32->k, U+1E33->k, U+1E34->k, U+1E35->k, U+2C69->k, U+2C6A->k, U+0139->l,  \
+		U+013A->l, U+013B->l, U+013C->l, U+013D->l, U+013E->l, U+013F->l, U+0140->l, U+0141->l, U+0142->l, U+019A->l, U+01C8->l, U+0234->l, U+023D->l, U+026B->l, U+026C->l, U+026D->l, U+029F->l, U+02E1->l, U+1D0C->l, U+1D38->l, U+1D85->l, U+1DA9->l, U+1DAA->l,  \
+		U+1DAB->l, U+1E36->l, U+1E37->l, U+1E38->l, U+1E39->l, U+1E3A->l, U+1E3B->l, U+1E3C->l, U+1E3D->l, U+2C60->l, U+2C61->l, U+2C62->l, U+019C->m, U+026F->m, U+0270->m, U+0271->m, U+1D0D->m, U+1D1F->m, U+1D39->m, U+1D50->m, U+1D5A->m, U+1D6F->m, U+1D86->m,  \
+		U+1DAC->m, U+1DAD->m, U+1E3E->m, U+1E3F->m, U+1E40->m, U+1E41->m, U+1E42->m, U+1E43->m, U+00D1->n, U+00F1->n, U+0143->n, U+0144->n, U+0145->n, U+0146->n, U+0147->n, U+0148->n, U+0149->n, U+019D->n, U+019E->n, U+01CB->n, U+01F8->n, U+01F9->n, U+0220->n,  \
+		U+0235->n, U+0272->n, U+0273->n, U+0274->n, U+1D0E->n, U+1D3A->n, U+1D3B->n, U+1D70->n, U+1D87->n, U+1DAE->n, U+1DAF->n, U+1DB0->n, U+1E44->n, U+1E45->n, U+1E46->n, U+1E47->n, U+1E48->n, U+1E49->n, U+1E4A->n, U+1E4B->n, U+207F->n, U+00D2->o, U+00D3->o,  \
+		U+00D4->o, U+00D5->o, U+00D6->o, U+00D8->o, U+00F2->o, U+00F3->o, U+00F4->o, U+00F5->o, U+00F6->o, U+00F8->o, U+01030F->o, U+014C->o, U+014D->o, U+014E->o, U+014F->o, U+0150->o, U+0151->o, U+0186->o, U+019F->o, U+01A0->o, U+01A1->o, U+01D1->o, U+01D2->o,  \
+		U+01EA->o, U+01EB->o, U+01EC->o, U+01ED->o, U+01FE->o, U+01FF->o, U+020C->o, U+020D->o, U+020E->o, U+020F->o, U+022A->o, U+022B->o, U+022C->o, U+022D->o, U+022E->o, U+022F->o, U+0230->o, U+0231->o, U+0254->o, U+0275->o, U+043E->o, U+04E6->o, U+04E7->o,  \
+		U+04E8->o, U+04E9->o, U+04EA->o, U+04EB->o, U+1D0F->o, U+1D10->o, U+1D11->o, U+1D12->o, U+1D13->o, U+1D16->o, U+1D17->o, U+1D3C->o, U+1D52->o, U+1D53->o, U+1D54->o, U+1D55->o, U+1D97->o, U+1DB1->o, U+1E4C->o, U+1E4D->o, U+1E4E->o, U+1E4F->o, U+1E50->o,  \
+		U+1E51->o, U+1E52->o, U+1E53->o, U+1ECC->o, U+1ECD->o, U+1ECE->o, U+1ECF->o, U+1ED0->o, U+1ED1->o, U+1ED2->o, U+1ED3->o, U+1ED4->o, U+1ED5->o, U+1ED6->o, U+1ED7->o, U+1ED8->o, U+1ED9->o, U+1EDA->o, U+1EDB->o, U+1EDC->o, U+1EDD->o, U+1EDE->o, U+1EDF->o,  \
+		U+1EE0->o, U+1EE1->o, U+1EE2->o, U+1EE3->o, U+2092->o, U+2C9E->o, U+2C9F->o, U+01A4->p, U+01A5->p, U+1D18->p, U+1D3E->p, U+1D56->p, U+1D71->p, U+1D7D->p, U+1D88->p, U+1E54->p, U+1E55->p, U+1E56->p, U+1E57->p, U+2C63->p, U+024A->q, U+024B->q, U+02A0->q,  \
+		U+0154->r, U+0155->r, U+0156->r, U+0157->r, U+0158->r, U+0159->r, U+0210->r, U+0211->r, U+0212->r, U+0213->r, U+024C->r, U+024D->r, U+0279->r, U+027A->r, U+027B->r, U+027C->r, U+027D->r, U+027E->r, U+027F->r, U+0280->r, U+0281->r, U+02B3->r, U+02B4->r,  \
+		U+02B5->r, U+02B6->r, U+1D19->r, U+1D1A->r, U+1D3F->r, U+1D63->r, U+1D72->r, U+1D73->r, U+1D89->r, U+1DCA->r, U+1E58->r, U+1E59->r, U+1E5A->r, U+1E5B->r, U+1E5C->r, U+1E5D->r, U+1E5E->r, U+1E5F->r, U+211C->r, U+2C64->r, U+00DF->s, U+015A->s, U+015B->s,  \
+		U+015C->s, U+015D->s, U+015E->s, U+015F->s, U+0160->s, U+0161->s, U+017F->s, U+0218->s, U+0219->s, U+023F->s, U+0282->s, U+02E2->s, U+1D74->s, U+1D8A->s, U+1DB3->s, U+1E60->s, U+1E61->s, U+1E62->s, U+1E63->s, U+1E64->s, U+1E65->s, U+1E66->s, U+1E67->s,  \
+		U+1E68->s, U+1E69->s, U+1E9B->s, U+0162->t, U+0163->t, U+0164->t, U+0165->t, U+0166->t, U+0167->t, U+01AB->t, U+01AC->t, U+01AD->t, U+01AE->t, U+021A->t, U+021B->t, U+0236->t, U+023E->t, U+0287->t, U+0288->t, U+1D1B->t, U+1D40->t, U+1D57->t, U+1D75->t,  \
+		U+1DB5->t, U+1E6A->t, U+1E6B->t, U+1E6C->t, U+1E6D->t, U+1E6E->t, U+1E6F->t, U+1E70->t, U+1E71->t, U+1E97->t, U+2C66->t, U+00D9->u, U+00DA->u, U+00DB->u, U+00DC->u, U+00F9->u, U+00FA->u, U+00FB->u, U+00FC->u, U+010316->u, U+0168->u, U+0169->u, U+016A->u,  \
+		U+016B->u, U+016C->u, U+016D->u, U+016E->u, U+016F->u, U+0170->u, U+0171->u, U+0172->u, U+0173->u, U+01AF->u, U+01B0->u, U+01D3->u, U+01D4->u, U+01D5->u, U+01D6->u, U+01D7->u, U+01D8->u, U+01D9->u, U+01DA->u, U+01DB->u, U+01DC->u, U+0214->u, U+0215->u,  \
+		U+0216->u, U+0217->u, U+0244->u, U+0289->u, U+1D1C->u, U+1D1D->u, U+1D1E->u, U+1D41->u, U+1D58->u, U+1D59->u, U+1D64->u, U+1D7E->u, U+1D99->u, U+1DB6->u, U+1DB8->u, U+1E72->u, U+1E73->u, U+1E74->u, U+1E75->u, U+1E76->u, U+1E77->u, U+1E78->u, U+1E79->u,  \
+		U+1E7A->u, U+1E7B->u, U+1EE4->u, U+1EE5->u, U+1EE6->u, U+1EE7->u, U+1EE8->u, U+1EE9->u, U+1EEA->u, U+1EEB->u, U+1EEC->u, U+1EED->u, U+1EEE->u, U+1EEF->u, U+1EF0->u, U+1EF1->u, U+01B2->v, U+0245->v, U+028B->v, U+028C->v, U+1D20->v, U+1D5B->v, U+1D65->v,  \
+		U+1D8C->v, U+1DB9->v, U+1DBA->v, U+1E7C->v, U+1E7D->v, U+1E7E->v, U+1E7F->v, U+2C74->v, U+0174->w, U+0175->w, U+028D->w, U+02B7->w, U+1D21->w, U+1D42->w, U+1E80->w, U+1E81->w, U+1E82->w, U+1E83->w, U+1E84->w, U+1E85->w, U+1E86->w, U+1E87->w, U+1E88->w,  \
+		U+1E89->w, U+1E98->w, U+02E3->x, U+1D8D->x, U+1E8A->x, U+1E8B->x, U+1E8C->x, U+1E8D->x, U+2093->x, U+00DD->y, U+00FD->y, U+00FF->y, U+0176->y, U+0177->y, U+0178->y, U+01B3->y, U+01B4->y, U+0232->y, U+0233->y, U+024E->y, U+024F->y, U+028E->y, U+028F->y,  \
+		U+02B8->y, U+1E8E->y, U+1E8F->y, U+1E99->y, U+1EF2->y, U+1EF3->y, U+1EF4->y, U+1EF5->y, U+1EF6->y, U+1EF7->y, U+1EF8->y, U+1EF9->y, U+0179->z, U+017A->z, U+017B->z, U+017C->z, U+017D->z, U+017E->z, U+01B5->z, U+01B6->z, U+0224->z, U+0225->z, U+0240->z,  \
+		U+0290->z, U+0291->z, U+1D22->z, U+1D76->z, U+1D8E->z, U+1DBB->z, U+1DBC->z, U+1DBD->z, U+1E90->z, U+1E91->z, U+1E92->z, U+1E93->z, U+1E94->z, U+1E95->z, U+2128->z, U+2C6B->z, U+2C6C->z, U+00C6->U+00E6, U+01E2->U+00E6, U+01E3->U+00E6, U+01FC->U+00E6,  \
+		U+01FD->U+00E6, U+1D01->U+00E6, U+1D02->U+00E6, U+1D2D->U+00E6, U+1D46->U+00E6, U+00E6, U+0622->U+0627, U+0623->U+0627, U+0624->U+0648, U+0625->U+0627, U+0626->U+064A, U+06C0->U+06D5, U+06C2->U+06C1, U+06D3->U+06D2, U+FB50->U+0671, U+FB51->U+0671, U+FB52->U+067B,  \
+		U+FB53->U+067B, U+FB54->U+067B, U+FB56->U+067E, U+FB57->U+067E, U+FB58->U+067E, U+FB5A->U+0680, U+FB5B->U+0680, U+FB5C->U+0680, U+FB5E->U+067A, U+FB5F->U+067A, U+FB60->U+067A, U+FB62->U+067F, U+FB63->U+067F, U+FB64->U+067F, U+FB66->U+0679, U+FB67->U+0679,  \
+		U+FB68->U+0679, U+FB6A->U+06A4, U+FB6B->U+06A4, U+FB6C->U+06A4, U+FB6E->U+06A6, U+FB6F->U+06A6, U+FB70->U+06A6, U+FB72->U+0684, U+FB73->U+0684, U+FB74->U+0684, U+FB76->U+0683, U+FB77->U+0683, U+FB78->U+0683, U+FB7A->U+0686, U+FB7B->U+0686, U+FB7C->U+0686,  \
+		U+FB7E->U+0687, U+FB7F->U+0687, U+FB80->U+0687, U+FB82->U+068D, U+FB83->U+068D, U+FB84->U+068C, U+FB85->U+068C, U+FB86->U+068E, U+FB87->U+068E, U+FB88->U+0688, U+FB89->U+0688, U+FB8A->U+0698, U+FB8B->U+0698, U+FB8C->U+0691, U+FB8D->U+0691, U+FB8E->U+06A9,  \
+		U+FB8F->U+06A9, U+FB90->U+06A9, U+FB92->U+06AF, U+FB93->U+06AF, U+FB94->U+06AF, U+FB96->U+06B3, U+FB97->U+06B3, U+FB98->U+06B3, U+FB9A->U+06B1, U+FB9B->U+06B1, U+FB9C->U+06B1, U+FB9E->U+06BA, U+FB9F->U+06BA, U+FBA0->U+06BB, U+FBA1->U+06BB, U+FBA2->U+06BB,  \
+		U+FBA4->U+06C0, U+FBA5->U+06C0, U+FBA6->U+06C1, U+FBA7->U+06C1, U+FBA8->U+06C1, U+FBAA->U+06BE, U+FBAB->U+06BE, U+FBAC->U+06BE, U+FBAE->U+06D2, U+FBAF->U+06D2, U+FBB0->U+06D3, U+FBB1->U+06D3, U+FBD3->U+06AD, U+FBD4->U+06AD, U+FBD5->U+06AD, U+FBD7->U+06C7,  \
+		U+FBD8->U+06C7, U+FBD9->U+06C6, U+FBDA->U+06C6, U+FBDB->U+06C8, U+FBDC->U+06C8, U+FBDD->U+0677, U+FBDE->U+06CB, U+FBDF->U+06CB, U+FBE0->U+06C5, U+FBE1->U+06C5, U+FBE2->U+06C9, U+FBE3->U+06C9, U+FBE4->U+06D0, U+FBE5->U+06D0, U+FBE6->U+06D0, U+FBE8->U+0649,  \
+		U+FBFC->U+06CC, U+FBFD->U+06CC, U+FBFE->U+06CC, U+0621, U+0627..U+063A, U+0641..U+064A, U+0660..U+0669, U+066E, U+066F, U+0671..U+06BF, U+06C1, U+06C3..U+06D2, U+06D5, U+06EE..U+06FC, U+06FF, U+0750..U+076D, U+FB55, U+FB59, U+FB5D, U+FB61, U+FB65, U+FB69,  \
+		U+FB6D, U+FB71, U+FB75, U+FB79, U+FB7D, U+FB81, U+FB91, U+FB95, U+FB99, U+FB9D, U+FBA3, U+FBA9, U+FBAD, U+FBD6, U+FBE7, U+FBE9, U+FBFF, U+0531..U+0556->U+0561..U+0586, U+0561..U+0586, U+0587, U+09DC->U+09A1, U+09DD->U+09A2, U+09DF->U+09AF, U+09F0->U+09AC,  \
+		U+09F1->U+09AC, U+0985..U+0990, U+0993..U+09B0, U+09B2, U+09B6..U+09B9, U+09CE, U+09E0, U+09E1, U+09E6..U+09EF, U+F900->U+8C48, U+F901->U+66F4, U+F902->U+8ECA, U+F903->U+8CC8, U+F904->U+6ED1, U+F905->U+4E32, U+F906->U+53E5, U+F907->U+9F9C, U+F908->U+9F9C,  \
+		U+F909->U+5951, U+F90A->U+91D1, U+F90B->U+5587, U+F90C->U+5948, U+F90D->U+61F6, U+F90E->U+7669, U+F90F->U+7F85, U+F910->U+863F, U+F911->U+87BA, U+F912->U+88F8, U+F913->U+908F, U+F914->U+6A02, U+F915->U+6D1B, U+F916->U+70D9, U+F917->U+73DE, U+F918->U+843D,  \
+		U+F919->U+916A, U+F91A->U+99F1, U+F91B->U+4E82, U+F91C->U+5375, U+F91D->U+6B04, U+F91E->U+721B, U+F91F->U+862D, U+F920->U+9E1E, U+F921->U+5D50, U+F922->U+6FEB, U+F923->U+85CD, U+F924->U+8964, U+F925->U+62C9, U+F926->U+81D8, U+F927->U+881F, U+F928->U+5ECA,  \
+		U+F929->U+6717, U+F92A->U+6D6A, U+F92B->U+72FC, U+F92C->U+90CE, U+F92D->U+4F86, U+F92E->U+51B7, U+F92F->U+52DE, U+F930->U+64C4, U+F931->U+6AD3, U+F932->U+7210, U+F933->U+76E7, U+F934->U+8001, U+F935->U+8606, U+F936->U+865C, U+F937->U+8DEF, U+F938->U+9732,  \
+		U+F939->U+9B6F, U+F93A->U+9DFA, U+F93B->U+788C, U+F93C->U+797F, U+F93D->U+7DA0, U+F93E->U+83C9, U+F93F->U+9304, U+F940->U+9E7F, U+F941->U+8AD6, U+F942->U+58DF, U+F943->U+5F04, U+F944->U+7C60, U+F945->U+807E, U+F946->U+7262, U+F947->U+78CA, U+F948->U+8CC2,  \
+		U+F949->U+96F7, U+F94A->U+58D8, U+F94B->U+5C62, U+F94C->U+6A13, U+F94D->U+6DDA, U+F94E->U+6F0F, U+F94F->U+7D2F, U+F950->U+7E37, U+F951->U+964B, U+F952->U+52D2, U+F953->U+808B, U+F954->U+51DC, U+F955->U+51CC, U+F956->U+7A1C, U+F957->U+7DBE, U+F958->U+83F1,  \
+		U+F959->U+9675, U+F95A->U+8B80, U+F95B->U+62CF, U+F95C->U+6A02, U+F95D->U+8AFE, U+F95E->U+4E39, U+F95F->U+5BE7, U+F960->U+6012, U+F961->U+7387, U+F962->U+7570, U+F963->U+5317, U+F964->U+78FB, U+F965->U+4FBF, U+F966->U+5FA9, U+F967->U+4E0D, U+F968->U+6CCC,  \
+		U+F969->U+6578, U+F96A->U+7D22, U+F96B->U+53C3, U+F96C->U+585E, U+F96D->U+7701, U+F96E->U+8449, U+F96F->U+8AAA, U+F970->U+6BBA, U+F971->U+8FB0, U+F972->U+6C88, U+F973->U+62FE, U+F974->U+82E5, U+F975->U+63A0, U+F976->U+7565, U+F977->U+4EAE, U+F978->U+5169,  \
+		U+F979->U+51C9, U+F97A->U+6881, U+F97B->U+7CE7, U+F97C->U+826F, U+F97D->U+8AD2, U+F97E->U+91CF, U+F97F->U+52F5, U+F980->U+5442, U+F981->U+5973, U+F982->U+5EEC, U+F983->U+65C5, U+F984->U+6FFE, U+F985->U+792A, U+F986->U+95AD, U+F987->U+9A6A, U+F988->U+9E97,  \
+		U+F989->U+9ECE, U+F98A->U+529B, U+F98B->U+66C6, U+F98C->U+6B77, U+F98D->U+8F62, U+F98E->U+5E74, U+F98F->U+6190, U+F990->U+6200, U+F991->U+649A, U+F992->U+6F23, U+F993->U+7149, U+F994->U+7489, U+F995->U+79CA, U+F996->U+7DF4, U+F997->U+806F, U+F998->U+8F26,  \
+		U+F999->U+84EE, U+F99A->U+9023, U+F99B->U+934A, U+F99C->U+5217, U+F99D->U+52A3, U+F99E->U+54BD, U+F99F->U+70C8, U+F9A0->U+88C2, U+F9A1->U+8AAA, U+F9A2->U+5EC9, U+F9A3->U+5FF5, U+F9A4->U+637B, U+F9A5->U+6BAE, U+F9A6->U+7C3E, U+F9A7->U+7375, U+F9A8->U+4EE4,  \
+		U+F9A9->U+56F9, U+F9AA->U+5BE7, U+F9AB->U+5DBA, U+F9AC->U+601C, U+F9AD->U+73B2, U+F9AE->U+7469, U+F9AF->U+7F9A, U+F9B0->U+8046, U+F9B1->U+9234, U+F9B2->U+96F6, U+F9B3->U+9748, U+F9B4->U+9818, U+F9B5->U+4F8B, U+F9B6->U+79AE, U+F9B7->U+91B4, U+F9B8->U+96B8,  \
+		U+F9B9->U+60E1, U+F9BA->U+4E86, U+F9BB->U+50DA, U+F9BC->U+5BEE, U+F9BD->U+5C3F, U+F9BE->U+6599, U+F9BF->U+6A02, U+F9C0->U+71CE, U+F9C1->U+7642, U+F9C2->U+84FC, U+F9C3->U+907C, U+F9C4->U+9F8D, U+F9C5->U+6688, U+F9C6->U+962E, U+F9C7->U+5289, U+F9C8->U+677B,  \
+		U+F9C9->U+67F3, U+F9CA->U+6D41, U+F9CB->U+6E9C, U+F9CC->U+7409, U+F9CD->U+7559, U+F9CE->U+786B, U+F9CF->U+7D10, U+F9D0->U+985E, U+F9D1->U+516D, U+F9D2->U+622E, U+F9D3->U+9678, U+F9D4->U+502B, U+F9D5->U+5D19, U+F9D6->U+6DEA, U+F9D7->U+8F2A, U+F9D8->U+5F8B,  \
+		U+F9D9->U+6144, U+F9DA->U+6817, U+F9DB->U+7387, U+F9DC->U+9686, U+F9DD->U+5229, U+F9DE->U+540F, U+F9DF->U+5C65, U+F9E0->U+6613, U+F9E1->U+674E, U+F9E2->U+68A8, U+F9E3->U+6CE5, U+F9E4->U+7406, U+F9E5->U+75E2, U+F9E6->U+7F79, U+F9E7->U+88CF, U+F9E8->U+88E1,  \
+		U+F9E9->U+91CC, U+F9EA->U+96E2, U+F9EB->U+533F, U+F9EC->U+6EBA, U+F9ED->U+541D, U+F9EE->U+71D0, U+F9EF->U+7498, U+F9F0->U+85FA, U+F9F1->U+96A3, U+F9F2->U+9C57, U+F9F3->U+9E9F, U+F9F4->U+6797, U+F9F5->U+6DCB, U+F9F6->U+81E8, U+F9F7->U+7ACB, U+F9F8->U+7B20,  \
+		U+F9F9->U+7C92, U+F9FA->U+72C0, U+F9FB->U+7099, U+F9FC->U+8B58, U+F9FD->U+4EC0, U+F9FE->U+8336, U+F9FF->U+523A, U+FA00->U+5207, U+FA01->U+5EA6, U+FA02->U+62D3, U+FA03->U+7CD6, U+FA04->U+5B85, U+FA05->U+6D1E, U+FA06->U+66B4, U+FA07->U+8F3B, U+FA08->U+884C,  \
+		U+FA09->U+964D, U+FA0A->U+898B, U+FA0B->U+5ED3, U+FA0C->U+5140, U+FA0D->U+55C0, U+FA10->U+585A, U+FA12->U+6674, U+FA15->U+51DE, U+FA16->U+732A, U+FA17->U+76CA, U+FA18->U+793C, U+FA19->U+795E, U+FA1A->U+7965, U+FA1B->U+798F, U+FA1C->U+9756, U+FA1D->U+7CBE,  \
+		U+FA1E->U+7FBD, U+FA20->U+8612, U+FA22->U+8AF8, U+FA25->U+9038, U+FA26->U+90FD, U+FA2A->U+98EF, U+FA2B->U+98FC, U+FA2C->U+9928, U+FA2D->U+9DB4, U+FA30->U+4FAE, U+FA31->U+50E7, U+FA32->U+514D, U+FA33->U+52C9, U+FA34->U+52E4, U+FA35->U+5351, U+FA36->U+559D,  \
+		U+FA37->U+5606, U+FA38->U+5668, U+FA39->U+5840, U+FA3A->U+58A8, U+FA3B->U+5C64, U+FA3C->U+5C6E, U+FA3D->U+6094, U+FA3E->U+6168, U+FA3F->U+618E, U+FA40->U+61F2, U+FA41->U+654F, U+FA42->U+65E2, U+FA43->U+6691, U+FA44->U+6885, U+FA45->U+6D77, U+FA46->U+6E1A,  \
+		U+FA47->U+6F22, U+FA48->U+716E, U+FA49->U+722B, U+FA4A->U+7422, U+FA4B->U+7891, U+FA4C->U+793E, U+FA4D->U+7949, U+FA4E->U+7948, U+FA4F->U+7950, U+FA50->U+7956, U+FA51->U+795D, U+FA52->U+798D, U+FA53->U+798E, U+FA54->U+7A40, U+FA55->U+7A81, U+FA56->U+7BC0,  \
+		U+FA57->U+7DF4, U+FA58->U+7E09, U+FA59->U+7E41, U+FA5A->U+7F72, U+FA5B->U+8005, U+FA5C->U+81ED, U+FA5D->U+8279, U+FA5E->U+8279, U+FA5F->U+8457, U+FA60->U+8910, U+FA61->U+8996, U+FA62->U+8B01, U+FA63->U+8B39, U+FA64->U+8CD3, U+FA65->U+8D08, U+FA66->U+8FB6,  \
+		U+FA67->U+9038, U+FA68->U+96E3, U+FA69->U+97FF, U+FA6A->U+983B, U+FA70->U+4E26, U+FA71->U+51B5, U+FA72->U+5168, U+FA73->U+4F80, U+FA74->U+5145, U+FA75->U+5180, U+FA76->U+52C7, U+FA77->U+52FA, U+FA78->U+559D, U+FA79->U+5555, U+FA7A->U+5599, U+FA7B->U+55E2,  \
+		U+FA7C->U+585A, U+FA7D->U+58B3, U+FA7E->U+5944, U+FA7F->U+5954, U+FA80->U+5A62, U+FA81->U+5B28, U+FA82->U+5ED2, U+FA83->U+5ED9, U+FA84->U+5F69, U+FA85->U+5FAD, U+FA86->U+60D8, U+FA87->U+614E, U+FA88->U+6108, U+FA89->U+618E, U+FA8A->U+6160, U+FA8B->U+61F2,  \
+		U+FA8C->U+6234, U+FA8D->U+63C4, U+FA8E->U+641C, U+FA8F->U+6452, U+FA90->U+6556, U+FA91->U+6674, U+FA92->U+6717, U+FA93->U+671B, U+FA94->U+6756, U+FA95->U+6B79, U+FA96->U+6BBA, U+FA97->U+6D41, U+FA98->U+6EDB, U+FA99->U+6ECB, U+FA9A->U+6F22, U+FA9B->U+701E,  \
+		U+FA9C->U+716E, U+FA9D->U+77A7, U+FA9E->U+7235, U+FA9F->U+72AF, U+FAA0->U+732A, U+FAA1->U+7471, U+FAA2->U+7506, U+FAA3->U+753B, U+FAA4->U+761D, U+FAA5->U+761F, U+FAA6->U+76CA, U+FAA7->U+76DB, U+FAA8->U+76F4, U+FAA9->U+774A, U+FAAA->U+7740, U+FAAB->U+78CC,  \
+		U+FAAC->U+7AB1, U+FAAD->U+7BC0, U+FAAE->U+7C7B, U+FAAF->U+7D5B, U+FAB0->U+7DF4, U+FAB1->U+7F3E, U+FAB2->U+8005, U+FAB3->U+8352, U+FAB4->U+83EF, U+FAB5->U+8779, U+FAB6->U+8941, U+FAB7->U+8986, U+FAB8->U+8996, U+FAB9->U+8ABF, U+FABA->U+8AF8, U+FABB->U+8ACB,  \
+		U+FABC->U+8B01, U+FABD->U+8AFE, U+FABE->U+8AED, U+FABF->U+8B39, U+FAC0->U+8B8A, U+FAC1->U+8D08, U+FAC2->U+8F38, U+FAC3->U+9072, U+FAC4->U+9199, U+FAC5->U+9276, U+FAC6->U+967C, U+FAC7->U+96E3, U+FAC8->U+9756, U+FAC9->U+97DB, U+FACA->U+97FF, U+FACB->U+980B,  \
+		U+FACC->U+983B, U+FACD->U+9B12, U+FACE->U+9F9C, U+FACF->U+2284A, U+FAD0->U+22844, U+FAD1->U+233D5, U+FAD2->U+3B9D, U+FAD3->U+4018, U+FAD4->U+4039, U+FAD5->U+25249, U+FAD6->U+25CD0, U+FAD7->U+27ED3, U+FAD8->U+9F43, U+FAD9->U+9F8E, U+2F800->U+4E3D, U+2F801->U+4E38,  \
+		U+2F802->U+4E41, U+2F803->U+20122, U+2F804->U+4F60, U+2F805->U+4FAE, U+2F806->U+4FBB, U+2F807->U+5002, U+2F808->U+507A, U+2F809->U+5099, U+2F80A->U+50E7, U+2F80B->U+50CF, U+2F80C->U+349E, U+2F80D->U+2063A, U+2F80E->U+514D, U+2F80F->U+5154, U+2F810->U+5164,  \
+		U+2F811->U+5177, U+2F812->U+2051C, U+2F813->U+34B9, U+2F814->U+5167, U+2F815->U+518D, U+2F816->U+2054B, U+2F817->U+5197, U+2F818->U+51A4, U+2F819->U+4ECC, U+2F81A->U+51AC, U+2F81B->U+51B5, U+2F81C->U+291DF, U+2F81D->U+51F5, U+2F81E->U+5203, U+2F81F->U+34DF,  \
+		U+2F820->U+523B, U+2F821->U+5246, U+2F822->U+5272, U+2F823->U+5277, U+2F824->U+3515, U+2F825->U+52C7, U+2F826->U+52C9, U+2F827->U+52E4, U+2F828->U+52FA, U+2F829->U+5305, U+2F82A->U+5306, U+2F82B->U+5317, U+2F82C->U+5349, U+2F82D->U+5351, U+2F82E->U+535A,  \
+		U+2F82F->U+5373, U+2F830->U+537D, U+2F831->U+537F, U+2F832->U+537F, U+2F833->U+537F, U+2F834->U+20A2C, U+2F835->U+7070, U+2F836->U+53CA, U+2F837->U+53DF, U+2F838->U+20B63, U+2F839->U+53EB, U+2F83A->U+53F1, U+2F83B->U+5406, U+2F83C->U+549E, U+2F83D->U+5438,  \
+		U+2F83E->U+5448, U+2F83F->U+5468, U+2F840->U+54A2, U+2F841->U+54F6, U+2F842->U+5510, U+2F843->U+5553, U+2F844->U+5563, U+2F845->U+5584, U+2F846->U+5584, U+2F847->U+5599, U+2F848->U+55AB, U+2F849->U+55B3, U+2F84A->U+55C2, U+2F84B->U+5716, U+2F84C->U+5606,  \
+		U+2F84D->U+5717, U+2F84E->U+5651, U+2F84F->U+5674, U+2F850->U+5207, U+2F851->U+58EE, U+2F852->U+57CE, U+2F853->U+57F4, U+2F854->U+580D, U+2F855->U+578B, U+2F856->U+5832, U+2F857->U+5831, U+2F858->U+58AC, U+2F859->U+214E4, U+2F85A->U+58F2, U+2F85B->U+58F7,  \
+		U+2F85C->U+5906, U+2F85D->U+591A, U+2F85E->U+5922, U+2F85F->U+5962, U+2F860->U+216A8, U+2F861->U+216EA, U+2F862->U+59EC, U+2F863->U+5A1B, U+2F864->U+5A27, U+2F865->U+59D8, U+2F866->U+5A66, U+2F867->U+36EE, U+2F868->U+36FC, U+2F869->U+5B08, U+2F86A->U+5B3E,  \
+		U+2F86B->U+5B3E, U+2F86C->U+219C8, U+2F86D->U+5BC3, U+2F86E->U+5BD8, U+2F86F->U+5BE7, U+2F870->U+5BF3, U+2F871->U+21B18, U+2F872->U+5BFF, U+2F873->U+5C06, U+2F874->U+5F53, U+2F875->U+5C22, U+2F876->U+3781, U+2F877->U+5C60, U+2F878->U+5C6E, U+2F879->U+5CC0,  \
+		U+2F87A->U+5C8D, U+2F87B->U+21DE4, U+2F87C->U+5D43, U+2F87D->U+21DE6, U+2F87E->U+5D6E, U+2F87F->U+5D6B, U+2F880->U+5D7C, U+2F881->U+5DE1, U+2F882->U+5DE2, U+2F883->U+382F, U+2F884->U+5DFD, U+2F885->U+5E28, U+2F886->U+5E3D, U+2F887->U+5E69, U+2F888->U+3862,  \
+		U+2F889->U+22183, U+2F88A->U+387C, U+2F88B->U+5EB0, U+2F88C->U+5EB3, U+2F88D->U+5EB6, U+2F88E->U+5ECA, U+2F88F->U+2A392, U+2F890->U+5EFE, U+2F891->U+22331, U+2F892->U+22331, U+2F893->U+8201, U+2F894->U+5F22, U+2F895->U+5F22, U+2F896->U+38C7, U+2F897->U+232B8,  \
+		U+2F898->U+261DA, U+2F899->U+5F62, U+2F89A->U+5F6B, U+2F89B->U+38E3, U+2F89C->U+5F9A, U+2F89D->U+5FCD, U+2F89E->U+5FD7, U+2F89F->U+5FF9, U+2F8A0->U+6081, U+2F8A1->U+393A, U+2F8A2->U+391C, U+2F8A3->U+6094, U+2F8A4->U+226D4, U+2F8A5->U+60C7, U+2F8A6->U+6148,  \
+		U+2F8A7->U+614C, U+2F8A8->U+614E, U+2F8A9->U+614C, U+2F8AA->U+617A, U+2F8AB->U+618E, U+2F8AC->U+61B2, U+2F8AD->U+61A4, U+2F8AE->U+61AF, U+2F8AF->U+61DE, U+2F8B0->U+61F2, U+2F8B1->U+61F6, U+2F8B2->U+6210, U+2F8B3->U+621B, U+2F8B4->U+625D, U+2F8B5->U+62B1,  \
+		U+2F8B6->U+62D4, U+2F8B7->U+6350, U+2F8B8->U+22B0C, U+2F8B9->U+633D, U+2F8BA->U+62FC, U+2F8BB->U+6368, U+2F8BC->U+6383, U+2F8BD->U+63E4, U+2F8BE->U+22BF1, U+2F8BF->U+6422, U+2F8C0->U+63C5, U+2F8C1->U+63A9, U+2F8C2->U+3A2E, U+2F8C3->U+6469, U+2F8C4->U+647E,  \
+		U+2F8C5->U+649D, U+2F8C6->U+6477, U+2F8C7->U+3A6C, U+2F8C8->U+654F, U+2F8C9->U+656C, U+2F8CA->U+2300A, U+2F8CB->U+65E3, U+2F8CC->U+66F8, U+2F8CD->U+6649, U+2F8CE->U+3B19, U+2F8CF->U+6691, U+2F8D0->U+3B08, U+2F8D1->U+3AE4, U+2F8D2->U+5192, U+2F8D3->U+5195,  \
+		U+2F8D4->U+6700, U+2F8D5->U+669C, U+2F8D6->U+80AD, U+2F8D7->U+43D9, U+2F8D8->U+6717, U+2F8D9->U+671B, U+2F8DA->U+6721, U+2F8DB->U+675E, U+2F8DC->U+6753, U+2F8DD->U+233C3, U+2F8DE->U+3B49, U+2F8DF->U+67FA, U+2F8E0->U+6785, U+2F8E1->U+6852, U+2F8E2->U+6885,  \
+		U+2F8E3->U+2346D, U+2F8E4->U+688E, U+2F8E5->U+681F, U+2F8E6->U+6914, U+2F8E7->U+3B9D, U+2F8E8->U+6942, U+2F8E9->U+69A3, U+2F8EA->U+69EA, U+2F8EB->U+6AA8, U+2F8EC->U+236A3, U+2F8ED->U+6ADB, U+2F8EE->U+3C18, U+2F8EF->U+6B21, U+2F8F0->U+238A7, U+2F8F1->U+6B54,  \
+		U+2F8F2->U+3C4E, U+2F8F3->U+6B72, U+2F8F4->U+6B9F, U+2F8F5->U+6BBA, U+2F8F6->U+6BBB, U+2F8F7->U+23A8D, U+2F8F8->U+21D0B, U+2F8F9->U+23AFA, U+2F8FA->U+6C4E, U+2F8FB->U+23CBC, U+2F8FC->U+6CBF, U+2F8FD->U+6CCD, U+2F8FE->U+6C67, U+2F8FF->U+6D16, U+2F900->U+6D3E,  \
+		U+2F901->U+6D77, U+2F902->U+6D41, U+2F903->U+6D69, U+2F904->U+6D78, U+2F905->U+6D85, U+2F906->U+23D1E, U+2F907->U+6D34, U+2F908->U+6E2F, U+2F909->U+6E6E, U+2F90A->U+3D33, U+2F90B->U+6ECB, U+2F90C->U+6EC7, U+2F90D->U+23ED1, U+2F90E->U+6DF9, U+2F90F->U+6F6E,  \
+		U+2F910->U+23F5E, U+2F911->U+23F8E, U+2F912->U+6FC6, U+2F913->U+7039, U+2F914->U+701E, U+2F915->U+701B, U+2F916->U+3D96, U+2F917->U+704A, U+2F918->U+707D, U+2F919->U+7077, U+2F91A->U+70AD, U+2F91B->U+20525, U+2F91C->U+7145, U+2F91D->U+24263, U+2F91E->U+719C,  \
+		U+2F91F->U+243AB, U+2F920->U+7228, U+2F921->U+7235, U+2F922->U+7250, U+2F923->U+24608, U+2F924->U+7280, U+2F925->U+7295, U+2F926->U+24735, U+2F927->U+24814, U+2F928->U+737A, U+2F929->U+738B, U+2F92A->U+3EAC, U+2F92B->U+73A5, U+2F92C->U+3EB8, U+2F92D->U+3EB8,  \
+		U+2F92E->U+7447, U+2F92F->U+745C, U+2F930->U+7471, U+2F931->U+7485, U+2F932->U+74CA, U+2F933->U+3F1B, U+2F934->U+7524, U+2F935->U+24C36, U+2F936->U+753E, U+2F937->U+24C92, U+2F938->U+7570, U+2F939->U+2219F, U+2F93A->U+7610, U+2F93B->U+24FA1, U+2F93C->U+24FB8,  \
+		U+2F93D->U+25044, U+2F93E->U+3FFC, U+2F93F->U+4008, U+2F940->U+76F4, U+2F941->U+250F3, U+2F942->U+250F2, U+2F943->U+25119, U+2F944->U+25133, U+2F945->U+771E, U+2F946->U+771F, U+2F947->U+771F, U+2F948->U+774A, U+2F949->U+4039, U+2F94A->U+778B, U+2F94B->U+4046,  \
+		U+2F94C->U+4096, U+2F94D->U+2541D, U+2F94E->U+784E, U+2F94F->U+788C, U+2F950->U+78CC, U+2F951->U+40E3, U+2F952->U+25626, U+2F953->U+7956, U+2F954->U+2569A, U+2F955->U+256C5, U+2F956->U+798F, U+2F957->U+79EB, U+2F958->U+412F, U+2F959->U+7A40, U+2F95A->U+7A4A,  \
+		U+2F95B->U+7A4F, U+2F95C->U+2597C, U+2F95D->U+25AA7, U+2F95E->U+25AA7, U+2F95F->U+7AEE, U+2F960->U+4202, U+2F961->U+25BAB, U+2F962->U+7BC6, U+2F963->U+7BC9, U+2F964->U+4227, U+2F965->U+25C80, U+2F966->U+7CD2, U+2F967->U+42A0, U+2F968->U+7CE8, U+2F969->U+7CE3,  \
+		U+2F96A->U+7D00, U+2F96B->U+25F86, U+2F96C->U+7D63, U+2F96D->U+4301, U+2F96E->U+7DC7, U+2F96F->U+7E02, U+2F970->U+7E45, U+2F971->U+4334, U+2F972->U+26228, U+2F973->U+26247, U+2F974->U+4359, U+2F975->U+262D9, U+2F976->U+7F7A, U+2F977->U+2633E, U+2F978->U+7F95,  \
+		U+2F979->U+7FFA, U+2F97A->U+8005, U+2F97B->U+264DA, U+2F97C->U+26523, U+2F97D->U+8060, U+2F97E->U+265A8, U+2F97F->U+8070, U+2F980->U+2335F, U+2F981->U+43D5, U+2F982->U+80B2, U+2F983->U+8103, U+2F984->U+440B, U+2F985->U+813E, U+2F986->U+5AB5, U+2F987->U+267A7,  \
+		U+2F988->U+267B5, U+2F989->U+23393, U+2F98A->U+2339C, U+2F98B->U+8201, U+2F98C->U+8204, U+2F98D->U+8F9E, U+2F98E->U+446B, U+2F98F->U+8291, U+2F990->U+828B, U+2F991->U+829D, U+2F992->U+52B3, U+2F993->U+82B1, U+2F994->U+82B3, U+2F995->U+82BD, U+2F996->U+82E6,  \
+		U+2F997->U+26B3C, U+2F998->U+82E5, U+2F999->U+831D, U+2F99A->U+8363, U+2F99B->U+83AD, U+2F99C->U+8323, U+2F99D->U+83BD, U+2F99E->U+83E7, U+2F99F->U+8457, U+2F9A0->U+8353, U+2F9A1->U+83CA, U+2F9A2->U+83CC, U+2F9A3->U+83DC, U+2F9A4->U+26C36, U+2F9A5->U+26D6B,  \
+		U+2F9A6->U+26CD5, U+2F9A7->U+452B, U+2F9A8->U+84F1, U+2F9A9->U+84F3, U+2F9AA->U+8516, U+2F9AB->U+273CA, U+2F9AC->U+8564, U+2F9AD->U+26F2C, U+2F9AE->U+455D, U+2F9AF->U+4561, U+2F9B0->U+26FB1, U+2F9B1->U+270D2, U+2F9B2->U+456B, U+2F9B3->U+8650, U+2F9B4->U+865C,  \
+		U+2F9B5->U+8667, U+2F9B6->U+8669, U+2F9B7->U+86A9, U+2F9B8->U+8688, U+2F9B9->U+870E, U+2F9BA->U+86E2, U+2F9BB->U+8779, U+2F9BC->U+8728, U+2F9BD->U+876B, U+2F9BE->U+8786, U+2F9BF->U+45D7, U+2F9C0->U+87E1, U+2F9C1->U+8801, U+2F9C2->U+45F9, U+2F9C3->U+8860,  \
+		U+2F9C4->U+8863, U+2F9C5->U+27667, U+2F9C6->U+88D7, U+2F9C7->U+88DE, U+2F9C8->U+4635, U+2F9C9->U+88FA, U+2F9CA->U+34BB, U+2F9CB->U+278AE, U+2F9CC->U+27966, U+2F9CD->U+46BE, U+2F9CE->U+46C7, U+2F9CF->U+8AA0, U+2F9D0->U+8AED, U+2F9D1->U+8B8A, U+2F9D2->U+8C55,  \
+		U+2F9D3->U+27CA8, U+2F9D4->U+8CAB, U+2F9D5->U+8CC1, U+2F9D6->U+8D1B, U+2F9D7->U+8D77, U+2F9D8->U+27F2F, U+2F9D9->U+20804, U+2F9DA->U+8DCB, U+2F9DB->U+8DBC, U+2F9DC->U+8DF0, U+2F9DD->U+208DE, U+2F9DE->U+8ED4, U+2F9DF->U+8F38, U+2F9E0->U+285D2, U+2F9E1->U+285ED,  \
+		U+2F9E2->U+9094, U+2F9E3->U+90F1, U+2F9E4->U+9111, U+2F9E5->U+2872E, U+2F9E6->U+911B, U+2F9E7->U+9238, U+2F9E8->U+92D7, U+2F9E9->U+92D8, U+2F9EA->U+927C, U+2F9EB->U+93F9, U+2F9EC->U+9415, U+2F9ED->U+28BFA, U+2F9EE->U+958B, U+2F9EF->U+4995, U+2F9F0->U+95B7,  \
+		U+2F9F1->U+28D77, U+2F9F2->U+49E6, U+2F9F3->U+96C3, U+2F9F4->U+5DB2, U+2F9F5->U+9723, U+2F9F6->U+29145, U+2F9F7->U+2921A, U+2F9F8->U+4A6E, U+2F9F9->U+4A76, U+2F9FA->U+97E0, U+2F9FB->U+2940A, U+2F9FC->U+4AB2, U+2F9FD->U+29496, U+2F9FE->U+980B, U+2F9FF->U+980B,  \
+		U+2FA00->U+9829, U+2FA01->U+295B6, U+2FA02->U+98E2, U+2FA03->U+4B33, U+2FA04->U+9929, U+2FA05->U+99A7, U+2FA06->U+99C2, U+2FA07->U+99FE, U+2FA08->U+4BCE, U+2FA09->U+29B30, U+2FA0A->U+9B12, U+2FA0B->U+9C40, U+2FA0C->U+9CFD, U+2FA0D->U+4CCE, U+2FA0E->U+4CED,  \
+		U+2FA0F->U+9D67, U+2FA10->U+2A0CE, U+2FA11->U+4CF8, U+2FA12->U+2A105, U+2FA13->U+2A20E, U+2FA14->U+2A291, U+2FA15->U+9EBB, U+2FA16->U+4D56, U+2FA17->U+9EF9, U+2FA18->U+9EFE, U+2FA19->U+9F05, U+2FA1A->U+9F0F, U+2FA1B->U+9F16, U+2FA1C->U+9F3B, U+2FA1D->U+2A600,  \
+		U+2F00->U+4E00, U+2F01->U+4E28, U+2F02->U+4E36, U+2F03->U+4E3F, U+2F04->U+4E59, U+2F05->U+4E85, U+2F06->U+4E8C, U+2F07->U+4EA0, U+2F08->U+4EBA, U+2F09->U+513F, U+2F0A->U+5165, U+2F0B->U+516B, U+2F0C->U+5182, U+2F0D->U+5196, U+2F0E->U+51AB, U+2F0F->U+51E0,  \
+		U+2F10->U+51F5, U+2F11->U+5200, U+2F12->U+529B, U+2F13->U+52F9, U+2F14->U+5315, U+2F15->U+531A, U+2F16->U+5338, U+2F17->U+5341, U+2F18->U+535C, U+2F19->U+5369, U+2F1A->U+5382, U+2F1B->U+53B6, U+2F1C->U+53C8, U+2F1D->U+53E3, U+2F1E->U+56D7, U+2F1F->U+571F,  \
+		U+2F20->U+58EB, U+2F21->U+5902, U+2F22->U+590A, U+2F23->U+5915, U+2F24->U+5927, U+2F25->U+5973, U+2F26->U+5B50, U+2F27->U+5B80, U+2F28->U+5BF8, U+2F29->U+5C0F, U+2F2A->U+5C22, U+2F2B->U+5C38, U+2F2C->U+5C6E, U+2F2D->U+5C71, U+2F2E->U+5DDB, U+2F2F->U+5DE5,  \
+		U+2F30->U+5DF1, U+2F31->U+5DFE, U+2F32->U+5E72, U+2F33->U+5E7A, U+2F34->U+5E7F, U+2F35->U+5EF4, U+2F36->U+5EFE, U+2F37->U+5F0B, U+2F38->U+5F13, U+2F39->U+5F50, U+2F3A->U+5F61, U+2F3B->U+5F73, U+2F3C->U+5FC3, U+2F3D->U+6208, U+2F3E->U+6236, U+2F3F->U+624B,  \
+		U+2F40->U+652F, U+2F41->U+6534, U+2F42->U+6587, U+2F43->U+6597, U+2F44->U+65A4, U+2F45->U+65B9, U+2F46->U+65E0, U+2F47->U+65E5, U+2F48->U+66F0, U+2F49->U+6708, U+2F4A->U+6728, U+2F4B->U+6B20, U+2F4C->U+6B62, U+2F4D->U+6B79, U+2F4E->U+6BB3, U+2F4F->U+6BCB,  \
+		U+2F50->U+6BD4, U+2F51->U+6BDB, U+2F52->U+6C0F, U+2F53->U+6C14, U+2F54->U+6C34, U+2F55->U+706B, U+2F56->U+722A, U+2F57->U+7236, U+2F58->U+723B, U+2F59->U+723F, U+2F5A->U+7247, U+2F5B->U+7259, U+2F5C->U+725B, U+2F5D->U+72AC, U+2F5E->U+7384, U+2F5F->U+7389,  \
+		U+2F60->U+74DC, U+2F61->U+74E6, U+2F62->U+7518, U+2F63->U+751F, U+2F64->U+7528, U+2F65->U+7530, U+2F66->U+758B, U+2F67->U+7592, U+2F68->U+7676, U+2F69->U+767D, U+2F6A->U+76AE, U+2F6B->U+76BF, U+2F6C->U+76EE, U+2F6D->U+77DB, U+2F6E->U+77E2, U+2F6F->U+77F3,  \
+		U+2F70->U+793A, U+2F71->U+79B8, U+2F72->U+79BE, U+2F73->U+7A74, U+2F74->U+7ACB, U+2F75->U+7AF9, U+2F76->U+7C73, U+2F77->U+7CF8, U+2F78->U+7F36, U+2F79->U+7F51, U+2F7A->U+7F8A, U+2F7B->U+7FBD, U+2F7C->U+8001, U+2F7D->U+800C, U+2F7E->U+8012, U+2F7F->U+8033,  \
+		U+2F80->U+807F, U+2F81->U+8089, U+2F82->U+81E3, U+2F83->U+81EA, U+2F84->U+81F3, U+2F85->U+81FC, U+2F86->U+820C, U+2F87->U+821B, U+2F88->U+821F, U+2F89->U+826E, U+2F8A->U+8272, U+2F8B->U+8278, U+2F8C->U+864D, U+2F8D->U+866B, U+2F8E->U+8840, U+2F8F->U+884C,  \
+		U+2F90->U+8863, U+2F91->U+897E, U+2F92->U+898B, U+2F93->U+89D2, U+2F94->U+8A00, U+2F95->U+8C37, U+2F96->U+8C46, U+2F97->U+8C55, U+2F98->U+8C78, U+2F99->U+8C9D, U+2F9A->U+8D64, U+2F9B->U+8D70, U+2F9C->U+8DB3, U+2F9D->U+8EAB, U+2F9E->U+8ECA, U+2F9F->U+8F9B,  \
+		U+2FA0->U+8FB0, U+2FA1->U+8FB5, U+2FA2->U+9091, U+2FA3->U+9149, U+2FA4->U+91C6, U+2FA5->U+91CC, U+2FA6->U+91D1, U+2FA7->U+9577, U+2FA8->U+9580, U+2FA9->U+961C, U+2FAA->U+96B6, U+2FAB->U+96B9, U+2FAC->U+96E8, U+2FAD->U+9751, U+2FAE->U+975E, U+2FAF->U+9762,  \
+		U+2FB0->U+9769, U+2FB1->U+97CB, U+2FB2->U+97ED, U+2FB3->U+97F3, U+2FB4->U+9801, U+2FB5->U+98A8, U+2FB6->U+98DB, U+2FB7->U+98DF, U+2FB8->U+9996, U+2FB9->U+9999, U+2FBA->U+99AC, U+2FBB->U+9AA8, U+2FBC->U+9AD8, U+2FBD->U+9ADF, U+2FBE->U+9B25, U+2FBF->U+9B2F,  \
+		U+2FC0->U+9B32, U+2FC1->U+9B3C, U+2FC2->U+9B5A, U+2FC3->U+9CE5, U+2FC4->U+9E75, U+2FC5->U+9E7F, U+2FC6->U+9EA5, U+2FC7->U+9EBB, U+2FC8->U+9EC3, U+2FC9->U+9ECD, U+2FCA->U+9ED1, U+2FCB->U+9EF9, U+2FCC->U+9EFD, U+2FCD->U+9F0E, U+2FCE->U+9F13, U+2FCF->U+9F20,  \
+		U+2FD0->U+9F3B, U+2FD1->U+9F4A, U+2FD2->U+9F52, U+2FD3->U+9F8D, U+2FD4->U+9F9C, U+2FD5->U+9FA0, U+3042->U+3041, U+3044->U+3043, U+3046->U+3045, U+3048->U+3047, U+304A->U+3049, U+304C->U+304B, U+304E->U+304D, U+3050->U+304F, U+3052->U+3051, U+3054->U+3053,  \
+		U+3056->U+3055, U+3058->U+3057, U+305A->U+3059, U+305C->U+305B, U+305E->U+305D, U+3060->U+305F, U+3062->U+3061, U+3064->U+3063, U+3065->U+3063, U+3067->U+3066, U+3069->U+3068, U+3070->U+306F, U+3071->U+306F, U+3073->U+3072, U+3074->U+3072, U+3076->U+3075,  \
+		U+3077->U+3075, U+3079->U+3078, U+307A->U+3078, U+307C->U+307B, U+307D->U+307B, U+3084->U+3083, U+3086->U+3085, U+3088->U+3087, U+308F->U+308E, U+3094->U+3046, U+3095->U+304B, U+3096->U+3051, U+30A2->U+30A1, U+30A4->U+30A3, U+30A6->U+30A5, U+30A8->U+30A7,  \
+		U+30AA->U+30A9, U+30AC->U+30AB, U+30AE->U+30AD, U+30B0->U+30AF, U+30B2->U+30B1, U+30B4->U+30B3, U+30B6->U+30B5, U+30B8->U+30B7, U+30BA->U+30B9, U+30BC->U+30BB, U+30BE->U+30BD, U+30C0->U+30BF, U+30C2->U+30C1, U+30C5->U+30C4, U+30C7->U+30C6, U+30C9->U+30C8,  \
+		U+30D0->U+30CF, U+30D1->U+30CF, U+30D3->U+30D2, U+30D4->U+30D2, U+30D6->U+30D5, U+30D7->U+30D5, U+30D9->U+30D8, U+30DA->U+30D8, U+30DC->U+30DB, U+30DD->U+30DB, U+30E4->U+30E3, U+30E6->U+30E5, U+30E8->U+30E7, U+30EF->U+30EE, U+30F4->U+30A6, U+30AB->U+30F5,  \
+		U+30B1->U+30F6, U+30F7->U+30EF, U+30F8->U+30F0, U+30F9->U+30F1, U+30FA->U+30F2, U+30AF->U+31F0, U+30B7->U+31F1, U+30B9->U+31F2, U+30C8->U+31F3, U+30CC->U+31F4, U+30CF->U+31F5, U+30D2->U+31F6, U+30D5->U+31F7, U+30D8->U+31F8, U+30DB->U+31F9, U+30E0->U+31FA,  \
+		U+30E9->U+31FB, U+30EA->U+31FC, U+30EB->U+31FD, U+30EC->U+31FE, U+30ED->U+31FF, U+FF66->U+30F2, U+FF67->U+30A1, U+FF68->U+30A3, U+FF69->U+30A5, U+FF6A->U+30A7, U+FF6B->U+30A9, U+FF6C->U+30E3, U+FF6D->U+30E5, U+FF6E->U+30E7, U+FF6F->U+30C3, U+FF71->U+30A1,  \
+		U+FF72->U+30A3, U+FF73->U+30A5, U+FF74->U+30A7, U+FF75->U+30A9, U+FF76->U+30AB, U+FF77->U+30AD, U+FF78->U+30AF, U+FF79->U+30B1, U+FF7A->U+30B3, U+FF7B->U+30B5, U+FF7C->U+30B7, U+FF7D->U+30B9, U+FF7E->U+30BB, U+FF7F->U+30BD, U+FF80->U+30BF, U+FF81->U+30C1,  \
+		U+FF82->U+30C3, U+FF83->U+30C6, U+FF84->U+30C8, U+FF85->U+30CA, U+FF86->U+30CB, U+FF87->U+30CC, U+FF88->U+30CD, U+FF89->U+30CE, U+FF8A->U+30CF, U+FF8B->U+30D2, U+FF8C->U+30D5, U+FF8D->U+30D8, U+FF8E->U+30DB, U+FF8F->U+30DE, U+FF90->U+30DF, U+FF91->U+30E0,  \
+		U+FF92->U+30E1, U+FF93->U+30E2, U+FF94->U+30E3, U+FF95->U+30E5, U+FF96->U+30E7, U+FF97->U+30E9, U+FF98->U+30EA, U+FF99->U+30EB, U+FF9A->U+30EC, U+FF9B->U+30ED, U+FF9C->U+30EF, U+FF9D->U+30F3, U+FFA0->U+3164, U+FFA1->U+3131, U+FFA2->U+3132, U+FFA3->U+3133,  \
+		U+FFA4->U+3134, U+FFA5->U+3135, U+FFA6->U+3136, U+FFA7->U+3137, U+FFA8->U+3138, U+FFA9->U+3139, U+FFAA->U+313A, U+FFAB->U+313B, U+FFAC->U+313C, U+FFAD->U+313D, U+FFAE->U+313E, U+FFAF->U+313F, U+FFB0->U+3140, U+FFB1->U+3141, U+FFB2->U+3142, U+FFB3->U+3143,  \
+		U+FFB4->U+3144, U+FFB5->U+3145, U+FFB6->U+3146, U+FFB7->U+3147, U+FFB8->U+3148, U+FFB9->U+3149, U+FFBA->U+314A, U+FFBB->U+314B, U+FFBC->U+314C, U+FFBD->U+314D, U+FFBE->U+314E, U+FFC2->U+314F, U+FFC3->U+3150, U+FFC4->U+3151, U+FFC5->U+3152, U+FFC6->U+3153,  \
+		U+FFC7->U+3154, U+FFCA->U+3155, U+FFCB->U+3156, U+FFCC->U+3157, U+FFCD->U+3158, U+FFCE->U+3159, U+FFCF->U+315A, U+FFD2->U+315B, U+FFD3->U+315C, U+FFD4->U+315D, U+FFD5->U+315E, U+FFD6->U+315F, U+FFD7->U+3160, U+FFDA->U+3161, U+FFDB->U+3162, U+FFDC->U+3163,  \
+		U+3131->U+1100, U+3132->U+1101, U+3133->U+11AA, U+3134->U+1102, U+3135->U+11AC, U+3136->U+11AD, U+3137->U+1103, U+3138->U+1104, U+3139->U+1105, U+313A->U+11B0, U+313B->U+11B1, U+313C->U+11B2, U+313D->U+11B3, U+313E->U+11B4, U+313F->U+11B5, U+3140->U+111A,  \
+		U+3141->U+1106, U+3142->U+1107, U+3143->U+1108, U+3144->U+1121, U+3145->U+1109, U+3146->U+110A, U+3147->U+110B, U+3148->U+110C, U+3149->U+110D, U+314A->U+110E, U+314B->U+110F, U+314C->U+1110, U+314D->U+1111, U+314E->U+1112, U+314F->U+1161, U+3150->U+1162,  \
+		U+3151->U+1163, U+3152->U+1164, U+3153->U+1165, U+3154->U+1166, U+3155->U+1167, U+3156->U+1168, U+3157->U+1169, U+3158->U+116A, U+3159->U+116B, U+315A->U+116C, U+315B->U+116D, U+315C->U+116E, U+315D->U+116F, U+315E->U+1170, U+315F->U+1171, U+3160->U+1172,  \
+		U+3161->U+1173, U+3162->U+1174, U+3163->U+1175, U+3165->U+1114, U+3166->U+1115, U+3167->U+11C7, U+3168->U+11C8, U+3169->U+11CC, U+316A->U+11CE, U+316B->U+11D3, U+316C->U+11D7, U+316D->U+11D9, U+316E->U+111C, U+316F->U+11DD, U+3170->U+11DF, U+3171->U+111D,  \
+		U+3172->U+111E, U+3173->U+1120, U+3174->U+1122, U+3175->U+1123, U+3176->U+1127, U+3177->U+1129, U+3178->U+112B, U+3179->U+112C, U+317A->U+112D, U+317B->U+112E, U+317C->U+112F, U+317D->U+1132, U+317E->U+1136, U+317F->U+1140, U+3180->U+1147, U+3181->U+114C,  \
+		U+3182->U+11F1, U+3183->U+11F2, U+3184->U+1157, U+3185->U+1158, U+3186->U+1159, U+3187->U+1184, U+3188->U+1185, U+3189->U+1188, U+318A->U+1191, U+318B->U+1192, U+318C->U+1194, U+318D->U+119E, U+318E->U+11A1, U+A490->U+A408, U+A491->U+A1B9, U+4E00..U+9FBB,  \
+		U+3400..U+4DB5, U+20000..U+2A6D6, U+FA0E, U+FA0F, U+FA11, U+FA13, U+FA14, U+FA1F, U+FA21, U+FA23, U+FA24, U+FA27, U+FA28, U+FA29, U+3105..U+312C, U+31A0..U+31B7, U+3041, U+3043, U+3045, U+3047, U+3049, U+304B, U+304D, U+304F, U+3051, U+3053, U+3055, U+3057,  \
+		U+3059, U+305B, U+305D, U+305F, U+3061, U+3063, U+3066, U+3068, U+306A..U+306F, U+3072, U+3075, U+3078, U+307B, U+307E..U+3083, U+3085, U+3087, U+3089..U+308E, U+3090..U+3093, U+30A1, U+30A3, U+30A5, U+30A7, U+30A9, U+30AD, U+30AF, U+30B3, U+30B5, U+30BB,  \
+		U+30BD, U+30BF, U+30C1, U+30C3, U+30C4, U+30C6, U+30CA, U+30CB, U+30CD, U+30CE, U+30DE, U+30DF, U+30E1, U+30E2, U+30E3, U+30E5, U+30E7, U+30EE, U+30F0..U+30F3, U+30F5, U+30F6, U+31F0, U+31F1, U+31F2, U+31F3, U+31F4, U+31F5, U+31F6, U+31F7, U+31F8, U+31F9,  \
+		U+31FA, U+31FB, U+31FC, U+31FD, U+31FE, U+31FF, U+AC00..U+D7A3, U+1100..U+1159, U+1161..U+11A2, U+11A8..U+11F9, U+A000..U+A48C, U+A492..U+A4C6, U+2C80->U+2C81, U+2C81, U+2C82->U+2C83, U+2C83, U+2C84->U+2C85, U+2C85, U+2C86->U+2C87, U+2C87, U+2C88->U+2C89,  \
+		U+2C89, U+2C8A->U+2C8B, U+2C8B, U+2C8C->U+2C8D, U+2C8D, U+2C8E->U+2C8F, U+2C8F, U+2C90->U+2C91, U+2C91, U+2C92->U+2C93, U+2C93, U+2C94->U+2C95, U+2C95, U+2C96->U+2C97, U+2C97, U+2C98->U+2C99, U+2C99, U+2C9A->U+2C9B, U+2C9B, U+2C9C->U+2C9D, U+2C9D, U+2C9E->U+2C9F,  \
+		U+2C9F, U+2CA0->U+2CA1, U+2CA1, U+2CA2->U+2CA3, U+2CA3, U+2CA4->U+2CA5, U+2CA5, U+2CA6->U+2CA7, U+2CA7, U+2CA8->U+2CA9, U+2CA9, U+2CAA->U+2CAB, U+2CAB, U+2CAC->U+2CAD, U+2CAD, U+2CAE->U+2CAF, U+2CAF, U+2CB0->U+2CB1, U+2CB1, U+2CB2->U+2CB3, U+2CB3, U+2CB4->U+2CB5,  \
+		U+2CB5, U+2CB6->U+2CB7, U+2CB7, U+2CB8->U+2CB9, U+2CB9, U+2CBA->U+2CBB, U+2CBB, U+2CBC->U+2CBD, U+2CBD, U+2CBE->U+2CBF, U+2CBF, U+2CC0->U+2CC1, U+2CC1, U+2CC2->U+2CC3, U+2CC3, U+2CC4->U+2CC5, U+2CC5, U+2CC6->U+2CC7, U+2CC7, U+2CC8->U+2CC9, U+2CC9, U+2CCA->U+2CCB,  \
+		U+2CCB, U+2CCC->U+2CCD, U+2CCD, U+2CCE->U+2CCF, U+2CCF, U+2CD0->U+2CD1, U+2CD1, U+2CD2->U+2CD3, U+2CD3, U+2CD4->U+2CD5, U+2CD5, U+2CD6->U+2CD7, U+2CD7, U+2CD8->U+2CD9, U+2CD9, U+2CDA->U+2CDB, U+2CDB, U+2CDC->U+2CDD, U+2CDD, U+2CDE->U+2CDF, U+2CDF, U+2CE0->U+2CE1,  \
+		U+2CE1, U+2CE2->U+2CE3, U+2CE3, U+0400->U+0435, U+0401->U+0435, U+0402->U+0452, U+0452, U+0403->U+0433, U+0404->U+0454, U+0454, U+0405->U+0455, U+0455, U+0406->U+0456, U+0407->U+0456, U+0457->U+0456, U+0456, U+0408..U+040B->U+0458..U+045B, U+0458..U+045B,  \
+		U+040C->U+043A, U+040D->U+0438, U+040E->U+0443, U+040F->U+045F, U+045F, U+0450->U+0435, U+0451->U+0435, U+0453->U+0433, U+045C->U+043A, U+045D->U+0438, U+045E->U+0443, U+0460->U+0461, U+0461, U+0462->U+0463, U+0463, U+0464->U+0465, U+0465, U+0466->U+0467,  \
+		U+0467, U+0468->U+0469, U+0469, U+046A->U+046B, U+046B, U+046C->U+046D, U+046D, U+046E->U+046F, U+046F, U+0470->U+0471, U+0471, U+0472->U+0473, U+0473, U+0474->U+0475, U+0476->U+0475, U+0477->U+0475, U+0475, U+0478->U+0479, U+0479, U+047A->U+047B, U+047B,  \
+		U+047C->U+047D, U+047D, U+047E->U+047F, U+047F, U+0480->U+0481, U+0481, U+048A->U+0438, U+048B->U+0438, U+048C->U+044C, U+048D->U+044C, U+048E->U+0440, U+048F->U+0440, U+0490->U+0433, U+0491->U+0433, U+0490->U+0433, U+0491->U+0433, U+0492->U+0433, U+0493->U+0433,  \
+		U+0494->U+0433, U+0495->U+0433, U+0496->U+0436, U+0497->U+0436, U+0498->U+0437, U+0499->U+0437, U+049A->U+043A, U+049B->U+043A, U+049C->U+043A, U+049D->U+043A, U+049E->U+043A, U+049F->U+043A, U+04A0->U+043A, U+04A1->U+043A, U+04A2->U+043D, U+04A3->U+043D,  \
+		U+04A4->U+043D, U+04A5->U+043D, U+04A6->U+043F, U+04A7->U+043F, U+04A8->U+04A9, U+04A9, U+04AA->U+0441, U+04AB->U+0441, U+04AC->U+0442, U+04AD->U+0442, U+04AE->U+0443, U+04AF->U+0443, U+04B0->U+0443, U+04B1->U+0443, U+04B2->U+0445, U+04B3->U+0445, U+04B4->U+04B5,  \
+		U+04B5, U+04B6->U+0447, U+04B7->U+0447, U+04B8->U+0447, U+04B9->U+0447, U+04BA->U+04BB, U+04BB, U+04BC->U+04BD, U+04BE->U+04BD, U+04BF->U+04BD, U+04BD, U+04C0->U+04CF, U+04CF, U+04C1->U+0436, U+04C2->U+0436, U+04C3->U+043A, U+04C4->U+043A, U+04C5->U+043B,  \
+		U+04C6->U+043B, U+04C7->U+043D, U+04C8->U+043D, U+04C9->U+043D, U+04CA->U+043D, U+04CB->U+0447, U+04CC->U+0447, U+04CD->U+043C, U+04CE->U+043C, U+04D0->U+0430, U+04D1->U+0430, U+04D2->U+0430, U+04D3->U+0430, U+04D4->U+00E6, U+04D5->U+00E6, U+04D6->U+0435,  \
+		U+04D7->U+0435, U+04D8->U+04D9, U+04DA->U+04D9, U+04DB->U+04D9, U+04D9, U+04DC->U+0436, U+04DD->U+0436, U+04DE->U+0437, U+04DF->U+0437, U+04E0->U+04E1, U+04E1, U+04E2->U+0438, U+04E3->U+0438, U+04E4->U+0438, U+04E5->U+0438, U+04E6->U+043E, U+04E7->U+043E,  \
+		U+04E8->U+043E, U+04E9->U+043E, U+04EA->U+043E, U+04EB->U+043E, U+04EC->U+044D, U+04ED->U+044D, U+04EE->U+0443, U+04EF->U+0443, U+04F0->U+0443, U+04F1->U+0443, U+04F2->U+0443, U+04F3->U+0443, U+04F4->U+0447, U+04F5->U+0447, U+04F6->U+0433, U+04F7->U+0433,  \
+		U+04F8->U+044B, U+04F9->U+044B, U+04FA->U+0433, U+04FB->U+0433, U+04FC->U+0445, U+04FD->U+0445, U+04FE->U+0445, U+04FF->U+0445, U+0410..U+0418->U+0430..U+0438, U+0419->U+0438, U+0430..U+0438, U+041A..U+042F->U+043A..U+044F, U+043A..U+044F, U+0929->U+0928,  \
+		U+0931->U+0930, U+0934->U+0933, U+0958->U+0915, U+0959->U+0916, U+095A->U+0917, U+095B->U+091C, U+095C->U+0921, U+095D->U+0922, U+095E->U+092B, U+095F->U+092F, U+0904..U+0928, U+092A..U+0930, U+0932, U+0933, U+0935..U+0939, U+0960, U+0961, U+0966..U+096F,  \
+		U+097B..U+097F, U+10FC->U+10DC, U+10D0..U+10FA, U+10A0..U+10C5->U+2D00..U+2D25, U+2D00..U+2D25, U+0386->U+03B1, U+0388->U+03B5, U+0389->U+03B7, U+038A->U+03B9, U+038C->U+03BF, U+038E->U+03C5, U+038F->U+03C9, U+0390->U+03B9, U+03AA->U+03B9, U+03AB->U+03C5,  \
+		U+03AC->U+03B1, U+03AD->U+03B5, U+03AE->U+03B7, U+03AF->U+03B9, U+03B0->U+03C5, U+03CA->U+03B9, U+03CB->U+03C5, U+03CC->U+03BF, U+03CD->U+03C5, U+03CE->U+03C9, U+03D0->U+03B2, U+03D1->U+03B8, U+03D2->U+03C5, U+03D3->U+03C5, U+03D4->U+03C5, U+03D5->U+03C6,  \
+		U+03D6->U+03C0, U+03D8->U+03D9, U+03DA->U+03DB, U+03DC->U+03DD, U+03DE->U+03DF, U+03E0->U+03E1, U+03E2->U+03E3, U+03E4->U+03E5, U+03E6->U+03E7, U+03E8->U+03E9, U+03EA->U+03EB, U+03EC->U+03ED, U+03EE->U+03EF, U+03F0->U+03BA, U+03F1->U+03C1, U+03F2->U+03C3,  \
+		U+03F4->U+03B8, U+03F5->U+03B5, U+03F6->U+03B5, U+03F7->U+03F8, U+03F9->U+03C3, U+03FA->U+03FB, U+1F00->U+03B1, U+1F01->U+03B1, U+1F02->U+03B1, U+1F03->U+03B1, U+1F04->U+03B1, U+1F05->U+03B1, U+1F06->U+03B1, U+1F07->U+03B1, U+1F08->U+03B1, U+1F09->U+03B1,  \
+		U+1F0A->U+03B1, U+1F0B->U+03B1, U+1F0C->U+03B1, U+1F0D->U+03B1, U+1F0E->U+03B1, U+1F0F->U+03B1, U+1F10->U+03B5, U+1F11->U+03B5, U+1F12->U+03B5, U+1F13->U+03B5, U+1F14->U+03B5, U+1F15->U+03B5, U+1F18->U+03B5, U+1F19->U+03B5, U+1F1A->U+03B5, U+1F1B->U+03B5,  \
+		U+1F1C->U+03B5, U+1F1D->U+03B5, U+1F20->U+03B7, U+1F21->U+03B7, U+1F22->U+03B7, U+1F23->U+03B7, U+1F24->U+03B7, U+1F25->U+03B7, U+1F26->U+03B7, U+1F27->U+03B7, U+1F28->U+03B7, U+1F29->U+03B7, U+1F2A->U+03B7, U+1F2B->U+03B7, U+1F2C->U+03B7, U+1F2D->U+03B7,  \
+		U+1F2E->U+03B7, U+1F2F->U+03B7, U+1F30->U+03B9, U+1F31->U+03B9, U+1F32->U+03B9, U+1F33->U+03B9, U+1F34->U+03B9, U+1F35->U+03B9, U+1F36->U+03B9, U+1F37->U+03B9, U+1F38->U+03B9, U+1F39->U+03B9, U+1F3A->U+03B9, U+1F3B->U+03B9, U+1F3C->U+03B9, U+1F3D->U+03B9,  \
+		U+1F3E->U+03B9, U+1F3F->U+03B9, U+1F40->U+03BF, U+1F41->U+03BF, U+1F42->U+03BF, U+1F43->U+03BF, U+1F44->U+03BF, U+1F45->U+03BF, U+1F48->U+03BF, U+1F49->U+03BF, U+1F4A->U+03BF, U+1F4B->U+03BF, U+1F4C->U+03BF, U+1F4D->U+03BF, U+1F50->U+03C5, U+1F51->U+03C5,  \
+		U+1F52->U+03C5, U+1F53->U+03C5, U+1F54->U+03C5, U+1F55->U+03C5, U+1F56->U+03C5, U+1F57->U+03C5, U+1F59->U+03C5, U+1F5B->U+03C5, U+1F5D->U+03C5, U+1F5F->U+03C5, U+1F60->U+03C9, U+1F61->U+03C9, U+1F62->U+03C9, U+1F63->U+03C9, U+1F64->U+03C9, U+1F65->U+03C9,  \
+		U+1F66->U+03C9, U+1F67->U+03C9, U+1F68->U+03C9, U+1F69->U+03C9, U+1F6A->U+03C9, U+1F6B->U+03C9, U+1F6C->U+03C9, U+1F6D->U+03C9, U+1F6E->U+03C9, U+1F6F->U+03C9, U+1F70->U+03B1, U+1F71->U+03B1, U+1F72->U+03B5, U+1F73->U+03B5, U+1F74->U+03B7, U+1F75->U+03B7,  \
+		U+1F76->U+03B9, U+1F77->U+03B9, U+1F78->U+03BF, U+1F79->U+03BF, U+1F7A->U+03C5, U+1F7B->U+03C5, U+1F7C->U+03C9, U+1F7D->U+03C9, U+1F80->U+03B1, U+1F81->U+03B1, U+1F82->U+03B1, U+1F83->U+03B1, U+1F84->U+03B1, U+1F85->U+03B1, U+1F86->U+03B1, U+1F87->U+03B1,  \
+		U+1F88->U+03B1, U+1F89->U+03B1, U+1F8A->U+03B1, U+1F8B->U+03B1, U+1F8C->U+03B1, U+1F8D->U+03B1, U+1F8E->U+03B1, U+1F8F->U+03B1, U+1F90->U+03B7, U+1F91->U+03B7, U+1F92->U+03B7, U+1F93->U+03B7, U+1F94->U+03B7, U+1F95->U+03B7, U+1F96->U+03B7, U+1F97->U+03B7,  \
+		U+1F98->U+03B7, U+1F99->U+03B7, U+1F9A->U+03B7, U+1F9B->U+03B7, U+1F9C->U+03B7, U+1F9D->U+03B7, U+1F9E->U+03B7, U+1F9F->U+03B7, U+1FA0->U+03C9, U+1FA1->U+03C9, U+1FA2->U+03C9, U+1FA3->U+03C9, U+1FA4->U+03C9, U+1FA5->U+03C9, U+1FA6->U+03C9, U+1FA7->U+03C9,  \
+		U+1FA8->U+03C9, U+1FA9->U+03C9, U+1FAA->U+03C9, U+1FAB->U+03C9, U+1FAC->U+03C9, U+1FAD->U+03C9, U+1FAE->U+03C9, U+1FAF->U+03C9, U+1FB0->U+03B1, U+1FB1->U+03B1, U+1FB2->U+03B1, U+1FB3->U+03B1, U+1FB4->U+03B1, U+1FB6->U+03B1, U+1FB7->U+03B1, U+1FB8->U+03B1,  \
+		U+1FB9->U+03B1, U+1FBA->U+03B1, U+1FBB->U+03B1, U+1FBC->U+03B1, U+1FC2->U+03B7, U+1FC3->U+03B7, U+1FC4->U+03B7, U+1FC6->U+03B7, U+1FC7->U+03B7, U+1FC8->U+03B5, U+1FC9->U+03B5, U+1FCA->U+03B7, U+1FCB->U+03B7, U+1FCC->U+03B7, U+1FD0->U+03B9, U+1FD1->U+03B9,  \
+		U+1FD2->U+03B9, U+1FD3->U+03B9, U+1FD6->U+03B9, U+1FD7->U+03B9, U+1FD8->U+03B9, U+1FD9->U+03B9, U+1FDA->U+03B9, U+1FDB->U+03B9, U+1FE0->U+03C5, U+1FE1->U+03C5, U+1FE2->U+03C5, U+1FE3->U+03C5, U+1FE4->U+03C1, U+1FE5->U+03C1, U+1FE6->U+03C5, U+1FE7->U+03C5,  \
+		U+1FE8->U+03C5, U+1FE9->U+03C5, U+1FEA->U+03C5, U+1FEB->U+03C5, U+1FEC->U+03C1, U+1FF2->U+03C9, U+1FF3->U+03C9, U+1FF4->U+03C9, U+1FF6->U+03C9, U+1FF7->U+03C9, U+1FF8->U+03BF, U+1FF9->U+03BF, U+1FFA->U+03C9, U+1FFB->U+03C9, U+1FFC->U+03C9, U+0391..U+03A1->U+03B1..U+03C1,  \
+		U+03B1..U+03C1, U+03A3..U+03A9->U+03C3..U+03C9, U+03C3..U+03C9, U+03C2, U+03D9, U+03DB, U+03DD, U+03DF, U+03E1, U+03E3, U+03E5, U+03E7, U+03E9, U+03EB, U+03ED, U+03EF, U+03F3, U+03F8, U+03FB, U+0A85..U+0A8C, U+0A8F, U+0A90, U+0A93..U+0AB0, U+0AB2, U+0AB3,  \
+		U+0AB5..U+0AB9, U+0AE0, U+0AE1, U+0AE6..U+0AEF, U+0A33->U+0A32, U+0A36->U+0A38, U+0A59->U+0A16, U+0A5A->U+0A17, U+0A5B->U+0A1C, U+0A5E->U+0A2B, U+0A05..U+0A0A, U+0A0F, U+0A10, U+0A13..U+0A28, U+0A2A..U+0A30, U+0A32, U+0A35, U+0A38, U+0A39, U+0A5C, U+0A66..U+0A6F,  \
+		U+FB1D->U+05D9, U+FB1F->U+05F2, U+FB20->U+05E2, U+FB21->U+05D0, U+FB22->U+05D3, U+FB23->U+05D4, U+FB24->U+05DB, U+FB25->U+05DC, U+FB26->U+05DD, U+FB27->U+05E8, U+FB28->U+05EA, U+FB2A->U+05E9, U+FB2B->U+05E9, U+FB2C->U+05E9, U+FB2D->U+05E9, U+FB2E->U+05D0,  \
+		U+FB2F->U+05D0, U+FB30->U+05D0, U+FB31->U+05D1, U+FB32->U+05D2, U+FB33->U+05D3, U+FB34->U+05D4, U+FB35->U+05D5, U+FB36->U+05D6, U+FB38->U+05D8, U+FB39->U+05D9, U+FB3A->U+05DA, U+FB3B->U+05DB, U+FB3C->U+05DC, U+FB3E->U+05DE, U+FB40->U+05E0, U+FB41->U+05E1,  \
+		U+FB43->U+05E3, U+FB44->U+05E4, U+FB46->U+05E6, U+FB47->U+05E7, U+FB48->U+05E8, U+FB49->U+05E9, U+FB4A->U+05EA, U+FB4B->U+05D5, U+FB4C->U+05D1, U+FB4D->U+05DB, U+FB4E->U+05E4, U+FB4F->U+05D0, U+05D0..U+05F2, U+0C85..U+0C8C, U+0C8E..U+0C90, U+0C92..U+0CA8,  \
+		U+0CAA..U+0CB3, U+0CB5..U+0CB9, U+0CE0, U+0CE1, U+0CE6..U+0CEF, U+1900..U+191C, U+1930..U+1938, U+1946..U+194F, U+0D05..U+0D0C, U+0D0E..U+0D10, U+0D12..U+0D28, U+0D2A..U+0D39, U+0D60, U+0D61, U+0D66..U+0D6F, U+0B94->U+0B92, U+0B85..U+0B8A, U+0B8E..U+0B90,  \
+		U+0B92, U+0B93, U+0B95, U+0B99, U+0B9A, U+0B9C, U+0B9E, U+0B9F, U+0BA3, U+0BA4, U+0BA8..U+0BAA, U+0BAE..U+0BB9, U+0BE6..U+0BEF, U+0E01..U+0E30, U+0E32, U+0E33, U+0E40..U+0E46, U+0E50..U+0E5B, U+FF10..U+FF19->0..9, U+FF21..U+FF3A->a..z, U+FF41..U+FF5A->a..z,  \
+		0..9, A..Z->a..z, a..z
+
+	# ignored characters list
+	# optional, default value is empty
+	#
+	# ignore_chars		= U+00AD
+
+	# minimum word prefix length to index
+	# optional, default is 0 (do not index prefixes)
+	#
+	# min_prefix_len		= 0
+
+	# minimum word infix length to index
+	# optional, default is 0 (do not index infixes)
+	#
+	# min_infix_len		= 0
+
+	# list of fields to limit prefix/infix indexing to
+	# optional, default value is empty (index all fields in prefix/infix mode)
+	#
+	# prefix_fields		= filename
+	# infix_fields		= url, domain
+
+	# enable star-syntax (wildcards) when searching prefix/infix indexes
+	# search-time only, does not affect indexing, can be 0 or 1
+	# optional, default is 0 (do not use wildcard syntax)
+	#
+	# enable_star		= 1
+
+	# expand keywords with exact forms and/or stars when searching fit indexes
+	# search-time only, does not affect indexing, can be 0 or 1
+	# optional, default is 0 (do not expand keywords)
+	#
+	# expand_keywords		= 1
+
+	# n-gram length to index, for CJK indexing
+	# only supports 0 and 1 for now, other lengths to be implemented
+	# optional, default is 0 (disable n-grams)
+	#
+	ngram_len		= 1
+
+	# n-gram characters list, for CJK indexing
+	# optional, default is empty
+	#
+	ngram_chars		= U+4E00..U+9FBB, U+3400..U+4DB5, U+20000..U+2A6D6, U+FA0E, U+FA0F, U+FA11, U+FA13, U+FA14, U+FA1F, U+FA21, U+FA23, U+FA24, U+FA27, U+FA28, U+FA29, U+3105..U+312C, U+31A0..U+31B7, U+3041, U+3043, U+3045, U+3047, U+3049, U+304B, U+304D, U+304F, U+3051, U+3053, U+3055, U+3057, U+3059, U+305B, U+305D, U+305F, U+3061, U+3063, U+3066, U+3068, U+306A..U+306F, U+3072, U+3075, U+3078, U+307B, U+307E..U+3083, U+3085, U+3087, U+3089..U+308E, U+3090..U+3093, U+30A1, U+30A3, U+30A5, U+30A7, U+30A9, U+30AD, U+30AF, U+30B3, U+30B5, U+30BB, U+30BD, U+30BF, U+30C1, U+30C3, U+30C4, U+30C6, U+30CA, U+30CB, U+30CD, U+30CE, U+30DE, U+30DF, U+30E1, U+30E2, U+30E3, U+30E5, U+30E7, U+30EE, U+30F0..U+30F3, U+30F5, U+30F6, U+31F0, U+31F1, U+31F2, U+31F3, U+31F4, U+31F5, U+31F6, U+31F7, U+31F8, U+31F9, U+31FA, U+31FB, U+31FC, U+31FD, U+31FE, U+31FF, U+AC00..U+D7A3, U+1100..U+1159, U+1161..U+11A2, U+11A8..U+11F9, U+A000..U+A48C, U+A492..U+A4C6
+
+	# phrase boundary characters list
+	# optional, default is empty
+	#
+	# phrase_boundary		= ., ?, !, U+2026 # horizontal ellipsis
+
+	# phrase boundary word position increment
+	# optional, default is 0
+	#
+	# phrase_boundary_step	= 100
+
+	# blended characters list
+	# blended chars are indexed both as separators and valid characters
+	# for instance, AT&T will result in 3 tokens ("at", "t", and "at&t")
+	# optional, default is empty
+	#
+	# blend_chars		= +, &, U+23
+
+	# blended token indexing mode
+	# a comma separated list of blended token indexing variants
+	# known variants are trim_none, trim_head, trim_tail, trim_both, skip_pure
+	# optional, default is trim_none
+	#
+	# blend_mode		= trim_tail, skip_pure
+
+	# whether to strip HTML tags from incoming documents
+	# known values are 0 (do not strip) and 1 (do strip)
+	# optional, default is 0
+	html_strip		= 0
+
+	# what HTML attributes to index if stripping HTML
+	# optional, default is empty (do not index anything)
+	#
+	# html_index_attrs	= img=alt,title; a=title;
+
+	# what HTML elements contents to strip
+	# optional, default is empty (do not strip element contents)
+	#
+	# html_remove_elements	= style, script
+
+	# whether to preopen index data files on startup
+	# optional, default is 0 (do not preopen), searchd-only
+	#
+	# preopen			= 1
+
+	# whether to keep dictionary (.spi) on disk, or cache it in RAM
+	# optional, default is 0 (cache in RAM), searchd-only
+	#
+	# ondisk_dict		= 1
+
+	# whether to enable in-place inversion (2x less disk, 90-95% speed)
+	# optional, default is 0 (use separate temporary files), indexer-only
+	#
+	# inplace_enable		= 1
+
+	# in-place fine-tuning options
+	# optional, defaults are listed below
+	#
+	# inplace_hit_gap		= 0 # preallocated hitlist gap size
+	# inplace_docinfo_gap	= 0 # preallocated docinfo gap size
+	# inplace_reloc_factor	= 0.1 # relocation buffer size within arena
+	# inplace_write_factor	= 0.1 # write buffer size within arena
+
+	# whether to index original keywords along with stemmed versions
+	# enables "=exactform" operator to work
+	# optional, default is 0
+	#
+	# index_exact_words	= 1
+
+	# position increment on overshort (less than min_word_len) words
+	# optional, allowed values are 0 and 1, default is 1
+	#
+	# overshort_step		= 1
+
+	# position increment on stopword
+	# optional, allowed values are 0 and 1, default is 1
+	#
+	# stopword_step		= 1
+
+	# hitless words list
+	# positions for these keywords will not be stored in the index
+	# optional, allowed values are 'all', or a list file name
+	#
+	# hitless_words		= all
+	# hitless_words		= hitless.txt
+
+	# detect and index sentence and paragraph boundaries
+	# required for the SENTENCE and PARAGRAPH operators to work
+	# optional, allowed values are 0 and 1, default is 0
+	#
+	# index_sp			= 1
+
+	# index zones, delimited by HTML/XML tags
+	# a comma separated list of tags and wildcards
+	# required for the ZONE operator to work
+	# optional, default is empty string (do not index zones)
+	#
+	# index_zones		= title, h*, th
+}
+
+#############################################################################
+## searchd settings
+#############################################################################
+
+searchd
+{
+	# [hostname:]port[:protocol], or /unix/socket/path to listen on
+	# known protocols are 'sphinx' (SphinxAPI) and 'mysql41' (SphinxQL)
+	#
+	# multi-value, multiple listen points are allowed
+	# optional, defaults are 9312:sphinx and 9306:mysql41, as below
+	#
+	# listen			= 127.0.0.1
+	# listen			= 192.168.0.1:9312
+	# listen			= 9312
+	# listen			= /var/run/searchd.sock
+	listen			= %(ip_address)s:%(port)s:sphinx
+	listen			= %(ip_address)s:%(sql_port)s:mysql41
+
+	# log file, searchd run info is logged here
+	# optional, default is 'searchd.log'
+	log			= %(log_directory)s/sphinx-searchd.log
+
+	# query log file, all search queries are logged here
+	# optional, default is empty (do not log queries)
+	query_log		= %(log_directory)s/sphinx-query.log
+
+	# client read timeout, seconds
+	# optional, default is 5
+	read_timeout		= 5
+
+	# request timeout, seconds
+	# optional, default is 5 minutes
+	client_timeout		= 300
+
+	# maximum amount of children to fork (concurrent searches to run)
+	# optional, default is 0 (unlimited)
+	max_children		= 30
+
+	# PID file, searchd process ID file name
+	# mandatory
+	pid_file		= %(data_directory)s/sphinx-searchd.pid
+
+	# max amount of matches the daemon ever keeps in RAM, per-index
+	# WARNING, THERE'S ALSO PER-QUERY LIMIT, SEE SetLimits() API CALL
+	# default is 1000 (just like Google)
+	max_matches		= 1000
+
+	# seamless rotate, prevents rotate stalls if precaching huge datasets
+	# optional, default is 1
+	seamless_rotate		= 1
+
+	# whether to forcibly preopen all indexes on startup
+	# optional, default is 0 (do not preopen)
+	preopen_indexes		= 0
+
+	# whether to unlink .old index copies on successful rotation.
+	# optional, default is 1 (do unlink)
+	unlink_old		= 1
+
+	# attribute updates periodic flush timeout, seconds
+	# updates will be automatically dumped to disk this frequently
+	# optional, default is 0 (disable periodic flush)
+	#
+	# attr_flush_period	= 900
+
+	# instance-wide ondisk_dict defaults (per-index values take precedence)
+	# optional, default is 0 (precache all dictionaries in RAM)
+	#
+	# ondisk_dict_default	= 1
+
+	# MVA updates pool size
+	# shared between all instances of searchd, disables attr flushes!
+	# optional, default size is 1M
+	mva_updates_pool	= 1M
+
+	# max allowed network packet size
+	# limits both query packets from clients, and responses from agents
+	# optional, default size is 8M
+	max_packet_size		= 8M
+
+	# crash log path
+	# searchd will (try to) log crashed query to 'crash_log_path.PID' file
+	# optional, default is empty (do not create crash logs)
+	#
+	# crash_log_path		= %(log_directory)s
+
+	# max allowed per-query filter count
+	# optional, default is 256
+	max_filters		= 256
+
+	# max allowed per-filter values count
+	# optional, default is 4096
+	max_filter_values	= 4096
+
+	# socket listen queue length
+	# optional, default is 5
+	#
+	# listen_backlog		= 5
+
+	# per-keyword read buffer size
+	# optional, default is 256K
+	#
+	# read_buffer		= 256K
+
+	# unhinted read size (currently used when reading hits)
+	# optional, default is 32K
+	#
+	# read_unhinted		= 32K
+
+	# max allowed per-batch query count (aka multi-query count)
+	# optional, default is 32
+	max_batch_queries	= 32
+
+	# max common subtree document cache size, per-query
+	# optional, default is 0 (disable subtree optimization)
+	#
+	# subtree_docs_cache	= 4M
+
+	# max common subtree hit cache size, per-query
+	# optional, default is 0 (disable subtree optimization)
+	#
+	# subtree_hits_cache	= 8M
+
+	# multi-processing mode (MPM)
+	# known values are none, fork, prefork, and threads
+	# optional, default is fork
+	#
+	workers			= threads # for RT to work
+
+	# max threads to create for searching local parts of a distributed index
+	# optional, default is 0, which means disable multi-threaded searching
+	# should work with all MPMs (ie. does NOT require workers=threads)
+	#
+	# dist_threads		= 4
+
+	# binlog files path; use empty string to disable binlog
+	# optional, default is build-time configured data directory
+	#
+	binlog_path		= # disable logging
+	# binlog_path		= %(data_directory)s # binlog.001 etc will be created there
+
+	# binlog flush/sync mode
+	# 0 means flush and sync every second
+	# 1 means flush and sync every transaction
+	# 2 means flush every transaction, sync every second
+	# optional, default is 2
+	#
+	# binlog_flush		= 2
+
+	# binlog per-file size limit
+	# optional, default is 128M, 0 means no limit
+	#
+	# binlog_max_log_size	= 256M
+}
diff --git a/slapos/recipe/erp5/template/zeo.conf.in b/slapos/recipe/erp5/template/zeo.conf.in
index a37dd6cd78993affe4e027ab60a6d9816567fb9b..94ffbb7859acf2d1f92306b0557cb985497d115e 100644
--- a/slapos/recipe/erp5/template/zeo.conf.in
+++ b/slapos/recipe/erp5/template/zeo.conf.in
@@ -1,8 +1,6 @@
 # ZEO configuration file generated by SlapOS
 <zeo>
   address %(zeo_ip)s:%(zeo_port)s
-  read-only false
-  invalidation-queue-size 100
   pid-filename %(zeo_pid)s
 </zeo>
 
@@ -10,6 +8,7 @@
 
 <eventlog>
   <logfile>
+    dateformat
     path %(zeo_event_log)s
   </logfile>
 </eventlog>
diff --git a/slapos/recipe/erp5/template/zope-zeo-snippet.conf.in b/slapos/recipe/erp5/template/zope-zeo-snippet.conf.in
index 71ff1567956b72075cd497c164bf7464dbab9397..dbd9bcac222f08ad6697e74c5e8cd62017ce9648 100644
--- a/slapos/recipe/erp5/template/zope-zeo-snippet.conf.in
+++ b/slapos/recipe/erp5/template/zope-zeo-snippet.conf.in
@@ -1,6 +1,8 @@
 <zodb_db %(storage_name)s>
+  cache-size %(zodb_cache_size)d
   mount-point %(mount_point)s
   <zeoclient>
+    cache-size %(zeo_client_cache_size)s
     server %(address)s
     storage %(storage_name)s
     name %(storage_name)s
diff --git a/slapos/recipe/erp5/template/zope-zodb-snippet.conf.in b/slapos/recipe/erp5/template/zope-zodb-snippet.conf.in
index d0b3ed1357779e9f4db7c93b2df16848874eb8ce..4eb6a051bdcbc2a575d34bf9f299dfdfb43b1596 100644
--- a/slapos/recipe/erp5/template/zope-zodb-snippet.conf.in
+++ b/slapos/recipe/erp5/template/zope-zodb-snippet.conf.in
@@ -1,4 +1,5 @@
 <zodb_db root>
+    cache-size %(zodb_cache_size)d
     <filestorage>
       path %(zodb_root_path)s
     </filestorage>
diff --git a/slapos/recipe/erp5/template/zope.conf.in b/slapos/recipe/erp5/template/zope.conf.in
index 90a1ac96b1f9d5e3f50c93ee149939b91b845d57..e8576401314fb57a5154abd98bba69cb744f43d6 100644
--- a/slapos/recipe/erp5/template/zope.conf.in
+++ b/slapos/recipe/erp5/template/zope.conf.in
@@ -7,10 +7,8 @@ instancehome $INSTANCE
 # Used products
 %(products)s
 
-# Environment override
-<environment>
-%(environment)s
-</environment>
+# Environment is setup in running wrapper script
+# Reason: zope.conf is read too late for some components
 
 # No need to debug
 debug-mode off
@@ -34,11 +32,13 @@ lock-filename %(lock-filename)s
 # Logging configuration
 <eventlog>
   <logfile>
+    dateformat
     path %(event_log)s
   </logfile>
 </eventlog>
 <logger access>
   <logfile>
+    dateformat
     path %(z2_log)s
   </logfile>
 </logger>
diff --git a/slapos/recipe/kvm/__init__.py b/slapos/recipe/kvm/__init__.py
index 24ea90a059fc4643496d03bc14c9d7b348da26c5..40c123135c85fddd8b483295c3664e09872f2899 100644
--- a/slapos/recipe/kvm/__init__.py
+++ b/slapos/recipe/kvm/__init__.py
@@ -37,6 +37,9 @@ import hashlib
 
 class Recipe(BaseSlapRecipe):
 
+  # To avoid magic numbers
+  VNC_BASE_PORT = 5900
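+  # For reference: a VNC display maps to TCP port VNC_BASE_PORT + display,
+  # e.g. display 1 is served on port 5901.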
+
   def _install(self):
     """
     Set the connection dictionnary for the computer partition and create a list
@@ -49,43 +52,58 @@ class Recipe(BaseSlapRecipe):
     self.path_list = []
 
     self.requirements, self.ws           = self.egg.working_set()
-    self.cron_d                          = self.installCrond()    
+    self.cron_d                          = self.installCrond()
 
     self.ca_conf                         = self.installCertificateAuthority()
     self.key_path, self.certificate_path = self.requestCertificate('noVNC')
-     
+
+    # Install the socket_connection_attempt script
+    catcher = zc.buildout.easy_install.scripts(
+      [('check_port_listening', __name__ + '.socket_connection_attempt', 'connection_attempt')],
+      self.ws,
+      sys.executable,
+      self.bin_directory,
+    )
+    # Save the check_port_listening script path
+    check_port_listening_script = catcher[0]
+    # Get the port_listening_promise template path, and save it
+    self.port_listening_promise_path = pkg_resources.resource_filename(
+      __name__, 'template/port_listening_promise.in')
+    self.port_listening_promise_conf = dict(
+     check_port_listening_script=check_port_listening_script,
+    )
+
     kvm_conf = self.installKvm(vnc_ip = self.getLocalIPv4Address())
-    
-    vnc_port = 5900 + kvm_conf['vnc_display']
-    
+
+    vnc_port = Recipe.VNC_BASE_PORT + kvm_conf['vnc_display']
+
     noVNC_conf = self.installNoVnc(source_ip   = self.getGlobalIPv6Address(),
                                    source_port = 6080,
                                    target_ip   = kvm_conf['vnc_ip'],
-                                   target_port = vnc_port,
-                                   python_path = kvm_conf['python_path'])
-    
+                                   target_port = vnc_port)
+
     self.linkBinary()
     self.computer_partition.setConnectionDict(dict(
         url = "https://[%s]:%s/vnc.html?host=[%s]&port=%s&encrypt=1" % (noVNC_conf['source_ip'],
                                                      noVNC_conf['source_port'],
                                                      noVNC_conf['source_ip'],
                                                      noVNC_conf['source_port']
-                                                     ), 
+                                                     ),
         password = kvm_conf['vnc_passwd']))
-    
+
     return self.path_list
 
   def installKvm(self, vnc_ip):
     """
-    Create kvm configuration dictionnary and instanciate a wrapper for kvm and 
-    kvm controller 
+    Create kvm configuration dictionary and instantiate a wrapper for kvm and
+    kvm controller
 
     Parameters : IP the vnc server is listening on
 
     Returns    : Dictionnary kvm_conf
     """
     kvm_conf = dict(vnc_ip = vnc_ip)
-    
+
     connection_found = False
     for tap_interface, dummy in self.parameter_dict['ip_list']:
       # Get an ip associated to a tap interface
@@ -95,13 +113,13 @@ class Recipe(BaseSlapRecipe):
       raise NotImplementedError("Do not support ip without tap interface")
 
     kvm_conf['tap_interface'] = tap_interface
-    
+
     # Disk path
     kvm_conf['disk_path'] = os.path.join(self.data_root_directory,
         'virtual.qcow2')
     kvm_conf['socket_path'] = os.path.join(self.var_directory, 'qmp_socket')
     # XXX Weak password
-    ##XXX -Vivien: add an option to generate one password for all instances 
+    ##XXX -Vivien: add an option to generate one password for all instances
     # and/or to input it yourself
     kvm_conf['vnc_passwd'] = binascii.hexlify(os.urandom(4))
 
@@ -120,7 +138,7 @@ class Recipe(BaseSlapRecipe):
           int(self.options['disk_size']))], shell=True)
       if retcode != 0:
         raise OSError, "Disk creation failed!"
-    
+
     # Options nbd_ip and nbd_port are provided by slapos master
     kvm_conf['nbd_ip']   = self.parameter_dict['nbd_ip']
     kvm_conf['nbd_port'] = self.parameter_dict['nbd_port']
@@ -134,41 +152,48 @@ class Recipe(BaseSlapRecipe):
     kvm_conf['ram_size']    = self.options['ram_size']
 
     kvm_conf['vnc_display'] = 1
-    
+
     # Instanciate KVM
-    kvm_template_location = pkg_resources.resource_filename(          
-                                             __name__, os.path.join(         
-                                             'template', 'kvm_run.in'))     
-    
-    kvm_runner_path = self.createRunningWrapper("kvm",                        
+    kvm_template_location = pkg_resources.resource_filename(
+      __name__, 'template/kvm_run.in')
+
+    kvm_runner_path = self.createRunningWrapper("kvm",
           self.substituteTemplate(kvm_template_location,
                                   kvm_conf))
-   
+
     self.path_list.append(kvm_runner_path)
 
     # Instanciate KVM controller
-    kvm_controller_template_location = pkg_resources.resource_filename(          
-                                             __name__, os.path.join(         
-                                             'template',
-                                             'kvm_controller_run.in' ))     
-    
-    kvm_controller_runner_path = self.createRunningWrapper("kvm_controller",                        
+    kvm_controller_template_location = pkg_resources.resource_filename(
+      __name__, 'template/kvm_controller_run.in')
+
+    kvm_controller_runner_path = self.createRunningWrapper("kvm_controller",
           self.substituteTemplate(kvm_controller_template_location,
                                   kvm_conf))
-   
+
     self.path_list.append(kvm_controller_runner_path)
-   
+
     # Instanciate Slapmonitor
     ##slapmonitor_runner_path = self.instanciate_wrapper("slapmonitor",
     #    [database_path, pid_file_path, python_path])
     # Instanciate Slapreport
     ##slapreport_runner_path = self.instanciate_wrapper("slapreport",
     #    [database_path, python_path])
-    
+
+    # Add VNC promise
+    self.port_listening_promise_conf.update(
+      hostname=kvm_conf['vnc_ip'],
+      port=Recipe.VNC_BASE_PORT + kvm_conf['vnc_display'],
+    )
+    self.createPromiseWrapper("vnc_promise",
+        self.substituteTemplate(self.port_listening_promise_path,
+                                self.port_listening_promise_conf,
+                               )
+                             )
+
     return kvm_conf
 
-  def installNoVnc(self, source_ip, source_port, target_ip, target_port, 
-                   python_path):
+  def installNoVnc(self, source_ip, source_port, target_ip, target_port):
     """
     Create noVNC configuration dictionnary and instanciate Websockify proxy
 
@@ -181,14 +206,20 @@ class Recipe(BaseSlapRecipe):
 
     noVNC_conf = {}
    
-    noVNC_conf['source_ip']   = source_ip                                          
+    noVNC_conf['source_ip']   = source_ip
     noVNC_conf['source_port'] = source_port
     
+    # Install numpy.
+    # XXX-Cedric : this looks like a hack. Do we have better solution, knowing
+    # That websockify is not an egg?
+    numpy = zc.buildout.easy_install.install(['numpy'], self.options['eggs-directory'])
+    environment = dict(PYTHONPATH='%s' % numpy.entries[0])
+    
     # Instanciate Websockify
     websockify_runner_path = zc.buildout.easy_install.scripts([('websockify',
-      'slapos.recipe.librecipe.execute', 'execute_wait')], self.ws,
+      'slapos.recipe.librecipe.execute', 'executee_wait')], self.ws,
       sys.executable, self.wrapper_directory, arguments=[
-        [python_path.strip(),
+        [sys.executable.strip(),
          self.options['websockify_path'],
          '--web',
          self.options['noVNC_location'],
@@ -197,11 +228,22 @@ class Recipe(BaseSlapRecipe):
          '--ssl-only',
          '%s:%s' % (source_ip, source_port),
          '%s:%s' % (target_ip, target_port)],
-        [self.certificate_path, self.key_path]]
+        [self.certificate_path, self.key_path],
+        environment]
        )[0]
-    
+
     self.path_list.append(websockify_runner_path)
-  
+
+    # Add noVNC promise
+    self.port_listening_promise_conf.update(hostname=noVNC_conf['source_ip'],
+                                            port=noVNC_conf['source_port'],
+                                           )
+    self.createPromiseWrapper("novnc_promise",
+        self.substituteTemplate(self.port_listening_promise_path,
+                                self.port_listening_promise_conf,
+                               )
+                             )
+
     return noVNC_conf
 
   def linkBinary(self):
@@ -225,7 +267,7 @@ class Recipe(BaseSlapRecipe):
       os.symlink(target, link)
       self.logger.debug('Created link %r -> %r' % (link, target))
       self.path_list.append(link)
-      
+
   def installCertificateAuthority(self, ca_country_code='XX',
       ca_email='xx@example.com', ca_state='State', ca_city='City',
       ca_company='Company'):
@@ -284,7 +326,7 @@ class Recipe(BaseSlapRecipe):
       ca_crl=os.path.join(config['ca_dir'], 'crl'),
       certificate_authority_path=config['ca_dir']
     )
-  
+
   def requestCertificate(self, name):
     hash = hashlib.sha512(name).hexdigest()
     key = os.path.join(self.ca_private, hash + self.ca_key_ext)
@@ -296,7 +338,7 @@ class Recipe(BaseSlapRecipe):
     parser.set('certificate', 'certificate_file', certificate)
     parser.write(open(os.path.join(self.ca_request_dir, hash), 'w'))
     return key, certificate
-  
+
   def installCrond(self):
     timestamps = self.createDataDirectory('cronstamps')
     cron_output = os.path.join(self.log_directory, 'cron-output')
diff --git a/slapos/recipe/kvm/socket_connection_attempt.py b/slapos/recipe/kvm/socket_connection_attempt.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfd9fad4b930518c6ef94a88c69ed40ce6b22539
--- /dev/null
+++ b/slapos/recipe/kvm/socket_connection_attempt.py
@@ -0,0 +1,26 @@
+import socket
+import sys
+
+def connection_attempt():
+
+  try:
+    hostname, port = sys.argv[1:3]
+  except ValueError:
+    print >> sys.stderr, """Bad command line.
+  Usage: %s hostname|ip port""" % sys.argv[0]
+    sys.exit(1)
+
+  connection_okay = False
+
+  try:
+    s = socket.create_connection((hostname, port))
+    connection_okay = True
+    s.close()
+  except (socket.error, socket.timeout):
+    connection_okay = False
+
+  if not connection_okay:
+    print >> sys.stderr, "%(port)s on %(ip)s isn't listening" % {
+      'port': port, 'ip': hostname
+    }
+    sys.exit(127)
diff --git a/slapos/recipe/kvm/template/port_listening_promise.in b/slapos/recipe/kvm/template/port_listening_promise.in
new file mode 100644
index 0000000000000000000000000000000000000000..15fa390d01e38096cfb8a9d01356bf1687e3f740
--- /dev/null
+++ b/slapos/recipe/kvm/template/port_listening_promise.in
@@ -0,0 +1,4 @@
+#!/usr/bin/env sh
+
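+# The check script below exits 0 when %(hostname)s:%(port)s accepts a TCP
+# connection; any other exit status is reported as a failed promise.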
+"%(check_port_listening_script)s" "%(hostname)s" "%(port)s"
+exit $?
diff --git a/slapos/recipe/lamp/__init__.py b/slapos/recipe/lamp/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..64af4334163cc01532300f91a6e5b810861ea6e8
--- /dev/null
+++ b/slapos/recipe/lamp/__init__.py
@@ -0,0 +1,300 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+from slapos.recipe.librecipe import BaseSlapRecipe
+import os
+import shutil
+import pkg_resources
+import zc.buildout
+import sys
+import zc.recipe.egg
+import urlparse
+
+class BaseRecipe(BaseSlapRecipe):
+  def getTemplateFilename(self, template_name):
+    return pkg_resources.resource_filename(__name__,
+        'template/%s' % template_name)
+
+  def installMysqlServer(self, ip=None, port=None):
+    if ip is None:
+      ip = self.getLocalIPv4Address()
+    if port is None:
+      port = '3306'
+    mysql_conf = dict(
+        ip=ip,
+        data_directory=os.path.join(self.data_root_directory,
+          'mysql'),
+        tcp_port=port,
+        pid_file=os.path.join(self.run_directory, 'mysqld.pid'),
+        socket=os.path.join(self.run_directory, 'mysqld.sock'),
+        error_log=os.path.join(self.log_directory, 'mysqld.log'),
+        slow_query_log=os.path.join(self.log_directory,
+        'mysql-slow.log'),
+        database='appdb',
+        user='appuser',
+        password=self.generatePassword(),
+    )
+    self._createDirectory(mysql_conf['data_directory'])
+
+    mysql_conf_path = self.createConfigurationFile("my.cnf",
+        self.substituteTemplate(pkg_resources.resource_filename(__name__,
+        'template/my.cnf.in'), mysql_conf))
+
+    mysql_script = pkg_resources.resource_string(__name__,
+        'template/mysqlinit.sql.in') % mysql_conf
+    self.path_list.extend(zc.buildout.easy_install.scripts([('mysql_update',
+      __name__ + '.mysql', 'updateMysql')], self.ws,
+      sys.executable, self.wrapper_directory, arguments=[dict(
+        mysql_script=mysql_script,
+        mysql_binary=self.options['mysql_binary'].strip(),
+        mysql_upgrade_binary=self.options['mysql_upgrade_binary'].strip(),
+        socket=mysql_conf['socket'],
+        )]))
+    self.path_list.extend(zc.buildout.easy_install.scripts([('mysqld',
+      __name__ + '.mysql', 'runMysql')], self.ws,
+        sys.executable, self.wrapper_directory, arguments=[dict(
+        mysql_install_binary=self.options['mysql_install_binary'].strip(),
+        mysqld_binary=self.options['mysqld_binary'].strip(),
+        data_directory=mysql_conf['data_directory'].strip(),
+        mysql_binary=self.options['mysql_binary'].strip(),
+        socket=mysql_conf['socket'].strip(),
+        configuration_file=mysql_conf_path,
+       )]))
+    self.path_list.extend([mysql_conf_path])
+    return dict(
+      mysql_host=mysql_conf['ip'],
+      mysql_port=mysql_conf['tcp_port'],
+      mysql_user=mysql_conf['user'],
+      mysql_password=mysql_conf['password'],
+      mysql_database=mysql_conf['database'],
+    )
+
+  def createHtdocs(self, source, document_root):
+    for p in os.listdir(document_root):
+      path = os.path.join(document_root, p)
+      if os.path.isdir(path):
+        shutil.rmtree(path)
+      else:
+        os.unlink(path)
+    for p in os.listdir(source):
+      path = os.path.join(source, p)
+      if os.path.isdir(path):
+        shutil.copytree(path, os.path.join(document_root, p))
+      else:
+        shutil.copy2(path, os.path.join(document_root, p))
+
+  def installApache(self, document_root, ip=None, port=None):
+    if ip is None:
+      ip=self.getGlobalIPv6Address()
+    if port is None:
+      port = '9080'
+    apache_config = dict(
+        pid_file=os.path.join(self.run_directory, 'httpd.pid'),
+        lock_file=os.path.join(self.run_directory, 'httpd.lock'),
+        ip=ip,
+        port=port,
+        error_log=os.path.join(self.log_directory, 'httpd-error.log'),
+        access_log=os.path.join(self.log_directory, 'httpd-access.log'),
+        document_root=document_root,
+        php_ini_dir=self.etc_directory
+    )
+    config_file = self.createConfigurationFile('httpd.conf',
+        self.substituteTemplate(pkg_resources.resource_filename(__name__,
+          'template/apache.in'), apache_config))
+    self.path_list.append(config_file)
+    self.path_list.append(self.createConfigurationFile('php.ini',
+        self.substituteTemplate(pkg_resources.resource_filename(__name__,
+          'template/php.ini.in'), dict(tmp_directory=self.tmp_directory))))
+    self.path_list.extend(zc.buildout.easy_install.scripts([(
+      'httpd',
+        __name__ + '.apache', 'runApache')], self.ws,
+          sys.executable, self.wrapper_directory, arguments=[
+            dict(
+              required_path_list=[],
+              binary=self.options['httpd_binary'],
+              config=config_file
+            )
+          ]))
+    return 'http://[%s]:%s' % (ip, port)
+
+  def createConfiguration(self, template, document_root, destination, d):
+    directory = os.path.dirname(destination)
+    file = os.path.basename(destination)
+    path = document_root
+    if directory:
+      path = os.path.join(document_root, directory)
+      if not os.path.exists(path):
+        os.makedirs(path)
+    destination = os.path.join(path, file)
+    open(destination, 'w').write(open(template, 'r').read() % d)
+
+  def configureInstallation(self, document_root, mysql_conf, url):
+    """Start process which can launch python scripts, move or remove files or 
+    directories when installing software.
+    """
+    if not self.options.has_key('delete') and not self.options.has_key('rename') and not\
+        self.options.has_key('chmod') and not self.options.has_key('script'):
+      return
+    delete = []
+    chmod = []
+    data = []
+    rename = []
+    rename_list = ""
+    argument = [self.options['lampconfigure_directory'].strip()]
+    if not self.options.has_key('file_token'):
+      argument = argument + ["-d", mysql_conf['mysql_database'],
+                             "-H", mysql_conf['mysql_host'], "-P", mysql_conf['mysql_port'],
+                             "-p", mysql_conf['mysql_password'], "-u", mysql_conf['mysql_user'],
+                             "--table", self.options['table_name'].strip(), "--cond",
+                             self.options['constraint'].strip()]
+    else:
+      argument = argument + ["-f", self.options['file_token'].strip()]
+    argument += ["-t", document_root]
+    
+    if self.options.has_key('delete'):
+      delete = ["delete"]
+      for fname in self.options['delete'].split(','):
+        delete.append(fname.strip())
+    if self.options.has_key('rename'):
+      for fname in self.options['rename'].split(','):
+        if fname.find("=>") < 0:
+          old_name = fname
+          fname = []
+          fname.append(old_name)
+          fname.append(old_name + '-' + mysql_conf['mysql_user'])
+        else:
+          fname = fname.split("=>")
+        cmd = ["rename"]
+        if self.options.has_key('rename_chmod'):
+          cmd += ["--chmod", self.options['rename_chmod'].strip()]
+        rename.append(cmd + [fname[0].strip(), fname[1].strip()])
+        rename_list += fname[0] + "=>" + fname[1] + " "
+    if self.options.has_key('chmod'):
+      chmod = ["chmod ", self.options['mode'].strip()]
+      for fname in self.options['chmod'].split(','):
+        chmod.append(fname.strip())
+    if self.options.has_key('script') and \
+        self.options['script'].strip().endswith(".py"):
+      data = ["run", self.options['script'].strip(), "-v", mysql_conf['mysql_database'], url, document_root]
+    self.path_list.extend(zc.buildout.easy_install.scripts(
+        [('configureInstall', __name__ + '.runner', 'executeRunner')], self.ws,
+        sys.executable, self.wrapper_directory, arguments=[argument, delete, rename,
+            chmod, data]))
+    return rename_list
+
+class Static(BaseRecipe):
+  def _install(self):
+    self.path_list = []
+    self.requirements, self.ws = self.egg.working_set()
+    document_root = self.createDataDirectory('htdocs')
+    self.createHtdocs(self.options['source'].strip(), document_root)
+    url = self.installApache(document_root)
+    self.setConnectionDict(dict(url = url))
+    return self.path_list
+
+class Simple(BaseRecipe):
+  def _install(self):
+    self.path_list = []
+    self.requirements, self.ws = self.egg.working_set()
+    document_root = self.createDataDirectory('htdocs')
+    self.createHtdocs(self.options['source'].strip(), document_root)
+    mysql_conf = self.installMysqlServer()
+    url = self.installApache(document_root)
+    renamed = self.configureInstallation(document_root, mysql_conf, url)
+    self.setConnectionDict(dict(
+      url=url,
+      rename=renamed,
+      **mysql_conf
+    ))
+    if self.options.has_key('template') and self.options.has_key('configuration'):
+      self.createConfiguration(self.options['template'], document_root,
+          self.options['configuration'], mysql_conf)
+    return self.path_list
+
+class Request(BaseRecipe):
+  def _install(self):
+    self.path_list = []
+    self.requirements, self.ws = self.egg.working_set()
+    software_type = self.parameter_dict['slap_software_type']
+
+    document_root = self.createDataDirectory('htdocs')
+    self.createHtdocs(self.options['source'].strip(), document_root)
+
+    if software_type == 'Backuped':
+      davstorage = self.request(self.options['davstorage-software-url'],
+        software_type, 'Backup Server').getConnectionParameter('url')
+
+      parameters = {'remote_backup': davstorage}
+    elif software_type == 'PersonnalBackup':
+      parameters = {'remote_backup': self.parameter_dict['remote_backup']}
+    else:
+      parameters = {}
+
+    mysql = self.request(self.options['mariadb-software-url'],
+      software_type, 'MariaDB Server', partition_parameter_kw=parameters
+    ).getConnectionParameter('url')
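+    # The returned 'url' connection parameter is expected to look like
+    # (illustrative values):
+    #   mysql://user:password@10.0.0.1:3306/dbname
+    # or mysqls://... when the server must be reached through stunnel.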
+    mysql_parsed = urlparse.urlparse(mysql)
+
+    mysql_host, mysql_port = mysql_parsed.hostname, mysql_parsed.port
+    if mysql_parsed.scheme == 'mysqls': # Listen over stunnel
+      mysql_host, mysql_port = self.installStunnelClient(mysql_host,
+                                                         mysql_port)
+
+    mysql_conf = dict(mysql_database=mysql_parsed.path.strip('/'),
+                      mysql_user=mysql_parsed.username,
+                      mysql_password=mysql_parsed.password,
+                      mysql_host='%s:%s' % (mysql_host,mysql_port))
+
+    url = self.installApache(document_root)
+
+    self.setConnectionDict(dict(
+      url=url,
+    ))
+
+    self.createConfiguration(self.options['template'], document_root,
+        self.options['configuration'], mysql_conf)
+    return self.path_list
+
+  def installStunnelClient(self, remote_host, remote_port):
+    local_host = self.getLocalIPv4Address()
+    local_port = 8888
+    stunnel_conf_path = self.createConfigurationFile('stunnel.conf',
+      self.substituteTemplate(
+      self.getTemplateFilename('stunnel.conf.in'), {
+        'log': os.path.join(self.log_directory, 'stunnel.log'),
+        'pid_file': os.path.join(self.run_directory, 'stunnel.pid'),
+        'remote_host': remote_host, 'remote_port': remote_port,
+        'local_host': local_host, 'local_port': local_port,
+      }))
+    wrapper = zc.buildout.easy_install.scripts([('stunnel',
+      'slapos.recipe.librecipe.execute', 'execute')], self.ws,
+      sys.executable, self.wrapper_directory, arguments=[
+        self.options['stunnel_binary'].strip(), stunnel_conf_path]
+      )[0]
+    self.path_list.append(wrapper)
+    return (local_host, local_port,)
diff --git a/slapos/recipe/lamp/apache.py b/slapos/recipe/lamp/apache.py
new file mode 100644
index 0000000000000000000000000000000000000000..861f787d09fed9c80b38f561528fbd86b5c7ad44
--- /dev/null
+++ b/slapos/recipe/lamp/apache.py
@@ -0,0 +1,22 @@
+import os
+import sys
+import time
+
+
+def runApache(args):
+  sleep = 60
+  conf = args[0]
+  while True:
+    ready = True
+    for f in conf.get('required_path_list', []):
+      if not os.path.exists(f):
+        print 'File %r does not exist, sleeping for %s' % (f, sleep)
+        ready = False
+    if ready:
+      break
+    time.sleep(sleep)
+  apache_wrapper_list = [conf['binary'], '-f', conf['config'], '-DFOREGROUND']
+  apache_wrapper_list.extend(sys.argv[1:])
+  sys.stdout.flush()
+  sys.stderr.flush()
+  os.execl(apache_wrapper_list[0], *apache_wrapper_list)
diff --git a/slapos/recipe/lamp/mysql.py b/slapos/recipe/lamp/mysql.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0f399084738ea3aee5020e33d1cf2b5f3db730d
--- /dev/null
+++ b/slapos/recipe/lamp/mysql.py
@@ -0,0 +1,72 @@
+import os
+import subprocess
+import time
+import sys
+
+
+def runMysql(args):
+  sleep = 60
+  conf = args[0]
+  mysqld_wrapper_list = [conf['mysqld_binary'], '--defaults-file=%s' %
+      conf['configuration_file']]
+  # we trust mysql_install: if the mysql directory is available, mysql was
+  # correctly initialised
+  if not os.path.isdir(os.path.join(conf['data_directory'], 'mysql')):
+    while True:
+      # XXX: Protect with proper root password
+      popen = subprocess.Popen([conf['mysql_install_binary'],
+        '--skip-name-resolve', '--no-defaults', '--datadir=%s' %
+        conf['data_directory']],
+        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+      result = popen.communicate()[0]
+      if popen.returncode is None or popen.returncode != 0:
+        print "Failed to initialise server.\nThe error was: %s" % result
+        print "Waiting for %ss and retrying" % sleep
+        time.sleep(sleep)
+      else:
+        print "Mysql properly initialised"
+        break
+  else:
+    print "MySQL already initialised"
+  print "Starting %r" % mysqld_wrapper_list[0]
+  sys.stdout.flush()
+  sys.stderr.flush()
+  os.execl(mysqld_wrapper_list[0], *mysqld_wrapper_list)
+
+
+def updateMysql(args):
+  conf = args[0]
+  sleep = 30
+  is_succeed = False
+  while True:
+    if not is_succeed:
+      mysql_upgrade_list = [conf['mysql_upgrade_binary'], '--no-defaults', '--user=root', '--socket=%s' % conf['socket']]
+      mysql_upgrade = subprocess.Popen(mysql_upgrade_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+      result = mysql_upgrade.communicate()[0]
+      if mysql_upgrade.returncode is None:
+        mysql_upgrade.kill()
+      if mysql_upgrade.returncode != 0 and not 'is already upgraded' in result:
+        print "Command %r failed with result:\n%s" % (mysql_upgrade_list, result)
+        print 'Sleeping for %ss and retrying' % sleep
+      else:
+        if mysql_upgrade.returncode == 0:
+          print "MySQL database upgraded with result:\n%s" % result
+        else:
+          print "No need to upgrade MySQL database"
+        mysql_script = conf.get('mysql_script')
+        if mysql_script:
+          mysql_list = [conf['mysql_binary'].strip(), '--no-defaults', '-B', '--user=root', '--socket=%s' % conf['socket']]
+          mysql = subprocess.Popen(mysql_list, stdin=subprocess.PIPE,
+              stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+          result = mysql.communicate(conf['mysql_script'])[0]
+          if mysql.returncode is None:
+            mysql.kill()
+          if mysql.returncode != 0:
+            print 'Command %r failed with:\n%s' % (mysql_list, result)
+            print 'Sleeping for %ss and retrying' % sleep
+          else:
+            is_succeed = True
+            print 'SlapOS initialisation script successfully applied on database.'
+    sys.stdout.flush()
+    sys.stderr.flush()
+    time.sleep(sleep)
diff --git a/slapos/recipe/lamp/runner.py b/slapos/recipe/lamp/runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ec60ce88a9ec26dda687520d288b148509019c3
--- /dev/null
+++ b/slapos/recipe/lamp/runner.py
@@ -0,0 +1,27 @@
+import sys
+import subprocess
+    
+def executeRunner(args):
+  """Start the instance configure. this may run a python script, move or/and rename 
+  file or directory when dondition is filled. the condition may be when file exist or when an entry
+  exist into database.
+  """
+  arguments, delete, rename, chmod, data = args
+  if delete != []:
+    print "Calling lampconfigure with 'delete' arguments"
+    result = subprocess.Popen(arguments + delete)
+    result.wait()
+  if rename != []:
+    for parameters in rename:
+      print "Calling lampconfigure with 'rename' arguments"
+      result = subprocess.Popen(arguments + parameters)
+      result.wait()
+  if chmod != []:
+    print "Calling lampconfigure with 'chmod' arguments"
+    result = subprocess.Popen(arguments + chmod)
+    result.wait()
+  if data != []:
+    print "Calling lampconfigure with 'run' arguments"
+    result = subprocess.Popen(arguments + data)
+    result.wait()
+    return
diff --git a/slapos/recipe/lamp/template/apache.in b/slapos/recipe/lamp/template/apache.in
new file mode 100644
index 0000000000000000000000000000000000000000..1d7d6a94d878c91a13533a3af66c426a2dde3490
--- /dev/null
+++ b/slapos/recipe/lamp/template/apache.in
@@ -0,0 +1,58 @@
+# Apache static configuration
+# Automatically generated
+
+# Basic server configuration
+PidFile "%(pid_file)s"
+LockFile "%(lock_file)s"
+Listen %(ip)s:%(port)s
+PHPINIDir %(php_ini_dir)s
+ServerAdmin someone@email
+DefaultType text/plain
+TypesConfig conf/mime.types
+AddType application/x-compress .Z
+AddType application/x-gzip .gz .tgz
+AddType application/x-httpd-php .php .phtml .php5 .php4
+AddType application/x-httpd-php-source .phps
+
+# Log configuration
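+# Note: the log format tokens below are written with doubled percent signs so
+# that the Python string substitution used to render this template leaves
+# single-percent directives in the generated httpd.conf.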
+ErrorLog "%(error_log)s"
+LogLevel warn
+LogFormat "%%h %%{REMOTE_USER}i %%l %%u %%t \"%%r\" %%>s %%b \"%%{Referer}i\" \"%%{User-Agent}i\"" combined
+LogFormat "%%h %%{REMOTE_USER}i %%l %%u %%t \"%%r\" %%>s %%b" common
+CustomLog "%(access_log)s" common
+
+# Directory protection
+<Directory />
+    Options FollowSymLinks
+    AllowOverride None
+    Order deny,allow
+    Deny from all
+</Directory>
+
+<Directory %(document_root)s>
+  Options FollowSymLinks
+  AllowOverride All
+  Order allow,deny
+  Allow from all
+</Directory>
+DocumentRoot %(document_root)s
+DirectoryIndex index.html index.php
+
+# List of modules
+LoadModule authz_host_module modules/mod_authz_host.so
+LoadModule log_config_module modules/mod_log_config.so
+LoadModule setenvif_module modules/mod_setenvif.so
+LoadModule version_module modules/mod_version.so
+LoadModule proxy_module modules/mod_proxy.so
+LoadModule proxy_http_module modules/mod_proxy_http.so
+LoadModule mime_module modules/mod_mime.so
+LoadModule dav_module modules/mod_dav.so
+LoadModule dav_fs_module modules/mod_dav_fs.so
+LoadModule negotiation_module modules/mod_negotiation.so
+LoadModule rewrite_module modules/mod_rewrite.so
+LoadModule headers_module modules/mod_headers.so
+LoadModule dir_module modules/mod_dir.so
+LoadModule php5_module modules/libphp5.so
+LoadModule alias_module modules/mod_alias.so
+LoadModule env_module modules/mod_env.so
+LoadModule autoindex_module modules/mod_autoindex.so
diff --git a/slapos/recipe/lamp/template/my.cnf.in b/slapos/recipe/lamp/template/my.cnf.in
new file mode 100644
index 0000000000000000000000000000000000000000..043fb3ad56bbea3b3e35766a9b019cd8dbf7cc77
--- /dev/null
+++ b/slapos/recipe/lamp/template/my.cnf.in
@@ -0,0 +1,52 @@
+# ERP5 buildout my.cnf template based on my-huge.cnf shipped with mysql
+# The MySQL server
+[mysqld]
+# ERP5 by default requires InnoDB storage. MySQL by default fallbacks to using
+# different engine, like MyISAM. Such behaviour generates problems only, when
+# tables requested as InnoDB are silently created with MyISAM engine.
+#
+# Loud fail is really required in such case.
+sql-mode="NO_ENGINE_SUBSTITUTION"
+
+skip-show-database
+port = %(tcp_port)s
+bind-address = %(ip)s
+socket = %(socket)s
+datadir = %(data_directory)s
+pid-file = %(pid_file)s
+log-error = %(error_log)s
+log-slow-file = %(slow_query_log)s
+long_query_time = 5
+max_allowed_packet = 128M
+query_cache_size = 32M
+
+plugin-load = ha_innodb_plugin.so
+
+# The following are important to configure and depend a lot on the size of
+# your database and the available resources.
+#innodb_buffer_pool_size = 4G
+#innodb_log_file_size = 256M
+#innodb_log_buffer_size = 8M
+
+# Some dangerous settings you may want to uncomment if you only want
+# performance or less disk access. Useful for unit tests.
+#innodb_flush_log_at_trx_commit = 0
+#innodb_flush_method = nosync
+#innodb_doublewrite = 0
+#sync_frm = 0
+
+# Uncomment the following if you need binary logging, which is recommended
+# on production instances (either for replication or incremental backups).
+#log-bin=mysql-bin
+
+# Force utf8 usage
+collation_server = utf8_unicode_ci
+character_set_server = utf8
+skip-character-set-client-handshake
+
+[mysql]
+no-auto-rehash
+socket = %(socket)s
+
+[mysqlhotcopy]
+interactive-timeout
diff --git a/slapos/recipe/lamp/template/mysqlinit.sql.in b/slapos/recipe/lamp/template/mysqlinit.sql.in
new file mode 100644
index 0000000000000000000000000000000000000000..9189d8d13005b57f36817409f9f873eaec5ef189
--- /dev/null
+++ b/slapos/recipe/lamp/template/mysqlinit.sql.in
@@ -0,0 +1,7 @@
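+-- Note: %%(password)r below relies on Python repr() to wrap the generated
+-- password in quotes when this template is rendered.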
+CREATE DATABASE IF NOT EXISTS %(database)s;
+GRANT ALL PRIVILEGES ON %(database)s.* TO %(user)s@localhost IDENTIFIED BY %(password)r;
+GRANT ALL PRIVILEGES ON %(database)s.* TO %(user)s@'%%' IDENTIFIED BY %(password)r;
+GRANT SHOW DATABASES ON *.* TO %(user)s@localhost IDENTIFIED BY %(password)r;
+GRANT SHOW DATABASES ON *.* TO %(user)s@'%%' IDENTIFIED BY %(password)r;
+FLUSH PRIVILEGES;
+EXIT
diff --git a/slapos/recipe/lamp/template/php.ini.in b/slapos/recipe/lamp/template/php.ini.in
new file mode 100644
index 0000000000000000000000000000000000000000..76dfd3a88e64902fd08f2bcc5f2318a3c7ed6a8b
--- /dev/null
+++ b/slapos/recipe/lamp/template/php.ini.in
@@ -0,0 +1,18 @@
+[PHP]
+engine = On
+safe_mode = Off
+expose_php = Off
+error_reporting = E_ALL & ~(E_DEPRECATED|E_NOTICE|E_WARNING)
+display_errors = On
+display_startup_errors = Off
+log_errors = On
+log_errors_max_len = 1024
+ignore_repeated_errors = Off
+ignore_repeated_source = Off
+session.save_path = "%(tmp_directory)s"
+session.auto_start = 0
+date.timezone = Europe/Paris
+file_uploads = On
+upload_max_filesize = 16M
+post_max_size = 16M
+magic_quotes_gpc=Off
diff --git a/slapos/recipe/lamp/template/stunnel.conf.in b/slapos/recipe/lamp/template/stunnel.conf.in
new file mode 100644
index 0000000000000000000000000000000000000000..f72634ec3d8f258c5b7caf4ea1e8adf47e9f8847
--- /dev/null
+++ b/slapos/recipe/lamp/template/stunnel.conf.in
@@ -0,0 +1,9 @@
+foreground = yes
+output = %(log)s
+pid = %(pid_file)s
+syslog = no
+
+[service]
+client = yes
+accept = %(local_host)s:%(local_port)s
+connect = %(remote_host)s:%(remote_port)s
diff --git a/slapos/recipe/librecipe/__init__.py b/slapos/recipe/librecipe/__init__.py
index e0eb9d547d421d05f9067d909e8ae1a008c54228..9a892bf0d85c78dfb6aa432c78e2643c1e43ea25 100644
--- a/slapos/recipe/librecipe/__init__.py
+++ b/slapos/recipe/librecipe/__init__.py
@@ -33,9 +33,16 @@ from hashlib import md5
 import stat
 import netaddr
 import time
+import re
+import urlparse
+
+# Use to do from slapos.recipe.librecipe import GenericBaseRecipe
+from generic import GenericBaseRecipe
+from genericslap import GenericSlapRecipe
 
 class BaseSlapRecipe:
   """Base class for all slap.recipe.*"""
+
   def __init__(self, buildout, name, options):
     """Default initialisation"""
     self.name = name
@@ -60,6 +67,7 @@ class BaseSlapRecipe:
         'xml_report')
     self.destroy_script_location = os.path.join(self, self.work_directory,
         'sbin', 'destroy')
+    self.promise_directory = os.path.join(self.etc_directory, 'promise')
 
     # default directory structure information
     self.default_directory_list = [
@@ -71,6 +79,7 @@ class BaseSlapRecipe:
       self.etc_directory, # CP/etc - configuration container
       self.wrapper_directory, # CP/etc/run - for wrappers
       self.wrapper_report_directory, # CP/etc/report - for report wrappers
+      self.promise_directory, # CP/etc/promise - for promise checking scripts
       self.var_directory, # CP/var - partition "internal" container for logs,
                           # and another metadata
       self.wrapper_xml_report_directory, # CP/var/xml_report - for xml_report wrappers
@@ -81,16 +90,19 @@ class BaseSlapRecipe:
 
     # SLAP related information
     slap_connection = buildout['slap_connection']
-    self.computer_id=slap_connection['computer_id']
-    self.computer_partition_id=slap_connection['partition_id']
-    self.server_url=slap_connection['server_url']
-    self.software_release_url=slap_connection['software_release_url']
-    self.key_file=slap_connection.get('key_file')
-    self.cert_file=slap_connection.get('cert_file')
+    self.computer_id = slap_connection['computer_id']
+    self.computer_partition_id = slap_connection['partition_id']
+    self.server_url = slap_connection['server_url']
+    self.software_release_url = slap_connection['software_release_url']
+    self.key_file = slap_connection.get('key_file')
+    self.cert_file = slap_connection.get('cert_file')
 
     # setup egg to give possibility to generate scripts
     self.egg = zc.recipe.egg.Egg(buildout, options['recipe'], options)
 
+    # Hook options
+    self._options(options)
+
     # setup auto uninstall/install
     self._setupAutoInstallUninstall()
 
@@ -243,3 +255,55 @@ class BaseSlapRecipe:
   def _install(self):
     """Hook which shall be implemented in children class"""
     raise NotImplementedError('Shall be implemented by subclass')
+
+  def _options(self, options):
+    """Hook which can be implemented in children class"""
+    pass
+
+  def createPromiseWrapper(self, promise_name, file_content):
+    """Create a promise wrapper.
+
+    This wrapper aims to check whether the software release is doing its job.
+
+    Return the promise file path.
+    """
+    promise_path = os.path.join(self.promise_directory, promise_name)
+    self._writeExecutable(promise_path, file_content)
+    return promise_path
+
+  def setConnectionUrl(self, *args, **kwargs):
+    url = self._unparseUrl(*args, **kwargs)
+    self.setConnectionDict(dict(url=url))
+
+  def _unparseUrl(self, scheme, host, path='', params='', query='',
+                  fragment='', port=None, auth=None):
+    """Join a url with auth, host, and port.
+
+    * auth can be either a login string or a tuple (login, password).
+    * if the host is an ipv6 address, brackets will be added to surround it.
+
+    """
+    # XXX-Antoine: I didn't find any standard module to join a URL with
+    # login, password, ipv6 host and port.
+    # So instead of copying and pasting it in every recipe, it is factorized
+    # right here.
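+    # Illustrative usage (made-up values):
+    #   self._unparseUrl('http', 'fe80::1', path='/vnc.html', port=6080,
+    #                    auth=('user', 'secret'))
+    #   returns 'http://user:secret@[fe80::1]:6080/vnc.html'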
+    netloc = ''
+    if auth is not None:
+      auth = tuple(auth)
+      netloc = str(auth[0]) # Login
+      if len(auth) > 1:
+        netloc += ':%s' % auth[1] # Password
+      netloc += '@'
+
+    # host is an ipv6 address without brackets
+    if ':' in host and not re.match(r'^\[.*\]$', host):
+      netloc += '[%s]' % host
+    else:
+      netloc += str(host)
+
+    if port is not None:
+      netloc += ':%s' % port
+
+    url = urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
+
+    return url
+
diff --git a/slapos/recipe/librecipe/execute.py b/slapos/recipe/librecipe/execute.py
index ec2862116b0787b16c75287cbc89bdf860d73d94..d9e04caafc0ac84b76458ae72c98aef9bd4d1010 100644
--- a/slapos/recipe/librecipe/execute.py
+++ b/slapos/recipe/librecipe/execute.py
@@ -23,6 +23,8 @@ def execute_wait(args):
         ready = False
     if ready:
       break
+    # XXX: It's the same as ../ca/certificate_authoritiy.py
+    #      We should use pyinotify as well. Or select() on socket.
     time.sleep(sleep)
   os.execv(exec_list[0], exec_list + sys.argv[1:])
 
@@ -39,6 +41,25 @@ def executee(args):
     env[k] = v
   os.execve(exec_list[0], exec_list + sys.argv[1:], env)
 
+def executee_wait(args):
+  """Portable execution with process replacement and environment manipulation"""
+  exec_list = list(args[0])
+  file_list = list(args[1])
+  environment = args[2]
+  env = os.environ.copy()
+  for k,v in environment.iteritems():
+    env[k] = v
+  sleep = 60
+  while True:
+    ready = True
+    for f in file_list:
+      if not os.path.exists(f):
+        print 'File %r does not exist, sleeping for %s' % (f, sleep)
+        ready = False
+    if ready:
+      break
+    time.sleep(sleep)
+  os.execve(exec_list[0], exec_list + sys.argv[1:], env)
 
 def sig_handler(signal, frame):
   print 'Received signal %r, killing children and exiting' % signal
diff --git a/slapos/recipe/librecipe/generic.py b/slapos/recipe/librecipe/generic.py
new file mode 100644
index 0000000000000000000000000000000000000000..a203ce9b4714b772996843c8ae6001827e1dc61a
--- /dev/null
+++ b/slapos/recipe/librecipe/generic.py
@@ -0,0 +1,123 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+import logging
+import os
+import sys
+import inspect
+
+import pkg_resources
+import zc.buildout
+import zc.recipe.egg
+
+class GenericBaseRecipe(object):
+
+  TRUE_VALUES = ['y', 'yes', '1', 'true']
+
+  def __init__(self, buildout, name, options):
+    """Recipe initialisation"""
+    self.name = name
+    self.options = options
+    self.buildout = buildout
+    self.logger = logging.getLogger(name)
+
+    self._options(options) # Options Hook
+
+    self._ws = self.getWorkingSet()
+
+  def update(self):
+    """By default update method does the same thing than install"""
+    return self.install()
+
+  def install(self):
+    """Install method of the recipe. This must be overriden in child
+    classes """
+    raise NotImplementedError("install method is not implemented.")
+
+  def getWorkingSet(self):
+    """If you want do override the default working set"""
+    egg = zc.recipe.egg.Egg(self.buildout, 'slapos.cookbook',
+                                  self.options.copy())
+    requirements, ws = egg.working_set()
+    return ws
+
+  def _options(self, options):
+    """Options Hook method. This method can be overriden in child classes"""
+    return
+
+  def createFile(self, name, content, mode=0600):
+    """Create a file with content
+
+    The parent directory must already exist, otherwise IOError is raised"""
+    with open(name, 'w') as fileobject:
+      fileobject.write(content)
+      os.chmod(fileobject.name, mode)
+    return os.path.abspath(name)
+
+  def createExecutable(self, name, content, mode=0700):
+    return self.createFile(name, content, mode)
+
+  def createPythonScript(self, name, absolute_function, arguments=''):
+    """Create a python script using zc.buildout.easy_install.scripts
+
+     * function should look like 'module.function', or only 'function'
+       if it is a builtin function."""
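+    # Illustrative usage, as done by the recipes below:
+    #   self.createPythonScript(self.options['wrapper'],
+    #       'slapos.recipe.librecipe.execute.execute', [binary, conf_file])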
+    absolute_function = tuple(absolute_function.rsplit('.', 1))
+    if len(absolute_function) == 1:
+      absolute_function = ('__builtin__',) + absolute_function
+    if len(absolute_function) != 2:
+      raise ValueError("A non valid function was given")
+
+    module, function = absolute_function
+    path, filename = os.path.split(os.path.abspath(name))
+
+    script = zc.buildout.easy_install.scripts(
+      [(filename, module, function)], self._ws, sys.executable,
+      path, arguments=arguments)[0]
+    return script
+
+  def substituteTemplate(self, template_location, mapping_dict):
+    """Read from file template_location an substitute content with
+       mapping_dict douing a dummy python format."""
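+    # e.g. a template line "port = %(tcp_port)s" rendered with
+    # {'tcp_port': 45678} becomes "port = 45678".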
+    with open(template_location, 'r') as template:
+      return template.read() % mapping_dict
+
+  def getTemplateFilename(self, template_name):
+    caller = inspect.stack()[1]
+    caller_frame = caller[0]
+    name = caller_frame.f_globals['__name__']
+    return pkg_resources.resource_filename(name,
+        'template/%s' % template_name)
+
+  def generatePassword(self, len_=32):
+    # TODO: implement a real password generator which remember the last
+    # call.
+    return "insecure"
+
+  def isTrueValue(self, value):
+    return str(value).lower() in GenericBaseRecipe.TRUE_VALUES
+
+  def optionIsTrue(self, optionname, default=None):
+    if optionname not in self.options:
+      return default
+    return self.isTrueValue(self.options[optionname])
diff --git a/slapos/recipe/librecipe/genericslap.py b/slapos/recipe/librecipe/genericslap.py
new file mode 100644
index 0000000000000000000000000000000000000000..0181a0da6c78852e5051ca30bef9e97e20fb34e3
--- /dev/null
+++ b/slapos/recipe/librecipe/genericslap.py
@@ -0,0 +1,129 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+import logging
+from slapos import slap
+import zc.buildout
+import zc.recipe.egg
+import time
+import re
+import urlparse
+
+class GenericSlapRecipe(object):
+  """Base class for all slap.recipe.*"""
+
+  def __init__(self, buildout, name, options):
+    """Default initialisation"""
+    self.name = name
+    options['eggs'] = 'slapos.cookbook'
+    self.options = options
+    self.logger = logging.getLogger(self.name)
+    self.slap = slap.slap()
+
+    # SLAP related information
+    slap_connection = buildout['slap-connection']
+    self.computer_id = slap_connection['computer-id']
+    self.computer_partition_id = slap_connection['partition-id']
+    self.server_url = slap_connection['server-url']
+    self.software_release_url = slap_connection['software-release-url']
+    self.key_file = slap_connection.get('key-file')
+    self.cert_file = slap_connection.get('cert-file')
+
+    # setup egg to give possibility to generate scripts
+    self.egg = zc.recipe.egg.Egg(buildout, options['recipe'], options)
+
+    # Hook options
+    self._options(options)
+
+    # setup auto uninstall/install
+    self._setupAutoInstallUninstall()
+
+  def _setupAutoInstallUninstall(self):
+    """By default SlapOS recipes are reinstalled each time"""
+    # Note: It is possible to create in future subclass which will do no-op in
+    # this method
+    self.options['slapos-timestamp'] = str(time.time())
+
+  def install(self):
+    self.slap.initializeConnection(self.server_url, self.key_file,
+        self.cert_file)
+    self.computer_partition = self.slap.registerComputerPartition(
+      self.computer_id,
+      self.computer_partition_id)
+    self.request = self.computer_partition.request
+    self.setConnectionDict = self.computer_partition.setConnectionDict
+    self.parameter_dict = self.computer_partition.getInstanceParameterDict()
+
+    # call children part of install
+    path_list = self._install()
+
+    return path_list
+
+  update = install
+
+  def _install(self):
+    """Hook which shall be implemented in children class"""
+    raise NotImplementedError('Shall be implemented by subclass')
+
+  def _options(self, options):
+    """Hook which can be implemented in children class"""
+    pass
+
+  def setConnectionUrl(self, *args, **kwargs):
+    url = self._unparseUrl(*args, **kwargs)
+    self.setConnectionDict(dict(url=url))
+
+  def _unparseUrl(self, scheme, host, path='', params='', query='',
+                  fragment='', port=None, auth=None):
+    """Join a url with auth, host, and port.
+
+    * auth can be either a login string or a tuple (login, password).
+    * if the host is an ipv6 address, brackets will be added to surround it.
+
+    """
+    # XXX-Antoine: I didn't find any standard module to join a url with
+    # login, password, ipv6 host and port.
+    # So instead of copying and pasting it in every recipe, it is factored
+    # out here.
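+    # e.g. _unparseUrl('http', '2001:db8::1', path='/', port=80,
+    #                  auth=('user', 'secret'))
+    #   -> 'http://user:secret@[2001:db8::1]:80/'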
+    netloc = ''
+    if auth is not None:
+      auth = tuple(auth)
+      netloc = str(auth[0]) # Login
+      if len(auth) > 1:
+        netloc += ':%s' % auth[1] # Password
+      netloc += '@'
+
+    # host is an ipv6 address without brackets
+    if ':' in host and not re.match(r'^\[.*\]$', host):
+      netloc += '[%s]' % host
+    else:
+      netloc += str(host)
+
+    if port is not None:
+      netloc += ':%s' % port
+
+    url = urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
+
+    return url
diff --git a/slapos/recipe/logrotate.py b/slapos/recipe/logrotate.py
new file mode 100644
index 0000000000000000000000000000000000000000..9551952d80696883462224718d686139ccfda66e
--- /dev/null
+++ b/slapos/recipe/logrotate.py
@@ -0,0 +1,120 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+import os
+
+from slapos.recipe.librecipe import GenericBaseRecipe
+
+class Recipe(GenericBaseRecipe):
+
+  def _options(self, options):
+    if 'name' not in options:
+      options['name'] = self.name
+
+  def install(self):
+    path_list = []
+
+    logrotate_backup = self.options['backup']
+    logrotate_d = self.options['logrotate-entries']
+    logrotate_conf_file = self.options['conf']
+
+    logrotate_conf = []
+    logrotate_conf.append("include %s" % logrotate_d)
+    logrotate_conf.append("olddir %s" % logrotate_backup)
+    logrotate_conf.append("dateext")
+
+    frequency = 'daily'
+    if 'frequency' in self.options:
+      frequency = self.options['frequency']
+    logrotate_conf.append(frequency)
+
+    num_rotate = 30
+    if 'num-rotate' in self.options:
+      num_rotate = self.options['num-rotate']
+    logrotate_conf.append("rotate %s" % num_rotate)
+
+    logrotate_conf.append("compress")
+    logrotate_conf.append("compresscmd %s" % self.options['gzip-binary'])
+    logrotate_conf.append("compressoptions -9")
+    logrotate_conf.append("uncompresscmd %s" % self.options['gunzip-binary'])
+
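+    # The resulting file is a plain logrotate(8) configuration, e.g.:
+    #   include <logrotate-entries>
+    #   olddir <backup>
+    #   dateext
+    #   daily
+    #   rotate 30
+    #   compress / compresscmd / compressoptions / uncompresscmd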
+    logrotate_conf_file = self.createFile(logrotate_conf_file, '\n'.join(logrotate_conf))
+    path_list.append(logrotate_conf_file)
+
+    state_file = self.options['state-file']
+
+    logrotate = self.createPythonScript(
+      self.options['wrapper'],
+      'slapos.recipe.librecipe.execute.execute',
+      [self.options['logrotate-binary'], '-s', state_file, logrotate_conf_file, ]
+    )
+    path_list.append(logrotate)
+
+    return path_list
+
+class Part(GenericBaseRecipe):
+
+  def _options(self, options):
+    if 'name' not in options:
+      options['name'] = self.name
+
+  def install(self):
+
+    logrotate_d = self.options['logrotate-entries']
+
+    part_path = os.path.join(logrotate_d, self.options['name'])
+
+    conf = []
+
+    if 'frequency' in self.options:
+      conf.append(self.options['frequency'])
+    if 'num-rotate' in self.options:
+      conf.append('rotate %s' % self.options['num-rotate'])
+
+    if 'post' in self.options:
+      conf.append("postrotate\n%s\nendscript" % self.options['post'])
+    if 'pre' in self.options:
+      conf.append("prerotate\n%s\nendscript" % self.options['pre'])
+
+    if self.optionIsTrue('sharedscripts', False):
+      conf.append("sharedscripts")
+
+    if self.optionIsTrue('notifempty', False):
+      conf.append('notifempty')
+
+    if self.optionIsTrue('create', True):
+      conf.append('create')
+
+    log = self.options['log']
+
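+    # Generates an entry of the form:
+    #   /path/to/file.log {
+    #     rotate 30
+    #     ...
+    #   }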
+    self.createFile(os.path.join(logrotate_d, self.options['name']),
+                    "%(logfiles)s {\n%(conf)s\n}" % {
+                      'logfiles': log,
+                      'conf': '\n'.join(conf),
+                    }
+                   )
+
+    return [part_path]
diff --git a/slapos/recipe/mkdirectory.py b/slapos/recipe/mkdirectory.py
new file mode 100644
index 0000000000000000000000000000000000000000..d715da3d5932d5d0ecac8b69813cb47bdc057335
--- /dev/null
+++ b/slapos/recipe/mkdirectory.py
@@ -0,0 +1,53 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+import os
+
+from slapos.recipe.librecipe import GenericBaseRecipe
+
+class Recipe(GenericBaseRecipe):
+
+  def _options(self, options):
+    self.directory = options.copy()
+    del self.directory['recipe']
+
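+    # 'mode' is given as an octal string in the buildout part (e.g. '0700')
+    # and defaults to 0700.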
+    str_mode = '0700'
+    if 'mode' in self.directory:
+      str_mode = self.directory['mode']
+      del self.directory['mode']
+    self.mode = int(str_mode, 8)
+
+  def install(self):
+
+    for directory in self.directory.values():
+      path = directory
+
+      if not os.path.exists(path):
+        os.mkdir(path, self.mode)
+      elif not os.path.isdir(path):
+        raise OSError("%s exists, but it is not a directory." % path)
+
+    return []
diff --git a/slapos/recipe/mysql/__init__.py b/slapos/recipe/mysql/__init__.py
index 0976d0411d1a17a09ca65920d0fecffa308ed5ac..14c63140ff21d62b50dcf3218bd8083eaec151bc 100644
--- a/slapos/recipe/mysql/__init__.py
+++ b/slapos/recipe/mysql/__init__.py
@@ -24,312 +24,135 @@
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 #
 ##############################################################################
-from slapos.recipe.librecipe import BaseSlapRecipe
-import hashlib
+from slapos.recipe.librecipe import GenericBaseRecipe
 import os
-import pkg_resources
-import sys
-import zc.buildout
-import ConfigParser
 
-class Recipe(BaseSlapRecipe):
-  def getTemplateFilename(self, template_name):
-    return pkg_resources.resource_filename(__name__,
-        'template/%s' % template_name)
+class Recipe(GenericBaseRecipe):
 
-  def _install(self):
-    self.path_list = []
+  def _options(self, options):
+    options['password'] = self.generatePassword()
 
-    self.requirements, self.ws = self.egg.working_set()
-    # self.cron_d is a directory, where cron jobs can be registered
-    self.cron_d = self.installCrond()
-    self.logrotate_d, self.logrotate_backup = self.installLogrotate()
-    
-    mysql_conf = self.installMysqlServer(self.getLocalIPv4Address(), 45678)
-      
-    ca_conf = self.installCertificateAuthority()
-    key, certificate = self.requestCertificate('MySQL')
-    
-    stunnel_conf = self.installStunnel(self.getGlobalIPv6Address(),
-        self.getLocalIPv4Address(), 12345, mysql_conf['tcp_port'],
-        certificate, key, ca_conf['ca_crl'],
-        ca_conf['certificate_authority_path'])
-    
-    self.linkBinary()
-    self.setConnectionDict(dict(
-      stunnel_ip = stunnel_conf['public_ip'],
-      stunnel_port = stunnel_conf['public_port'],
-      mysql_database = mysql_conf['mysql_database'],
-      mysql_user = mysql_conf['mysql_user'],
-      mysql_password = mysql_conf['mysql_password'],
-    ))
-    return self.path_list
+  def install(self):
+    path_list = []
 
-  def linkBinary(self):
-    """Links binaries to instance's bin directory for easier exposal"""
-    for linkline in self.options.get('link_binary_list', '').splitlines():
-      if not linkline:
-        continue
-      target = linkline.split()
-      if len(target) == 1:
-        target = target[0]
-        path, linkname = os.path.split(target)
-      else:
-        linkname = target[1]
-        target = target[0]
-      link = os.path.join(self.bin_directory, linkname)
-      if os.path.lexists(link):
-        if not os.path.islink(link):
-          raise zc.buildout.UserError(
-              'Target link already %r exists but it is not link' % link)
-        os.unlink(link)
-      os.symlink(target, link)
-      self.logger.debug('Created link %r -> %r' % (link, target))
-      self.path_list.append(link)
+    template_filename = self.getTemplateFilename('my.cnf.in')
 
-  def installCrond(self):
-    timestamps = self.createDataDirectory('cronstamps')
-    cron_output = os.path.join(self.log_directory, 'cron-output')
-    self._createDirectory(cron_output)
-    catcher = zc.buildout.easy_install.scripts([('catchcron',
-      __name__ + '.catdatefile', 'catdatefile')], self.ws, sys.executable,
-      self.bin_directory, arguments=[cron_output])[0]
-    self.path_list.append(catcher)
-    cron_d = os.path.join(self.etc_directory, 'cron.d')
-    crontabs = os.path.join(self.etc_directory, 'crontabs')
-    self._createDirectory(cron_d)
-    self._createDirectory(crontabs)
-    wrapper = zc.buildout.easy_install.scripts([('crond',
-      'slapos.recipe.librecipe.execute', 'execute')], self.ws, sys.executable,
-      self.wrapper_directory, arguments=[
-        self.options['dcrond_binary'].strip(), '-s', cron_d, '-c', crontabs,
-        '-t', timestamps, '-f', '-l', '5', '-M', catcher]
-      )[0]
-    self.path_list.append(wrapper)
-    return cron_d
-  
-  def installLogrotate(self):
-    """Installs logortate main configuration file and registers its to cron"""
-    logrotate_d = os.path.abspath(os.path.join(self.etc_directory,
-      'logrotate.d'))
-    self._createDirectory(logrotate_d)
-    logrotate_backup = self.createBackupDirectory('logrotate')
-    logrotate_conf = self.createConfigurationFile("logrotate.conf",
-        "include %s" % logrotate_d)
-    logrotate_cron = os.path.join(self.cron_d, 'logrotate')
-    state_file = os.path.join(self.data_root_directory, 'logrotate.status')
-    open(logrotate_cron, 'w').write('0 0 * * * %s -s %s %s' %
-        (self.options['logrotate_binary'], state_file, logrotate_conf))
-    self.path_list.extend([logrotate_d, logrotate_conf, logrotate_cron])
-    return logrotate_d, logrotate_backup
+    mysql_conf = dict(
+        ip=self.options['ip'],
+        data_directory=self.options['data-directory'],
+        tcp_port=self.options['port'],
+        pid_file=self.options['pid-file'],
+        socket=self.options['socket'],
+        error_log=self.options['error-log'],
+        slow_query_log=self.options['slow-query-log'],
+        mysql_database=self.options['database'],
+        mysql_user=self.options['user'],
+        mysql_password=self.options['password'],
+    )
 
-  def registerLogRotation(self, name, log_file_list, postrotate_script):
-    """Register new log rotation requirement"""
-    open(os.path.join(self.logrotate_d, name), 'w').write(
-        self.substituteTemplate(self.getTemplateFilename(
-          'logrotate_entry.in'),
-          dict(file_list=' '.join(['"'+q+'"' for q in log_file_list]),
-            postrotate=postrotate_script, olddir=self.logrotate_backup)))
+    mysql_binary = self.options['mysql-binary']
+    socket = self.options['socket']
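+    # Wrapper used as the logrotate postrotate script: it asks the running
+    # server to reopen its log files after rotation.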
+    post_rotate = self.createPythonScript(
+      self.options['logrotate-post'],
+      'slapos.recipe.librecipe.execute.execute',
+      [mysql_binary, '--no-defaults', '-B', '--socket=%s' % socket, '-e',
+       'FLUSH LOGS']
+    )
+    path_list.append(post_rotate)
 
-  def installCertificateAuthority(self, ca_country_code='XX',
-      ca_email='xx@example.com', ca_state='State', ca_city='City',
-      ca_company='Company'):
-    backup_path = self.createBackupDirectory('ca')
-    self.ca_dir = os.path.join(self.data_root_directory, 'ca')
-    self._createDirectory(self.ca_dir)
-    self.ca_request_dir = os.path.join(self.ca_dir, 'requests')
-    self._createDirectory(self.ca_request_dir)
-    config = dict(ca_dir=self.ca_dir, request_dir=self.ca_request_dir)
-    self.ca_private = os.path.join(self.ca_dir, 'private')
-    self.ca_certs = os.path.join(self.ca_dir, 'certs')
-    self.ca_crl = os.path.join(self.ca_dir, 'crl')
-    self.ca_newcerts = os.path.join(self.ca_dir, 'newcerts')
-    self.ca_key_ext = '.key'
-    self.ca_crt_ext = '.crt'
-    for d in [self.ca_private, self.ca_crl, self.ca_newcerts, self.ca_certs]:
-      self._createDirectory(d)
-    for f in ['crlnumber', 'serial']:
-      if not os.path.exists(os.path.join(self.ca_dir, f)):
-        open(os.path.join(self.ca_dir, f), 'w').write('01')
-    if not os.path.exists(os.path.join(self.ca_dir, 'index.txt')):
-      open(os.path.join(self.ca_dir, 'index.txt'), 'w').write('')
-    openssl_configuration = os.path.join(self.ca_dir, 'openssl.cnf')
-    config.update(
-        working_directory=self.ca_dir,
-        country_code=ca_country_code,
-        state=ca_state,
-        city=ca_city,
-        company=ca_company,
-        email_address=ca_email,
+    mysql_conf_file = self.createFile(
+      self.options['conf-file'],
+      self.substituteTemplate(template_filename, mysql_conf)
     )
-    self._writeFile(openssl_configuration, pkg_resources.resource_string(
-      __name__, 'template/openssl.cnf.ca.in') % config)
-    self.path_list.extend(zc.buildout.easy_install.scripts([
-      ('certificate_authority',
-        __name__ + '.certificate_authority', 'runCertificateAuthority')],
-        self.ws, sys.executable, self.wrapper_directory, arguments=[dict(
-          openssl_configuration=openssl_configuration,
-          openssl_binary=self.options['openssl_binary'],
-          certificate=os.path.join(self.ca_dir, 'cacert.pem'),
-          key=os.path.join(self.ca_private, 'cakey.pem'),
-          crl=os.path.join(self.ca_crl),
-          request_dir=self.ca_request_dir
-          )]))
-    # configure backup
-    backup_cron = os.path.join(self.cron_d, 'ca_rdiff_backup')
-    open(backup_cron, 'w').write(
-        '''0 0 * * * %(rdiff_backup)s %(source)s %(destination)s'''%dict(
-          rdiff_backup=self.options['rdiff_backup_binary'],
-          source=self.ca_dir,
-          destination=backup_path))
-    self.path_list.append(backup_cron)
+    path_list.append(mysql_conf_file)
+
+    mysql_script_list = []
 
-    return dict(
-      ca_certificate=os.path.join(config['ca_dir'], 'cacert.pem'),
-      ca_crl=os.path.join(config['ca_dir'], 'crl'),
-      certificate_authority_path=config['ca_dir']
+    init_script = self.substituteTemplate(
+      self.getTemplateFilename('initmysql.sql.in'),
+      {
+        'mysql_database': mysql_conf['mysql_database'],
+        'mysql_user': mysql_conf['mysql_user'],
+        'mysql_password': mysql_conf['mysql_password']
+      }
     )
+    mysql_script_list.append(init_script)
+    mysql_script_list.append('EXIT')
+    mysql_script = '\n'.join(mysql_script_list)
 
-  def requestCertificate(self, name):
-    hash = hashlib.sha512(name).hexdigest()
-    key = os.path.join(self.ca_private, hash + self.ca_key_ext)
-    certificate = os.path.join(self.ca_certs, hash + self.ca_crt_ext)
-    parser = ConfigParser.RawConfigParser()
-    parser.add_section('certificate')
-    parser.set('certificate', 'name', name)
-    parser.set('certificate', 'key_file', key)
-    parser.set('certificate', 'certificate_file', certificate)
-    parser.write(open(os.path.join(self.ca_request_dir, hash), 'w'))
-    return key, certificate
+    mysql_upgrade_binary = self.options['mysql-upgrade-binary']
+    mysql_update = self.createPythonScript(
+      self.options['update-wrapper'],
+      '%s.mysql.updateMysql' % __name__,
+      dict(
+        mysql_script=mysql_script,
+        mysql_binary=mysql_binary,
+        mysql_upgrade_binary=mysql_upgrade_binary,
+        socket=socket,
+      )
+    )
+    path_list.append(mysql_update)
 
-  def installStunnel(self, public_ip, private_ip, public_port, private_port,
-      ca_certificate, key, ca_crl, ca_path):
-    """Installs stunnel"""
-    template_filename = self.getTemplateFilename('stunnel.conf.in')
-    log = os.path.join(self.log_directory, 'stunnel.log')
-    pid_file = os.path.join(self.run_directory, 'stunnel.pid')
-    stunnel_conf = dict(
-        public_ip=public_ip,
-        private_ip=private_ip,
-        public_port=public_port,
-        pid_file=pid_file,
-        log=log,
-        cert = ca_certificate,
-        key = key,
-        ca_crl = ca_crl,
-        ca_path = ca_path,
-        private_port = private_port,
+    mysqld_binary = self.options['mysqld-binary']
+    mysqld = self.createPythonScript(
+      self.options['wrapper'],
+      '%s.mysql.runMysql' % __name__,
+      dict(
+        mysql_install_binary=self.options['mysql-install-binary'],
+        mysqld_binary=mysqld_binary,
+        data_directory=mysql_conf['data_directory'],
+        mysql_binary=mysql_binary,
+        socket=socket,
+        configuration_file=mysql_conf_file,
+       )
     )
-    stunnel_conf_path = self.createConfigurationFile("stunnel.conf",
-        self.substituteTemplate(template_filename,
-          stunnel_conf))
-    wrapper = zc.buildout.easy_install.scripts([('stunnel',
-      'slapos.recipe.librecipe.execute', 'execute_wait')], self.ws,
-      sys.executable, self.wrapper_directory, arguments=[
-        [self.options['stunnel_binary'].strip(), stunnel_conf_path],
-        [ca_certificate, key]]
-      )[0]
-    self.path_list.append(wrapper)
-    return stunnel_conf
+    path_list.append(mysqld)
 
-    
-  def installMysqlServer(self, ip, port, database='db', user='user',
-      template_filename=None, mysql_conf=None):
-    if mysql_conf is None:
-      mysql_conf = {}
-    backup_directory = self.createBackupDirectory('mysql')
-    if template_filename is None:
-      template_filename = self.getTemplateFilename('my.cnf.in')
-    error_log = os.path.join(self.log_directory, 'mysqld.log')
-    slow_query_log = os.path.join(self.log_directory, 'mysql-slow.log')
-    mysql_conf.update(
-        ip=ip,
-        data_directory=os.path.join(self.data_root_directory,
-          'mysql'),
-        tcp_port=port,
-        pid_file=os.path.join(self.run_directory, 'mysqld.pid'),
-        socket=os.path.join(self.run_directory, 'mysqld.sock'),
-        error_log=error_log,
-        slow_query_log=slow_query_log,
-        mysql_database=database,
-        mysql_user=user,
-        mysql_password=self.generatePassword(),
+    # backup configuration
+    mysqldump_binary = self.options['mysqldump-binary']
+    backup_directory = self.options['backup-directory']
+    pending_backup_dir = self.options['backup-pending-directory']
+    dump_filename = self.options['dumpname']
+
+    mysqldump_cmd = [mysqldump_binary,
+                     mysql_conf['mysql_database'],
+                     '-u', 'root',
+                     '-S', mysql_conf['socket'].strip(),
+                     '--single-transaction', '--opt',
+                    ]
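+    # i.e. <mysqldump> <database> -u root -S <socket> --single-transaction --opt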
+    dump_file = os.path.join(backup_directory, dump_filename)
+    tmpdump_file = os.path.join(pending_backup_dir, dump_filename)
+    backup_script = self.createPythonScript(
+      self.options['backup-script'],
+      '%s.backup.do_backup' % __name__,
+      {
+        'mysqldump': mysqldump_cmd,
+        'gzip': self.options['gzip-binary'],
+        'tmpdump': tmpdump_file,
+        'dumpfile': dump_file,
+      },
     )
-    self.registerLogRotation('mysql', [error_log, slow_query_log],
-        '%(mysql_binary)s --no-defaults -B --user=root '
-        '--socket=%(mysql_socket)s -e "FLUSH LOGS"' % dict(
-          mysql_binary=self.options['mysql_binary'],
-          mysql_socket=mysql_conf['socket']))
-    self._createDirectory(mysql_conf['data_directory'])
+    path_list.append(backup_script)
 
-    mysql_conf_path = self.createConfigurationFile("my.cnf",
-        self.substituteTemplate(template_filename,
-          mysql_conf))
+    # Recovering backup
+    if self.optionIsTrue('recovering', default=False):
+      recovering_script = self.createPythonScript(
+        self.options['recovering-wrapper'],
+        '%s.recover.import_remote_dump' % __name__,
+        {
+          'lock_file': os.path.join(self.work_directory,
+                                    'import_done'),
+          'database': mysql_conf['mysql_database'],
+          'mysql_binary': self.options['mysql-binary'],
+          'mysql_socket': mysql_conf['socket'],
+          'duplicity_binary': self.options['duplicity-binary'],
+          'remote_backup': self.parameter_dict['remote-backup'],
+          'local_directory': self.mysql_backup_directory,
+          'dump_name': dump_filename,
+          'zcat_binary': self.options['zcat-binary'],
+        }
+      )
+      path_list.append(recovering_script)
 
-    mysql_script_list = []
-    for x_database, x_user, x_password in \
-          [(mysql_conf['mysql_database'],
-            mysql_conf['mysql_user'],
-            mysql_conf['mysql_password']),
-          ]:
-      mysql_script_list.append(pkg_resources.resource_string(__name__,
-                     'template/initmysql.sql.in') % {
-                        'mysql_database': x_database,
-                        'mysql_user': x_user,
-                        'mysql_password': x_password})
-    mysql_script_list.append('EXIT')
-    mysql_script = '\n'.join(mysql_script_list)
-    self.path_list.extend(zc.buildout.easy_install.scripts([('mysql_update',
-      __name__ + '.mysql', 'updateMysql')], self.ws,
-      sys.executable, self.wrapper_directory, arguments=[dict(
-        mysql_script=mysql_script,
-        mysql_binary=self.options['mysql_binary'].strip(),
-        mysql_upgrade_binary=self.options['mysql_upgrade_binary'].strip(),
-        socket=mysql_conf['socket'],
-        )]))
-    self.path_list.extend(zc.buildout.easy_install.scripts([('mysqld',
-      __name__ + '.mysql', 'runMysql')], self.ws,
-        sys.executable, self.wrapper_directory, arguments=[dict(
-        mysql_install_binary=self.options['mysql_install_binary'].strip(),
-        mysqld_binary=self.options['mysqld_binary'].strip(),
-        data_directory=mysql_conf['data_directory'].strip(),
-        mysql_binary=self.options['mysql_binary'].strip(),
-        socket=mysql_conf['socket'].strip(),
-        configuration_file=mysql_conf_path,
-       )]))
-    self.path_list.extend([mysql_conf_path])
 
-    # backup configuration
-    backup_directory = self.createBackupDirectory('mysql')
-    full_backup = os.path.join(backup_directory, 'full')
-    incremental_backup = os.path.join(backup_directory, 'incremental')
-    self._createDirectory(full_backup)
-    self._createDirectory(incremental_backup)
-    innobackupex_argument_list = [self.options['perl_binary'],
-        self.options['innobackupex_binary'],
-        '--defaults-file=%s' % mysql_conf_path,
-        '--socket=%s' %mysql_conf['socket'].strip(), '--user=root']
-    environment = dict(PATH='%s' % self.bin_directory)
-    innobackupex_incremental = zc.buildout.easy_install.scripts([(
-      'innobackupex_incremental', 'slapos.recipe.librecipe.execute', 'executee')],
-      self.ws, sys.executable, self.bin_directory, arguments=[
-        innobackupex_argument_list + ['--incremental'],
-        environment])[0]
-    self.path_list.append(innobackupex_incremental)
-    innobackupex_full = zc.buildout.easy_install.scripts([('innobackupex_full',
-      'slapos.recipe.librecipe.execute', 'executee')], self.ws,
-      sys.executable, self.bin_directory, arguments=[
-        innobackupex_argument_list,
-        environment])[0]
-    self.path_list.append(innobackupex_full)
-    backup_controller = zc.buildout.easy_install.scripts([
-      ('innobackupex_controller', __name__ + '.innobackupex', 'controller')],
-      self.ws, sys.executable, self.bin_directory,
-      arguments=[innobackupex_incremental, innobackupex_full, full_backup,
-        incremental_backup])[0]
-    self.path_list.append(backup_controller)
-    mysql_backup_cron = os.path.join(self.cron_d, 'mysql_backup')
-    open(mysql_backup_cron, 'w').write('0 0 * * * ' + backup_controller)
-    self.path_list.append(mysql_backup_cron)
-    # The return could be more explicit database, user ...
-    return mysql_conf
+    return path_list
diff --git a/slapos/recipe/mysql/backup.py b/slapos/recipe/mysql/backup.py
new file mode 100644
index 0000000000000000000000000000000000000000..590771d6b73a33bf8014c8b15b6a60fb2ba905aa
--- /dev/null
+++ b/slapos/recipe/mysql/backup.py
@@ -0,0 +1,25 @@
+import subprocess
+import os
+
+# Replace mysqldump | gzip > tmpdump && mv -f tmpdump dumpfile
+def do_backup(kwargs):
+  mysqldump_cmd = kwargs['mysqldump']
+  gzip_bin = kwargs['gzip']
+  tmpdump = kwargs['tmpdump']
+  dumpfile = kwargs['dumpfile']
+
+  # mysqldump | gzip > tmpdump
+  with open(tmpdump, 'w') as output:
+    mysqldump = subprocess.Popen(mysqldump_cmd,
+                                 stdout=subprocess.PIPE,
+                                 stderr=subprocess.STDOUT)
+    gzip = subprocess.Popen([gzip_bin],
+                            stdin=mysqldump.stdout,
+                            stdout=output,
+                            stderr=subprocess.STDOUT)
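+    # Closing our handle on mysqldump's stdout lets gzip see end-of-file when
+    # mysqldump exits (and lets mysqldump get SIGPIPE if gzip dies early).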
+    mysqldump.stdout.close()
+
+    if gzip.wait() != 0:
+      raise ValueError("Gzip return a non zero value.")
+
+  os.rename(tmpdump, dumpfile)
diff --git a/slapos/recipe/mysql/catdatefile.py b/slapos/recipe/mysql/catdatefile.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3de298b272cce0eba88570aa6734ee74115e58e
--- /dev/null
+++ b/slapos/recipe/mysql/catdatefile.py
@@ -0,0 +1,14 @@
+import os
+import sys
+import time
+def catdatefile(args):
+  directory = args[0]
+  try:
+    suffix = args[1]
+  except IndexError:
+    suffix = '.log'
+  f = open(os.path.join(directory,
+    time.strftime('%Y-%m-%d.%H:%M.%s') + suffix), 'a')
+  for line in sys.stdin:
+    f.write(line)
+  f.close()
diff --git a/slapos/recipe/mysql/mysql.py b/slapos/recipe/mysql/mysql.py
index e2036076a611cd308755a93f77c763ba91e3fb73..24812bc8503756845f016acd07089819bfdf66d9 100644
--- a/slapos/recipe/mysql/mysql.py
+++ b/slapos/recipe/mysql/mysql.py
@@ -4,9 +4,8 @@ import time
 import sys
 
 
-def runMysql(args):
+def runMysql(conf):
   sleep = 60
-  conf = args[0]
   mysqld_wrapper_list = [conf['mysqld_binary'], '--defaults-file=%s' %
       conf['configuration_file']]
   # we trust mysql_install that if mysql directory is available mysql was
@@ -16,8 +15,8 @@ def runMysql(args):
       # XXX: Protect with proper root password
       # XXX: Follow http://dev.mysql.com/doc/refman/5.0/en/default-privileges.html
       popen = subprocess.Popen([conf['mysql_install_binary'],
-        '--skip-name-resolve', '--no-defaults', '--datadir=%s' %
-        conf['data_directory']],
+        '--skip-name-resolve', '--skip-host-cache', '--no-defaults',
+        '--datadir=%s' % conf['data_directory']],
         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
       result = popen.communicate()[0]
       if popen.returncode is None or popen.returncode != 0:
@@ -35,8 +34,7 @@ def runMysql(args):
   os.execl(mysqld_wrapper_list[0], *mysqld_wrapper_list)
 
 
-def updateMysql(args):
-  conf = args[0]
+def updateMysql(conf):
   sleep = 30
   is_succeed = False
   while True:
diff --git a/slapos/recipe/mysql/recover.py b/slapos/recipe/mysql/recover.py
new file mode 100644
index 0000000000000000000000000000000000000000..7641321a9ddab55a290af8bb9ad0faff97fd0af6
--- /dev/null
+++ b/slapos/recipe/mysql/recover.py
@@ -0,0 +1,42 @@
+import sys
+import os
+import time
+import subprocess
+
+def import_remote_dump(kwargs):
+  # Get data from kwargs
+  lock_file = kwargs['lock_file']
+  database = kwargs['database']
+  mysql_binary = kwargs['mysql_binary']
+  mysql_socket = kwargs['mysql_socket']
+  duplicity_binary = kwargs['duplicity_binary']
+  remote_backup = kwargs['remote_backup']
+  local_directory = kwargs['local_directory']
+  dump_name = kwargs['dump_name']
+  zcat_binary = kwargs['zcat_binary']
+
+  # The script start really here
+  if os.path.exists(lock_file):
+    sys.exit(127)
+
+  while subprocess.call([mysql_binary, '--socket=%s' % mysql_socket,
+                         '-u', 'root', '-e', 'use %s;' % database]) != 0:
+    time.sleep(10)
+
+  subprocess.check_call([duplicity_binary, 'restore', '--no-encryption',
+                         remote_backup, local_directory])
+
+  zcat = subprocess.Popen([zcat_binary, os.path.join(local_directory,
+                                                     dump_name)],
+                          stdout=subprocess.PIPE)
+  mysql = subprocess.Popen([mysql_binary, '--socket=%s' % mysql_socket,
+                            '-D', database, '-u', 'root'],
+                           stdin=zcat.stdout)
+  zcat.stdout.close()
+
+  returncode = mysql.wait()
+
+  if returncode == 0:
+    open(lock_file, 'w').close() # Just a touch
+
+  sys.exit(returncode)
diff --git a/slapos/recipe/publishurl.py b/slapos/recipe/publishurl.py
new file mode 100644
index 0000000000000000000000000000000000000000..98655433a986cc414050498cf0933e5fae7fd5e0
--- /dev/null
+++ b/slapos/recipe/publishurl.py
@@ -0,0 +1,69 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+import zc.buildout
+
+from slapos.recipe.librecipe import GenericSlapRecipe
+
+class Recipe(GenericSlapRecipe):
+
+  def _options(self, options):
+
+    self.useparts = True
+
+    if 'url' in options:
+      self.useparts = False
+      self.url = options['url']
+    else:
+      self.urlparts = {}
+
+      if 'scheme' not in options:
+        raise zc.buildout.UserError("No scheme specified.")
+      else:
+        self.urlparts.update(scheme=options['scheme'])
+      if 'host' not in options:
+        raise zc.buildout.UserError("No host specified.")
+      else:
+        self.urlparts.update(host=options['host'])
+
+  def _install(self):
+
+    if self.useparts:
+      for option in ['path', 'params', 'query', 'fragment', 'port']:
+        if option in self.options:
+          self.urlparts[option] = self.options[option]
+
+      if 'user' in self.options:
+        self.urlparts.update(auth=(self.options['user'],))
+        if 'password' in self.options:
+          self.urlparts.update(auth=(self.options['user'],
+                                     self.options['password']))
+
+      self.setConnectionUrl(**self.urlparts)
+    else:
+      self.setConnectionDict(dict(url=self.url))
+
+    return []
diff --git a/slapos/recipe/request.py b/slapos/recipe/request.py
new file mode 100644
index 0000000000000000000000000000000000000000..66de7394bdac9e8564661ede2866d2b0009b209e
--- /dev/null
+++ b/slapos/recipe/request.py
@@ -0,0 +1,96 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+import logging
+import os
+
+from slapos import slap as slapmodule
+
+class Recipe(object):
+
+  def parseMultiValues(self, string):
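+    # e.g. "foo = bar\nbaz = qux" -> {'foo': 'bar', 'baz': 'qux'}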
+    return dict([ [str(column).strip() for column in line.split('=', 1)]
+                 for line in str(string).splitlines() if '=' in line])
+
+  def __init__(self, buildout, name, options):
+    self.logger = logging.getLogger(name)
+
+    slap = slapmodule.slap()
+
+    slap_connection = buildout['slap_connection']
+    self.software_release_url = slap_connection['software_release_url']
+
+    # XXX: Dirty network interaction stuff
+    slap.initializeConnection(slap_connection['server_url'],
+                              slap_connection.get('key_file'),
+                              slap_connection.get('cert_file'),
+                             )
+    computer_partition = slap.registerComputerPartition(
+      slap_connection['computer_id'], slap_connection['partition_id'])
+    self.request = computer_partition.request
+
+    if 'software-url' not in options:
+      options['software-url'] = self.software_release_url
+
+    if 'name' not in options:
+      options['name'] = name
+
+    self.return_parameters = []
+    if 'return' in options:
+      self.return_parameters = [str(parameter).strip()
+                               for parameter in options['return'].splitlines()]
+    else:
+      self.logger.warning("No parameter to return to main instance."
+                          "Be careful about that...")
+
+    software_type = 'RootInstanceSoftware'
+    if 'software-type' in options:
+      software_type = options['software-type']
+
+    filter_kw = {}
+    if 'sla' in options:
+      filter_kw = self.parseMultiValues(options['sla'])
+
+    partition_parameter_kw = {}
+    if 'config' in options:
+      partition_parameter_kw = self.parseMultiValues(options['config'])
+
+    instance = self.request(options['software-url'], software_type,
+      options['name'], partition_parameter_kw=partition_parameter_kw,
+        filter_kw=filter_kw)
+
+    result = {}
+    for param in self.return_parameters:
+      result[param] = instance.getConnectionParameter(param)
+
+    # Return the connections parameters in options dict
+    for key, value in result.items():
+      options['connection-%s' % key] = value
+
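+  # Note: all the work happens in __init__ so that the connection-* options
+  # are already filled in when other parts read them during the same
+  # buildout run; install() itself is a no-op.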
+  def install(self):
+    return []
+
+  update = install
diff --git a/slapos/recipe/simplelogger.py b/slapos/recipe/simplelogger.py
new file mode 100644
index 0000000000000000000000000000000000000000..494ef81f1b8278783dc8c7bc0a4384c6d01f1e59
--- /dev/null
+++ b/slapos/recipe/simplelogger.py
@@ -0,0 +1,54 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+import shutil
+import os
+import sys
+import time
+
+from slapos.recipe.librecipe import GenericBaseRecipe
+
+def log(args):
+  directory, suffix = args
+  filename = time.strftime('%Y-%m-%d.%H:%M.%s') + suffix
+  with open(os.path.join(directory, filename), 'a') as logfile:
+    shutil.copyfileobj(sys.stdin, logfile)
+
+class Recipe(GenericBaseRecipe):
+
+  def install(self):
+    self.logger.info("Simple logger installation")
+    binary = self.options['binary']
+    output = self.options['output']
+    suffix = self.options.get('suffix', '.log')
+
+    script = self.createPythonScript(binary,
+                                     'slapos.recipe.simplelogger.log',
+                                     arguments=[output, suffix])
+    self.logger.debug("Logger script created at : %r", script)
+    self.logger.info("Simple logger installed.")
+
+    return [script]
diff --git a/slapos/recipe/softwaretype.py b/slapos/recipe/softwaretype.py
new file mode 100644
index 0000000000000000000000000000000000000000..661adfbe1f3d9c851472744b5e690ffd848c26d3
--- /dev/null
+++ b/slapos/recipe/softwaretype.py
@@ -0,0 +1,134 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+
+import os
+import sys
+import copy
+from ConfigParser import ConfigParser
+import subprocess
+import slapos.slap
+import netaddr
+import logging
+
+import zc.buildout
+
+class Recipe:
+
+  def __init__(self, buildout, name, options):
+    self.buildout = buildout
+    self.options = options
+    self.name = name
+    self.logger = logging.getLogger(self.name)
+
+  def _getIpAddress(self, test_method):
+    """Internal helper method to fetch ip address"""
+    if 'ip_list' not in self.parameter_dict:
+      raise AttributeError
+    for name, ip in self.parameter_dict['ip_list']:
+      if test_method(ip):
+        return ip
+    raise AttributeError
+
+  def getLocalIPv4Address(self):
+    """Returns local IPv4 address available on partition"""
+    # XXX: Lack checking for locality of address
+    return self._getIpAddress(netaddr.valid_ipv4)
+
+  def getGlobalIPv6Address(self):
+    """Returns global IPv6 address available on partition"""
+    # XXX: Lack checking for globality of address
+    return self._getIpAddress(netaddr.valid_ipv6)
+
+  def install(self):
+    slap = slapos.slap.slap()
+    slap_connection = self.buildout['slap_connection']
+    computer_id = slap_connection['computer_id']
+    computer_partition_id = slap_connection['partition_id']
+    server_url = slap_connection['server_url']
+    key_file = slap_connection.get('key_file')
+    cert_file = slap_connection.get('cert_file')
+    slap.initializeConnection(server_url, key_file, cert_file)
+    self.computer_partition = slap.registerComputerPartition(
+      computer_id,
+      computer_partition_id)
+    self.parameter_dict = self.computer_partition.getInstanceParameterDict()
+    software_type = self.parameter_dict['slap_software_type']
+
+    if software_type not in self.options:
+      if 'default' in self.options:
+        software_type = 'default'
+      else:
+        raise zc.buildout.UserError("This software type isn't mapped. And"
+                                    "there's no default software type.")
+
+    instance_file_path = self.options[software_type]
+
+    if not os.path.exists(instance_file_path):
+      raise zc.buildout.UserError("The specified buildout config file does not"
+                                  "exist.")
+
+    buildout = ConfigParser()
+    with open(instance_file_path) as instance_path:
+      buildout.readfp(instance_path)
+
+    buildout.set('buildout', 'installed',
+                 '.installed-%s.cfg' % software_type)
+
+    buildout.add_section('slap-parameter')
+    for parameter, value in self.parameter_dict.items():
+      buildout.set('slap-parameter', parameter, value)
+
+    buildout.add_section('slap-network-information')
+    buildout.set('slap-network-information', 'local-ipv4', 
+                 self.getLocalIPv4Address())
+    buildout.set('slap-network-information', 'global-ipv6', 
+                 self.getGlobalIPv6Address())
+
+    # Copy/paste slap_connection
+    buildout.add_section('slap-connection')
+    for key, value in self.buildout['slap_connection'].iteritems():
+      # XXX: Waiting for SlapBaseRecipe to use dash instead of underscores
+      buildout.set('slap-connection', key.replace('_', '-'), value)
+
+    work_directory = os.path.abspath(self.buildout['buildout'][
+      'directory'])
+    buildout_filename = os.path.join(work_directory,
+                                     'buildout-%s.cfg' % software_type)
+    with open(buildout_filename, 'w') as buildout_file:
+      buildout.write(buildout_file)
+
+    # XXX-Antoine: We gotta find a better way to do this. I tried to check
+    # out how slapgrid-cp was running buildout. But it is worse than that.
+    command_line_args = copy.copy(sys.argv) + ['-c', buildout_filename]
+
+    self.logger.info("Invoking commandline : '%s'",
+                     ' '.join(command_line_args))
+
+    subprocess.check_call(command_line_args, cwd=work_directory,
+                          env=os.environ.copy())
+    return []
+  update = install
diff --git a/slapos/recipe/stunnel/__init__.py b/slapos/recipe/stunnel/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..06bad67a21d0942f8d2077777083ea709c86a0d4
--- /dev/null
+++ b/slapos/recipe/stunnel/__init__.py
@@ -0,0 +1,94 @@
+##############################################################################
+#
+# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly adviced to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 3
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+import itertools
+
+import zc.buildout
+
+from slapos.recipe.librecipe import GenericBaseRecipe
+
+class Recipe(GenericBaseRecipe):
+
+  def _options(self, options):
+    self.types = ['local', 'remote']
+    self.datas = ['address', 'port']
+    for type_ in self.types:
+      for data in self.datas:
+        opt = '%s-%s' % (type_, data)
+        if opt not in options:
+          raise zc.buildout.UserError("No %s for %s connections." % (data, type_))
+
+    self.isClient = self.optionIsTrue('client', default=False)
+    if self.isClient:
+      self.logger.info("Client mode")
+    else:
+      self.logger.info("Server mode")
+
+    if 'name' not in options:
+      options['name'] = self.name
+
+
+  def install(self):
+    path_list = []
+    conf = {}
+
+    gathered_options = ['%s-%s' % option
+                       for option in itertools.product(self.types,
+                                                        self.datas)]
+    for option in gathered_options:
+      # XXX: Because the options are using dash and the template uses
+      # underscore
+      conf[option.replace('-', '_')] = self.options[option]
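+    # i.e. conf now holds local_address, local_port, remote_address and
+    # remote_port, as expected by the templates.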
+
+    pid_file = self.options['pid-file']
+    conf.update(pid_file=pid_file)
+    path_list.append(pid_file)
+
+    log_file = self.options['log-file']
+    conf.update(log=log_file)
+
+    if self.isClient:
+      template = self.getTemplateFilename('client.conf.in')
+
+    else:
+      template = self.getTemplateFilename('server.conf.in')
+      key = self.options['key-file']
+      cert = self.options['cert-file']
+      conf.update(key=key, cert=cert)
+
+    conf_file = self.createFile(
+      self.options['config-file'],
+      self.substituteTemplate(template, conf))
+    path_list.append(conf_file)
+
+    wrapper = self.createPythonScript(
+      self.options['wrapper'],
+      'slapos.recipe.librecipe.execute.execute',
+      [self.options['stunnel-binary'], conf_file]
+    )
+    path_list.append(wrapper)
+
+    return path_list
diff --git a/slapos/recipe/stunnel/template/client.conf.in b/slapos/recipe/stunnel/template/client.conf.in
new file mode 100644
index 0000000000000000000000000000000000000000..f72634ec3d8f258c5b7caf4ea1e8adf47e9f8847
--- /dev/null
+++ b/slapos/recipe/stunnel/template/client.conf.in
@@ -0,0 +1,9 @@
+foreground = yes
+output = %(log)s
+pid = %(pid_file)s
+syslog = no
+
+[service]
+client = yes
+accept = %(local_address)s:%(local_port)s
+connect = %(remote_address)s:%(remote_port)s
diff --git a/slapos/recipe/stunnel/template/server.conf.in b/slapos/recipe/stunnel/template/server.conf.in
new file mode 100644
index 0000000000000000000000000000000000000000..e53bbc5c11aadee509cae300a54086d1b957cc4b
--- /dev/null
+++ b/slapos/recipe/stunnel/template/server.conf.in
@@ -0,0 +1,10 @@
+foreground = yes
+output = %(log)s
+pid = %(pid_file)s
+syslog = no
+key = %(key)s
+cert = %(cert)s
+
+[service]
+accept = %(remote_address)s:%(remote_port)s
+connect = %(local_address)s:%(local_port)s
diff --git a/slapos/recipe/vifib.py b/slapos/recipe/vifib.py
index 833def71752d05f688b3272201612a750de89203..55ec2243b508aeb3e05114585825ca9e0e59f59d 100644
--- a/slapos/recipe/vifib.py
+++ b/slapos/recipe/vifib.py
@@ -31,60 +31,8 @@ import zc.buildout
 import sys
 
 class Recipe(slapos.recipe.erp5.Recipe):
-  
-  default_bt5_list = []
 
-  def installKeyAuthorisationApache(self, ip, port, backend, key, certificate,
-      ca_conf, key_auth_path='/erp5/portal_slap'):
-    ssl_template = """SSLEngine on
-SSLVerifyClient require
-RequestHeader set REMOTE_USER %%{SSL_CLIENT_S_DN_CN}s
-SSLCertificateFile %(key_auth_certificate)s
-SSLCertificateKeyFile %(key_auth_key)s
-SSLCACertificateFile %(ca_certificate)s
-SSLCARevocationPath %(ca_crl)s"""
-    apache_conf = self._getApacheConfigurationDict('key_auth_apache', ip, port)
-    apache_conf['ssl_snippet'] = ssl_template % dict(
-        key_auth_certificate=certificate,
-        key_auth_key=key,
-        ca_certificate=ca_conf['ca_certificate'],
-        ca_crl=ca_conf['ca_crl']
-        )
-    prefix = 'ssl_key_auth_apache'
-    rewrite_rule_template = \
-      "RewriteRule (.*) http://%(backend)s%(key_auth_path)s$1 [L,P]"
-    path_template = pkg_resources.resource_string('slapos.recipe.erp5',
-      'template/apache.zope.conf.path.in')
-    path = path_template % dict(path='/')
-    d = dict(
-          path=path,
-          backend=backend,
-          backend_path='/',
-          port=apache_conf['port'],
-          vhname=path.replace('/', ''),
-          key_auth_path=key_auth_path,
-    )
-    rewrite_rule = rewrite_rule_template % d
-    apache_conf.update(**dict(
-      path_enable=path,
-      rewrite_rule=rewrite_rule
-    ))
-    apache_config_file = self.createConfigurationFile(prefix + '.conf',
-        pkg_resources.resource_string('slapos.recipe.erp5',
-          'template/apache.zope.conf.in') % apache_conf)
-    self.path_list.append(apache_config_file)
-    self.path_list.extend(zc.buildout.easy_install.scripts([(
-      'key_auth_apache',
-        'slapos.recipe.erp5.apache', 'runApache')], self.ws,
-          sys.executable, self.wrapper_directory, arguments=[
-            dict(
-              required_path_list=[certificate, key, ca_conf['ca_certificate'],
-                ca_conf['ca_crl']],
-              binary=self.options['httpd_binary'],
-              config=apache_config_file
-            )
-          ]))
-    return 'https://%(ip)s:%(port)s' % apache_conf
+  default_bt5_list = []
 
   def _getZeoClusterDict(self):
     site_path = '/erp5/'
@@ -119,8 +67,8 @@ SSLCARevocationPath %(ca_crl)s"""
         self.getTemplateFilename('zope-zeo-snippet.conf.in'), dict(
         storage_name=storage_dict['storage_name'],
         address='%s:%s' % (storage_dict['ip'], storage_dict['port']),
-        mount_point=mount_point
-        )))
+        mount_point=mount_point, zodb_cache_size=self.zodb_cache_size,
+        zeo_client_cache_size=self.zeo_client_cache_size)))
     tidstorage_config = dict(host=self.getLocalIPv4Address(), port='6001')
     zodb_configuration_string = '\n'.join(zodb_configuration_list)
     zope_port = 12000
@@ -150,9 +98,15 @@ SSLCARevocationPath %(ca_crl)s"""
         login_url_list)
     apache_login = self.installBackendApache(self.getGlobalIPv6Address(), 15000,
         login_haproxy, backend_key, backend_certificate)
+
+    # Install Frontend
+    frontend_domain_name = self.parameter_dict.get("domain_name", 'vifib')
+    frontend_key, frontend_certificate = \
+                  self.requestCertificate(frontend_domain_name)
     apache_frontend_login = self.installFrontendZopeApache(
-        self.getGlobalIPv6Address(), 4443, 'vifib', '/',
-        apache_login, '/', backend_key, backend_certificate)
+        self.getGlobalIPv6Address(), 4443, frontend_domain_name, '/',
+        apache_login, '', frontend_key, frontend_certificate)
+
     # Four Web Service Nodes (Machine access)
     service_url_list = []
     for i in (1, 2, 3, 4):
@@ -166,9 +120,9 @@ SSLCARevocationPath %(ca_crl)s"""
 
     key_auth_key, key_auth_certificate = self.requestCertificate(
         'Key Based Access')
-    apache_keyauth = self.installKeyAuthorisationApache(
-        self.getLocalIPv4Address(), 15500, service_haproxy, key_auth_key,
-        key_auth_certificate, ca_conf, key_auth_path=self.key_auth_path)
+    apache_keyauth = self.installKeyAuthorisationApache(False, 15500,
+        service_haproxy, key_auth_key, key_auth_certificate, ca_conf,
+        key_auth_path=self.key_auth_path)
     memcached_conf = self.installMemcached(ip=self.getLocalIPv4Address(),
         port=11000)
     kumo_conf = self.installKumo(self.getLocalIPv4Address())
@@ -179,7 +133,7 @@ SSLCARevocationPath %(ca_crl)s"""
     # Connect direct to Zope to create the instance.
     self.installERP5Site(user, password, service_url_list[-1], mysql_conf,
              conversion_server_conf, memcached_conf, kumo_conf,
-             self.site_id, self.default_bt5_list)
+             self.site_id, self.default_bt5_list, ca_conf)
 
     self.setConnectionDict(dict(
       front_end_url=apache_frontend_login,
@@ -191,12 +145,6 @@ SSLCARevocationPath %(ca_crl)s"""
       kumo_url=kumo_conf['kumo_address'],
       conversion_server_url='%(conversion_server_ip)s:%(conversion_server_port)s' %
         conversion_server_conf,
-      # openssl binary might be removed, as soon as CP environment will be
-      # fully controlled
-      openssl_binary=self.options['openssl_binary'],
-      # As soon as there would be Vifib ERP5 configuration and possibility to
-      # call it over the network this can be removed
-      certificate_authority_path=ca_conf['certificate_authority_path'],
       # as installERP5Site is not trusted (yet) and this recipe is production
       # ready expose more information
       mysql_url='%(mysql_database)s@%(ip)s:%(tcp_port)s %(mysql_user)s %(mysql_password)s' % mysql_conf,
@@ -213,13 +161,14 @@ SSLCARevocationPath %(ca_crl)s"""
     user, password = self.installERP5()
     zodb_dir = os.path.join(self.data_root_directory, 'zodb')
     self._createDirectory(zodb_dir)
-    zodb_root_path = os.path.join(zodb_dir, 'root.fs')
+    zodb_root_path = os.path.join(zodb_dir, 'main.fs')
     ip = self.getLocalIPv4Address()
     zope_port = '18080'
     zope_access = self.installZope(ip, zope_port, 'zope_development',
         zodb_configuration_string=self.substituteTemplate(
           self.getTemplateFilename('zope-zodb-snippet.conf.in'),
-          dict(zodb_root_path=zodb_root_path)),
+          dict(zodb_root_path=zodb_root_path,
+            zodb_cache_size=self.zodb_cache_size)),
           thread_amount=8, with_timerservice=True)
     service_haproxy = self.installHaproxy(ip, 15000, 'service',
         self.site_check_path, [zope_access])
@@ -238,7 +187,7 @@ SSLCARevocationPath %(ca_crl)s"""
     self.linkBinary()
     self.installERP5Site(user, password, zope_access, mysql_conf,
              conversion_server_conf, memcached_conf, kumo_conf,
-             self.site_id, self.default_bt5_list)
+             self.site_id, self.default_bt5_list, ca_conf)
 
     self.setConnectionDict(dict(
       development_zope='http://%s:%s/' % (ip, zope_port),
@@ -249,12 +198,6 @@ SSLCARevocationPath %(ca_crl)s"""
       kumo_url=kumo_conf['kumo_address'],
       conversion_server_url='%(conversion_server_ip)s:%(conversion_server_port)s' %
         conversion_server_conf,
-      # openssl binary might be removed, as soon as CP environment will be
-      # fully controlled
-      openssl_binary=self.options['openssl_binary'],
-      # As soon as there would be Vifib ERP5 configuration and possibility to
-      # call it over the network this can be removed
-      certificate_authority_path=ca_conf['certificate_authority_path'],
       # as installERP5Site is not trusted (yet) and this recipe is production
       # ready expose more information
       mysql_url='%(mysql_database)s@%(ip)s:%(tcp_port)s %(mysql_user)s %(mysql_password)s' % mysql_conf,
@@ -267,6 +210,9 @@ SSLCARevocationPath %(ca_crl)s"""
     self.path_list = []
     self.requirements, self.ws = self.egg.working_set()
     # self.cron_d is a directory, where cron jobs can be registered
+    self.zodb_cache_size = int(self.options.get('zodb_cache_size', 5000))
+    self.zeo_client_cache_size = self.options.get('zeo_client_cache_size',
+      '20MB')
     self.cron_d = self.installCrond()
     self.logrotate_d, self.logrotate_backup = self.installLogrotate()
     self.killpidfromfile = zc.buildout.easy_install.scripts(
@@ -276,8 +222,6 @@ SSLCARevocationPath %(ca_crl)s"""
     if self.parameter_dict.get("flavour", "default") == 'configurator':
       self.default_bt5_list = self.options.get("configurator_bt5_list", '').split()
 
-    if self.parameter_dict.get('development', 'false').lower() == 'true':
-      return self.installDevelopment()
     if self.parameter_dict.get('production', 'false').lower() == 'true':
       return self.installProduction()
-    raise NotImplementedError('Flavour of instance have to be given.')
+    return self.installDevelopment()
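The two cache knobs wired in above can be overridden from the consuming buildout part; a minimal sketch, assuming a hypothetical [vifib] section — only the option names and their defaults (5000 objects, '20MB') come from the recipe:

[vifib]
# hypothetical section name; option names taken from the recipe above,
# the values are illustrative only
zodb_cache_size = 10000
zeo_client_cache_size = 64MB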
diff --git a/slapos/recipe/xwiki/__init__.py b/slapos/recipe/xwiki/__init__.py
index 8dbfcd7c72ebf544072c2a6828d4c9194b62b070..5f55423d78a69e38c42c7af950decb57887aabb6 100644
--- a/slapos/recipe/xwiki/__init__.py
+++ b/slapos/recipe/xwiki/__init__.py
@@ -35,11 +35,12 @@ import zc.buildout
 class Recipe(BaseSlapRecipe):
 
   def _install(self):
+    self.requirements, self.ws = self.egg.working_set()
     parameter_dict = self.computer_partition.getInstanceParameterDict()
-    ipv4 = self.getLocalIPv4Address(parameter_dict)
-    ipv6 = self.getGlobalIPv6Address(parameter_dict)
+    ipv4 = self.getLocalIPv4Address()
+    ipv6 = self.getGlobalIPv6Address()
 
-    self.install_mysql_server_configuration(self.getLocalIPv4Address(parameter_dict))
+    self.install_mysql_server_configuration(ipv4)
 
     port = '8900'
     tomcat_home = os.path.join(self.data_root_directory, 'tomcat')
@@ -56,8 +57,8 @@ class Recipe(BaseSlapRecipe):
           shutil.rmtree(dst)
           raise
 
-    shutil.copy(self.options['hsql_location'].strip(), os.path.join(tomcat_lib,
-      'hsqldb.jar'))
+    shutil.copy(self.options['jdbc_location'].strip(), os.path.join(tomcat_lib,
+      'jdbc.jar'))
     # headless mode
     self._writeFile(os.path.join(tomcat_home, 'bin', 'setenv.sh'), '''#!/bin/sh
 export JAVA_OPTS="${JAVA_OPTS} -Djava.awt.headless=true"
diff --git a/slapos/recipe/xwiki/template/hibernate.cfg.xml.in b/slapos/recipe/xwiki/template/hibernate.cfg.xml.in
index 91196e74a928ca7c008ed82396aa8c074eb4586e..78d25f78bfa77b23b263ef21875e237470d5995b 100644
--- a/slapos/recipe/xwiki/template/hibernate.cfg.xml.in
+++ b/slapos/recipe/xwiki/template/hibernate.cfg.xml.in
@@ -49,7 +49,7 @@
          Uncomment if you want to use MySQL and comment out other database configurations.
          We need to set the sql_mode to a less strict value, see XWIKI-1945
     -->
-    <property name="connection.url">jdbc:mysql://%(mysql_ip)s:%(mysql_port)s/xwiki?useServerPrepStmts=false&amp;useUnicode=true&amp;characterEncoding=UTF-8&amp;sessionVariables=sql_mode=''</property>
+    <property name="connection.url">jdbc:mysql://%(mysql_ip)s:%(mysql_port)s/xwiki?useServerPrepStmts=false&amp;useUnicode=true&amp;characterEncoding=UTF-8&amp;sessionVariables=&amp;sql_mode=''</property>
     <property name="connection.username">xwiki</property>
     <property name="connection.password">xwiki</property>
     <property name="connection.driver_class">com.mysql.jdbc.Driver</property>
diff --git a/slapos/recipe/xwiki/template/my.cnf.in b/slapos/recipe/xwiki/template/my.cnf.in
index 5e2742175bab0e7406b7e9c01d586f9b2859fb8a..09171fb6d9837199f4030f574791df31c251dc54 100644
--- a/slapos/recipe/xwiki/template/my.cnf.in
+++ b/slapos/recipe/xwiki/template/my.cnf.in
@@ -30,9 +30,6 @@ query_cache_size = 32M
 # Try number of CPU's*2 for thread_concurrency
 thread_concurrency = 8
 
-# Disable Federated by default
-skip-federated
-
 # Replication Master Server (default)
 # binary logging is required for replication
 log-bin=mysql-bin
diff --git a/slapos/recipe/zabbixagent/__init__.py b/slapos/recipe/zabbixagent/__init__.py
index ef587e32ae39bb410ddb7107283990a9c9c22540..41d9a469b6f38417bf5ba754ea51d8d354846e7a 100644
--- a/slapos/recipe/zabbixagent/__init__.py
+++ b/slapos/recipe/zabbixagent/__init__.py
@@ -79,35 +79,50 @@ class Recipe(BaseSlapRecipe):
     self.path_list.append(wrapper)
     return cron_d
 
-  def _install(self):
-    self.path_list = []
-    self.requirements, self.ws = self.egg.working_set()
-    # self.cron_d is a directory, where cron jobs can be registered
-    self.cron_d = self.installCrond()
-    self.logrotate_d, self.logrotate_backup = self.installLogrotate()
-    zabbix_log_file = os.path.join(self.log_directory, 'zabbix_agentd.log')
-    self.registerLogRotation('zabbix_agentd', [zabbix_log_file])
-    zabbix_agentd = dict(
+  def installZabbixAgentd(self, ip, port, hostname, server_ip,
+                          user_parameter_string=''):
+    log_file = os.path.join(self.log_directory, 'zabbix_agentd.log')
+    self.registerLogRotation('zabbix_agentd', [log_file])
+
+    zabbix_agentd_conf = dict(
       pid_file=os.path.join(self.run_directory, "zabbix_agentd.pid"),
-      log_file=zabbix_log_file,
-      ip=self.getGlobalIPv6Address(),
-      server=self.parameter_dict['server'],
-      hostname=self.parameter_dict['hostname'],
-      port='10050'
-    )
-    zabbix_agentd_conf = self.createConfigurationFile("zabbix_agentd.conf",
-         pkg_resources.resource_string(__name__,
-         'template/zabbix_agentd.conf.in') % zabbix_agentd)
-    self.path_list.append(zabbix_agentd_conf)
+      log_file=log_file,
+      ip=ip,
+      server=server_ip,
+      hostname=hostname,
+      port=port,
+      user_parameter_string=user_parameter_string)
+
+    zabbix_agentd_path = self.createConfigurationFile(
+      "zabbix_agentd.conf",
+      pkg_resources.resource_string(
+        __name__, 'template/zabbix_agentd.conf.in') % zabbix_agentd_conf)
+
+    self.path_list.append(zabbix_agentd_path)
+
     wrapper = zc.buildout.easy_install.scripts([('zabbixagentd',
       'slapos.recipe.librecipe.execute', 'execute')], self.ws, sys.executable,
       self.bin_directory, arguments=[
         self.options['zabbix_agentd_binary'].strip(), '-c',
-        zabbix_agentd_conf])[0]
+        zabbix_agentd_path])[0]
+
     self.path_list.extend(zc.buildout.easy_install.scripts([
       ('zabbixagentd', __name__ + '.svcdaemon', 'svcdaemon')],
       self.ws, sys.executable, self.wrapper_directory, arguments=[dict(
-        real_binary=wrapper, pid_file=zabbix_agentd['pid_file'])]))
-    self.setConnectionDict(dict(ip=zabbix_agentd['ip'],
-      name=zabbix_agentd['hostname'], port=zabbix_agentd['port']))
+        real_binary=wrapper, pid_file=zabbix_agentd_conf['pid_file'])]))
+
+    return zabbix_agentd_conf
+
+  def _install(self):
+    self.path_list = []
+    self.requirements, self.ws = self.egg.working_set()
+    # self.cron_d is a directory, where cron jobs can be registered
+    self.cron_d = self.installCrond()
+    self.logrotate_d, self.logrotate_backup = self.installLogrotate()
+    zabbix_agentd_conf = self.installZabbixAgentd(self.getGlobalIPv6Address(),
+                                                  10050,
+                                                  self.parameter_dict['hostname'],
+                                                  self.parameter_dict['server'])
+    self.setConnectionDict(dict(ip=zabbix_agentd_conf['ip'],
+      name=zabbix_agentd_conf['hostname'], port=zabbix_agentd_conf['port']))
     return self.path_list
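Callers that want to register extra agent checks can use the new user_parameter_string keyword; a sketch of an alternative _install call, in which the UserParameter key and command are invented for illustration and only the keyword name comes from installZabbixAgentd's signature:

    # hypothetical variant of the call above; the value ends up in the
    # %(user_parameter_string)s slot appended to zabbix_agentd.conf.in
    zabbix_agentd_conf = self.installZabbixAgentd(
      self.getGlobalIPv6Address(), 10050,
      self.parameter_dict['hostname'], self.parameter_dict['server'],
      user_parameter_string='UserParameter=custom.uptime,cut -d " " -f 1 /proc/uptime')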
diff --git a/slapos/recipe/zabbixagent/template/zabbix_agentd.conf.in b/slapos/recipe/zabbixagent/template/zabbix_agentd.conf.in
index 9aef9930ca937ea7e1ef55a667676b2303b0d0c5..d5ba48be5664ebecc7c7d980d583e74086cbb9fd 100644
--- a/slapos/recipe/zabbixagent/template/zabbix_agentd.conf.in
+++ b/slapos/recipe/zabbixagent/template/zabbix_agentd.conf.in
@@ -229,3 +229,5 @@ ListenIP=%(ip)s
 # Mandatory: no
 # Default:
 # UserParameter=
+
+%(user_parameter_string)s
diff --git a/stack/cloudooo.cfg b/stack/cloudooo.cfg
index 3acba15f7054c270a78a757ecc1c413f5354d996..5c854069d1ace6e509096cd9a5bccf9bc50411ba 100755
--- a/stack/cloudooo.cfg
+++ b/stack/cloudooo.cfg
@@ -60,7 +60,7 @@ parts =
 
 [cloudooo]
 recipe = plone.recipe.command
-location = ${buildout:directory}/src/${:_buildout_section_name_}
+location = ${buildout:parts-directory}/${:_buildout_section_name_}
 stop-on-error = true
 repository = http://git.erp5.org/repos/cloudooo.git
 branch = master