[packages/mysql] - removed outdated patches

qboosh qboosh at pld-linux.org
Sat Jul 13 14:20:36 CEST 2013


commit 47dafa00dd66f27be9bd61a900c90abe95f4e3eb
Author: Jakub Bogusz <qboosh at pld-linux.org>
Date:   Sat Jul 13 14:20:25 2013 +0200

    - removed outdated patches

 bug45702.patch                                  |  728 ----
 bug54330.patch                                  |  236 -
 bug580324.patch                                 |  106 -
 bug813587.patch                                 |   30 -
 bug860910.patch                                 |   92 -
 bug917246.patch                                 |   17 -
 bug933969.patch                                 |   92 -
 bug966844_page_size_error_on_5520_upgrade.patch |   22 -
 control_online_alter_index.patch                |   74 -
 error_pad.patch                                 |  266 --
 file-contents.patch                             |   25 -
 group_commit.patch                              | 2610 -----------
 innodb_adaptive_hash_index_partitions.patch     | 1439 -------
 innodb_admin_command_base.patch                 |  207 -
 innodb_buffer_pool_pages_i_s.patch              |  810 ----
 innodb_buffer_pool_shm.patch                    |   99 -
 innodb_bug60788.patch                           |  123 -
 innodb_deadlock_count.patch                     |  161 -
 innodb_dict_size_limit.patch                    |  534 ---
 innodb_expand_fast_index_creation.patch         | 1435 ------
 innodb_expand_import.patch                      | 1135 -----
 innodb_extend_slow.patch                        | 1036 -----
 innodb_extra_rseg.patch                         |  208 -
 innodb_fake_changes.patch                       |  767 ----
 innodb_fast_checksum.patch                      |  288 --
 innodb_files_extend.patch                       |  565 ---
 innodb_fix_misc.patch                           |  921 ----
 innodb_io_patches.patch                         | 1648 -------
 innodb_kill_idle_transaction.patch              |  455 --
 innodb_lru_dump_restore.patch                   |  711 ---
 innodb_opt_lru_count.patch                      |  307 --
 innodb_overwrite_relay_log_info.patch           |  494 ---
 innodb_pass_corrupt_table.patch                 | 1373 ------
 innodb_recovery_patches.patch                   |  496 ---
 innodb_separate_doublewrite.patch               | 1086 -----
 innodb_show_lock_name.patch                     |  506 ---
 innodb_show_status.patch                        |  526 ---
 innodb_show_status_extend.patch                 |  547 ---
 innodb_show_sys_tables.patch                    | 1791 --------
 innodb_split_buf_pool_mutex.patch               | 3802 ----------------
 innodb_stats.patch                              | 2172 ----------
 innodb_thread_concurrency_timer_based.patch     |  188 -
 log_connection_error.patch                      |  131 -
 log_warnings_suppress.patch                     |  188 -
 memory_dynamic_rows.patch                       | 5276 -----------------------
 microsec_process.patch                          |   50 -
 mysql-test.diff                                 |  861 ----
 mysql.spec                                      |    1 -
 mysql_dump_ignore_ct.patch                      |   75 -
 mysql_remove_eol_carret.patch                   |   69 -
 mysql_syslog.patch                              |  126 -
 optimizer_fix.patch                             |  153 -
 percona.sh                                      |   86 -
 percona_support.patch                           |   19 -
 processlist_row_stats.patch                     |  267 --
 query_cache_enhance.patch                       | 3168 --------------
 remove_fcntl_excessive_calls.patch              |   71 -
 response_time_distribution.patch                | 3633 ----------------
 show_slave_status_nolock.patch                  |  371 --
 show_temp.patch                                 |  649 ---
 slave_timeout_fix.patch                         |   11 -
 slow_extended.patch                             | 3054 -------------
 sql_no_fcache.patch                             |  420 --
 start-stop-messages.patch                       |  192 -
 subunit.patch                                   |  246 --
 userstat.patch                                  | 3498 ---------------
 utf8_general50_ci.patch                         |  400 --
 valgrind_zlib_suppression.patch                 |   20 -
 warning_fixes.patch                             |   31 -
 xtradb_bug317074.patch                          |   58 -
 70 files changed, 53252 deletions(-)
---
diff --git a/mysql.spec b/mysql.spec
index 1a0a501..1f9ca43 100644
--- a/mysql.spec
+++ b/mysql.spec
@@ -1,5 +1,4 @@
 # TODO:
-# - make response_time_distribution.patch compatible with i386 alpha sparc ppc arches
 # - mysqldump ... (invalid usage) prints to stdout not stderr (idiotic if you want to create dump and get usage in .sql)
 # - http://bugs.mysql.com/bug.php?id=16470
 # - innodb are dynamic (= as plugins) ?
diff --git a/bug45702.patch b/bug45702.patch
deleted file mode 100644
index 7800f01..0000000
--- a/bug45702.patch
+++ /dev/null
@@ -1,728 +0,0 @@
---- a/include/my_sys.h
-+++ b/include/my_sys.h
-@@ -320,8 +320,8 @@
- typedef struct st_dynamic_array
- {
-   uchar *buffer;
--  uint elements,max_element;
--  uint alloc_increment;
-+  ulong elements, max_element;
-+  ulong alloc_increment;
-   uint size_of_element;
- } DYNAMIC_ARRAY;
- 
-@@ -758,21 +758,21 @@
- #define my_init_dynamic_array2(A,B,C,D,E) init_dynamic_array2(A,B,C,D,E)
- #define my_init_dynamic_array2_ci(A,B,C,D,E) init_dynamic_array2(A,B,C,D,E)
- extern my_bool init_dynamic_array2(DYNAMIC_ARRAY *array, uint element_size,
--                                   void *init_buffer, uint init_alloc,
--                                   uint alloc_increment);
-+                                   void *init_buffer, ulong init_alloc,
-+                                   ulong alloc_increment);
- /* init_dynamic_array() function is deprecated */
- extern my_bool init_dynamic_array(DYNAMIC_ARRAY *array, uint element_size,
--                                  uint init_alloc, uint alloc_increment);
-+                                  ulong init_alloc, ulong alloc_increment);
- extern my_bool insert_dynamic(DYNAMIC_ARRAY *array,uchar * element);
- extern uchar *alloc_dynamic(DYNAMIC_ARRAY *array);
- extern uchar *pop_dynamic(DYNAMIC_ARRAY*);
--extern my_bool set_dynamic(DYNAMIC_ARRAY *array,uchar * element,uint array_index);
--extern my_bool allocate_dynamic(DYNAMIC_ARRAY *array, uint max_elements);
--extern void get_dynamic(DYNAMIC_ARRAY *array,uchar * element,uint array_index);
-+extern my_bool set_dynamic(DYNAMIC_ARRAY *array, uchar * element, ulong array_index);
-+extern my_bool allocate_dynamic(DYNAMIC_ARRAY *array, ulong max_elements);
-+extern void get_dynamic(DYNAMIC_ARRAY *array, uchar * element, ulong array_index);
- extern void delete_dynamic(DYNAMIC_ARRAY *array);
--extern void delete_dynamic_element(DYNAMIC_ARRAY *array, uint array_index);
-+extern void delete_dynamic_element(DYNAMIC_ARRAY *array, ulong array_index);
- extern void freeze_size(DYNAMIC_ARRAY *array);
--extern int  get_index_dynamic(DYNAMIC_ARRAY *array, uchar * element);
-+extern long get_index_dynamic(DYNAMIC_ARRAY *array, uchar * element);
- #define dynamic_array_ptr(array,array_index) ((array)->buffer+(array_index)*(array)->size_of_element)
- #define dynamic_element(array,array_index,type) ((type)((array)->buffer) +(array_index))
- #define push_dynamic(A,B) insert_dynamic((A),(B))
---- a/mysys/array.c
-+++ b/mysys/array.c
-@@ -41,8 +41,8 @@
- */
- 
- my_bool init_dynamic_array2(DYNAMIC_ARRAY *array, uint element_size,
--                            void *init_buffer, uint init_alloc, 
--                            uint alloc_increment)
-+                            void *init_buffer, ulong init_alloc, 
-+                            ulong alloc_increment)
- {
-   DBUG_ENTER("init_dynamic_array");
-   if (!alloc_increment)
-@@ -73,7 +73,7 @@
- } 
- 
- my_bool init_dynamic_array(DYNAMIC_ARRAY *array, uint element_size,
--                           uint init_alloc, uint alloc_increment)
-+                           ulong init_alloc, ulong alloc_increment)
- {
-   /* placeholder to preserve ABI */
-   return my_init_dynamic_array_ci(array, element_size, init_alloc, 
-@@ -196,7 +196,7 @@
-     FALSE	Ok
- */
- 
--my_bool set_dynamic(DYNAMIC_ARRAY *array, uchar* element, uint idx)
-+my_bool set_dynamic(DYNAMIC_ARRAY *array, uchar* element, ulong idx)
- {
-   if (idx >= array->elements)
-   {
-@@ -228,11 +228,11 @@
-     TRUE	Allocation of new memory failed
- */
- 
--my_bool allocate_dynamic(DYNAMIC_ARRAY *array, uint max_elements)
-+my_bool allocate_dynamic(DYNAMIC_ARRAY *array, ulong max_elements)
- {
-   if (max_elements >= array->max_element)
-   {
--    uint size;
-+    ulong size;
-     uchar *new_ptr;
-     size= (max_elements + array->alloc_increment)/array->alloc_increment;
-     size*= array->alloc_increment;
-@@ -273,11 +273,11 @@
-       idx	Index of element wanted. 
- */
- 
--void get_dynamic(DYNAMIC_ARRAY *array, uchar* element, uint idx)
-+void get_dynamic(DYNAMIC_ARRAY *array, uchar* element, ulong idx)
- {
-   if (idx >= array->elements)
-   {
--    DBUG_PRINT("warning",("To big array idx: %d, array size is %d",
-+    DBUG_PRINT("warning",("To big array idx: %lu, array size is %lu",
-                           idx,array->elements));
-     bzero(element,array->size_of_element);
-     return;
-@@ -320,7 +320,7 @@
-       idx        Index of element to be deleted
- */
- 
--void delete_dynamic_element(DYNAMIC_ARRAY *array, uint idx)
-+void delete_dynamic_element(DYNAMIC_ARRAY *array, ulong idx)
- {
-   char *ptr= (char*) array->buffer+array->size_of_element*idx;
-   array->elements--;
-@@ -340,7 +340,7 @@
- 
- void freeze_size(DYNAMIC_ARRAY *array)
- {
--  uint elements=max(array->elements,1);
-+  ulong elements= max(array->elements, 1);
- 
-   /*
-     Do nothing if we are using a static buffer
-@@ -368,7 +368,7 @@
- 
- */
- 
--int get_index_dynamic(DYNAMIC_ARRAY *array, uchar* element)
-+long get_index_dynamic(DYNAMIC_ARRAY *array, uchar* element)
- {
-   size_t ret;
-   if (array->buffer > element)
---- a/storage/myisam/mi_check.c
-+++ b/storage/myisam/mi_check.c
-@@ -2429,7 +2429,7 @@
- 
-     if (_create_index_by_sort(&sort_param,
- 			      (my_bool) (!(param->testflag & T_VERBOSE)),
--			      (uint) param->sort_buffer_length))
-+			      param->sort_buffer_length))
-     {
-       param->retry_repair=1;
-       goto err;
---- a/storage/myisam/sort.c
-+++ b/storage/myisam/sort.c
-@@ -45,42 +45,42 @@
- 
- /* Functions defined in this file */
- 
--static ha_rows find_all_keys(MI_SORT_PARAM *info,uint keys,
-+static ha_rows find_all_keys(MI_SORT_PARAM *info, ulong keys,
-                              uchar **sort_keys,
--                             DYNAMIC_ARRAY *buffpek,int *maxbuffer,
-+                             DYNAMIC_ARRAY *buffpek, long *maxbuffer,
-                              IO_CACHE *tempfile,
-                              IO_CACHE *tempfile_for_exceptions);
- static int write_keys(MI_SORT_PARAM *info,uchar **sort_keys,
--                      uint count, BUFFPEK *buffpek,IO_CACHE *tempfile);
-+                      ulong count, BUFFPEK *buffpek,IO_CACHE *tempfile);
- static int write_key(MI_SORT_PARAM *info, uchar *key,
-                      IO_CACHE *tempfile);
- static int write_index(MI_SORT_PARAM *info,uchar * *sort_keys,
--                       uint count);
--static int merge_many_buff(MI_SORT_PARAM *info,uint keys,
-+                       ulong count);
-+static int merge_many_buff(MI_SORT_PARAM *info, ulong keys,
-                            uchar * *sort_keys,
--                           BUFFPEK *buffpek,int *maxbuffer,
-+                           BUFFPEK *buffpek, long *maxbuffer,
-                            IO_CACHE *t_file);
--static uint read_to_buffer(IO_CACHE *fromfile,BUFFPEK *buffpek,
-+static ulong read_to_buffer(IO_CACHE *fromfile,BUFFPEK *buffpek,
-                            uint sort_length);
--static int merge_buffers(MI_SORT_PARAM *info,uint keys,
-+static int merge_buffers(MI_SORT_PARAM *info, ulong keys,
-                          IO_CACHE *from_file, IO_CACHE *to_file,
-                          uchar * *sort_keys, BUFFPEK *lastbuff,
-                          BUFFPEK *Fb, BUFFPEK *Tb);
--static int merge_index(MI_SORT_PARAM *,uint,uchar **,BUFFPEK *, int,
-+static int merge_index(MI_SORT_PARAM *, ulong, uchar **, BUFFPEK *, long,
-                        IO_CACHE *);
- static int flush_ft_buf(MI_SORT_PARAM *info);
- 
- static int write_keys_varlen(MI_SORT_PARAM *info,uchar **sort_keys,
--                             uint count, BUFFPEK *buffpek,
-+                             ulong count, BUFFPEK *buffpek,
-                              IO_CACHE *tempfile);
--static uint read_to_buffer_varlen(IO_CACHE *fromfile,BUFFPEK *buffpek,
--                                  uint sort_length);
-+static ulong read_to_buffer_varlen(IO_CACHE *fromfile,BUFFPEK *buffpek,
-+                                   uint sort_length);
- static int write_merge_key(MI_SORT_PARAM *info, IO_CACHE *to_file,
--                           uchar *key, uint sort_length, uint count);
-+                           uchar *key, uint sort_length, ulong count);
- static int write_merge_key_varlen(MI_SORT_PARAM *info,
-                                   IO_CACHE *to_file,
-                                   uchar* key, uint sort_length,
--                                  uint count);
-+                                  ulong count);
- static inline int
- my_var_write(MI_SORT_PARAM *info, IO_CACHE *to_file, uchar *bufs);
- 
-@@ -101,8 +101,9 @@
- int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
- 			  ulong sortbuff_size)
- {
--  int error,maxbuffer,skr;
--  uint memavl,old_memavl,keys,sort_length;
-+  int error;
-+  long maxbuffer, skr;
-+  ulong memavl, old_memavl, keys, sort_length;
-   DYNAMIC_ARRAY buffpek;
-   ha_rows records;
-   uchar **sort_keys;
-@@ -136,25 +137,25 @@
- 
-   while (memavl >= MIN_SORT_BUFFER)
-   {
--    if ((records < UINT_MAX32) && 
-+    if ((records < ULONG_MAX) &&
-        ((my_off_t) (records + 1) * 
-         (sort_length + sizeof(char*)) <= (my_off_t) memavl))
--      keys= (uint)records+1;
-+      keys= (ulong) records + 1;
-     else
-       do
-       {
- 	skr=maxbuffer;
--	if (memavl < sizeof(BUFFPEK)*(uint) maxbuffer ||
--	    (keys=(memavl-sizeof(BUFFPEK)*(uint) maxbuffer)/
-+        if (memavl < sizeof(BUFFPEK) * (ulong) maxbuffer ||
-+            (keys = (memavl - sizeof(BUFFPEK) * (ulong) maxbuffer) /
-              (sort_length+sizeof(char*))) <= 1 ||
--            keys < (uint) maxbuffer)
-+            keys < (ulong) maxbuffer)
- 	{
- 	  mi_check_print_error(info->sort_info->param,
- 			       "myisam_sort_buffer_size is too small");
- 	  goto err;
- 	}
-       }
--      while ((maxbuffer= (int) (records/(keys-1)+1)) != skr);
-+      while ((maxbuffer= (long) (records / (keys - 1) + 1)) != skr);
- 
-     if ((sort_keys=(uchar **)my_malloc(keys*(sort_length+sizeof(char*))+
- 				       HA_FT_MAXBYTELEN, MYF(0))))
-@@ -180,7 +181,7 @@
-   (*info->lock_in_memory)(info->sort_info->param);/* Everything is allocated */
- 
-   if (!no_messages)
--    printf("  - Searching for keys, allocating buffer for %d keys\n",keys);
-+    printf("  - Searching for keys, allocating buffer for %lu keys\n", keys);
- 
-   if ((records=find_all_keys(info,keys,sort_keys,&buffpek,&maxbuffer,
-                                   &tempfile,&tempfile_for_exceptions))
-@@ -190,7 +191,7 @@
-   {
-     if (!no_messages)
-       printf("  - Dumping %lu keys\n", (ulong) records);
--    if (write_index(info,sort_keys, (uint) records))
-+    if (write_index(info,sort_keys, (ulong) records))
-       goto err; /* purecov: inspected */
-   }
-   else
-@@ -253,13 +254,13 @@
- 
- /* Search after all keys and place them in a temp. file */
- 
--static ha_rows find_all_keys(MI_SORT_PARAM *info, uint keys,
-+static ha_rows find_all_keys(MI_SORT_PARAM *info, ulong keys,
-                              uchar **sort_keys, DYNAMIC_ARRAY *buffpek,
--                             int *maxbuffer, IO_CACHE *tempfile,
-+                             long *maxbuffer, IO_CACHE *tempfile,
-                              IO_CACHE *tempfile_for_exceptions)
- {
-   int error;
--  uint idx;
-+  ulong idx;
-   DBUG_ENTER("find_all_keys");
- 
-   idx=error=0;
-@@ -308,8 +309,8 @@
- {
-   MI_SORT_PARAM *sort_param= (MI_SORT_PARAM*) arg;
-   int error;
--  uint memavl,old_memavl,keys,sort_length;
--  uint idx, maxbuffer;
-+  ulong memavl,old_memavl,keys,sort_length;
-+  ulong idx, maxbuffer;
-   uchar **sort_keys=0;
- 
-   LINT_INIT(keys);
-@@ -345,7 +346,7 @@
-     sort_keys= (uchar **) NULL;
- 
-     memavl=       max(sort_param->sortbuff_size, MIN_SORT_BUFFER);
--    idx=          (uint)sort_param->sort_info->max_records;
-+    idx=          (ulong) sort_param->sort_info->max_records;
-     sort_length=  sort_param->key_length;
-     maxbuffer=    1;
- 
-@@ -356,21 +357,21 @@
-         keys= idx+1;
-       else
-       {
--        uint skr;
-+        ulong skr;
-         do
-         {
-           skr= maxbuffer;
-           if (memavl < sizeof(BUFFPEK)*maxbuffer ||
-               (keys=(memavl-sizeof(BUFFPEK)*maxbuffer)/
-                (sort_length+sizeof(char*))) <= 1 ||
--              keys < (uint) maxbuffer)
-+              keys < maxbuffer)
-           {
-             mi_check_print_error(sort_param->sort_info->param,
-                                  "myisam_sort_buffer_size is too small");
-             goto err;
-           }
-         }
--        while ((maxbuffer= (int) (idx/(keys-1)+1)) != skr);
-+        while ((maxbuffer= (idx/(keys-1)+1)) != skr);
-       }
-       if ((sort_keys= (uchar**)
-            my_malloc(keys*(sort_length+sizeof(char*))+
-@@ -399,7 +400,7 @@
-     }
- 
-     if (sort_param->sort_info->param->testflag & T_VERBOSE)
--      printf("Key %d - Allocating buffer for %d keys\n",
-+      printf("Key %d - Allocating buffer for %lu keys\n",
-              sort_param->key + 1, keys);
-     sort_param->sort_keys= sort_keys;
- 
-@@ -553,7 +554,7 @@
-     }
-     if (sinfo->buffpek.elements)
-     {
--      uint maxbuffer=sinfo->buffpek.elements-1;
-+      ulong maxbuffer=sinfo->buffpek.elements-1;
-       if (!mergebuf)
-       {
-         length=param->sort_buffer_length;
-@@ -576,7 +577,7 @@
-           printf("Key %d  - Merging %u keys\n",sinfo->key+1, sinfo->keys);
-         if (merge_many_buff(sinfo, keys, (uchar **)mergebuf,
- 			    dynamic_element(&sinfo->buffpek, 0, BUFFPEK *),
--			    (int*) &maxbuffer, &sinfo->tempfile))
-+			    (long *) &maxbuffer, &sinfo->tempfile))
-         {
-           got_error=1;
-           continue;
-@@ -640,7 +641,7 @@
-         /* Write all keys in memory to file for later merge */
- 
- static int write_keys(MI_SORT_PARAM *info, register uchar **sort_keys,
--                      uint count, BUFFPEK *buffpek, IO_CACHE *tempfile)
-+                      ulong count, BUFFPEK *buffpek, IO_CACHE *tempfile)
- {
-   uchar **end;
-   uint sort_length=info->key_length;
-@@ -682,7 +683,7 @@
- 
- static int write_keys_varlen(MI_SORT_PARAM *info,
-                              register uchar **sort_keys,
--                             uint count, BUFFPEK *buffpek,
-+                             ulong count, BUFFPEK *buffpek,
-                              IO_CACHE *tempfile)
- {
-   uchar **end;
-@@ -727,7 +728,7 @@
- /* Write index */
- 
- static int write_index(MI_SORT_PARAM *info, register uchar **sort_keys,
--                       register uint count)
-+                       register ulong count)
- {
-   DBUG_ENTER("write_index");
- 
-@@ -744,11 +745,11 @@
- 
-         /* Merge buffers to make < MERGEBUFF2 buffers */
- 
--static int merge_many_buff(MI_SORT_PARAM *info, uint keys,
-+static int merge_many_buff(MI_SORT_PARAM *info, ulong keys,
-                            uchar **sort_keys, BUFFPEK *buffpek,
--                           int *maxbuffer, IO_CACHE *t_file)
-+                           long *maxbuffer, IO_CACHE *t_file)
- {
--  register int i;
-+  register long i;
-   IO_CACHE t_file2, *from_file, *to_file, *temp;
-   BUFFPEK *lastbuff;
-   DBUG_ENTER("merge_many_buff");
-@@ -778,7 +779,7 @@
-     if (flush_io_cache(to_file))
-       break;                                    /* purecov: inspected */
-     temp=from_file; from_file=to_file; to_file=temp;
--    *maxbuffer= (int) (lastbuff-buffpek)-1;
-+    *maxbuffer= (long) (lastbuff-buffpek)-1;
-   }
- cleanup:
-   close_cached_file(to_file);                   /* This holds old result */
-@@ -807,18 +808,18 @@
-     -1	Error
- */
- 
--static uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
--                           uint sort_length)
-+static ulong read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
-+                            uint sort_length)
- {
--  register uint count;
--  uint length;
-+  register ulong count;
-+  ulong length;
- 
--  if ((count=(uint) min((ha_rows) buffpek->max_keys,buffpek->count)))
-+  if ((count=(ulong) min((ha_rows) buffpek->max_keys,buffpek->count)))
-   {
-     if (mysql_file_pread(fromfile->file, (uchar*) buffpek->base,
-                          (length= sort_length*count),
-                          buffpek->file_pos, MYF_RW))
--      return((uint) -1);                        /* purecov: inspected */
-+      return((ulong) -1);                        /* purecov: inspected */
-     buffpek->key=buffpek->base;
-     buffpek->file_pos+= length;                 /* New filepos */
-     buffpek->count-=    count;
-@@ -827,15 +828,15 @@
-   return (count*sort_length);
- } /* read_to_buffer */
- 
--static uint read_to_buffer_varlen(IO_CACHE *fromfile, BUFFPEK *buffpek,
-+static ulong read_to_buffer_varlen(IO_CACHE *fromfile, BUFFPEK *buffpek,
-                                   uint sort_length)
- {
--  register uint count;
-+  register ulong count;
-   uint16 length_of_key = 0;
--  uint idx;
-+  ulong idx;
-   uchar *buffp;
- 
--  if ((count=(uint) min((ha_rows) buffpek->max_keys,buffpek->count)))
-+  if ((count=(ulong) min((ha_rows) buffpek->max_keys,buffpek->count)))
-   {
-     buffp = buffpek->base;
- 
-@@ -843,11 +844,11 @@
-     {
-       if (mysql_file_pread(fromfile->file, (uchar*)&length_of_key,
-                            sizeof(length_of_key), buffpek->file_pos, MYF_RW))
--        return((uint) -1);
-+        return((ulong) -1);
-       buffpek->file_pos+=sizeof(length_of_key);
-       if (mysql_file_pread(fromfile->file, (uchar*) buffp,
-                            length_of_key, buffpek->file_pos, MYF_RW))
--        return((uint) -1);
-+        return((ulong) -1);
-       buffpek->file_pos+=length_of_key;
-       buffp = buffp + sort_length;
-     }
-@@ -861,9 +862,9 @@
- 
- static int write_merge_key_varlen(MI_SORT_PARAM *info,
-                                   IO_CACHE *to_file, uchar* key,
--                                  uint sort_length, uint count)
-+                                  uint sort_length, ulong count)
- {
--  uint idx;
-+  ulong idx;
-   uchar *bufs = key;
- 
-   for (idx=1;idx<=count;idx++)
-@@ -879,7 +880,7 @@
- 
- static int write_merge_key(MI_SORT_PARAM *info __attribute__((unused)),
-                            IO_CACHE *to_file, uchar *key,
--                           uint sort_length, uint count)
-+                           uint sort_length, ulong count)
- {
-   return my_b_write(to_file, key, (size_t) sort_length*count);
- }
-@@ -890,12 +891,13 @@
- */
- 
- static int
--merge_buffers(MI_SORT_PARAM *info, uint keys, IO_CACHE *from_file,
-+merge_buffers(MI_SORT_PARAM *info, ulong keys, IO_CACHE *from_file,
-               IO_CACHE *to_file, uchar **sort_keys, BUFFPEK *lastbuff,
-               BUFFPEK *Fb, BUFFPEK *Tb)
- {
--  int error;
--  uint sort_length,maxcount;
-+  ulong error;
-+  uint sort_length;
-+  ulong maxcount;
-   ha_rows count;
-   my_off_t UNINIT_VAR(to_start_filepos);
-   uchar *strpos;
-@@ -905,7 +907,7 @@
-   DBUG_ENTER("merge_buffers");
- 
-   count=error=0;
--  maxcount=keys/((uint) (Tb-Fb) +1);
-+  maxcount= keys / ((ulong) (Tb-Fb) + 1);
-   DBUG_ASSERT(maxcount > 0);
-   LINT_INIT(to_start_filepos);
-   if (to_file)
-@@ -913,7 +915,7 @@
-   strpos=(uchar*) sort_keys;
-   sort_length=info->key_length;
- 
--  if (init_queue(&queue,(uint) (Tb-Fb)+1,offsetof(BUFFPEK,key),0,
-+  if (init_queue(&queue, (uint) (Tb-Fb)+1, offsetof(BUFFPEK,key), 0,
-                  (int (*)(void*, uchar *,uchar*)) info->key_cmp,
-                  (void*) info))
-     DBUG_RETURN(1); /* purecov: inspected */
-@@ -923,9 +925,8 @@
-     count+= buffpek->count;
-     buffpek->base= strpos;
-     buffpek->max_keys=maxcount;
--    strpos+= (uint) (error=(int) info->read_to_buffer(from_file,buffpek,
--                                                      sort_length));
--    if (error == -1)
-+    strpos+= (error= info->read_to_buffer(from_file,buffpek, sort_length));
-+    if (error == (ulong) -1)
-       goto err; /* purecov: inspected */
-     queue_insert(&queue,(uchar*) buffpek);
-   }
-@@ -957,10 +958,10 @@
-       buffpek->key+=sort_length;
-       if (! --buffpek->mem_count)
-       {
--        if (!(error=(int) info->read_to_buffer(from_file,buffpek,sort_length)))
-+        if (!(error= info->read_to_buffer(from_file,buffpek,sort_length)))
-         {
-           uchar *base=buffpek->base;
--          uint max_keys=buffpek->max_keys;
-+          ulong max_keys=buffpek->max_keys;
- 
-           (void) queue_remove(&queue,0);
- 
-@@ -985,7 +986,7 @@
-           break;                /* One buffer have been removed */
-         }
-       }
--      else if (error == -1)
-+      else if (error == (ulong) -1)
-         goto err;               /* purecov: inspected */
-       queue_replaced(&queue);   /* Top element has been replaced */
-     }
-@@ -1018,23 +1019,23 @@
-       }
-     }
-   }
--  while ((error=(int) info->read_to_buffer(from_file,buffpek,sort_length)) != -1 &&
--         error != 0);
-+  while ((error= info->read_to_buffer(from_file,buffpek,sort_length))
-+         != (ulong) -1 && error != 0);
- 
-   lastbuff->count=count;
-   if (to_file)
-     lastbuff->file_pos=to_start_filepos;
- err:
-   delete_queue(&queue);
--  DBUG_RETURN(error);
-+  DBUG_RETURN(error != 0);
- } /* merge_buffers */
- 
- 
-         /* Do a merge to output-file (save only positions) */
- 
- static int
--merge_index(MI_SORT_PARAM *info, uint keys, uchar **sort_keys,
--            BUFFPEK *buffpek, int maxbuffer, IO_CACHE *tempfile)
-+merge_index(MI_SORT_PARAM *info, ulong keys, uchar **sort_keys,
-+            BUFFPEK *buffpek, long maxbuffer, IO_CACHE *tempfile)
- {
-   DBUG_ENTER("merge_index");
-   if (merge_buffers(info,keys,tempfile,(IO_CACHE*) 0,sort_keys,buffpek,buffpek,
---- /dev/null
-+++ b/mysql-test/r/percona_bug45702.result
-@@ -0,0 +1,21 @@
-+CREATE TABLE t1 (a BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=MyISAM;
-+INSERT INTO t1 VALUES (), (), (), (), (), (), (), ();
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+SET @old_myisam_sort_buffer_size = @@myisam_sort_buffer_size;
-+SET @@myisam_sort_buffer_size = 4 * 1024 * 1024 * 1024;
-+REPAIR TABLE t1;
-+Table	Op	Msg_type	Msg_text
-+test.t1	repair	status	OK
-+- recovering (with sort) MyISAM-table 'MYSQLD_DATADIR/test/t1'
-+Data records: 4096
-+- Fixing index 1
-+SET @@myisam_sort_buffer_size = @old_myisam_sort_buffer_size;
-+DROP TABLE t1;
---- /dev/null
-+++ b/mysql-test/t/percona_bug45702.test
-@@ -0,0 +1,34 @@
-+###############################################################################
-+# Bug #45702: Impossible to specify myisam_sort_buffer > 4GB on 64 bit machines
-+###############################################################################
-+
-+--source include/have_64bit.inc
-+
-+# Check that having data larger than MIN_SORT_BUFFER bytes can be handled by
-+# _create_index_by_sort() with myisam_sort_buffer_size = 4 GB without errors.
-+# The full test with large data volumes can not be a part of the test suite.
-+
-+CREATE TABLE t1 (a BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=MyISAM;
-+INSERT INTO t1 VALUES (), (), (), (), (), (), (), ();
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+INSERT INTO t1 SELECT NULL FROM t1;
-+
-+SET @old_myisam_sort_buffer_size = @@myisam_sort_buffer_size;
-+SET @@myisam_sort_buffer_size = 4 * 1024 * 1024 * 1024;
-+
-+REPAIR TABLE t1;
-+
-+--let $MYSQLD_DATADIR= `select @@datadir`
-+--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
-+--exec $MYISAMCHK -r --sort_buffer_size=4G $MYSQLD_DATADIR/test/t1
-+
-+SET @@myisam_sort_buffer_size = @old_myisam_sort_buffer_size;
-+
-+DROP TABLE t1;
---- a/sql/opt_range.cc
-+++ b/sql/opt_range.cc
-@@ -11728,7 +11728,7 @@
-   }
-   if (min_max_ranges.elements > 0)
-   {
--    fprintf(DBUG_FILE, "%*susing %d quick_ranges for MIN/MAX:\n",
-+    fprintf(DBUG_FILE, "%*susing %lu quick_ranges for MIN/MAX:\n",
-             indent, "", min_max_ranges.elements);
-   }
- }
---- a/mysys/my_pread.c
-+++ b/mysys/my_pread.c
-@@ -49,6 +49,7 @@
-                 myf MyFlags)
- {
-   size_t readbytes;
-+  size_t total_readbytes= 0;
-   int error= 0;
- #if !defined (HAVE_PREAD) && !defined (_WIN32)
-   int save_errno;
-@@ -76,8 +77,30 @@
- #endif
-     error= (readbytes != Count);
- #endif
-+    if (readbytes > 0)
-+      total_readbytes+= readbytes;
-+
-     if(error)
-     {
-+      if (readbytes > 0 && readbytes < Count && errno == 0)
-+      {
-+        /*
-+          pread() may return less bytes than requested even if enough bytes are
-+          available according to the Linux man page.
-+          This makes determining the end-of-file condition a bit harder.
-+          We just do another pread() call to see if more bytes can be read,
-+          since all my_pread() users expect it to always return all available
-+          bytes. For end-of-file 0 bytes is returned. This can never be the case
-+          for a partial read, since according to the man page, -1 is returned
-+          with errno set to EINTR if no data has been read.
-+        */
-+        Buffer+= readbytes;
-+        offset+= readbytes;
-+        Count-= readbytes;
-+
-+        continue;
-+      }
-+
-       my_errno= errno ? errno : -1;
-       if (errno == 0 || (readbytes != (size_t) -1 &&
-                       (MyFlags & (MY_NABP | MY_FNABP))))
-@@ -107,7 +130,7 @@
-     }
-     if (MyFlags & (MY_NABP | MY_FNABP))
-       DBUG_RETURN(0);                      /* Read went ok; Return 0 */
--    DBUG_RETURN(readbytes);                /* purecov: inspected */
-+    DBUG_RETURN(total_readbytes);                /* purecov: inspected */
-   }
- } /* my_pread */
- 
---- a/storage/myisam/myisamdef.h
-+++ b/storage/myisam/myisamdef.h
-@@ -340,10 +340,10 @@
-   int (*key_write)(struct st_mi_sort_param *, const void *);
-   void (*lock_in_memory)(MI_CHECK *);
-   int (*write_keys)(struct st_mi_sort_param *, register uchar **,
--                    uint , struct st_buffpek *, IO_CACHE *);
--  uint (*read_to_buffer)(IO_CACHE *,struct st_buffpek *, uint);
-+                    ulong , struct st_buffpek *, IO_CACHE *);
-+  ulong (*read_to_buffer)(IO_CACHE *,struct st_buffpek *, uint);
-   int (*write_key)(struct st_mi_sort_param *, IO_CACHE *,uchar *,
--                   uint, uint);
-+                   uint, ulong);
- } MI_SORT_PARAM;
- 
- 	/* Some defines used by isam-funktions */
---- a/sql/rpl_mi.cc
-+++ b/sql/rpl_mi.cc
-@@ -491,7 +491,7 @@
-                          (1 + mi->ignore_server_ids.elements), MYF(MY_WME));
-     if (!ignore_server_ids_buf)
-       DBUG_RETURN(1);
--    ulong cur_len= sprintf(ignore_server_ids_buf, "%u",
-+    ulong cur_len= sprintf(ignore_server_ids_buf, "%lu",
-                            mi->ignore_server_ids.elements);
-     for (ulong i= 0; i < mi->ignore_server_ids.elements; i++)
-     {
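
The bug45702.patch removed above widened DYNAMIC_ARRAY counters and indexes from uint to ulong so that myisam_sort_buffer_size values of 4 GB and more survive on 64-bit builds. A minimal, self-contained C sketch of the failure mode (invented names, not MySQL code): a 4 GB byte count truncates to zero in a 32-bit variable but is preserved in a 64-bit one on LP64 platforms.

#include <stdio.h>

int main(void)
{
    unsigned long long sortbuff = 4ULL * 1024 * 1024 * 1024; /* 4 GB requested */

    unsigned int  memavl32 = (unsigned int)sortbuff;  /* 2^32 wraps to 0 */
    unsigned long memavl64 = (unsigned long)sortbuff; /* kept intact on LP64 */

    printf("uint  copy: %u bytes\n",  memavl32);
    printf("ulong copy: %lu bytes\n", memavl64);
    return 0;
}
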
diff --git a/bug54330.patch b/bug54330.patch
deleted file mode 100644
index 850f838..0000000
--- a/bug54330.patch
+++ /dev/null
@@ -1,236 +0,0 @@
---- a/storage/innobase/row/row0merge.c
-+++ b/storage/innobase/row/row0merge.c
-@@ -1607,22 +1607,28 @@
- 	const dict_index_t*	index,	/*!< in: index being created */
- 	merge_file_t*		file,	/*!< in/out: file containing
- 					index entries */
--	ulint*			half,	/*!< in/out: half the file */
- 	row_merge_block_t*	block,	/*!< in/out: 3 buffers */
- 	int*			tmpfd,	/*!< in/out: temporary file handle */
--	struct TABLE*		table)	/*!< in/out: MySQL table, for
-+	struct TABLE*		table,	/*!< in/out: MySQL table, for
- 					reporting erroneous key value
- 					if applicable */
-+	ulint*			num_run,/*!< in/out: Number of runs remain
-+					to be merged */
-+	ulint*			run_offset) /*!< in/out: Array contains the
-+					first offset number for each merge
-+					run */
- {
- 	ulint		foffs0;	/*!< first input offset */
- 	ulint		foffs1;	/*!< second input offset */
- 	ulint		error;	/*!< error code */
- 	merge_file_t	of;	/*!< output file */
--	const ulint	ihalf	= *half;
-+	const ulint	ihalf	= run_offset[*num_run / 2];
- 				/*!< half the input file */
--	ulint		ohalf;	/*!< half the output file */
-+	ulint		n_run	= 0;
-+				/*!< num of runs generated from this merge */
- 
- 	UNIV_MEM_ASSERT_W(block[0], 3 * sizeof block[0]);
-+
- 	ut_ad(ihalf < file->offset);
- 
- 	of.fd = *tmpfd;
-@@ -1638,17 +1644,20 @@
- #endif /* POSIX_FADV_SEQUENTIAL */
- 
- 	/* Merge blocks to the output file. */
--	ohalf = 0;
- 	foffs0 = 0;
- 	foffs1 = ihalf;
- 
-+	UNIV_MEM_INVALID(run_offset, *num_run * sizeof *run_offset);
-+
- 	for (; foffs0 < ihalf && foffs1 < file->offset; foffs0++, foffs1++) {
--		ulint	ahalf;	/*!< arithmetic half the input file */
- 
- 		if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
- 			return(DB_INTERRUPTED);
- 		}
- 
-+		/* Remember the offset number for this run */
-+		run_offset[n_run++] = of.offset;
-+
- 		error = row_merge_blocks(index, file, block,
- 					 &foffs0, &foffs1, &of, table);
- 
-@@ -1656,21 +1665,6 @@
- 			return(error);
- 		}
- 
--		/* Record the offset of the output file when
--		approximately half the output has been generated.  In
--		this way, the next invocation of row_merge() will
--		spend most of the time in this loop.  The initial
--		estimate is ohalf==0. */
--		ahalf = file->offset / 2;
--		ut_ad(ohalf <= of.offset);
--
--		/* Improve the estimate until reaching half the input
--		file size, or we can not get any closer to it.  All
--		comparands should be non-negative when !(ohalf < ahalf)
--		because ohalf <= of.offset. */
--		if (ohalf < ahalf || of.offset - ahalf < ohalf - ahalf) {
--			ohalf = of.offset;
--		}
- 	}
- 
- 	/* Copy the last blocks, if there are any. */
-@@ -1680,6 +1674,9 @@
- 			return(DB_INTERRUPTED);
- 		}
- 
-+		/* Remember the offset number for this run */
-+		run_offset[n_run++] = of.offset;
-+
- 		if (!row_merge_blocks_copy(index, file, block, &foffs0, &of)) {
- 			return(DB_CORRUPTION);
- 		}
-@@ -1692,6 +1689,9 @@
- 			return(DB_INTERRUPTED);
- 		}
- 
-+		/* Remember the offset number for this run */
-+		run_offset[n_run++] = of.offset;
-+
- 		if (!row_merge_blocks_copy(index, file, block, &foffs1, &of)) {
- 			return(DB_CORRUPTION);
- 		}
-@@ -1703,10 +1703,23 @@
- 		return(DB_CORRUPTION);
- 	}
- 
-+	ut_ad(n_run <= *num_run);
-+
-+	*num_run = n_run;
-+
-+	/* Each run can contain one or more offsets. As merge goes on,
-+	the number of runs (to merge) will reduce until we have one
-+	single run. So the number of runs will always be smaller than
-+	the number of offsets in file */
-+	ut_ad((*num_run) <= file->offset);
-+
-+	/* The number of offsets in output file is always equal or
-+	smaller than input file */
-+	ut_ad(of.offset <= file->offset);
-+
- 	/* Swap file descriptors for the next pass. */
- 	*tmpfd = file->fd;
- 	*file = of;
--	*half = ohalf;
- 
- 	UNIV_MEM_INVALID(block[0], 3 * sizeof block[0]);
- 
-@@ -1731,27 +1744,44 @@
- 					if applicable */
- {
- 	ulint	half = file->offset / 2;
-+	ulint	num_runs;
-+	ulint*	run_offset;
-+	ulint	error = DB_SUCCESS;
-+
-+	/* Record the number of merge runs we need to perform */
-+	num_runs = file->offset;
-+
-+	/* If num_runs are less than 1, nothing to merge */
-+	if (num_runs <= 1) {
-+		return(error);
-+	}
-+
-+	/* "run_offset" records each run's first offset number */
-+	run_offset = (ulint*) mem_alloc(file->offset * sizeof(ulint));
-+
-+	/* This tells row_merge() where to start for the first round
-+	of merge. */
-+	run_offset[half] = half;
- 
- 	/* The file should always contain at least one byte (the end
- 	of file marker).  Thus, it must be at least one block. */
- 	ut_ad(file->offset > 0);
- 
-+	/* Merge the runs until we have one big run */
- 	do {
--		ulint	error;
-+		error = row_merge(trx, index, file, block, tmpfd,
-+				  table, &num_runs, run_offset);
- 
--		error = row_merge(trx, index, file, &half,
--				  block, tmpfd, table);
-+		UNIV_MEM_ASSERT_RW(run_offset, num_runs * sizeof *run_offset);
- 
- 		if (error != DB_SUCCESS) {
--			return(error);
-+			break;
- 		}
-+	} while (num_runs > 1);
- 
--		/* half > 0 should hold except when the file consists
--		of one block.  No need to merge further then. */
--		ut_ad(half > 0 || file->offset == 1);
--	} while (half < file->offset && half > 0);
-+	mem_free(run_offset);
- 
--	return(DB_SUCCESS);
-+	return(error);
- }
- 
- /*************************************************************//**
---- /dev/null
-+++ b/mysql-test/suite/innodb/r/bug54330.result
-@@ -0,0 +1,13 @@
-+DROP TABLE IF EXISTS t1;
-+CREATE TABLE t1 (
-+id BIGINT(20) AUTO_INCREMENT PRIMARY KEY,
-+bar BIGINT(20)
-+) ENGINE=InnoDB;
-+SELECT COUNT(*) FROM t1;
-+COUNT(*)
-+517672
-+ALTER TABLE t1 ADD INDEX baz (bar);
-+SELECT COUNT(*) FROM t1 FORCE INDEX (baz);
-+COUNT(*)
-+517672
-+DROP TABLE t1;
---- /dev/null
-+++ b/mysql-test/suite/innodb/t/bug54330.test
-@@ -0,0 +1,38 @@
-+# Testcase for MySQL bug #54330 - broken fast index creation
-+
-+--disable_warnings
-+DROP TABLE IF EXISTS t1;
-+--enable_warnings
-+
-+CREATE TABLE t1 (
-+       id BIGINT(20) AUTO_INCREMENT PRIMARY KEY,
-+       bar BIGINT(20)
-+) ENGINE=InnoDB;
-+
-+--disable_query_log
-+SET @old_autocommit=@@AUTOCOMMIT;
-+SET AUTOCOMMIT=0;
-+let $1= 515641;
-+while ($1)
-+{
-+  eval INSERT INTO t1 (bar) VALUES (NULL);
-+  dec $1;
-+}
-+let $1= 2031;
-+while ($1)
-+{
-+  eval INSERT INTO t1 (bar) VALUES ($1);
-+  dec $1;
-+}
-+COMMIT;
-+SET AUTOCOMMIT=@old_autocommit;
-+--enable_query_log
-+
-+SELECT COUNT(*) FROM t1;
-+
-+ALTER TABLE t1 ADD INDEX baz (bar);
-+
-+# With the bug present this will differ from the SELECT above!
-+SELECT COUNT(*) FROM t1 FORCE INDEX (baz);
-+
-+DROP TABLE t1;
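
The bug54330.patch removed above replaced the half-the-file estimate in fast index creation with explicit bookkeeping: each merge pass records where every output run starts, and the loop repeats until a single run remains. A rough standalone sketch of that termination condition only (run count invented, no file I/O, not the InnoDB code):

#include <stdio.h>

int main(void)
{
    unsigned long num_runs = 37;   /* initial number of sorted runs (invented) */
    unsigned long pass = 0;

    /* Pairwise merging halves the run count each pass; stop at one run. */
    while (num_runs > 1) {
        unsigned long merged = (num_runs + 1) / 2;
        pass++;
        printf("pass %lu: %lu runs -> %lu runs\n", pass, num_runs, merged);
        num_runs = merged;
    }
    return 0;
}
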
diff --git a/bug580324.patch b/bug580324.patch
deleted file mode 100644
index f8f5bc3..0000000
--- a/bug580324.patch
+++ /dev/null
@@ -1,106 +0,0 @@
-# name       : bug580324.patch
-# introduced : 11 or before
-# maintainer : Oleg
-#
-#!!! notice !!!
-# Any small change to this file in the main branch
-# should be done or reviewed by the maintainer!
---- a/sql/sql_base.cc
-+++ b/sql/sql_base.cc
-@@ -314,8 +314,12 @@
-                           const TABLE_LIST *table_list,
-                           bool tmp_table)
- {
--  uint key_length= (uint) (strmov(strmov(key, table_list->db)+1,
--                                  table_list->table_name)-key)+1;
-+  char *db_end= strnmov(key, table_list->db, MAX_DBKEY_LENGTH - 2);
-+  *db_end++= '\0';
-+  char *table_end= strnmov(db_end, table_list->table_name,
-+                           key + MAX_DBKEY_LENGTH - 1 - db_end);
-+  *table_end++= '\0';
-+  uint key_length= (uint) (table_end-key);
-   if (tmp_table)
-   {
-     int4store(key + key_length, thd->server_id);
---- a/sql/sql_parse.cc
-+++ b/sql/sql_parse.cc
-@@ -1116,11 +1116,18 @@
-     break;
- #else
-   {
--    char *fields, *packet_end= packet + packet_length, *arg_end;
-+    char *fields;
-+    char *packet_end= packet + packet_length;
-+    char *wildcard;
-     /* Locked closure of all tables */
-     TABLE_LIST table_list;
-+    char table_name_buff[NAME_LEN+1];
-     LEX_STRING table_name;
-+    uint dummy_errors;
-     LEX_STRING db;
-+
-+    table_name.str= table_name_buff;
-+    table_name.length= 0;
-     /*
-       SHOW statements should not add the used tables to the list of tables
-       used in a transaction.
-@@ -1133,24 +1140,23 @@
-     /*
-       We have name + wildcard in packet, separated by endzero
-     */
--    arg_end= strend(packet);
--    uint arg_length= arg_end - packet;
--
--    /* Check given table name length. */
--    if (arg_length >= packet_length || arg_length > NAME_LEN)
-+    wildcard= strend(packet);
-+    table_name.length= wildcard - packet;
-+    wildcard++;
-+    uint query_length= (uint) (packet_end - wildcard); // Don't count end \0
-+    if (table_name.length > NAME_LEN || query_length > NAME_LEN)
-     {
-       my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0));
-       break;
-     }
--    thd->convert_string(&table_name, system_charset_info,
--			packet, arg_length, thd->charset());
--    if (check_table_name(table_name.str, table_name.length, FALSE))
--    {
--      /* this is OK due to convert_string() null-terminating the string */
--      my_error(ER_WRONG_TABLE_NAME, MYF(0), table_name.str);
-+    table_name.length= copy_and_convert(table_name.str,
-+                                        sizeof(table_name_buff)-1,
-+                                        system_charset_info,
-+                                        packet, table_name.length,
-+                                        thd->charset(), &dummy_errors);
-+    table_name.str[table_name.length]= '\0';
-+    if (!(fields= (char *) thd->memdup(wildcard, query_length + 1)))
-       break;
--    }
--    packet= arg_end + 1;
-     mysql_reset_thd_for_next_command(thd);
-     lex_start(thd);
-     /* Must be before we init the table list. */
-@@ -1175,9 +1181,6 @@
-         table_list.schema_table= schema_table;
-     }
- 
--    uint query_length= (uint) (packet_end - packet); // Don't count end \0
--    if (!(fields= (char *) thd->memdup(packet, query_length + 1)))
--      break;
-     thd->set_query(fields, query_length);
-     general_log_print(thd, command, "%s %s", table_list.table_name, fields);
- 
---- a/strings/ctype-utf8.c
-+++ b/strings/ctype-utf8.c
-@@ -4212,6 +4212,10 @@
- {
-   int code;
-   char hex[]= "0123456789abcdef";
-+
-+  if (s >= e)
-+    return MY_CS_TOOSMALL;
-+
-   if (wc < 128 && filename_safe_char[wc])
-   {
-     *s= (uchar) wc;
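
The bug580324.patch removed above bounded the copies that build the db\0table\0 table-cache key, so an over-long name can no longer run past the key buffer. A hypothetical sketch of the same pattern with invented names and buffer size (not the MySQL code):

#include <stdio.h>
#include <string.h>

#define MAX_KEY 64   /* invented key buffer size */

static size_t bounded_len(const char *s, size_t max)
{
    size_t n = 0;
    while (n < max && s[n] != '\0')
        n++;
    return n;
}

/* Build "db\0table\0" in key[], never writing past MAX_KEY bytes. */
static size_t make_table_key(char key[MAX_KEY], const char *db, const char *table)
{
    size_t db_len = bounded_len(db, MAX_KEY - 2);
    size_t tbl_len;

    memcpy(key, db, db_len);
    key[db_len] = '\0';

    tbl_len = bounded_len(table, MAX_KEY - 1 - (db_len + 1));
    memcpy(key + db_len + 1, table, tbl_len);
    key[db_len + 1 + tbl_len] = '\0';

    return db_len + 1 + tbl_len + 1;   /* total key length, both NULs counted */
}

int main(void)
{
    char key[MAX_KEY];
    printf("key length: %zu\n", make_table_key(key, "test", "t1"));  /* prints 8 */
    return 0;
}
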
diff --git a/bug813587.patch b/bug813587.patch
deleted file mode 100644
index ea8e838..0000000
--- a/bug813587.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-# name       : bug813587.patch
-# maintainer : Alexey
-#
-# Fix for LP bug #813587 / MySQL bug #51196 / MySQL bug #61790
-#
-# Clear MySQL connection errors in ha_federated::close(), since they
-# can affect queries on other tables due to table cache eviction.
-#
---- a/storage/federated/ha_federated.cc
-+++ b/storage/federated/ha_federated.cc
-@@ -1675,6 +1675,8 @@
- 
- int ha_federated::close(void)
- {
-+  THD *thd= current_thd;
-+
-   DBUG_ENTER("ha_federated::close");
- 
-   free_result();
-@@ -1685,6 +1687,10 @@
-   mysql_close(mysql);
-   mysql= NULL;
- 
-+  /* Clear possible errors from mysql_close(), see LP bug #813587. */
-+  if (thd)
-+    thd->clear_error();
-+
-   DBUG_RETURN(free_share(share));
- }
- 
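
The bug813587.patch removed above cleared connection errors raised inside ha_federated::close(), because a table evicted from the table cache could otherwise leak its error into an unrelated statement. A toy sketch of the idea with an invented per-thread error slot (nothing here is the actual server API):

#include <stdio.h>

static int thd_error;            /* stands in for the per-thread error state */

static void remote_close(void)   /* stands in for mysql_close() failing */
{
    thd_error = 2013;            /* e.g. a "lost connection" error */
}

static void handler_close(void)
{
    remote_close();
    if (thd_error)               /* the fix: teardown errors must not survive */
        thd_error = 0;
}

int main(void)
{
    handler_close();
    printf("error after close: %d\n", thd_error);   /* prints 0 */
    return 0;
}
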
diff --git a/bug860910.patch b/bug860910.patch
deleted file mode 100644
index f35a187..0000000
--- a/bug860910.patch
+++ /dev/null
@@ -1,92 +0,0 @@
-# name       : bug860910.patch
-# maintainer : Alexey
-#
-# Fixes LP bug #860910 / MySQL bug #62557
-#
---- /dev/null
-+++ b/mysql-test/suite/rpl/r/percona_bug860910.result
-@@ -0,0 +1,17 @@
-+*** Set up master (server_1) <-> master (server_2) replication  ***
-+include/rpl_init.inc [topology=1->2->1]
-+
-+SELECT @@global.log_slave_updates;
-+@@global.log_slave_updates
-+1
-+SELECT @@global.log_slave_updates;
-+@@global.log_slave_updates
-+1
-+CREATE TABLE t1(a INT);
-+SET @var:=0;
-+INSERT INTO t1 VALUES (@var);
-+INSERT INTO t1 VALUES (1);
-+DROP TABLE t1;
-+include/rpl_sync.inc
-+include/check_slave_param.inc [Exec_Master_Log_Pos]
-+include/rpl_end.inc
---- /dev/null
-+++ b/mysql-test/suite/rpl/t/percona_bug860910.cnf
-@@ -0,0 +1,8 @@
-+!include ../my.cnf
-+
-+[mysqld.1]
-+log-slave-updates
-+
-+[mysqld.2]
-+log-slave-updates
-+
---- /dev/null
-+++ b/mysql-test/suite/rpl/t/percona_bug860910.test
-@@ -0,0 +1,38 @@
-+########################################################################
-+# Bug #860910: SHOW SLAVE STATUS gives wrong output with master-master
-+#              and using SET uservars
-+########################################################################
-+
-+--echo *** Set up master (server_1) <-> master (server_2) replication  ***
-+--let $rpl_topology= 1->2->1
-+--source include/rpl_init.inc
-+--echo
-+
-+--connection server_1
-+SELECT @@global.log_slave_updates;
-+
-+--connection server_2
-+SELECT @@global.log_slave_updates;
-+
-+--connection server_1
-+CREATE TABLE t1(a INT);
-+SET @var:=0;
-+INSERT INTO t1 VALUES (@var);
-+INSERT INTO t1 VALUES (1);
-+
-+DROP TABLE t1;
-+
-+# The following would hang with the bug not fixed due to incorrect
-+# Exec_Master_Log_Pos
-+--source include/rpl_sync.inc
-+
-+--connection server_2
-+--let $master_log_pos= query_get_value(SHOW MASTER STATUS, Position, 1)
-+
-+--connection server_1
-+--let $slave_param= Exec_Master_Log_Pos
-+--let $slave_param_value= $master_log_pos
-+--source include/check_slave_param.inc
-+
-+# Cleanup
-+--source include/rpl_end.inc
---- a/sql/log.cc
-+++ b/sql/log.cc
-@@ -5112,6 +5112,12 @@
-                                  user_var_event->type,
-                                  user_var_event->charset_number,
-                                  flags);
-+             /*
-+               These User_var_log_events must be logged with event_info's
-+               server_id, rather than the current one.
-+             */
-+             e.server_id= event_info->server_id;
-+
-             if (e.write(file))
-               goto err;
-             if (file == &log_file)
diff --git a/bug917246.patch b/bug917246.patch
deleted file mode 100644
index 9c43316..0000000
--- a/bug917246.patch
+++ /dev/null
@@ -1,17 +0,0 @@
---- /dev/null
-+++ b/mysql-test/r/percona_innodb_version.result
-@@ -0,0 +1,3 @@
-+SELECT @@innodb_version;
-+@@innodb_version
-+1.1.x-x.x
---- /dev/null
-+++ b/mysql-test/t/percona_innodb_version.test
-@@ -0,0 +1,8 @@
-+--source include/have_innodb.inc
-+
-+########################################################################
-+# Bug #917246: Unintentional change of innodb_version format in 5.1.60
-+########################################################################
-+
-+--replace_regex /1\.1\.[0-9]+-[0-9]+\.[0-9]+/1.1.x-x.x/
-+SELECT @@innodb_version;
diff --git a/bug933969.patch b/bug933969.patch
deleted file mode 100644
index 2590716..0000000
--- a/bug933969.patch
+++ /dev/null
@@ -1,92 +0,0 @@
---- /dev/null
-+++ b/mysql-test/r/percona_bug933969.result
-@@ -0,0 +1,16 @@
-+RESET MASTER;
-+DROP TABLE IF EXISTS t1;
-+CREATE TABLE t1 (word VARCHAR(20));
-+INSERT INTO t1 VALUES ("hamite");
-+INSERT INTO t1 VALUES ("hoho");
-+INSERT INTO t1 VALUES ("znamenito");
-+INSERT INTO t1 VALUES ("mrachny");
-+INSERT INTO t1 VALUES ("mrak");
-+INSERT INTO t1 VALUES ("zhut");
-+INSERT INTO t1 VALUES ("parnisha");
-+INSERT INTO t1 VALUES ("krrasota!");
-+INSERT INTO t1 VALUES ("podumayesh");
-+INSERT INTO t1 VALUES ("ogo!");
-+FLUSH LOGS;
-+DROP TABLE t1;
-+RESET MASTER;
---- /dev/null
-+++ b/mysql-test/t/percona_bug933969.test
-@@ -0,0 +1,42 @@
-+###################### percona_bug933969.test ########################
-+# Bug #933969: mysqlbinlog doesn't accept stdin                      #
-+#                                                                    #
-+# The goal of this testcase is to test that mysqlbinlog handle       #
-+# stdin correctly when stdin is pipe.                                #
-+# i.e. "cat log | mysqlbinlog -" don't cause mysqlbinlog failure     #
-+######################################################################
-+-- source include/have_log_bin.inc
-+-- source include/not_windows.inc
-+-- source include/not_embedded.inc
-+
-+# deletes all the binary logs
-+RESET MASTER;
-+
-+--disable_warnings
-+DROP TABLE IF EXISTS t1;
-+--enable_warnings
-+
-+# produce some statements for binlog
-+
-+CREATE TABLE t1 (word VARCHAR(20));
-+
-+INSERT INTO t1 VALUES ("hamite");
-+INSERT INTO t1 VALUES ("hoho");
-+INSERT INTO t1 VALUES ("znamenito");
-+INSERT INTO t1 VALUES ("mrachny");
-+INSERT INTO t1 VALUES ("mrak");
-+INSERT INTO t1 VALUES ("zhut");
-+INSERT INTO t1 VALUES ("parnisha");
-+INSERT INTO t1 VALUES ("krrasota!");
-+INSERT INTO t1 VALUES ("podumayesh");
-+INSERT INTO t1 VALUES ("ogo!");
-+
-+FLUSH LOGS;
-+
-+# run mysqlbinlog and make sure it ends normally
-+
-+let $MYSQLD_DATADIR= `SELECT @@datadir`;
-+--system cat $MYSQLD_DATADIR/master-bin.000001 | $MYSQL_BINLOG - >/dev/null
-+
-+DROP TABLE t1;
-+RESET MASTER;
---- a/client/mysqlbinlog.cc
-+++ b/client/mysqlbinlog.cc
-@@ -1760,7 +1760,7 @@
-   }
- 
-   pos= my_b_tell(file);
--  my_b_seek(file, (my_off_t)0);
-+  DBUG_ASSERT(pos == 0);
-   if (my_b_read(file, header, sizeof(header)))
-   {
-     error("Failed reading header; probably an empty file.");
-@@ -1920,7 +1920,7 @@
-     /* read from normal file */
-     if ((fd = my_open(logname, O_RDONLY | O_BINARY, MYF(MY_WME))) < 0)
-       return ERROR_STOP;
--    if (init_io_cache(file, fd, 0, READ_CACHE, start_position_mot, 0,
-+    if (init_io_cache(file, fd, 0, READ_CACHE, (my_off_t) 0, 0,
- 		      MYF(MY_WME | MY_NABP)))
-     {
-       my_close(fd, MYF(MY_WME));
-@@ -1928,6 +1928,7 @@
-     }
-     if ((retval= check_header(file, print_event_info, logname)) != OK_CONTINUE)
-       goto end;
-+    my_b_seek(file, start_position_mot);
-   }
-   else
-   {
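
The bug933969.patch removed above let mysqlbinlog read its input from a pipe ("cat log | mysqlbinlog -") by dropping the rewind before the header check and seeking only on regular files afterwards. A small POSIX C sketch of the underlying distinction (not mysqlbinlog code): lseek() fails with ESPIPE on a pipe, so a rewind is only attempted on seekable input.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* On pipes and FIFOs lseek() fails; on regular files it returns the
       current offset, so rewinding to the header is possible. */
    if (lseek(STDIN_FILENO, 0, SEEK_CUR) == (off_t)-1)
        printf("stdin is a pipe: read strictly forward, never seek back\n");
    else
        printf("stdin is seekable: rewinding to offset 0 is safe\n");
    return 0;
}
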
diff --git a/bug966844_page_size_error_on_5520_upgrade.patch b/bug966844_page_size_error_on_5520_upgrade.patch
deleted file mode 100644
index 826ad4c..0000000
--- a/bug966844_page_size_error_on_5520_upgrade.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-Bug#966844 "page size 1024 but the only supported page size in this release is=16384" error on upgrade to 5.5.2[01]
-
-Patch is from http://bugs.mysql.com/bug.php?id=64160
-http://bugs.mysql.com/file.php?id=18031&text=1
-By Kevin Lewis
-
-Adapted to Percona Server by Stewart Smith
-
---- a/storage/innobase/srv/srv0start.c
-+++ b/storage/innobase/srv/srv0start.c
-@@ -925,8 +925,9 @@
- #endif /* UNIV_LOG_ARCHIVE */
- 				min_flushed_lsn, max_flushed_lsn);
- 
--			if (UNIV_PAGE_SIZE
--			    != fsp_flags_get_page_size(flags)) {
-+			if (!one_opened
-+			    && UNIV_PAGE_SIZE
-+			       != fsp_flags_get_page_size(flags)) {
- 
- 				ut_print_timestamp(stderr);
- 				fprintf(stderr,
diff --git a/control_online_alter_index.patch b/control_online_alter_index.patch
deleted file mode 100644
index 29c0051..0000000
--- a/control_online_alter_index.patch
+++ /dev/null
@@ -1,74 +0,0 @@
-# name       : control_online_alter_index.patch
-# introduced : 12
-# maintainer : Yasufumi
-#
-#!!! notice !!!
-# Any small change to this file in the main branch
-# should be done or reviewed by the maintainer!
---- a/sql/handler.h
-+++ b/sql/handler.h
-@@ -206,6 +206,8 @@
- #define HA_INPLACE_DROP_UNIQUE_INDEX_NO_WRITE      (1L << 9)
- #define HA_INPLACE_ADD_PK_INDEX_NO_WRITE           (1L << 10)
- #define HA_INPLACE_DROP_PK_INDEX_NO_WRITE          (1L << 11)
-+
-+#define HA_INPLACE_ALTER_INDEX_MASK                ((1L << 12) - 1)
- /*
-   HA_PARTITION_FUNCTION_SUPPORTED indicates that the function is
-   supported at all.
---- a/sql/sql_class.h
-+++ b/sql/sql_class.h
-@@ -479,6 +479,8 @@
-   my_bool engine_condition_pushdown;
-   my_bool keep_files_on_create;
- 
-+  my_bool online_alter_index;
-+
-   my_bool old_alter_table;
-   my_bool old_passwords;
-   my_bool big_tables;
---- a/sql/sql_partition.cc
-+++ b/sql/sql_partition.cc
-@@ -4637,7 +4637,12 @@
-         alter_info->num_parts= curr_part_no - new_part_no;
-       }
-     }
--    if (!(flags= new_table->file->alter_table_flags(alter_info->flags)))
-+    flags= new_table->file->alter_table_flags(alter_info->flags);
-+    if (!thd->variables.online_alter_index)
-+    {
-+      flags&= ~((uint)HA_INPLACE_ALTER_INDEX_MASK);
-+    }
-+    if (!flags)
-     {
-       my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0));
-       goto err;
---- a/sql/sql_table.cc
-+++ b/sql/sql_table.cc
-@@ -6146,6 +6146,10 @@
-     uint  *idx_end_p;
- 
-     alter_flags= table->file->alter_table_flags(alter_info->flags);
-+    if (!thd->variables.online_alter_index)
-+    {
-+      alter_flags&= ~((ulong)HA_INPLACE_ALTER_INDEX_MASK);
-+    }
-     DBUG_PRINT("info", ("alter_flags: %lu", alter_flags));
-     /* Check dropped indexes. */
-     for (idx_p= index_drop_buffer, idx_end_p= idx_p + index_drop_count;
---- a/sql/sys_vars.cc
-+++ b/sql/sys_vars.cc
-@@ -2265,6 +2265,13 @@
-        GLOBAL_VAR(opt_optimizer_fix),
-        NO_CMD_LINE, DEFAULT(TRUE));
- 
-+static Sys_var_mybool Sys_fast_index_creation(
-+       "fast_index_creation",
-+       "If disabled, suppresses online operations for indexes of ALTER TABLE "
-+       "(e.g. fast index creation of InnoDB Plugin) for the session.",
-+       SESSION_VAR(online_alter_index), NO_CMD_LINE,
-+       DEFAULT(TRUE));
-+
- /** propagates changes to the relevant flag of @@optimizer_switch */
- static bool fix_engine_condition_pushdown(sys_var *self, THD *thd,
-                                           enum_var_type type)
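
The control_online_alter_index.patch removed above gated the handler's in-place index capabilities behind a session variable by masking those flag bits out when the switch is off. A minimal sketch of that bit masking with invented flag values (not the server's actual HA_INPLACE_* constants):

#include <stdio.h>

#define HA_INPLACE_ADD_INDEX   (1L << 0)
#define HA_INPLACE_DROP_INDEX  (1L << 1)
#define HA_INPLACE_INDEX_MASK  ((1L << 12) - 1)  /* every in-place index bit */
#define HA_SOME_OTHER_FLAG     (1L << 20)

int main(void)
{
    long flags = HA_INPLACE_ADD_INDEX | HA_INPLACE_DROP_INDEX | HA_SOME_OTHER_FLAG;
    int  online_alter_index = 0;                 /* session switch disabled */

    if (!online_alter_index)
        flags &= ~HA_INPLACE_INDEX_MASK;         /* fall back to copying ALTER */

    printf("remaining flags: 0x%lx\n", flags);   /* prints 0x100000 */
    return 0;
}
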
diff --git a/error_pad.patch b/error_pad.patch
deleted file mode 100644
index 87d396a..0000000
--- a/error_pad.patch
+++ /dev/null
@@ -1,266 +0,0 @@
-# name       : error_pad.patch
-# introduced : 12
-# maintainer : Oleg
-#
-#!!! notice !!!
-# Any small change to this file in the main branch
-# should be done or reviewed by the maintainer!
---- a/extra/comp_err.c
-+++ b/extra/comp_err.c
-@@ -32,11 +32,12 @@
- #include <assert.h>
- #include <my_dir.h>
- 
--#define MAX_ROWS  1000
-+#define MAX_ROWS  5000
- #define HEADER_LENGTH 32                /* Length of header in errmsg.sys */
- #define DEFAULT_CHARSET_DIR "../sql/share/charsets"
- #define ER_PREFIX "ER_"
- #define WARN_PREFIX "WARN_"
-+#define PADD_PREFIX "PADD_"
- static char *OUTFILE= (char*) "errmsg.sys";
- static char *HEADERFILE= (char*) "mysqld_error.h";
- static char *NAMEFILE= (char*) "mysqld_ername.h";
-@@ -91,6 +92,7 @@
-   const char *sql_code1;		/* sql state */
-   const char *sql_code2;		/* ODBC state */
-   struct errors *next_error;            /* Pointer to next error */
-+  my_bool is_padding;                   /* If true - padd this er_name while er_code != d_code*/
-   DYNAMIC_ARRAY msg;                    /* All language texts for this error */
- };
- 
-@@ -129,6 +131,7 @@
- 
- 
- static struct languages *parse_charset_string(char *str);
-+static struct errors *parse_padd_string(char *ptr, int er_count);
- static struct errors *parse_error_string(char *ptr, int er_count);
- static struct message *parse_message_string(struct message *new_message,
- 					    char *str);
-@@ -253,6 +256,11 @@
- 
-   for (tmp_error= error_head; tmp_error; tmp_error= tmp_error->next_error)
-   {
-+    if (tmp_error->is_padding)
-+    {
-+      er_last= tmp_error->d_code;
-+      continue;
-+    }
-     /*
-        generating mysqld_error.h
-        fprintf() will automatically add \r on windows
-@@ -345,12 +353,29 @@
- 		"language\n", tmp_error->er_name, tmp_lang->lang_short_name);
- 	goto err;
-       }
--      if (copy_rows(to, tmp->text, row_nr, start_pos))
-+      if (tmp_error->is_padding)
-       {
--	fprintf(stderr, "Failed to copy rows to %s\n", outfile);
--	goto err;
-+        uint padd_to= tmp_error->d_code;
-+        char* padd_message= tmp->text;
-+        while ((row_nr+er_offset) < padd_to)
-+        {
-+          if (copy_rows(to, padd_message,row_nr,start_pos))
-+          {
-+            fprintf(stderr, "Failed to copy rows to %s\n", outfile);
-+            goto err;
-+          }
-+          row_nr++;
-+        }
-+      }
-+      else
-+      {
-+        if (copy_rows(to, tmp->text, row_nr, start_pos))
-+        {
-+          fprintf(stderr, "Failed to copy rows to %s\n", outfile);
-+          goto err;
-+        }
-+        row_nr++;
-       }
--      row_nr++;
-     }
- 
-     /* continue with header of the errmsg.sys file */
-@@ -501,14 +526,26 @@
- 	DBUG_RETURN(0);
-       continue;
-     }
--    if (is_prefix(str, ER_PREFIX) || is_prefix(str, WARN_PREFIX))
-+    if (is_prefix(str, ER_PREFIX) || is_prefix(str, WARN_PREFIX) || is_prefix(str, PADD_PREFIX))
-     {
--      if (!(current_error= parse_error_string(str, rcount)))
-+      if (is_prefix(str, PADD_PREFIX))
-       {
--	fprintf(stderr, "Failed to parse the error name string\n");
--	DBUG_RETURN(0);
-+        if (!(current_error= parse_padd_string(str, rcount)))
-+        {
-+          fprintf(stderr, "Failed to parse the error pad string\n");
-+          DBUG_RETURN(0);
-+        }
-+        rcount= current_error->d_code - er_offset;  /* Count number of unique errors */
-+      }
-+      else
-+      {
-+        if (!(current_error= parse_error_string(str, rcount)))
-+        {
-+          fprintf(stderr, "Failed to parse the error name string\n");
-+          DBUG_RETURN(0);
-+        }
-+        rcount++;                         /* Count number of unique errors */
-       }
--      rcount++;                         /* Count number of unique errors */
- 
-       /* add error to the list */
-       *tail_error= current_error;
-@@ -849,78 +886,122 @@
-   DBUG_RETURN(new_message);
- }
- 
-+static struct errors* create_new_error(my_bool is_padding, char *er_name, int d_code, const char *sql_code1, const char *sql_code2)
-+{
-+  struct errors *new_error;
-+  DBUG_ENTER("create_new_error");
-+  /* create a new element */
-+  new_error= (struct errors *) my_malloc(sizeof(*new_error), MYF(MY_WME));
-+  if (my_init_dynamic_array(&new_error->msg, sizeof(struct message), 0, 0))
-+    DBUG_RETURN(0);				/* OOM: Fatal error */
-+  new_error->is_padding= is_padding;
-+  DBUG_PRINT("info", ("is_padding: %s", (is_padding ? "true" : "false")));
-+  new_error->er_name= er_name;
-+  DBUG_PRINT("info", ("er_name: %s", er_name));
-+  new_error->d_code= d_code;
-+  DBUG_PRINT("info", ("d_code: %d", d_code));
-+  new_error->sql_code1= sql_code1;
-+  DBUG_PRINT("info", ("sql_code1: %s", sql_code1));
-+  new_error->sql_code2= sql_code2;
-+  DBUG_PRINT("info", ("sql_code2: %s", sql_code2));
-+  DBUG_RETURN(new_error);
-+}
- 
- /*
--  Parsing the string with error name and codes; returns the pointer to
-+  Parsing the string with pad syntax (name + error to pad); returns the pointer to
-   the errors struct
- */
- 
--static struct errors *parse_error_string(char *str, int er_count)
-+static struct errors *parse_padd_string(char* str, int er_count)
- {
--  struct errors *new_error;
-+  char *er_name;
-+  uint d_code;
-+  char *start;
-   DBUG_ENTER("parse_error_string");
-   DBUG_PRINT("enter", ("str: %s", str));
- 
--  /* create a new element */
--  new_error= (struct errors *) my_malloc(sizeof(*new_error), MYF(MY_WME));
-+  start= str;
-+  str= skip_delimiters(str);
- 
--  if (my_init_dynamic_array(&new_error->msg, sizeof(struct message), 0, 0))
-+  /* getting the error name */
-+
-+  if (!(er_name= get_word(&str)))
-     DBUG_RETURN(0);				/* OOM: Fatal error */
- 
--  /* getting the error name */
-   str= skip_delimiters(str);
- 
--  if (!(new_error->er_name= get_word(&str)))
-+  if (!(d_code= parse_error_offset(start)))
-+  {
-+    fprintf(stderr, "Failed to parse the error pad string '%s' '%s' (d_code doesn't parse)!\n",er_name,str);
-+    DBUG_RETURN(0);
-+  }
-+  if (d_code < (uint)(er_offset + er_count))
-+  {
-+    fprintf(stderr, "Error to padding less current error number!\n");
-+    DBUG_RETURN(0);
-+  }
-+  DBUG_RETURN(create_new_error(TRUE,er_name,d_code,empty_string,empty_string));
-+}
-+
-+/*
-+  Parsing the string with error name and codes; returns the pointer to
-+  the errors struct
-+*/
-+
-+static struct errors *parse_error_string(char *str, int er_count)
-+{
-+  char *er_name;
-+  int d_code;
-+  const char *sql_code1= empty_string;
-+  const char *sql_code2= empty_string;
-+  DBUG_ENTER("parse_error_string");
-+  DBUG_PRINT("enter", ("str: %s", str));
-+
-+  str= skip_delimiters(str);
-+
-+  /* getting the error name */
-+
-+  if (!(er_name= get_word(&str)))
-     DBUG_RETURN(0);				/* OOM: Fatal error */
--  DBUG_PRINT("info", ("er_name: %s", new_error->er_name));
- 
-   str= skip_delimiters(str);
- 
-   /* getting the code1 */
--
--  new_error->d_code= er_offset + er_count;
--  DBUG_PRINT("info", ("d_code: %d", new_error->d_code));
-+  d_code= er_offset + er_count;
- 
-   str= skip_delimiters(str);
- 
-   /* if we reached EOL => no more codes, but this can happen */
-   if (!*str)
-   {
--    new_error->sql_code1= empty_string;
--    new_error->sql_code2= empty_string;
-     DBUG_PRINT("info", ("str: %s", str));
--    DBUG_RETURN(new_error);
-+    goto complete_create;
-   }
--
-   /* getting the sql_code 1 */
--
--  if (!(new_error->sql_code1= get_word(&str)))
-+  if (!(sql_code1= get_word(&str)))
-     DBUG_RETURN(0);				/* OOM: Fatal error */
--  DBUG_PRINT("info", ("sql_code1: %s", new_error->sql_code1));
- 
-   str= skip_delimiters(str);
- 
-   /* if we reached EOL => no more codes, but this can happen */
-   if (!*str)
-   {
--    new_error->sql_code2= empty_string;
-     DBUG_PRINT("info", ("str: %s", str));
--    DBUG_RETURN(new_error);
-+    goto complete_create;
-   }
--
-   /* getting the sql_code 2 */
--  if (!(new_error->sql_code2= get_word(&str)))
-+  if (!(sql_code2= get_word(&str)))
-     DBUG_RETURN(0);				/* OOM: Fatal error */
--  DBUG_PRINT("info", ("sql_code2: %s", new_error->sql_code2));
- 
-   str= skip_delimiters(str);
-+
-   if (*str)
-   {
-     fprintf(stderr, "The error line did not end with sql/odbc code!");
-     DBUG_RETURN(0);
-   }
--
--  DBUG_RETURN(new_error);
-+complete_create:
-+  DBUG_RETURN(create_new_error(FALSE,er_name,d_code,sql_code1,sql_code2));
- }
- 
- 
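
The PADD_ handling removed above reserves a contiguous range of error numbers by
repeating a filler message until the running row number reaches the padding target, so
that later error codes keep their absolute values. A rough standalone sketch of that
loop; copy_row() and the numbers are stand-ins, not comp_err internals:

    #include <cstdio>

    // Stand-in for comp_err's copy_rows(): here we just print the row.
    static void copy_row(const char *msg, unsigned row) { printf("%u: %s\n", row, msg); }

    // Repeat padd_message until row_nr + er_offset reaches padd_to,
    // mirroring the padding loop in the deleted hunk.
    static unsigned pad_rows(unsigned row_nr, unsigned er_offset,
                             unsigned padd_to, const char *padd_message)
    {
      while (row_nr + er_offset < padd_to)
      {
        copy_row(padd_message, row_nr);
        row_nr++;
      }
      return row_nr;            // next free row after the padding
    }

    int main()
    {
      // Pad from error 1003 up to (but not including) 1007.
      unsigned next = pad_rows(3, 1000, 1007, "placeholder message");
      printf("next row: %u\n", next);
      return 0;
    }
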
diff --git a/file-contents.patch b/file-contents.patch
deleted file mode 100644
index 5cbd412..0000000
--- a/file-contents.patch
+++ /dev/null
@@ -1,25 +0,0 @@
---- a/mysql-test/t/file_contents.test
-+++ b/mysql-test/t/file_contents.test
-@@ -20,7 +20,7 @@
-     $dir_docs = glob "$dir_docs/packages/MySQL-server*";
-   } else {
-     # RedHat: version number in directory name
--    $dir_docs = glob "$dir_docs/MySQL-server*";
-+    $dir_docs = glob "$dir_docs/Percona-Server-server*";
-   }
- } elsif ($dir_bin =~ m|/usr$|) {
-   # RPM build during development
-@@ -28,9 +28,12 @@
-   if(-d "$dir_docs/packages") {
-     # SuSE: "packages/" in the documentation path
-     $dir_docs = glob "$dir_docs/packages/MySQL-server*";
-+  } elsif (glob "$dir_docs/percona-server-server*") {
-+    # Debian
-+    $dir_docs = glob "$dir_docs/percona-server-server*";
-   } else {
-     # RedHat: version number in directory name
--    $dir_docs = glob "$dir_docs/MySQL-server*";
-+    $dir_docs = glob "$dir_docs/Percona-Server-server*";
-   }
- } else {
-   # tar.gz package, Windows, or developer work (in BZR)
diff --git a/group_commit.patch b/group_commit.patch
deleted file mode 100644
index ec57a9e..0000000
--- a/group_commit.patch
+++ /dev/null
@@ -1,2610 +0,0 @@
---- a/include/my_sys.h
-+++ b/include/my_sys.h
-@@ -524,6 +524,8 @@
- 
- #define my_b_tell(info) ((info)->pos_in_file + \
- 			 (size_t) (*(info)->current_pos - (info)->request_pos))
-+#define my_b_write_tell(info) ((info)->pos_in_file + \
-+			       ((info)->write_pos - (info)->write_buffer))
- 
- #define my_b_get_buffer_start(info) (info)->request_pos 
- #define my_b_get_bytes_in_buffer(info) (char*) (info)->read_end -   \
---- a/include/mysql/plugin.h
-+++ b/include/mysql/plugin.h
-@@ -559,6 +559,8 @@
- 
- #define EXTENDED_FOR_USERSTAT
- 
-+#define EXTENDED_FOR_COMMIT_ORDERED
-+
- /**
-   Create a temporary file.
- 
---- a/sql/handler.cc
-+++ b/sql/handler.cc
-@@ -90,6 +90,8 @@
- static TYPELIB known_extensions= {0,"known_exts", NULL, NULL};
- uint known_extensions_id= 0;
- 
-+static int commit_one_phase_low(THD *thd, bool all, THD_TRANS *trans,
-+                                bool is_real_trans);
- 
- 
- static plugin_ref ha_default_plugin(THD *thd)
-@@ -1119,7 +1121,8 @@
-   */
-   bool is_real_trans= all || thd->transaction.all.ha_list == 0;
-   Ha_trx_info *ha_info= trans->ha_list;
--  my_xid xid= thd->transaction.xid_state.xid.get_my_xid();
-+  bool need_commit_ordered;
-+  my_xid xid;
-   DBUG_ENTER("ha_commit_trans");
- 
-   /*
-@@ -1152,13 +1155,20 @@
-     DBUG_RETURN(2);
-   }
- 
--  if (ha_info)
-+  if (!ha_info)
-+  {
-+    /* Free resources and perform other cleanup even for 'empty' transactions. */
-+    if (is_real_trans)
-+      thd->transaction.cleanup();
-+    DBUG_RETURN(0);
-+  }
-+  else
-   {
-     uint rw_ha_count;
-     bool rw_trans;
-     MDL_request mdl_request;
- 
--    DBUG_EXECUTE_IF("crash_commit_before", DBUG_SUICIDE(););
-+    DBUG_EXECUTE_IF("crash_commit_before", abort(););
- 
-     /* Close all cursors that can not survive COMMIT */
-     if (is_real_trans)                          /* not a statement commit */
-@@ -1197,57 +1207,80 @@
-         !thd->slave_thread)
-     {
-       my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only");
--      ha_rollback_trans(thd, all);
--      error= 1;
--      goto end;
-+      goto err;
-     }
- 
--    if (!trans->no_2pc && (rw_ha_count > 1))
-+    if (trans->no_2pc || (rw_ha_count <= 1))
-     {
--      for (; ha_info && !error; ha_info= ha_info->next())
-+      error= ha_commit_one_phase(thd, all);
-+      DBUG_EXECUTE_IF("crash_commit_after", DBUG_ABORT(););
-+      goto end;
-+    }
-+
-+    need_commit_ordered= FALSE;
-+    xid= thd->transaction.xid_state.xid.get_my_xid();
-+
-+      for (Ha_trx_info *hi= ha_info; hi; hi= hi->next())
-       {
-         int err;
--        handlerton *ht= ha_info->ht();
-+        handlerton *ht= hi->ht();
-         /*
-           Do not call two-phase commit if this particular
-           transaction is read-only. This allows for simpler
-           implementation in engines that are always read-only.
-         */
--        if (! ha_info->is_trx_read_write())
-+        if (! hi->is_trx_read_write())
-           continue;
-         /*
-           Sic: we know that prepare() is not NULL since otherwise
-           trans->no_2pc would have been set.
-         */
--        if ((err= ht->prepare(ht, thd, all)))
--        {
--          my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
--          error= 1;
--        }
-+        err= ht->prepare(ht, thd, all);
-         status_var_increment(thd->status_var.ha_prepare_count);
-+        if (err)
-+          my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
-+
-+        if (err)
-+          goto err;
-+
-+        need_commit_ordered|= (ht->commit_ordered != NULL);
-       }
--      DBUG_EXECUTE_IF("crash_commit_after_prepare", DBUG_SUICIDE(););
--      if (error || (is_real_trans && xid &&
--                    (error= !(cookie= tc_log->log_xid(thd, xid)))))
-+      DBUG_EXECUTE_IF("crash_commit_after_prepare", DBUG_ABORT(););
-+
-+      if (!is_real_trans)
-       {
--        ha_rollback_trans(thd, all);
--        error= 1;
-+        error= commit_one_phase_low(thd, all, trans, is_real_trans);
-+        DBUG_EXECUTE_IF("crash_commit_after", DBUG_ABORT(););
-         goto end;
-       }
--      DBUG_EXECUTE_IF("crash_commit_after_log", DBUG_SUICIDE(););
--    }
--    error=ha_commit_one_phase(thd, all) ? (cookie ? 2 : 1) : 0;
--    DBUG_EXECUTE_IF("crash_commit_before_unlog", DBUG_SUICIDE(););
--    if (cookie)
-+
-+    cookie= tc_log->log_and_order(thd, xid, all, need_commit_ordered);
-+    if (!cookie)
-+      goto err;
-+
-+    DBUG_EXECUTE_IF("crash_commit_after_log", DBUG_ABORT(););
-+
-+    error= commit_one_phase_low(thd, all, trans, is_real_trans) ? 2 : 0;
-+    DBUG_EXECUTE_IF("crash_commit_after", DBUG_ABORT(););
-+    if (is_real_trans)          /* userstat.patch */
-+      thd->diff_commit_trans++; /* userstat.patch */
-+    RUN_HOOK(transaction, after_commit, (thd, FALSE));
-+
-+    DBUG_EXECUTE_IF("crash_commit_before_unlog", DBUG_ABORT(););
-       if(tc_log->unlog(cookie, xid))
-       {
-         error= 2;
-         goto end;
-       }
--    DBUG_EXECUTE_IF("crash_commit_after", DBUG_SUICIDE(););
--    if (is_real_trans)
--      thd->diff_commit_trans++;
--    RUN_HOOK(transaction, after_commit, (thd, FALSE));
-+
-+    DBUG_EXECUTE_IF("crash_commit_after", DBUG_ABORT(););
-+    goto end;
-+
-+    /* Come here if error and we need to rollback. */
-+err:
-+    error= 1;                                  /* Transaction was rolled back */
-+    ha_rollback_trans(thd, all);
-+
- end:
-     if (rw_trans && mdl_request.ticket)
-     {
-@@ -1260,9 +1293,6 @@
-       thd->mdl_context.release_lock(mdl_request.ticket);
-     }
-   }
--  /* Free resources and perform other cleanup even for 'empty' transactions. */
--  else if (is_real_trans)
--    thd->transaction.cleanup();
-   DBUG_RETURN(error);
- }
- 
-@@ -1279,7 +1309,6 @@
- 
- int ha_commit_one_phase(THD *thd, bool all)
- {
--  int error=0;
-   THD_TRANS *trans=all ? &thd->transaction.all : &thd->transaction.stmt;
-   /*
-     "real" is a nick name for a transaction for which a commit will
-@@ -1295,8 +1324,16 @@
-     transaction.all.ha_list, see why in trans_register_ha()).
-   */
-   bool is_real_trans=all || thd->transaction.all.ha_list == 0;
--  Ha_trx_info *ha_info= trans->ha_list, *ha_info_next;
-   DBUG_ENTER("ha_commit_one_phase");
-+  DBUG_RETURN(commit_one_phase_low(thd, all, trans, is_real_trans));
-+}
-+
-+static int
-+commit_one_phase_low(THD *thd, bool all, THD_TRANS *trans, bool is_real_trans)
-+{
-+  int error= 0;
-+  Ha_trx_info *ha_info= trans->ha_list, *ha_info_next;
-+  DBUG_ENTER("commit_one_phase_low");
- 
-   if (ha_info)
-   {
-@@ -1894,7 +1931,16 @@
- {
-   bool warn= true;
- 
-+  /*
-+    Holding the LOCK_commit_ordered mutex ensures that we get the same
-+    snapshot for all engines (including the binary log).  This allows us
-+    among other things to do backups with
-+    START TRANSACTION WITH CONSISTENT SNAPSHOT and
-+    have a consistent binlog position.
-+  */
-+  mysql_mutex_lock(&LOCK_commit_ordered);
-   plugin_foreach(thd, snapshot_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, &warn);
-+  mysql_mutex_unlock(&LOCK_commit_ordered);
- 
-   /*
-     Same idea as when one wants to CREATE TABLE in one engine which does not
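
Condensed, the reworked ha_commit_trans() in the hunks above runs: prepare every
read-write engine, hand the XID to tc_log->log_and_order() (which also drives the
commit_ordered() calls), commit in one phase, then unlog; any failure before the log
write rolls the transaction back. A stub-only C++ sketch of that control flow, where
every function is a placeholder and only the ordering reflects the patch:

    #include <cstdio>

    static bool prepare_all_rw_engines() { puts("ht->prepare() for each rw engine"); return true; }
    static int  log_and_order()          { puts("tc_log->log_and_order()");          return 1; }  // cookie, 0 = error
    static int  commit_one_phase_low()   { puts("commit_one_phase_low()");           return 0; }
    static void rollback_all()           { puts("ha_rollback_trans()");                         }
    static int  unlog(int cookie)        { printf("tc_log->unlog(%d)\n", cookie);    return 0; }

    static int commit_two_phase()
    {
      if (!prepare_all_rw_engines()) { rollback_all(); return 1; }
      int cookie = log_and_order();                // binlog write + commit_ordered() calls
      if (!cookie)                   { rollback_all(); return 1; }
      int error = commit_one_phase_low() ? 2 : 0;  // engine commit()s
      if (unlog(cookie))
        return 2;
      return error;
    }

    int main() { return commit_two_phase(); }
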
---- a/sql/handler.h
-+++ b/sql/handler.h
-@@ -756,6 +756,53 @@
-      and 'real commit' mean the same event.
-    */
-    int  (*commit)(handlerton *hton, THD *thd, bool all);
-+   /*
-+     The commit_ordered() method is called prior to the commit() method, after
-+     the transaction manager has decided to commit (not rollback) the
-+     transaction. Unlike commit(), commit_ordered() is called only when the
-+     full transaction is committed, not for each commit of statement
-+     transaction in a multi-statement transaction.
-+
-+     Note that, like prepare(), commit_ordered() is only called when 2-phase
-+     commit takes place, i.e. when no binary log and only a single engine
-+     participates in a transaction, one commit() is called, no
-+     commit_ordered(). So engines must be prepared for this.
-+
-+     The calls to commit_ordered() in multiple parallel transactions are
-+     guaranteed to happen in the same order in every participating
-+     handler. This can be used to ensure the same commit order among multiple
-+     handlers (eg. in table handler and binlog). So if transaction T1 calls
-+     into commit_ordered() of handler A before T2, then T1 will also call
-+     commit_ordered() of handler B before T2.
-+
-+     Engines that implement this method should during this call make the
-+     transaction visible to other transactions, thereby making the order of
-+     transaction commits be defined by the order of commit_ordered() calls.
-+
-+     The intention is that commit_ordered() should do the minimal amount of
-+     work that needs to happen in consistent commit order among handlers. To
-+     preserve ordering, calls need to be serialised on a global mutex, so
-+     doing any time-consuming or blocking operations in commit_ordered() will
-+     limit scalability.
-+
-+     Handlers can rely on commit_ordered() calls to be serialised (no two
-+     calls can run in parallel, so no extra locking on the handler part is
-+     required to ensure this).
-+
-+     Note that commit_ordered() can be called from a different thread than the
-+     one handling the transaction! So it can not do anything that depends on
-+     thread local storage, in particular it can not call my_error() and
-+     friends (instead it can store the error code and delay the call of
-+     my_error() to the commit() method).
-+
-+     Similarly, since commit_ordered() returns void, any return error code
-+     must be saved and returned from the commit() method instead.
-+
-+     The commit_ordered method is optional, and can be left unset if not
-+     needed in a particular handler (then there will be no ordering guarantees
-+     wrt. other engines and binary log).
-+   */
-+   void (*commit_ordered)(handlerton *hton, THD *thd, bool all);
-    int  (*rollback)(handlerton *hton, THD *thd, bool all);
-    int  (*prepare)(handlerton *hton, THD *thd, bool all);
-    int  (*recover)(handlerton *hton, XID *xid_list, uint len);
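
As a concrete illustration of the contract described above, here is a toy engine
wiring up the optional commit_ordered() hook. The handlerton below is reduced to two
members and everything prefixed my_engine_ is hypothetical:

    #include <cstdio>

    struct THD;                                    // opaque session handle (stand-in)

    struct handlerton                              // reduced stand-in, not the real struct
    {
      void (*commit_ordered)(handlerton *hton, THD *thd, bool all);  // optional, may be NULL
      int  (*commit)(handlerton *hton, THD *thd, bool all);
    };

    static void my_engine_commit_ordered(handlerton *, THD *, bool)
    {
      // Make the transaction visible in commit order.  Keep it short and
      // non-blocking; this may run in a different thread, so no my_error()
      // here: remember any failure and report it from commit().
      puts("engine: transaction made visible");
    }

    static int my_engine_commit(handlerton *, THD *, bool)
    {
      puts("engine: durable commit, report any deferred error");
      return 0;
    }

    static handlerton my_engine = { my_engine_commit_ordered, my_engine_commit };

    int main()
    {
      if (my_engine.commit_ordered)                // optional hook
        my_engine.commit_ordered(&my_engine, nullptr, true);
      return my_engine.commit(&my_engine, nullptr, true);
    }
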
---- a/sql/log.cc
-+++ b/sql/log.cc
-@@ -71,6 +71,25 @@
- static int binlog_rollback(handlerton *hton, THD *thd, bool all);
- static int binlog_prepare(handlerton *hton, THD *thd, bool all);
- 
-+static LEX_STRING const write_error_msg=
-+  { C_STRING_WITH_LEN("error writing to the binary log") };
-+
-+static my_bool mutexes_inited;
-+mysql_mutex_t LOCK_group_commit_queue;
-+mysql_mutex_t LOCK_commit_ordered;
-+
-+static ulonglong binlog_status_var_num_commits;
-+static ulonglong binlog_status_var_num_group_commits;
-+
-+static SHOW_VAR binlog_status_vars_detail[]=
-+{
-+  {"commits",
-+    (char *)&binlog_status_var_num_commits, SHOW_LONGLONG},
-+  {"group_commits",
-+    (char *)&binlog_status_var_num_group_commits, SHOW_LONGLONG},
-+  {NullS, NullS, SHOW_LONG}
-+};
-+
- /**
-    purge logs, master and slave sides both, related error code
-    convertor.
-@@ -167,41 +186,6 @@
- }
- 
- /*
--  Helper class to hold a mutex for the duration of the
--  block.
--
--  Eliminates the need for explicit unlocking of mutexes on, e.g.,
--  error returns.  On passing a null pointer, the sentry will not do
--  anything.
-- */
--class Mutex_sentry
--{
--public:
--  Mutex_sentry(mysql_mutex_t *mutex)
--    : m_mutex(mutex)
--  {
--    if (m_mutex)
--      mysql_mutex_lock(mutex);
--  }
--
--  ~Mutex_sentry()
--  {
--    if (m_mutex)
--      mysql_mutex_unlock(m_mutex);
--#ifndef DBUG_OFF
--    m_mutex= 0;
--#endif
--  }
--
--private:
--  mysql_mutex_t *m_mutex;
--
--  // It's not allowed to copy this object in any way
--  Mutex_sentry(Mutex_sentry const&);
--  void operator=(Mutex_sentry const&);
--};
--
--/*
-   Helper classes to store non-transactional and transactional data
-   before copying it to the binary log.
- */
-@@ -211,7 +195,8 @@
-   binlog_cache_data(): m_pending(0), before_stmt_pos(MY_OFF_T_UNDEF),
-   incident(FALSE), changes_to_non_trans_temp_table_flag(FALSE),
-   saved_max_binlog_cache_size(0), ptr_binlog_cache_use(0),
--  ptr_binlog_cache_disk_use(0)
-+  ptr_binlog_cache_disk_use(0), commit_bin_log_file_pos(0),
-+  using_xa(FALSE), xa_xid(0)
-   { }
-   
-   ~binlog_cache_data()
-@@ -270,6 +255,8 @@
-       variable after truncating the cache.
-     */
-     cache_log.disk_writes= 0;
-+    using_xa= FALSE;
-+    commit_bin_log_file_pos= 0;
-     DBUG_ASSERT(empty());
-   }
- 
-@@ -411,6 +398,20 @@
-  
-   binlog_cache_data& operator=(const binlog_cache_data& info);
-   binlog_cache_data(const binlog_cache_data& info);
-+
-+public:
-+  /*
-+    Binlog position after current commit, available to storage engines during
-+    commit_ordered() and commit().
-+  */
-+  ulonglong commit_bin_log_file_pos;
-+
-+  /*
-+    Flag set true if this transaction is committed with log_xid() as part of
-+    XA, false if not.
-+  */
-+  bool using_xa;
-+  my_xid xa_xid;
- };
- 
- class binlog_cache_mngr {
-@@ -1624,7 +1625,7 @@
- */
- static inline int
- binlog_flush_cache(THD *thd, binlog_cache_data* cache_data, Log_event *end_evt,
--                   bool is_transactional)
-+                   bool is_transactional, bool all)
- {
-   DBUG_ENTER("binlog_flush_cache");
-   int error= 0;
-@@ -1643,8 +1644,8 @@
-       were, we would have to ensure that we're not ending a statement
-       inside a stored function.
-     */
--    error= mysql_bin_log.write(thd, &cache_data->cache_log, end_evt,
--                               cache_data->has_incident());
-+    error= mysql_bin_log.write_transaction_to_binlog(thd, cache_data,
-+                                                     end_evt, all);
-   }
-   cache_data->reset();
- 
-@@ -1663,12 +1664,12 @@
- */
- static inline int
- binlog_commit_flush_stmt_cache(THD *thd,
--                               binlog_cache_mngr *cache_mngr)
-+                               binlog_cache_mngr *cache_mngr, bool all)
- {
-   Query_log_event end_evt(thd, STRING_WITH_LEN("COMMIT"),
-                           FALSE, FALSE, TRUE, 0);
-   return (binlog_flush_cache(thd, &cache_mngr->stmt_cache, &end_evt,
--                             FALSE));
-+                             FALSE, all));
- }
- 
- /**
-@@ -1681,12 +1682,12 @@
-     nonzero if an error pops up when flushing the cache.
- */
- static inline int
--binlog_commit_flush_trx_cache(THD *thd, binlog_cache_mngr *cache_mngr)
-+binlog_commit_flush_trx_cache(THD *thd, binlog_cache_mngr *cache_mngr, bool all)
- {
-   Query_log_event end_evt(thd, STRING_WITH_LEN("COMMIT"),
-                           TRUE, FALSE, TRUE, 0);
-   return (binlog_flush_cache(thd, &cache_mngr->trx_cache, &end_evt,
--                             TRUE));
-+                             TRUE, all));
- }
- 
- /**
-@@ -1699,12 +1700,12 @@
-     nonzero if an error pops up when flushing the cache.
- */
- static inline int
--binlog_rollback_flush_trx_cache(THD *thd, binlog_cache_mngr *cache_mngr)
-+binlog_rollback_flush_trx_cache(THD *thd, binlog_cache_mngr *cache_mngr, bool all)
- {
-   Query_log_event end_evt(thd, STRING_WITH_LEN("ROLLBACK"),
-                           TRUE, FALSE, TRUE, 0);
-   return (binlog_flush_cache(thd, &cache_mngr->trx_cache, &end_evt,
--                             TRUE));
-+                             TRUE, all));
- }
- 
- /**
-@@ -1719,11 +1720,11 @@
- */
- static inline int
- binlog_commit_flush_trx_cache(THD *thd, binlog_cache_mngr *cache_mngr,
--                              my_xid xid)
-+                              my_xid xid, bool all)
- {
-   Xid_log_event end_evt(thd, xid);
-   return (binlog_flush_cache(thd, &cache_mngr->trx_cache, &end_evt,
--                             TRUE));
-+                             TRUE, all));
- }
- 
- /**
-@@ -1785,7 +1786,7 @@
-     do nothing.
-     just pretend we can do 2pc, so that MySQL won't
-     switch to 1pc.
--    real work will be done in MYSQL_BIN_LOG::log_xid()
-+    real work will be done in MYSQL_BIN_LOG::log_and_order()
-   */
-   return 0;
- }
-@@ -1818,7 +1819,7 @@
- 
-   if (!cache_mngr->stmt_cache.empty())
-   {
--    error= binlog_commit_flush_stmt_cache(thd, cache_mngr);
-+    error= binlog_commit_flush_stmt_cache(thd, cache_mngr, all);
-   }
- 
-   if (cache_mngr->trx_cache.empty())
-@@ -1837,7 +1838,7 @@
-     Otherwise, we accumulate the changes.
-   */
-   if (!error && ending_trans(thd, all))
--    error= binlog_commit_flush_trx_cache(thd, cache_mngr);
-+    error= binlog_commit_flush_trx_cache(thd, cache_mngr, all);
- 
-   /*
-     This is part of the stmt rollback.
-@@ -1881,7 +1882,7 @@
-   }
-   else if (!cache_mngr->stmt_cache.empty())
-   {
--    error= binlog_commit_flush_stmt_cache(thd, cache_mngr);
-+    error= binlog_commit_flush_stmt_cache(thd, cache_mngr, all);
-   }
- 
-   if (cache_mngr->trx_cache.empty())
-@@ -1929,7 +1930,7 @@
-          (trans_has_updated_non_trans_table(thd) &&
-           ending_single_stmt_trans(thd,all) &&
-           thd->variables.binlog_format == BINLOG_FORMAT_MIXED)))
--      error= binlog_rollback_flush_trx_cache(thd, cache_mngr);
-+      error= binlog_rollback_flush_trx_cache(thd, cache_mngr, all);
-     /*
-       Truncate the cache if:
-         . aborting a single or multi-statement transaction or;
-@@ -2904,6 +2905,7 @@
- MYSQL_BIN_LOG::MYSQL_BIN_LOG(uint *sync_period)
-   :bytes_written(0), prepared_xids(0), file_id(1), open_count(1),
-    need_start_event(TRUE),
-+   group_commit_queue(0), num_commits(0), num_group_commits(0),
-    sync_period_ptr(sync_period),
-    is_relay_log(0), signal_cnt(0),
-    description_event_for_exec(0), description_event_for_queue(0)
-@@ -5361,19 +5363,15 @@
-   SYNOPSIS
-     write_cache()
-     cache    Cache to write to the binary log
--    lock_log True if the LOCK_log mutex should be aquired, false otherwise
--    sync_log True if the log should be flushed and synced
- 
-   DESCRIPTION
-     Write the contents of the cache to the binary log. The cache will
-     be reset as a READ_CACHE to be able to read the contents from it.
-  */
- 
--int MYSQL_BIN_LOG::write_cache(THD *thd, IO_CACHE *cache,
--                               bool lock_log, bool sync_log)
-+int MYSQL_BIN_LOG::write_cache(THD *thd, IO_CACHE *cache)
- {
--  Mutex_sentry sentry(lock_log ? &LOCK_log : NULL);
--
-+  mysql_mutex_assert_owner(&LOCK_log);
-   if (reinit_io_cache(cache, READ_CACHE, 0, 0, 0))
-     return ER_ERROR_ON_WRITE;
-   uint length= my_b_bytes_in_cache(cache), group, carry, hdr_offs;
-@@ -5484,6 +5482,8 @@
-     }
- 
-     /* Write data to the binary log file */
-+    DBUG_EXECUTE_IF("fail_binlog_write_1",
-+                    errno= 28; return ER_ERROR_ON_WRITE;);
-     if (my_b_write(&log_file, cache->read_pos, length))
-       return ER_ERROR_ON_WRITE;
-     thd->binlog_bytes_written+= length;
-@@ -5492,9 +5492,6 @@
- 
-   DBUG_ASSERT(carry == 0);
- 
--  if (sync_log)
--    return flush_and_sync(0);
--
-   return 0;                                     // All OK
- }
- 
-@@ -5535,8 +5532,6 @@
-   if (!is_open())
-     DBUG_RETURN(error);
- 
--  LEX_STRING const write_error_msg=
--    { C_STRING_WITH_LEN("error writing to the binary log") };
-   Incident incident= INCIDENT_LOST_EVENTS;
-   Incident_log_event ev(thd, incident, write_error_msg);
-   if (lock)
-@@ -5585,112 +5580,332 @@
-     'cache' needs to be reinitialized after this functions returns.
- */
- 
--bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event,
--                          bool incident)
-+bool
-+MYSQL_BIN_LOG::write_transaction_to_binlog(THD *thd, binlog_cache_data *cache_data,
-+                                           Log_event *end_ev, bool all)
-+{
-+  group_commit_entry entry;
-+  bool ret;
-+  DBUG_ENTER("MYSQL_BIN_LOG::write_transaction_to_binlog");
-+
-+  entry.thd= thd;
-+  entry.cache_data= cache_data;
-+  entry.error= 0;
-+  entry.all= all;
-+
-+  /*
-+    Log "BEGIN" at the beginning of every transaction.  Here, a transaction is
-+    either a BEGIN..COMMIT block or a single statement in autocommit mode.
-+
-+    Create the necessary events here, where we have the correct THD (and
-+    thread context).
-+
-+    Due to group commit the actual writing to binlog may happen in a different
-+    thread.
-+  */
-+  Query_log_event qinfo(thd, STRING_WITH_LEN("BEGIN"), TRUE, FALSE, TRUE, 0);
-+  entry.begin_event= &qinfo;
-+  entry.end_event= end_ev;
-+  if (cache_data->has_incident())
-+  {
-+    Incident_log_event inc_ev(thd, INCIDENT_LOST_EVENTS, write_error_msg);
-+    entry.incident_event= &inc_ev;
-+    ret = write_transaction_to_binlog_events(&entry);
-+  }
-+  else
-+  {
-+    entry.incident_event= NULL;
-+    ret = write_transaction_to_binlog_events(&entry);
-+  }
-+  if (!ret)                                          /* userstat.patch */
-+    thd->binlog_bytes_written += qinfo.data_written; /* userstat.patch */
-+  DBUG_RETURN(ret);
-+}
-+
-+bool
-+MYSQL_BIN_LOG::write_transaction_to_binlog_events(group_commit_entry *entry)
- {
--  DBUG_ENTER("MYSQL_BIN_LOG::write(THD *, IO_CACHE *, Log_event *)");
-+  /*
-+    To facilitate group commit for the binlog, we first queue up ourselves in
-+    the group commit queue. Then the first thread to enter the queue waits for
-+    the LOCK_log mutex, and commits for everyone in the queue once it gets the
-+    lock. Any other threads in the queue just wait for the first one to finish
-+    the commit and wake them up.
-+  */
-+  entry->thd->clear_wakeup_ready();
-+  mysql_mutex_lock(&LOCK_group_commit_queue);
-+  group_commit_entry *orig_queue= group_commit_queue;
-+  entry->next= orig_queue;
-+  group_commit_queue= entry;
-+  DEBUG_SYNC(entry->thd, "commit_group_commit_queue");
-+  mysql_mutex_unlock(&LOCK_group_commit_queue);
-+
-+  /*
-+    The first in the queue handles group commit for all; the others just wait
-+    to be signalled when group commit is done.
-+  */
-+  if (orig_queue != NULL)
-+    entry->thd->wait_for_wakeup_ready();
-+  else
-+    trx_group_commit_leader(entry);
-+
-+  if (likely(!entry->error))
-+    return 0;
-+
-+  switch (entry->error)
-+  {
-+  case ER_ERROR_ON_WRITE:
-+    my_error(ER_ERROR_ON_WRITE, MYF(ME_NOREFRESH), name, entry->commit_errno);
-+    break;
-+  case ER_ERROR_ON_READ:
-+    my_error(ER_ERROR_ON_READ, MYF(ME_NOREFRESH),
-+             entry->cache_data->cache_log.file_name, entry->commit_errno);
-+    break;
-+  default:
-+    /*
-+      There are no (and should be no) errors thrown that are not covered above.
-+      But just in case one is added later without updating the above switch
-+      statement, include a catch-all.
-+    */
-+    my_printf_error(entry->error,
-+                    "Error writing transaction to binary log: %d",
-+                    MYF(ME_NOREFRESH), entry->error);
-+  }
- 
-+  /*
-+    Since we return error, this transaction XID will not be committed, so
-+    we need to mark it as not needed for recovery (unlog() is not called
-+    for a transaction if log_xid() fails).
-+  */
-+  if (entry->cache_data->using_xa && entry->cache_data->xa_xid)
-+    mark_xid_done();
-+
-+  return 1;
-+}
-+
-+/*
-+  Do binlog group commit as the lead thread.
-+
-+  This must be called when this thread/transaction is queued at the start of
-+  the group_commit_queue. It will wait to obtain the LOCK_log mutex, then group
-+  commit all the transactions in the queue (more may have entered while waiting
-+  for LOCK_log). After commit is done, all other threads in the queue will be
-+  signalled.
-+
-+ */
-+void
-+MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
-+{
-+  DBUG_ENTER("MYSQL_BIN_LOG::trx_group_commit_leader");
-+  uint xid_count= 0;
-+  uint write_count= 0;
-+  bool check_purge= false;
-+  group_commit_entry *current= 0;
-   DBUG_ASSERT(is_open());
-   if (likely(is_open()))                       // Should always be true
-   {
--    bool check_purge;
--    
-+    /*
-+      Take the LOCK_log mutex, and once we get it, collect any additional writes
-+      that queued up while we were waiting.
-+    */
-     mysql_mutex_lock(&LOCK_log);
-+
-+    DEBUG_SYNC(leader->thd, "commit_after_get_LOCK_log");
-+    mysql_mutex_lock(&LOCK_group_commit_queue);
-+    current= group_commit_queue;
-+    group_commit_queue= NULL;
-+    mysql_mutex_unlock(&LOCK_group_commit_queue);
-+
-+    /* As the queue is in reverse order of entering, reverse it. */
-+    group_commit_entry *queue= NULL;
-+    while (current)
-+    {
-+      group_commit_entry *next= current->next;
-+      current->next= queue;
-+      queue= current;
-+      current= next;
-+    }
-+    DBUG_ASSERT(leader == queue /* the leader should be first in queue */);
-     /*
--      We only bother to write to the binary log if there is anything
--      to write.
--     */
--    if (my_b_tell(cache) > 0)
-+      Now we have in queue the list of transactions to be committed in order.
-+
-+      Commit every transaction in the queue.
-+
-+      Note that we are doing this in a different thread than the one running
-+      the transaction! So we are limited in the operations we can do. In
-+      particular, we cannot call my_error() on behalf of a transaction, as
-+      that obtains the THD from thread local storage. Instead, we must set
-+      current->error and let the thread do the error reporting itself once
-+      we wake it up.
-+    */
-+    for (current= queue; current != NULL; current= current->next)
-     {
-+      binlog_cache_data *cache_data= current->cache_data;
-+      IO_CACHE *cache= &cache_data->cache_log;
-+
-       /*
--        Log "BEGIN" at the beginning of every transaction.  Here, a
--        transaction is either a BEGIN..COMMIT block or a single
--        statement in autocommit mode.
-+        We only bother to write to the binary log if there is anything
-+        to write.
-       */
--      Query_log_event qinfo(thd, STRING_WITH_LEN("BEGIN"), TRUE, FALSE, TRUE, 0);
--      if (qinfo.write(&log_file))
--        goto err;
--      thd->binlog_bytes_written+= qinfo.data_written;
--      DBUG_EXECUTE_IF("crash_before_writing_xid",
--                      {
--                        if ((write_error= write_cache(thd, cache, false, true)))
--                          DBUG_PRINT("info", ("error writing binlog cache: %d",
--                                               write_error));
--                        DBUG_PRINT("info", ("crashing before writing xid"));
--                        DBUG_SUICIDE();
--                      });
--
--      if ((write_error= write_cache(thd, cache, false, false)))
--        goto err;
--
--      if (commit_event && commit_event->write(&log_file))
--        goto err;
--      if (commit_event)
--        thd->binlog_bytes_written+= commit_event->data_written;
-+      if (my_b_tell(cache) > 0)
-+      {
-+        if ((current->error= write_transaction(current)))
-+          current->commit_errno= errno;
-+        write_count++;
-+      }
- 
--      if (incident && write_incident(thd, FALSE))
--        goto err;
-+      cache_data->commit_bin_log_file_pos= my_b_write_tell(&log_file);
-+      if (cache_data->using_xa && cache_data->xa_xid)
-+        xid_count++;
-+    }
- 
-+    if (write_count > 0)
-+    {
-       bool synced= 0;
-       if (flush_and_sync(&synced))
--        goto err;
--      DBUG_EXECUTE_IF("half_binlogged_transaction", DBUG_SUICIDE(););
--      if (cache->error)				// Error on read
-       {
--        sql_print_error(ER(ER_ERROR_ON_READ), cache->file_name, errno);
--        write_error=1;				// Don't give more errors
--        goto err;
-+        for (current= queue; current != NULL; current= current->next)
-+        {
-+          if (!current->error)
-+          {
-+            current->error= ER_ERROR_ON_WRITE;
-+            current->commit_errno= errno;
-+          }
-+        }
-+      }
-+      else
-+      {
-+        signal_update();
-       }
- 
-       if (RUN_HOOK(binlog_storage, after_flush,
--                   (thd, log_file_name, log_file.pos_in_file, synced)))
-+                   (leader->thd, log_file_name, log_file.pos_in_file, synced)))
-       {
-         sql_print_error("Failed to run 'after_flush' hooks");
--        write_error=1;
--        goto err;
-+        for (current= queue; current != NULL; current= current->next)
-+        {
-+          if (!current->error)
-+          {
-+            current->error= ER_ERROR_ON_WRITE;
-+            current->commit_errno= errno;
-+          }
-+        }
-       }
- 
--      signal_update();
-     }
- 
-     /*
--      if commit_event is Xid_log_event, increase the number of
--      prepared_xids (it's decreasd in ::unlog()). Binlog cannot be rotated
-+      if any commit_events are Xid_log_event, increase the number of
-+      prepared_xids (it's decreased in ::unlog()). Binlog cannot be rotated
-       if there're prepared xids in it - see the comment in new_file() for
-       an explanation.
--      If the commit_event is not Xid_log_event (then it's a Query_log_event)
--      rotate binlog, if necessary.
-+      If no Xid_log_events (then it's all Query_log_event) rotate binlog,
-+      if necessary.
-     */
--    if (commit_event && commit_event->get_type_code() == XID_EVENT)
-+    if (xid_count > 0)
-     {
--      mysql_mutex_lock(&LOCK_prep_xids);
--      prepared_xids++;
--      mysql_mutex_unlock(&LOCK_prep_xids);
--      mysql_mutex_unlock(&LOCK_log);
-+      mark_xids_active(xid_count);
-     }
-     else
-     {
-       if (rotate(false, &check_purge))
--        goto err;
--      mysql_mutex_unlock(&LOCK_log);
--      if (check_purge) 
--        purge();
-+      {
-+        for (current= queue; current != NULL; current= current->next)
-+        {
-+          if (!current->error)
-+          {
-+            current->error= ER_ERROR_ON_WRITE;
-+            current->commit_errno= errno;
-+          }
-+        }
-+      }
-     }
--  }
- 
--  DBUG_RETURN(0);
-+    DEBUG_SYNC(leader->thd, "commit_before_get_LOCK_commit_ordered");
-+    mysql_mutex_lock(&LOCK_commit_ordered);
-+    /*
-+      We cannot unlock LOCK_log until we have locked LOCK_commit_ordered;
-+      otherwise scheduling could allow the next group commit to run ahead of us,
-+      messing up the order of commit_ordered() calls. But as soon as
-+      LOCK_commit_ordered is obtained, we can let the next group commit start.
-+    */
- 
--err:
--  if (!write_error)
--  {
--    write_error= 1;
--    sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
-+    mysql_mutex_unlock(&LOCK_log);
-+
-+    if (xid_count > 0 && check_purge)
-+    {
-+      purge();
-+    }
-+
-+    DEBUG_SYNC(leader->thd, "commit_after_release_LOCK_log");
-+    ++num_group_commits;
-+
-+    /*
-+      Wakeup each participant waiting for our group commit, first calling the
-+      commit_ordered() methods for any transactions doing 2-phase commit.
-+    */
-+    current= queue;
-+    while (current != NULL)
-+    {
-+      group_commit_entry *next;
-+
-+      DEBUG_SYNC(leader->thd, "commit_loop_entry_commit_ordered");
-+      ++num_commits;
-+      if (current->cache_data->using_xa && !current->error)
-+        run_commit_ordered(current->thd, current->all);
-+
-+      /*
-+        Careful not to access current->next after waking up the other thread, as
-+        it may change immediately after wakeup.
-+      */
-+      next= current->next;
-+      if (current != leader)                      // Don't wake up ourself
-+        current->thd->signal_wakeup_ready();
-+      current= next;
-+    }
-+    DEBUG_SYNC(leader->thd, "commit_after_group_run_commit_ordered");
-+    mysql_mutex_unlock(&LOCK_commit_ordered);
-   }
--  mysql_mutex_unlock(&LOCK_log);
--  DBUG_RETURN(1);
-+
-+  DBUG_VOID_RETURN;
- }
- 
- 
-+int
-+MYSQL_BIN_LOG::write_transaction(group_commit_entry *entry)
-+{
-+  binlog_cache_data *cache_data= entry->cache_data;
-+  IO_CACHE *cache= &cache_data->cache_log;
-+
-+  if (entry->begin_event->write(&log_file))
-+    return ER_ERROR_ON_WRITE;
-+
-+  DBUG_EXECUTE_IF("crash_before_writing_xid",
-+                  {
-+                    if ((write_cache(entry->thd, cache)))
-+                      DBUG_PRINT("info", ("error writing binlog cache"));
-+                    else
-+                      flush_and_sync(0);
-+
-+                    DBUG_PRINT("info", ("crashing before writing xid"));
-+                    abort();
-+                  });
-+
-+  if (write_cache(entry->thd, cache))
-+    return ER_ERROR_ON_WRITE;
-+
-+  if (entry->end_event->write(&log_file))
-+    return ER_ERROR_ON_WRITE;
-+
-+  if (entry->incident_event && entry->incident_event->write(&log_file))
-+    return ER_ERROR_ON_WRITE;
-+
-+  if (cache->error)                           // Error on read
-+    return ER_ERROR_ON_READ;
-+
-+  return 0;
-+}
-+
- /**
-   Wait until we get a signal that the relay log has been updated.
- 
-@@ -6095,6 +6310,68 @@
- }
- 
- 
-+void
-+TC_init()
-+{
-+  mysql_mutex_init(key_LOCK_group_commit_queue, &LOCK_group_commit_queue, MY_MUTEX_INIT_SLOW);
-+  mysql_mutex_init(key_LOCK_commit_ordered, &LOCK_commit_ordered, MY_MUTEX_INIT_SLOW);
-+  mutexes_inited= TRUE;
-+}
-+
-+
-+void
-+TC_destroy()
-+{
-+  if (mutexes_inited)
-+  {
-+    mysql_mutex_destroy(&LOCK_group_commit_queue);
-+    mysql_mutex_destroy(&LOCK_commit_ordered);
-+    mutexes_inited= FALSE;
-+  }
-+}
-+
-+
-+void
-+TC_LOG::run_commit_ordered(THD *thd, bool all)
-+{
-+  Ha_trx_info *ha_info=
-+    all ? thd->transaction.all.ha_list : thd->transaction.stmt.ha_list;
-+
-+  mysql_mutex_assert_owner(&LOCK_commit_ordered);
-+  for (; ha_info; ha_info= ha_info->next())
-+  {
-+    handlerton *ht= ha_info->ht();
-+    if (!ht->commit_ordered)
-+      continue;
-+    ht->commit_ordered(ht, thd, all);
-+    DEBUG_SYNC(thd, "commit_after_run_commit_ordered");
-+  }
-+}
-+
-+int TC_LOG_MMAP::log_and_order(THD *thd, my_xid xid, bool all,
-+                               bool need_commit_ordered)
-+{
-+  int cookie;
-+
-+  cookie= 0;
-+  if (xid)
-+    cookie= log_one_transaction(xid);
-+
-+  if (need_commit_ordered)
-+  {
-+    /* Only run commit_ordered() if log_xid was successful. */
-+    if (cookie)
-+    {
-+      mysql_mutex_lock(&LOCK_commit_ordered);
-+      run_commit_ordered(thd, all);
-+      mysql_mutex_unlock(&LOCK_commit_ordered);
-+    }
-+  }
-+
-+  return cookie;
-+}
-+
-+
- /********* transaction coordinator log for 2pc - mmap() based solution *******/
- 
- /*
-@@ -6231,6 +6508,7 @@
-   mysql_mutex_init(key_LOCK_pool, &LOCK_pool, MY_MUTEX_INIT_FAST);
-   mysql_cond_init(key_COND_active, &COND_active, 0);
-   mysql_cond_init(key_COND_pool, &COND_pool, 0);
-+  mysql_cond_init(key_COND_queue_busy, &COND_queue_busy, 0);
- 
-   inited=6;
- 
-@@ -6238,6 +6516,8 @@
-   active=pages;
-   pool=pages+1;
-   pool_last=pages+npages-1;
-+  commit_ordered_queue= NULL;
-+  commit_ordered_queue_busy= false;
- 
-   return 0;
- 
-@@ -6343,7 +6623,7 @@
-     to the position in memory where xid was logged to.
- */
- 
--int TC_LOG_MMAP::log_xid(THD *thd, my_xid xid)
-+int TC_LOG_MMAP::log_one_transaction(my_xid xid)
- {
-   int err;
-   PAGE *p;
-@@ -6482,7 +6762,9 @@
-     mysql_mutex_destroy(&LOCK_sync);
-     mysql_mutex_destroy(&LOCK_active);
-     mysql_mutex_destroy(&LOCK_pool);
-+    mysql_cond_destroy(&COND_active);
-     mysql_cond_destroy(&COND_pool);
-+    mysql_cond_destroy(&COND_queue_busy);
-   case 5:
-     data[0]='A'; // garble the first (signature) byte, in case mysql_file_delete fails
-   case 4:
-@@ -6692,42 +6974,87 @@
-   mysql_cond_destroy(&COND_prep_xids);
- }
- 
--/**
--  @todo
--  group commit
-+/*
-+  Do a binlog log_xid() for a group of transactions, linked through
-+  thd->next_commit_ordered.
- 
-   @retval
-     0    error
-   @retval
-     1    success
- */
--int TC_LOG_BINLOG::log_xid(THD *thd, my_xid xid)
-+int TC_LOG_BINLOG::log_and_order(THD *thd, my_xid xid, bool all,
-+                                 bool need_commit_ordered __attribute__((unused)))
- {
--  DBUG_ENTER("TC_LOG_BINLOG::log");
-+  DBUG_ENTER("TC_LOG_BINLOG::log_and_order");
-   binlog_cache_mngr *cache_mngr=
-     (binlog_cache_mngr*) thd_get_ha_data(thd, binlog_hton);
-+
-+  cache_mngr->trx_cache.using_xa= TRUE;
-+  cache_mngr->trx_cache.xa_xid= xid;
-   /*
-     We always commit the entire transaction when writing an XID. Also
-     note that the return value is inverted.
-    */
--  DBUG_RETURN(!binlog_commit_flush_stmt_cache(thd, cache_mngr) &&
--              !binlog_commit_flush_trx_cache(thd, cache_mngr, xid));
-+  DBUG_RETURN(!binlog_commit_flush_stmt_cache(thd, cache_mngr, all) &&
-+              !binlog_commit_flush_trx_cache(thd, cache_mngr, xid, all));
- }
- 
--int TC_LOG_BINLOG::unlog(ulong cookie, my_xid xid)
-+/*
-+  After an XID is logged, we need to hold on to the current binlog file until
-+  it is fully committed in the storage engine. The reason is that crash
-+  recovery only looks at the latest binlog, so we must make sure there are no
-+  outstanding prepared (but not committed) transactions before rotating the
-+  binlog.
-+
-+  To handle this, we keep a count of outstanding XIDs. This function is used
-+  to increase this count when committing one or more transactions to the
-+  binary log.
-+*/
-+void
-+TC_LOG_BINLOG::mark_xids_active(uint xid_count)
- {
--  DBUG_ENTER("TC_LOG_BINLOG::unlog");
-+  DBUG_ENTER("TC_LOG_BINLOG::mark_xids_active");
-+  DBUG_PRINT("info", ("xid_count=%u", xid_count));
-+  mysql_mutex_lock(&LOCK_prep_xids);
-+  prepared_xids+= xid_count;
-+  mysql_mutex_unlock(&LOCK_prep_xids);
-+  DBUG_VOID_RETURN;
-+}
-+
-+/*
-+  Once an XID is committed, it is safe to rotate the binary log, as it can no
-+  longer be needed during crash recovery.
-+
-+  This function is called to mark an XID this way. It needs to decrease the
-+  count of pending XIDs, and signal the log rotator thread when it reaches zero.
-+*/
-+void
-+TC_LOG_BINLOG::mark_xid_done()
-+{
-+  my_bool send_signal;
-+
-+  DBUG_ENTER("TC_LOG_BINLOG::mark_xid_done");
-   mysql_mutex_lock(&LOCK_prep_xids);
-   // prepared_xids can be 0 if the transaction had ignorable errors.
-   DBUG_ASSERT(prepared_xids >= 0);
-   if (prepared_xids > 0)
-     prepared_xids--;
--  if (prepared_xids == 0) {
-+  send_signal= (prepared_xids == 0);
-+  mysql_mutex_unlock(&LOCK_prep_xids);
-+  if (send_signal) {
-     DBUG_PRINT("info", ("prepared_xids=%lu", prepared_xids));
-     mysql_cond_signal(&COND_prep_xids);
-   }
--  mysql_mutex_unlock(&LOCK_prep_xids);
--  DBUG_RETURN(rotate_and_purge(0));     // as ::write() did not rotate
-+  DBUG_VOID_RETURN;
-+}
-+
-+int TC_LOG_BINLOG::unlog(ulong cookie, my_xid xid)
-+{
-+  DBUG_ENTER("TC_LOG_BINLOG::unlog");
-+  if (xid)
-+    mark_xid_done();
-+  DBUG_RETURN(rotate_and_purge(0));
- }
- 
- int TC_LOG_BINLOG::recover(IO_CACHE *log, Format_description_log_event *fdle)
-@@ -6796,9 +7123,67 @@
- {
-   return (ulonglong) mysql_bin_log.get_log_file()->pos_in_file;
- }
-+/*
-+  Get the current position of the MySQL binlog for the transaction currently being
-+  committed.
-+
-+  This is valid to call from within storage engine commit_ordered() and
-+  commit() methods only.
-+
-+  Since it stores the position inside THD, it is safe to call without any
-+  locking.
-+
-+  Note that currently the binlog file name is not stored inside THD, but this
-+  is still safe as it can only change when the log is rotated, and we never
-+  rotate the binlog while commits are pending inside storage engines.
-+*/
-+extern "C"
-+void mysql_bin_log_commit_pos(THD *thd, ulonglong *out_pos, const char **out_file)
-+{
-+  binlog_cache_mngr *cache_mngr;
-+  if (binlog_hton->state == SHOW_OPTION_YES
-+      && (cache_mngr= (binlog_cache_mngr*) thd_get_ha_data(thd, binlog_hton)))
-+  {
-+    *out_pos= cache_mngr->trx_cache.commit_bin_log_file_pos;
-+    *out_file= mysql_bin_log.get_log_fname();
-+  }
-+  else
-+  {
-+    *out_pos= 0ULL;
-+    *out_file= NULL;
-+  }
-+}
- #endif /* INNODB_COMPATIBILITY_HOOKS */
- 
- 
-+static int show_binlog_vars(THD *thd, SHOW_VAR *var, char *buff)
-+{
-+  mysql_bin_log.set_status_variables();
-+  var->type= SHOW_ARRAY;
-+  var->value= (char *)&binlog_status_vars_detail;
-+  return 0;
-+}
-+
-+static SHOW_VAR binlog_status_vars_top[]= {
-+  {"binlog", (char *) &show_binlog_vars, SHOW_FUNC},
-+  {NullS, NullS, SHOW_LONG}
-+};
-+
-+/*
-+  Copy out current values of status variables, for SHOW STATUS or
-+  information_schema.global_status.
-+
-+  This is called only under LOCK_status, so we can fill in a static array.
-+*/
-+void
-+TC_LOG_BINLOG::set_status_variables()
-+{
-+  mysql_mutex_lock(&LOCK_commit_ordered);
-+  binlog_status_var_num_commits= this->num_commits;
-+  binlog_status_var_num_group_commits= this->num_group_commits;
-+  mysql_mutex_unlock(&LOCK_commit_ordered);
-+}
-+
- struct st_mysql_storage_engine binlog_storage_engine=
- { MYSQL_HANDLERTON_INTERFACE_VERSION };
- 
-@@ -6813,7 +7198,7 @@
-   binlog_init, /* Plugin Init */
-   NULL, /* Plugin Deinit */
-   0x0100 /* 1.0 */,
--  NULL,                       /* status variables                */
-+  binlog_status_vars_top,     /* status variables                */
-   NULL,                       /* system variables                */
-   NULL,                       /* config options                  */
-   0,                          /* flags                           */
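
The queue-and-leader mechanism behind write_transaction_to_binlog_events() and
trx_group_commit_leader() above can be shown in isolation: each committer pushes
itself onto a shared list; whoever finds the list empty becomes the leader, takes the
log lock, drains and re-reverses the queue, writes the batch in order, then wakes the
followers. A self-contained C++11 sketch of just that pattern (no binlog, no error
paths; the two mutexes stand in for LOCK_group_commit_queue and LOCK_log):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    struct Entry
    {
      int id;
      bool done = false;               // set by the leader once this entry is committed
      std::mutex m;
      std::condition_variable cv;
      Entry *next = nullptr;
    };

    static std::mutex queue_lock;      // plays the role of LOCK_group_commit_queue
    static std::mutex log_lock;        // plays the role of LOCK_log
    static Entry *queue_head = nullptr;

    static void group_commit(Entry *e)
    {
      bool leader;
      {
        std::lock_guard<std::mutex> g(queue_lock);
        leader = (queue_head == nullptr);            // first in queue becomes leader
        e->next = queue_head;                        // queue is kept in reverse order
        queue_head = e;
      }
      if (!leader)                                   // follower: wait to be woken
      {
        std::unique_lock<std::mutex> l(e->m);
        e->cv.wait(l, [e] { return e->done; });
        return;
      }
      std::lock_guard<std::mutex> lg(log_lock);      // leader: grab the log, then the batch
      Entry *batch;
      {
        std::lock_guard<std::mutex> g(queue_lock);
        batch = queue_head;
        queue_head = nullptr;
      }
      Entry *ordered = nullptr;                      // reverse back to enqueue order
      while (batch) { Entry *n = batch->next; batch->next = ordered; ordered = batch; batch = n; }
      for (Entry *cur = ordered; cur; )
      {
        printf("group-committing trx %d\n", cur->id);
        Entry *n = cur->next;                        // read next before waking the owner
        if (cur != e)
        {
          std::lock_guard<std::mutex> l(cur->m);
          cur->done = true;
          cur->cv.notify_one();
        }
        cur = n;
      }
    }

    int main()
    {
      Entry entries[4];
      std::thread t[4];
      for (int i = 0; i < 4; i++) { entries[i].id = i; t[i] = std::thread(group_commit, &entries[i]); }
      for (int i = 0; i < 4; i++) t[i].join();
      return 0;
    }

Keeping the queue in reverse insertion order makes enqueueing O(1) under the queue
mutex; only the leader pays for the reversal, which mirrors the patch above.
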
---- a/sql/log.h
-+++ b/sql/log.h
-@@ -44,17 +44,42 @@
- 
-   virtual int open(const char *opt_name)=0;
-   virtual void close()=0;
--  virtual int log_xid(THD *thd, my_xid xid)=0;
-+  virtual int log_and_order(THD *thd, my_xid xid, bool all,
-+                            bool need_commit_ordered)=0;
-   virtual int unlog(ulong cookie, my_xid xid)=0;
-+
-+  protected:
-+  void run_commit_ordered(THD *thd, bool all);
- };
- 
-+/*
-+  Locks used to ensure serialised execution of
-+  TC_LOG::run_commit_ordered(), or any other code that calls handler
-+  commit_ordered() methods.
-+*/
-+extern mysql_mutex_t LOCK_group_commit_queue;
-+extern mysql_mutex_t LOCK_commit_ordered;
-+
-+extern void TC_init();
-+extern void TC_destroy();
-+
- class TC_LOG_DUMMY: public TC_LOG // use it to disable the logging
- {
- public:
-   TC_LOG_DUMMY() {}
-   int open(const char *opt_name)        { return 0; }
-   void close()                          { }
--  int log_xid(THD *thd, my_xid xid)         { return 1; }
-+  /*
-+    TC_LOG_DUMMY is only used when there are <= 1 XA-capable engines, and we
-+    only use internal XA during commit when >= 2 XA-capable engines
-+    participate.
-+  */
-+  int log_and_order(THD *thd, my_xid xid, bool all,
-+                    bool need_commit_ordered)
-+  {
-+    DBUG_ASSERT(0 /* Internal error - TC_LOG_DUMMY::log_and_order() called */);
-+    return 1;
-+  }
-   int unlog(ulong cookie, my_xid xid)  { return 0; }
- };
- 
-@@ -80,6 +105,13 @@
-     mysql_cond_t  cond; // to wait for a sync
-   } PAGE;
- 
-+  /* List of THDs for which to invoke commit_ordered(), in order. */
-+  struct commit_entry
-+  {
-+    struct commit_entry *next;
-+    THD *thd;
-+  };
-+
-   char logname[FN_REFLEN];
-   File fd;
-   my_off_t file_length;
-@@ -94,16 +126,38 @@
-   */
-   mysql_mutex_t LOCK_active, LOCK_pool, LOCK_sync;
-   mysql_cond_t COND_pool, COND_active;
-+  /*
-+    Queue of threads that need to call commit_ordered().
-+    Access to this queue must be protected by LOCK_group_commit_queue
-+  */
-+  commit_entry *commit_ordered_queue;
-+  /*
-+    This flag and condition are used to reserve the queue while threads in it
-+    each run the commit_ordered() methods one after the other. Only once the
-+    last commit_ordered() in the queue is done can we start on a new queue
-+    run.
-+
-+    Since we start this process in the first thread in the queue and finish in
-+    the last (and possibly different) thread, we need a condition variable for
-+    this (we cannot unlock a mutex in a different thread than the one who
-+    locked it).
-+
-+    The condition is used together with the LOCK_group_commit_queue mutex.
-+  */
-+  my_bool commit_ordered_queue_busy;
-+  mysql_cond_t COND_queue_busy;
- 
-   public:
-   TC_LOG_MMAP(): inited(0) {}
-   int open(const char *opt_name);
-   void close();
--  int log_xid(THD *thd, my_xid xid);
-+  int log_and_order(THD *thd, my_xid xid, bool all,
-+                    bool need_commit_ordered);
-   int unlog(ulong cookie, my_xid xid);
-   int recover();
- 
-   private:
-+  int log_one_transaction(my_xid xid);
-   void get_active_from_pool();
-   int sync();
-   int overflow();
-@@ -271,9 +325,31 @@
-   time_t last_time;
- };
- 
-+class binlog_cache_data;
- class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
- {
-  private:
-+  struct group_commit_entry
-+  {
-+    struct group_commit_entry *next;
-+    THD *thd;
-+    binlog_cache_data *cache_data;
-+    /*
-+      Extra events (BEGIN, COMMIT/ROLLBACK/XID, and possibly INCIDENT) to be
-+      written during group commit. The incident_event is only valid if
-+      trx_data->has_incident() is true.
-+    */
-+    Log_event *begin_event;
-+    Log_event *end_event;
-+    Log_event *incident_event;
-+    /* Set during group commit to record any per-thread error. */
-+    int error;
-+    int commit_errno;
-+    /* This is the `all' parameter for ha_commit_ordered(). */
-+    bool all;
-+    /* True if we come in through XA log_and_order(), false otherwise. */
-+  };
-+
- #ifdef HAVE_PSI_INTERFACE
-   /** The instrumentation key to use for @ LOCK_index. */
-   PSI_mutex_key m_key_LOCK_index;
-@@ -325,6 +401,12 @@
-     In 5.0 it's 0 for relay logs too!
-   */
-   bool no_auto_events;
-+  /* Queue of transactions queued up to participate in group commit. */
-+  group_commit_entry *group_commit_queue;
-+  /* Total number of committed transactions. */
-+  ulonglong num_commits;
-+  /* Number of group commits done. */
-+  ulonglong num_group_commits;
- 
-   /* pointer to the sync period variable, for binlog this will be
-      sync_binlog_period, for relay log this will be
-@@ -346,6 +428,11 @@
-   */
-   int new_file_without_locking();
-   int new_file_impl(bool need_lock);
-+  int write_transaction(group_commit_entry *entry);
-+  bool write_transaction_to_binlog_events(group_commit_entry *entry);
-+  void trx_group_commit_leader(group_commit_entry *leader);
-+  void mark_xid_done();
-+  void mark_xids_active(uint xid_count);
- 
- public:
-   MYSQL_LOG::generate_name;
-@@ -387,7 +474,8 @@
- 
-   int open(const char *opt_name);
-   void close();
--  int log_xid(THD *thd, my_xid xid);
-+  int log_and_order(THD *thd, my_xid xid, bool all,
-+                    bool need_commit_ordered);
-   int unlog(ulong cookie, my_xid xid);
-   int recover(IO_CACHE *log, Format_description_log_event *fdle);
- #if !defined(MYSQL_CLIENT)
-@@ -434,11 +522,11 @@
-   int new_file();
- 
-   bool write(Log_event* event_info); // binary log write
--  bool write(THD *thd, IO_CACHE *cache, Log_event *commit_event, bool incident);
-+  bool write_transaction_to_binlog(THD *thd, binlog_cache_data *cache_data,
-+                                   Log_event *end_ev, bool all);
-   bool write_incident(THD *thd, bool lock);
- 
--  int  write_cache(THD *thd, IO_CACHE *cache,
--                   bool lock_log, bool flush_and_sync);
-+  int  write_cache(THD *thd, IO_CACHE *cache);
-   void set_write_error(THD *thd, bool is_transactional);
-   bool check_write_error(THD *thd);
- 
-@@ -509,6 +597,7 @@
-   inline void unlock_index() { mysql_mutex_unlock(&LOCK_index);}
-   inline IO_CACHE *get_index_file() { return &index_file;}
-   inline uint32 get_open_count() { return open_count; }
-+  void set_status_variables();
- };
- 
- class Log_event_handler
---- a/sql/mysqld.cc
-+++ b/sql/mysqld.cc
-@@ -1495,6 +1495,7 @@
-   ha_end();
-   if (tc_log)
-     tc_log->close();
-+  TC_destroy();
-   delegates_destroy();
-   xid_cache_free();
-   table_def_free();
-@@ -3911,6 +3912,8 @@
-   query_response_time_init();
- #endif // HAVE_RESPONSE_TIME_DISTRIBUTION
-   /* We have to initialize the storage engines before CSV logging */
-+  TC_init();
-+
-   init_global_table_stats();
-   init_global_index_stats();
- 
-@@ -7872,6 +7875,7 @@
-   key_LOCK_error_messages, key_LOG_INFO_lock, key_LOCK_thread_count,
-   key_PARTITION_LOCK_auto_inc;
- PSI_mutex_key key_RELAYLOG_LOCK_index;
-+PSI_mutex_key key_LOCK_wakeup_ready, key_LOCK_group_commit_queue, key_LOCK_commit_ordered;
- 
- static PSI_mutex_info all_server_mutexes[]=
- {
-@@ -7892,6 +7896,7 @@
-   { &key_delayed_insert_mutex, "Delayed_insert::mutex", 0},
-   { &key_hash_filo_lock, "hash_filo::lock", 0},
-   { &key_LOCK_active_mi, "LOCK_active_mi", PSI_FLAG_GLOBAL},
-+  { &key_LOCK_commit_ordered, "LOCK_commit_ordered", PSI_FLAG_GLOBAL},
-   { &key_LOCK_connection_count, "LOCK_connection_count", PSI_FLAG_GLOBAL},
-   { &key_LOCK_crypt, "LOCK_crypt", PSI_FLAG_GLOBAL},
-   { &key_LOCK_delayed_create, "LOCK_delayed_create", PSI_FLAG_GLOBAL},
-@@ -7907,6 +7912,7 @@
-     "LOCK_global_index_stats", PSI_FLAG_GLOBAL},
-   { &key_LOCK_gdl, "LOCK_gdl", PSI_FLAG_GLOBAL},
-   { &key_LOCK_global_system_variables, "LOCK_global_system_variables", PSI_FLAG_GLOBAL},
-+  { &key_LOCK_group_commit_queue, "LOCK_group_commit_queue", PSI_FLAG_GLOBAL},
-   { &key_LOCK_manager, "LOCK_manager", PSI_FLAG_GLOBAL},
-   { &key_LOCK_prepared_stmt_count, "LOCK_prepared_stmt_count", PSI_FLAG_GLOBAL},
-   { &key_LOCK_rpl_status, "LOCK_rpl_status", PSI_FLAG_GLOBAL},
-@@ -7918,6 +7924,7 @@
-   { &key_LOCK_temporary_tables, "THD::LOCK_temporary_tables", 0},
-   { &key_LOCK_user_conn, "LOCK_user_conn", PSI_FLAG_GLOBAL},
-   { &key_LOCK_uuid_generator, "LOCK_uuid_generator", PSI_FLAG_GLOBAL},
-+  { &key_LOCK_wakeup_ready, "THD::LOCK_wakeup_ready", 0},
-   { &key_LOG_LOCK_log, "LOG::LOCK_log", 0},
-   { &key_master_info_data_lock, "Master_info::data_lock", 0},
-   { &key_master_info_run_lock, "Master_info::run_lock", 0},
-@@ -7965,6 +7972,7 @@
-   key_TABLE_SHARE_cond, key_user_level_lock_cond,
-   key_COND_thread_count, key_COND_thread_cache, key_COND_flush_thread_cache;
- PSI_cond_key key_RELAYLOG_update_cond;
-+PSI_cond_key key_COND_wakeup_ready, key_COND_queue_busy;
- 
- static PSI_cond_info all_server_conds[]=
- {
-@@ -7981,8 +7989,10 @@
-   { &key_RELAYLOG_update_cond, "MYSQL_RELAY_LOG::update_cond", 0},
-   { &key_COND_cache_status_changed, "Query_cache::COND_cache_status_changed", 0},
-   { &key_COND_manager, "COND_manager", PSI_FLAG_GLOBAL},
-+  { &key_COND_queue_busy, "COND_queue_busy", PSI_FLAG_GLOBAL},
-   { &key_COND_rpl_status, "COND_rpl_status", PSI_FLAG_GLOBAL},
-   { &key_COND_server_started, "COND_server_started", PSI_FLAG_GLOBAL},
-+  { &key_COND_wakeup_ready, "THD::COND_wakeup_ready", 0},
-   { &key_delayed_insert_cond, "Delayed_insert::cond", 0},
-   { &key_delayed_insert_cond_client, "Delayed_insert::cond_client", 0},
-   { &key_item_func_sleep_cond, "Item_func_sleep::cond", 0},
---- a/sql/mysqld.h
-+++ b/sql/mysqld.h
-@@ -274,6 +274,7 @@
-   key_structure_guard_mutex, key_TABLE_SHARE_LOCK_ha_data,
-   key_LOCK_error_messages, key_LOCK_thread_count, key_PARTITION_LOCK_auto_inc;
- extern PSI_mutex_key key_RELAYLOG_LOCK_index;
-+extern PSI_mutex_key key_LOCK_wakeup_ready, key_LOCK_group_commit_queue, key_LOCK_commit_ordered;
- 
- extern PSI_rwlock_key key_rwlock_LOCK_grant, key_rwlock_LOCK_logger,
-   key_rwlock_LOCK_sys_init_connect, key_rwlock_LOCK_sys_init_slave,
-@@ -294,6 +295,7 @@
-   key_TABLE_SHARE_cond, key_user_level_lock_cond,
-   key_COND_thread_count, key_COND_thread_cache, key_COND_flush_thread_cache;
- extern PSI_cond_key key_RELAYLOG_update_cond;
-+extern PSI_cond_key key_COND_wakeup_ready, key_COND_queue_busy;
- 
- extern PSI_thread_key key_thread_bootstrap, key_thread_delayed_insert,
-   key_thread_handle_manager, key_thread_kill_server, key_thread_main,
---- a/sql/sql_class.cc
-+++ b/sql/sql_class.cc
-@@ -1005,6 +1005,8 @@
-   mysql_mutex_init(key_LOCK_thd_data, &LOCK_thd_data, MY_MUTEX_INIT_FAST);
-   mysql_mutex_init(key_LOCK_temporary_tables, &LOCK_temporary_tables,
-                    MY_MUTEX_INIT_FAST);
-+  mysql_mutex_init(key_LOCK_wakeup_ready, &LOCK_wakeup_ready, MY_MUTEX_INIT_FAST);
-+  mysql_cond_init(key_COND_wakeup_ready, &COND_wakeup_ready, NULL);
- 
-   /* Variables with default values */
-   proc_info="login";
-@@ -1609,6 +1611,8 @@
-   my_free(db);
-   db= NULL;
-   free_root(&transaction.mem_root,MYF(0));
-+  mysql_cond_destroy(&COND_wakeup_ready);
-+  mysql_mutex_destroy(&LOCK_wakeup_ready);
-   mysql_mutex_destroy(&LOCK_thd_data);
-   mysql_mutex_destroy(&LOCK_temporary_tables);
- #ifndef DBUG_OFF
-@@ -5297,6 +5301,24 @@
-   DBUG_RETURN(0);
- }
- 
-+void
-+THD::wait_for_wakeup_ready()
-+{
-+  mysql_mutex_lock(&LOCK_wakeup_ready);
-+  while (!wakeup_ready)
-+    mysql_cond_wait(&COND_wakeup_ready, &LOCK_wakeup_ready);
-+  mysql_mutex_unlock(&LOCK_wakeup_ready);
-+}
-+
-+void
-+THD::signal_wakeup_ready()
-+{
-+  mysql_mutex_lock(&LOCK_wakeup_ready);
-+  wakeup_ready= true;
-+  mysql_mutex_unlock(&LOCK_wakeup_ready);
-+  mysql_cond_signal(&COND_wakeup_ready);
-+}
-+
- bool Discrete_intervals_list::append(ulonglong start, ulonglong val,
-                                  ulonglong incr)
- {
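
The two THD methods added above are a plain wait/signal handshake: a boolean flag guarded by LOCK_wakeup_ready plus the condition variable COND_wakeup_ready, with the flag cleared before queueing and re-checked in a loop so spurious wakeups are harmless. The standalone C++ sketch below only illustrates that pattern; it is not the MySQL THD API, and all names in it (Waiter, clear, wait, signal) are made up for the example.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

struct Waiter {
  std::mutex              lock;          // plays the role of LOCK_wakeup_ready
  std::condition_variable cond;          // plays the role of COND_wakeup_ready
  bool                    ready = false; // plays the role of wakeup_ready

  void clear()  { std::lock_guard<std::mutex> g(lock); ready = false; }
  void wait() {                          // cf. THD::wait_for_wakeup_ready()
    std::unique_lock<std::mutex> g(lock);
    cond.wait(g, [this] { return ready; });   // loop until the flag is set
  }
  void signal() {                        // cf. THD::signal_wakeup_ready()
    { std::lock_guard<std::mutex> g(lock); ready = true; }
    cond.notify_one();
  }
};

int main() {
  Waiter w;
  w.clear();                             // must be cleared before waiting
  std::thread leader([&w] { std::puts("leader: commit done"); w.signal(); });
  w.wait();                              // follower sleeps until signalled
  std::puts("follower: woken up");
  leader.join();
  return 0;
}
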
---- a/sql/sql_class.h
-+++ b/sql/sql_class.h
-@@ -3078,6 +3078,14 @@
-   LEX_STRING get_invoker_user() { return invoker_user; }
-   LEX_STRING get_invoker_host() { return invoker_host; }
-   bool has_invoker() { return invoker_user.length > 0; }
-+  void clear_wakeup_ready() { wakeup_ready= false; }
-+  /*
-+    Sleep waiting for others to wake us up with signal_wakeup_ready().
-+    Must call clear_wakeup_ready() before waiting.
-+  */
-+  void wait_for_wakeup_ready();
-+  /* Wake this thread up from wait_for_wakeup_ready(). */
-+  void signal_wakeup_ready();
- private:
- 
-   /** The current internal error handler for this thread, or NULL. */
-@@ -3120,6 +3128,16 @@
-    */
-   LEX_STRING invoker_user;
-   LEX_STRING invoker_host;
-+  /*
-+    Flag, mutex and condition for a thread to wait for a signal from another
-+    thread.
-+
-+    Currently used to wait for group commit to complete; it can also be used
-+    for other purposes.
-+  */
-+  bool wakeup_ready;
-+  mysql_mutex_t LOCK_wakeup_ready;
-+  mysql_cond_t COND_wakeup_ready;
- };
- 
- /* Returns string as 'IP' for the client-side of the connection represented by
---- a/sql/sql_parse.cc
-+++ b/sql/sql_parse.cc
-@@ -889,6 +889,10 @@
-   DBUG_ENTER("dispatch_command");
-   DBUG_PRINT("info",("packet: '%*.s'; command: %d", packet_length, packet, command));
- 
-+  DBUG_EXECUTE_IF("crash_dispatch_command_before",
-+                  { DBUG_PRINT("crash_dispatch_command_before", ("now"));
-+                    DBUG_ABORT(); });
-+
- #if defined(ENABLED_PROFILING)
-   thd->profiling.start_new_query();
- #endif
---- a/mysql-test/suite/perfschema/r/dml_setup_instruments.result
-+++ b/mysql-test/suite/perfschema/r/dml_setup_instruments.result
-@@ -11,9 +11,9 @@
- wait/synch/mutex/sql/HA_DATA_PARTITION::LOCK_auto_inc	YES	YES
- wait/synch/mutex/sql/LOCK_active_mi	YES	YES
- wait/synch/mutex/sql/LOCK_audit_mask	YES	YES
-+wait/synch/mutex/sql/LOCK_commit_ordered	YES	YES
- wait/synch/mutex/sql/LOCK_connection_count	YES	YES
- wait/synch/mutex/sql/LOCK_crypt	YES	YES
--wait/synch/mutex/sql/LOCK_delayed_create	YES	YES
- select * from performance_schema.setup_instruments
- where name like 'Wait/Synch/Rwlock/sql/%'
-   and name not in ('wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock')
-@@ -38,6 +38,7 @@
- NAME	ENABLED	TIMED
- wait/synch/cond/sql/COND_flush_thread_cache	YES	YES
- wait/synch/cond/sql/COND_manager	YES	YES
-+wait/synch/cond/sql/COND_queue_busy	YES	YES
- wait/synch/cond/sql/COND_queue_state	YES	YES
- wait/synch/cond/sql/COND_rpl_status	YES	YES
- wait/synch/cond/sql/COND_server_started	YES	YES
-@@ -45,7 +46,6 @@
- wait/synch/cond/sql/COND_thread_count	YES	YES
- wait/synch/cond/sql/Delayed_insert::cond	YES	YES
- wait/synch/cond/sql/Delayed_insert::cond_client	YES	YES
--wait/synch/cond/sql/Event_scheduler::COND_state	YES	YES
- select * from performance_schema.setup_instruments
- where name='Wait';
- select * from performance_schema.setup_instruments
---- a/storage/innobase/handler/ha_innodb.cc
-+++ b/storage/innobase/handler/ha_innodb.cc
-@@ -375,6 +375,9 @@
- static INNOBASE_SHARE *get_share(const char *table_name);
- static void free_share(INNOBASE_SHARE *share);
- static int innobase_close_connection(handlerton *hton, THD* thd);
-+#ifdef EXTENDED_FOR_COMMIT_ORDERED
-+static void innobase_commit_ordered(handlerton *hton, THD* thd, bool all);
-+#endif
- static int innobase_commit(handlerton *hton, THD* thd, bool all);
- static int innobase_rollback(handlerton *hton, THD* thd, bool all);
- static int innobase_rollback_to_savepoint(handlerton *hton, THD* thd,
-@@ -1699,7 +1702,10 @@
- 	trx_t*	trx)	/*!< in/out: InnoDB transaction handle */
- {
- 	DBUG_ENTER("innobase_trx_init");
-+#ifndef EXTENDED_FOR_COMMIT_ORDERED
-+	/* used by innobase_commit_ordered */
- 	DBUG_ASSERT(EQ_CURRENT_THD(thd));
-+#endif
- 	DBUG_ASSERT(thd == trx->mysql_thd);
- 
- 	trx->check_foreigns = !thd_test_options(
-@@ -1760,7 +1766,10 @@
- {
- 	trx_t*&	trx = thd_to_trx(thd);
- 
-+#ifndef EXTENDED_FOR_COMMIT_ORDERED
-+	/* used by innobase_commit_ordered */
- 	ut_ad(EQ_CURRENT_THD(thd));
-+#endif
- 
- 	if (trx == NULL) {
- 		trx = innobase_trx_allocate(thd);
-@@ -1846,6 +1855,7 @@
- {
- 	trx->is_registered = 0;
- 	trx->owns_prepare_mutex = 0;
-+	trx->called_commit_ordered = 0;
- }
- 
- /*********************************************************************//**
-@@ -1861,6 +1871,29 @@
- }
- 
- /*********************************************************************//**
-+Record that innobase_commit_ordered() has been run for this transaction. */
-+static inline
-+void
-+trx_called_commit_ordered_set(
-+/*==========================*/
-+	trx_t*	trx)
-+{
-+	ut_a(trx_is_registered_for_2pc(trx));
-+	trx->called_commit_ordered = 1;
-+}
-+
-+/*********************************************************************//**
-+Return true if innobase_commit_ordered() has been run for this transaction. */
-+static inline
-+bool
-+trx_called_commit_ordered(
-+/*======================*/
-+	const trx_t*	trx)
-+{
-+	return(trx->called_commit_ordered == 1);
-+}
-+
-+/*********************************************************************//**
- Check if transaction is started.
- @return true if transaction is in state started */
- static
-@@ -2435,6 +2468,9 @@
-         innobase_hton->savepoint_set=innobase_savepoint;
-         innobase_hton->savepoint_rollback=innobase_rollback_to_savepoint;
-         innobase_hton->savepoint_release=innobase_release_savepoint;
-+#ifdef EXTENDED_FOR_COMMIT_ORDERED
-+	innobase_hton->commit_ordered=innobase_commit_ordered;
-+#endif
-         innobase_hton->commit=innobase_commit;
-         innobase_hton->rollback=innobase_rollback;
-         innobase_hton->prepare=innobase_xa_prepare;
-@@ -3187,6 +3223,126 @@
- 	DBUG_RETURN(0);
- }
- 
-+#ifdef EXTENDED_FOR_COMMIT_ORDERED
-+/* MEMO:
-+  InnoDB is written with the intention that a trx is always accessed by its
-+  owner thd (it is not protected by any mutex/lock).
-+  So, the caller of innobase_commit_ordered() must be conscious of cache
-+  coherency between CPUs for the trx if it is called from another thd.
-+
-+  In MariaDB's first implementation the coherency appears to be protected by
-+  the pthread_mutex LOCK_wakeup_ready, so there is no problem for now.
-+
-+  But we should be aware of the importance of this coherency.
-+ */
-+/*****************************************************************//**
-+Low-level function for innobase_commit_ordered().*/
-+static
-+void
-+innobase_commit_ordered_low(
-+/*========================*/
-+	trx_t*	trx, 	/*!< in: Innodb transaction */
-+	THD*	thd)	/*!< in: MySQL thread handle */
-+{
-+	ulonglong tmp_pos;
-+	DBUG_ENTER("innobase_commit_ordered");
-+
-+	/* This part was from innobase_commit() */
-+
-+	/* We need the current binlog position for ibbackup to work.
-+	Note, the position is current because commit_ordered is guaranteed
-+	to be called in the same sequence as writing to the binlog. */
-+retry:
-+	if (innobase_commit_concurrency > 0) {
-+		mysql_mutex_lock(&commit_cond_m);
-+		commit_threads++;
-+
-+		if (commit_threads > innobase_commit_concurrency) {
-+			commit_threads--;
-+			mysql_cond_wait(&commit_cond,
-+					  &commit_cond_m);
-+			mysql_mutex_unlock(&commit_cond_m);
-+			goto retry;
-+		}
-+		else {
-+			mysql_mutex_unlock(&commit_cond_m);
-+		}
-+	}
-+
-+	mysql_bin_log_commit_pos(thd, &tmp_pos, &(trx->mysql_log_file_name));
-+	trx->mysql_log_offset = (ib_int64_t) tmp_pos;
-+
-+	/* Don't do write + flush right now. For group commit
-+	   to work we want to do the flush in the innobase_commit()
-+	   method, which runs without holding any locks. */
-+	trx->flush_log_later = TRUE;
-+	innobase_commit_low(trx);
-+	trx->flush_log_later = FALSE;
-+
-+	if (innobase_commit_concurrency > 0) {
-+		mysql_mutex_lock(&commit_cond_m);
-+		commit_threads--;
-+		mysql_cond_signal(&commit_cond);
-+		mysql_mutex_unlock(&commit_cond_m);
-+	}
-+
-+	DBUG_VOID_RETURN;
-+}
-+
-+/*****************************************************************//**
-+Perform the first, fast part of InnoDB commit.
-+
-+Doing it in this call ensures that we get the same commit order here
-+as in binlog and any other participating transactional storage engines.
-+
-+Note that we want to do as little as really needed here, as we run
-+under a global mutex. The expensive fsync() is done later, in
-+innobase_commit(), without a lock so group commit can take place.
-+
-+Note also that this method can be called from a different thread than
-+the one handling the rest of the transaction. */
-+static
-+void
-+innobase_commit_ordered(
-+/*====================*/
-+	handlerton *hton, /*!< in: Innodb handlerton */
-+	THD*	thd,	/*!< in: MySQL thread handle of the user for whom
-+			the transaction should be committed */
-+	bool	all)	/*!< in:	TRUE - commit transaction
-+				FALSE - the current SQL statement ended */
-+{
-+	trx_t*		trx;
-+	DBUG_ENTER("innobase_commit_ordered");
-+	DBUG_ASSERT(hton == innodb_hton_ptr);
-+
-+	trx = check_trx_exists(thd);
-+
-+	/* Since we will reserve the kernel mutex, we have to release
-+	the search system latch first to obey the latching order. */
-+
-+	if (trx->has_search_latch) {
-+		trx_search_latch_release_if_reserved(trx);
-+	}
-+
-+	if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) {
-+		/* We cannot throw error here; instead we will catch this error
-+		again in innobase_commit() and report it from there. */
-+		DBUG_VOID_RETURN;
-+	}
-+
-+	/* commit_ordered is only called when committing the whole transaction
-+	(or an SQL statement when autocommit is on). */
-+	DBUG_ASSERT(all ||
-+		(!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)));
-+
-+	innobase_commit_ordered_low(trx, thd);
-+
-+	trx_called_commit_ordered_set(trx);
-+
-+	DBUG_VOID_RETURN;
-+}
-+#endif /* EXTENDED_FOR_COMMIT_ORDERED */
-+
- /*****************************************************************//**
- Commits a transaction in an InnoDB database or marks an SQL statement
- ended.
-@@ -3238,6 +3394,16 @@
- 		/* We were instructed to commit the whole transaction, or
- 		this is an SQL statement end and autocommit is on */
- 
-+#ifdef EXTENDED_FOR_COMMIT_ORDERED
-+		ut_ad(!trx_has_prepare_commit_mutex(trx));
-+
-+		/* Run the fast part of commit if we did not already. */
-+		if (!trx_called_commit_ordered(trx)) {
-+			innobase_commit_ordered_low(trx, thd);
-+		}
-+#else
-+		ut_ad(!trx_called_commit_ordered(trx));
-+
- 		/* We need current binlog position for ibbackup to work.
- 		Note, the position is current because of
- 		prepare_commit_mutex */
-@@ -3292,6 +3458,7 @@
-   
- 			mysql_mutex_unlock(&prepare_commit_mutex);
-   		}
-+#endif /* EXTENDED_FOR_COMMIT_ORDERED */
-   
- 		trx_deregister_from_2pc(trx);
- 
-@@ -10981,6 +11148,7 @@
- 
- 	srv_active_wake_master_thread();
- 
-+#ifndef EXTENDED_FOR_COMMIT_ORDERED
- 	if (thd_sql_command(thd) != SQLCOM_XA_PREPARE
- 	    && (all
- 		|| !thd_test_options(
-@@ -11007,6 +11175,7 @@
- 		mysql_mutex_lock(&prepare_commit_mutex);
- 		trx_owns_prepare_commit_mutex_set(trx);
- 	}
-+#endif /* ifndef EXTENDED_FOR_COMMIT_ORDERED */
- 
- 	return(error);
- }
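
The comments above describe the split this patch makes: a fast commit_ordered() phase that runs under a single global mutex to fix the commit order, and the expensive log flush left to commit(), which runs without that mutex so flushes from several transactions can be grouped. The standalone C++ sketch below is only an illustration of that split; it is not MariaDB/Percona/InnoDB code, and every name in it (Trx, commit_ordered, commit, LOCK_commit_ordered) is hypothetical.

#include <cstdio>
#include <mutex>
#include <vector>

std::mutex LOCK_commit_ordered;        // single global ordering mutex (illustrative)

struct Trx { int id; bool ordered_done; };

// Fast part: make the transaction committed in memory, in a fixed order.
void commit_ordered(Trx &trx) {
  std::lock_guard<std::mutex> g(LOCK_commit_ordered);
  std::printf("trx %d: in-memory commit, order fixed\n", trx.id);
  trx.ordered_done = true;
}

// Slow part: durability; runs without the ordering mutex so it can be grouped.
void commit(Trx &trx) {
  if (!trx.ordered_done)               // fall back if the fast part never ran
    commit_ordered(trx);
  std::printf("trx %d: log flush/fsync (groupable)\n", trx.id);
}

int main() {
  std::vector<Trx> group = {{1, false}, {2, false}, {3, false}};
  for (Trx &t : group) commit_ordered(t);  // the group leader runs these in order
  for (Trx &t : group) commit(t);          // the expensive flushes follow later
  return 0;
}
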
---- a/storage/innobase/handler/ha_innodb.h
-+++ b/storage/innobase/handler/ha_innodb.h
-@@ -240,6 +240,12 @@
- struct charset_info_st *thd_charset(MYSQL_THD thd);
- LEX_STRING *thd_query_string(MYSQL_THD thd);
- 
-+#ifdef EXTENDED_FOR_COMMIT_ORDERED
-+/** Get the file name and position of the MySQL binlog corresponding to the
-+ * current commit.
-+ */
-+void mysql_bin_log_commit_pos(THD *thd, ulonglong *out_pos, const char **out_file);
-+#else
- /** Get the file name of the MySQL binlog.
-  * @return the name of the binlog file
-  */
-@@ -249,6 +255,7 @@
-  * @return byte offset from the beginning of the binlog
-  */
- ulonglong mysql_bin_log_file_pos(void);
-+#endif
- 
- /**
-   Check if a user thread is a replication slave thread
---- a/storage/innobase/include/trx0trx.h
-+++ b/storage/innobase/include/trx0trx.h
-@@ -494,6 +494,7 @@
- 					this is set to 1 then registered should
- 					also be set to 1. This is used in the
- 					XA code */
-+	unsigned	called_commit_ordered:1;/* 1 if innobase_commit_ordered has run. */
- 	/*------------------------------*/
- 	ulint		isolation_level;/* TRX_ISO_REPEATABLE_READ, ... */
- 	ulint		check_foreigns;	/* normally TRUE, but if the user
---- a/storage/innobase/trx/trx0trx.c
-+++ b/storage/innobase/trx/trx0trx.c
-@@ -111,6 +111,7 @@
- 
- 	trx->is_registered = 0;
- 	trx->owns_prepare_mutex = 0;
-+	trx->called_commit_ordered = 0;
- 
- 	trx->start_time = ut_time();
- 
---- /dev/null
-+++ b/mysql-test/r/group_commit.result
-@@ -0,0 +1,63 @@
-+CREATE TABLE t1 (a VARCHAR(10) PRIMARY KEY) ENGINE=innodb;
-+SELECT variable_value INTO @commits FROM information_schema.global_status
-+WHERE variable_name = 'binlog_commits';
-+SELECT variable_value INTO @group_commits FROM information_schema.global_status
-+WHERE variable_name = 'binlog_group_commits';
-+SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL group1_running WAIT_FOR group2_queued";
-+INSERT INTO t1 VALUES ("con1");
-+set DEBUG_SYNC= "now WAIT_FOR group1_running";
-+SET DEBUG_SYNC= "commit_group_commit_queue SIGNAL group2_con2";
-+SET DEBUG_SYNC= "commit_after_release_LOCK_log WAIT_FOR group3_committed";
-+SET DEBUG_SYNC= "commit_after_group_run_commit_ordered SIGNAL group2_visible WAIT_FOR group2_checked";
-+INSERT INTO t1 VALUES ("con2");
-+SET DEBUG_SYNC= "now WAIT_FOR group2_con2";
-+SET DEBUG_SYNC= "commit_group_commit_queue SIGNAL group2_con3";
-+INSERT INTO t1 VALUES ("con3");
-+SET DEBUG_SYNC= "now WAIT_FOR group2_con3";
-+SET DEBUG_SYNC= "commit_group_commit_queue SIGNAL group2_con4";
-+INSERT INTO t1 VALUES ("con4");
-+SET DEBUG_SYNC= "now WAIT_FOR group2_con4";
-+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
-+SELECT * FROM t1 ORDER BY a;
-+a
-+SET DEBUG_SYNC= "now SIGNAL group2_queued";
-+SELECT * FROM t1 ORDER BY a;
-+a
-+con1
-+SET DEBUG_SYNC= "commit_before_get_LOCK_commit_ordered SIGNAL group3_con5";
-+SET DEBUG_SYNC= "commit_after_get_LOCK_log SIGNAL con5_leader WAIT_FOR con6_queued";
-+INSERT INTO t1 VALUES ("con5");
-+SET DEBUG_SYNC= "now WAIT_FOR con5_leader";
-+SET DEBUG_SYNC= "commit_group_commit_queue SIGNAL con6_queued";
-+INSERT INTO t1 VALUES ("con6");
-+SET DEBUG_SYNC= "now WAIT_FOR group3_con5";
-+SELECT * FROM t1 ORDER BY a;
-+a
-+con1
-+SET DEBUG_SYNC= "now SIGNAL group3_committed";
-+SET DEBUG_SYNC= "now WAIT_FOR group2_visible";
-+SELECT * FROM t1 ORDER BY a;
-+a
-+con1
-+con2
-+con3
-+con4
-+SET DEBUG_SYNC= "now SIGNAL group2_checked";
-+SELECT * FROM t1 ORDER BY a;
-+a
-+con1
-+con2
-+con3
-+con4
-+con5
-+con6
-+SELECT variable_value - @commits FROM information_schema.global_status
-+WHERE variable_name = 'binlog_commits';
-+variable_value - @commits
-+6
-+SELECT variable_value - @group_commits FROM information_schema.global_status
-+WHERE variable_name = 'binlog_group_commits';
-+variable_value - @group_commits
-+3
-+SET DEBUG_SYNC= 'RESET';
-+DROP TABLE t1;
---- /dev/null
-+++ b/mysql-test/r/group_commit_binlog_pos.result
-@@ -0,0 +1,35 @@
-+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=innodb;
-+INSERT INTO t1 VALUES (0);
-+SET DEBUG_SYNC= "commit_after_get_LOCK_log SIGNAL con1_waiting WAIT_FOR con3_queued";
-+SET DEBUG_SYNC= "commit_loop_entry_commit_ordered SIGNAL con1_loop WAIT_FOR con1_loop_cont EXECUTE 3";
-+INSERT INTO t1 VALUES (1);
-+SET DEBUG_SYNC= "now WAIT_FOR con1_waiting";
-+SET DEBUG_SYNC= "commit_group_commit_queue SIGNAL con2_queued";
-+INSERT INTO t1 VALUES (2);
-+SET DEBUG_SYNC= "now WAIT_FOR con2_queued";
-+SET DEBUG_SYNC= "commit_group_commit_queue SIGNAL con3_queued";
-+INSERT INTO t1 VALUES (3);
-+SET DEBUG_SYNC= "now WAIT_FOR con1_loop";
-+SET DEBUG_SYNC= "now SIGNAL con1_loop_cont";
-+SET DEBUG_SYNC= "now WAIT_FOR con1_loop";
-+SET DEBUG_SYNC= "now SIGNAL con1_loop_cont";
-+SET DEBUG_SYNC= "now WAIT_FOR con1_loop";
-+SELECT * FROM t1 ORDER BY a;
-+a
-+0
-+1
-+2
-+SET SESSION debug="+d,crash_dispatch_command_before";
-+SELECT 1;
-+Got one of the listed errors
-+Got one of the listed errors
-+Got one of the listed errors
-+SELECT * FROM t1 ORDER BY a;
-+a
-+0
-+1
-+2
-+3
-+InnoDB: Last MySQL binlog file position 0 768, file name ./master-bin.000001
-+SET DEBUG_SYNC= 'RESET';
-+DROP TABLE t1;
---- /dev/null
-+++ b/mysql-test/r/group_commit_crash.result
-@@ -0,0 +1,120 @@
-+CREATE TABLE t1(a CHAR(255),
-+b CHAR(255),
-+c CHAR(255),
-+d CHAR(255),
-+id INT AUTO_INCREMENT,
-+PRIMARY KEY(id)) ENGINE=InnoDB;
-+create table t2 like t1;
-+create procedure setcrash(IN i INT)
-+begin
-+CASE i
-+WHEN 1 THEN SET SESSION debug="d,crash_commit_after_prepare";
-+WHEN 2 THEN SET SESSION debug="d,crash_commit_after_log";
-+WHEN 3 THEN SET SESSION debug="d,crash_commit_before_unlog";
-+WHEN 4 THEN SET SESSION debug="d,crash_commit_after";
-+WHEN 5 THEN SET SESSION debug="d,crash_commit_before";
-+ELSE BEGIN END;
-+END CASE;
-+end //
-+FLUSH TABLES;
-+INSERT INTO t2(a, b, c, d) VALUES ('a', 'b', 'c', 'd');
-+INSERT INTO t2(a, b, c, d) VALUES ('a', 'b', 'c', 'd');
-+INSERT INTO t2(a, b, c, d) VALUES ('a', 'b', 'c', 'd');
-+INSERT INTO t2(a, b, c, d) VALUES ('a', 'b', 'c', 'd');
-+INSERT INTO t2(a, b, c, d) VALUES ('a', 'b', 'c', 'd');
-+INSERT INTO t2(a, b, c, d) VALUES ('a', 'b', 'c', 'd');
-+INSERT INTO t2(a, b, c, d) VALUES ('a', 'b', 'c', 'd');
-+INSERT INTO t2(a, b, c, d) VALUES ('a', 'b', 'c', 'd');
-+INSERT INTO t2(a, b, c, d) VALUES ('a', 'b', 'c', 'd');
-+INSERT INTO t2(a, b, c, d) VALUES ('a', 'b', 'c', 'd');
-+RESET MASTER;
-+START TRANSACTION;
-+insert into t1 select * from t2;
-+call setcrash(5);
-+COMMIT;
-+Got one of the listed errors
-+SELECT * FROM t1 ORDER BY id;
-+a	b	c	d	id
-+SHOW BINLOG EVENTS LIMIT 2,1;
-+Log_name	Pos	Event_type	Server_id	End_log_pos	Info
-+delete from t1;
-+RESET MASTER;
-+START TRANSACTION;
-+insert into t1 select * from t2;
-+call setcrash(4);
-+COMMIT;
-+Got one of the listed errors
-+SELECT * FROM t1 ORDER BY id;
-+a	b	c	d	id
-+a	b	c	d	1
-+a	b	c	d	2
-+a	b	c	d	3
-+a	b	c	d	4
-+a	b	c	d	5
-+a	b	c	d	6
-+a	b	c	d	7
-+a	b	c	d	8
-+a	b	c	d	9
-+a	b	c	d	10
-+SHOW BINLOG EVENTS LIMIT 2,1;
-+Log_name	Pos	Event_type	Server_id	End_log_pos	Info
-+master-bin.000001	175	Query	1	269	use `test`; insert into t1 select * from t2
-+delete from t1;
-+RESET MASTER;
-+START TRANSACTION;
-+insert into t1 select * from t2;
-+call setcrash(3);
-+COMMIT;
-+Got one of the listed errors
-+SELECT * FROM t1 ORDER BY id;
-+a	b	c	d	id
-+a	b	c	d	1
-+a	b	c	d	2
-+a	b	c	d	3
-+a	b	c	d	4
-+a	b	c	d	5
-+a	b	c	d	6
-+a	b	c	d	7
-+a	b	c	d	8
-+a	b	c	d	9
-+a	b	c	d	10
-+SHOW BINLOG EVENTS LIMIT 2,1;
-+Log_name	Pos	Event_type	Server_id	End_log_pos	Info
<Skipped 49767 lines>
================================================================

---- gitweb:

http://git.pld-linux.org/gitweb.cgi/packages/mysql.git/commitdiff/47dafa00dd66f27be9bd61a900c90abe95f4e3eb


