diff --git a/mysql-test/suite/innodb/include/autoinc_persist_alter.inc b/mysql-test/suite/innodb/include/autoinc_persist_alter.inc new file mode 100644 index 000000000000..ddff573b0565 --- /dev/null +++ b/mysql-test/suite/innodb/include/autoinc_persist_alter.inc @@ -0,0 +1,61 @@ + +eval CREATE TABLE $table LIKE $template; + +eval INSERT INTO $table SELECT * FROM $template; + +eval SELECT * FROM $table; + +eval SHOW CREATE TABLE $table; + +--echo # This will keep the autoinc counter +eval ALTER TABLE $table AUTO_INCREMENT = 250, ALGORITHM = $algorithm; +--echo # We expect the counter to be 250 +eval SHOW CREATE TABLE $table; + +--echo # This should keep the autoinc counter as well +eval ALTER TABLE $table ADD COLUMN b INT, ALGORITHM = $algorithm; +--echo # We expect the counter to be 250 +eval SHOW CREATE TABLE $table; + +eval DELETE FROM $table WHERE a > 150; + +eval SELECT * FROM $table; + +--echo # This should reset the autoinc counter to the one specified +--echo # Since it's smaller than current one but bigger than existing +--echo # biggest counter in the table +eval ALTER TABLE $table AUTO_INCREMENT = 180, ALGORITHM = $algorithm; +--echo # We expect the counter to be 180 +eval SHOW CREATE TABLE $table; + +--echo # This should reset the autoinc counter to the next value of +--echo # current max counter in the table, since the specified value +--echo # is smaller than the existing biggest value(50 < 123) +eval ALTER TABLE $table DROP COLUMN b, AUTO_INCREMENT = 50, ALGORITHM = $algorithm; +--echo # We expect the counter to be 123 +eval SHOW CREATE TABLE $table; + +eval INSERT INTO $table VALUES(0), (0); + +eval SELECT MAX(a) AS `Expect 124` FROM $table; + +eval OPTIMIZE TABLE $table; + +eval SHOW CREATE TABLE $table; + +--source include/restart_mysqld.inc + +--echo # We expect the counter to still be 125 +eval SHOW CREATE TABLE $table; + +eval DELETE FROM $table WHERE a >= 123; + +eval CREATE UNIQUE INDEX idx_aa ON $table(a); + +--source 
include/restart_mysqld.inc + +eval INSERT INTO $table VALUES(0), (0); + +eval SELECT MAX(a) AS `Expect 126` FROM $table; + +eval DROP TABLE $table; diff --git a/mysql-test/suite/innodb/r/autoinc_persist.result b/mysql-test/suite/innodb/r/autoinc_persist.result new file mode 100644 index 000000000000..94d60c480cc6 --- /dev/null +++ b/mysql-test/suite/innodb/r/autoinc_persist.result @@ -0,0 +1,1111 @@ +# This test case is introduced to test the persisted autoinc, basic +# autoinc features are not the main part of this one. +# Pre-create several tables +CREATE TABLE t1(a TINYINT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t1 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31); +SELECT * FROM t1; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +CREATE TABLE t2(a TINYINT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t2 VALUES(-5); +ERROR 22003: Out of range value for column 'a' at row 1 +INSERT INTO t2 VALUES(0), (0), (0), (0), (8), (10), (0), +(20), (30), (31); +SELECT * FROM t2; +a +1 +2 +3 +4 +8 +10 +11 +20 +30 +31 +CREATE TABLE t3(a SMALLINT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t3 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31), (1024), (4096); +SELECT * FROM t3; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +1024 +4096 +CREATE TABLE t4(a SMALLINT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t4 VALUES(-5); +ERROR 22003: Out of range value for column 'a' at row 1 +INSERT INTO t4 VALUES(0), (0), (0), (0), (8), (10), (0), +(20), (30), (31), (1024), (4096); +SELECT * FROM t4; +a +1 +2 +3 +4 +8 +10 +11 +20 +30 +31 +1024 +4096 +CREATE TABLE t5(a MEDIUMINT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t5 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31), (1000000), (1000005); +SELECT * FROM t5; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +1000000 +1000005 +CREATE TABLE t6(a MEDIUMINT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t6 VALUES(-5); +ERROR 22003: Out of range value for column 'a' at 
row 1 +INSERT INTO t6 VALUES(0), (0), (0), (0), (8), (10), (0), +(20), (30), (31), (1000000), (1000005); +SELECT * FROM t6; +a +1 +2 +3 +4 +8 +10 +11 +20 +30 +31 +1000000 +1000005 +CREATE TABLE t7(a INT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t7 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31), (100000000), (100000008); +SELECT * FROM t7; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +100000000 +100000008 +CREATE TABLE t8(a INT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t8 VALUES(-5); +ERROR 22003: Out of range value for column 'a' at row 1 +INSERT INTO t8 VALUES(0), (0), (0), (0), (8), (10), (0), +(20), (30), (31), (100000000), (100000008); +SELECT * FROM t8; +a +1 +2 +3 +4 +8 +10 +11 +20 +30 +31 +100000000 +100000008 +CREATE TABLE t9(a BIGINT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t9 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31), (100000000000), (100000000006); +SELECT * FROM t9; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +100000000000 +100000000006 +CREATE TABLE t10(a BIGINT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t10 VALUES(-5); +ERROR 22003: Out of range value for column 'a' at row 1 +INSERT INTO t10 VALUES(0), (0), (0), (0), (8), (10), (0), +(20), (30), (31), (100000000000), (100000000006); +SELECT * FROM t10; +a +1 +2 +3 +4 +8 +10 +11 +20 +30 +31 +100000000000 +100000000006 +CREATE TABLE t11(a FLOAT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t11 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31); +SELECT * FROM t11; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +set global innodb_flush_log_at_trx_commit=1; +CREATE TABLE t12(a DOUBLE AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t12 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31); +SELECT * FROM t12; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +CREATE TABLE t13(a INT AUTO_INCREMENT PRIMARY KEY) ENGINE = InnoDB, +AUTO_INCREMENT = 1234; +# Scenario 1: Normal restart, to test if the counters are persisted +# restart 
+# We expect these results should be equal to above SELECTs +SELECT * FROM t1; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +SELECT * FROM t2; +a +1 +2 +3 +4 +8 +10 +11 +20 +30 +31 +SELECT * FROM t3; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +1024 +4096 +SELECT * FROM t4; +a +1 +2 +3 +4 +8 +10 +11 +20 +30 +31 +1024 +4096 +SELECT * FROM t5; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +1000000 +1000005 +SELECT * FROM t6; +a +1 +2 +3 +4 +8 +10 +11 +20 +30 +31 +1000000 +1000005 +SELECT * FROM t7; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +100000000 +100000008 +SELECT * FROM t8; +a +1 +2 +3 +4 +8 +10 +11 +20 +30 +31 +100000000 +100000008 +SELECT * FROM t9; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +100000000000 +100000000006 +SELECT * FROM t10; +a +1 +2 +3 +4 +8 +10 +11 +20 +30 +31 +100000000000 +100000000006 +SELECT * FROM t11; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +SELECT * FROM t12; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +SELECT * FROM t13; +a +SHOW CREATE TABLE t13; +Table Create Table +t13 CREATE TABLE `t13` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=1234 DEFAULT CHARSET=latin1 +INSERT INTO t13 VALUES(0); +SELECT a AS `Expect 1234` FROM t13; +Expect 1234 +1234 +# Scenario 2: Delete some values, to test the counters should not be the +# one which is the largest in current table +set global innodb_flush_log_at_trx_commit=1; +DELETE FROM t1 WHERE a > 30; +SELECT MAX(a) AS `Expect 30` FROM t1; +Expect 30 +30 +DELETE FROM t3 WHERE a > 2000; +SELECT MAX(a) AS `Expect 2000` FROM t3; +Expect 2000 +1024 +DELETE FROM t5 WHERE a > 1000000; +SELECT MAX(a) AS `Expect 1000000` FROM t5; +Expect 1000000 +1000000 +DELETE FROM t7 WHERE a > 100000000; +SELECT MAX(a) AS `Expect 100000000` FROM t7; +Expect 100000000 +100000000 +DELETE FROM t9 WHERE a > 100000000000; +SELECT MAX(a) AS `Expect 100000000000` FROM t9; +Expect 100000000000 +100000000000 +# restart +INSERT INTO t1 VALUES(0), (0); +SELECT MAX(a) AS `Expect 33` FROM t1; +Expect 33 +33 +INSERT 
INTO t3 VALUES(0), (0); +SELECT MAX(a) AS `Expect 4098` FROM t3; +Expect 4098 +4098 +INSERT INTO t5 VALUES(0), (0); +SELECT MAX(a) AS `Expect 1000007` FROM t5; +Expect 1000007 +1000007 +INSERT INTO t7 VALUES(0), (0); +SELECT MAX(a) AS `Expect 100000010` FROM t7; +Expect 100000010 +100000010 +INSERT INTO t9 VALUES(0), (0); +SELECT MAX(a) AS `Expect 100000000008` FROM t9; +Expect 100000000008 +100000000008 +# Scenario 3: Insert some bigger counters, the next counter should start +# from there +INSERT INTO t1 VALUES(40), (0); +INSERT INTO t1 VALUES(42), (0); +SELECT a AS `Expect 43, 42` FROM t1 ORDER BY a DESC LIMIT 4; +Expect 43, 42 +43 +42 +41 +40 +INSERT INTO t3 VALUES(5000), (0); +INSERT INTO t3 VALUES(5010), (0); +SELECT a AS `Expect 5011, 5010` FROM t3 ORDER BY a DESC LIMIT 4; +Expect 5011, 5010 +5011 +5010 +5001 +5000 +INSERT INTO t5 VALUES(1000010), (0); +INSERT INTO t5 VALUES(1000020), (0); +SELECT a AS `Expect 1000021, 1000020` FROM t5 ORDER BY a DESC LIMIT 4; +Expect 1000021, 1000020 +1000021 +1000020 +1000011 +1000010 +INSERT INTO t7 VALUES(100000020), (0); +INSERT INTO t7 VALUES(100000030), (0); +SELECT a AS `Expect 100000031, 100000030` FROM t7 ORDER BY a DESC LIMIT 4; +Expect 100000031, 100000030 +100000031 +100000030 +100000021 +100000020 +INSERT INTO t9 VALUES(100000000010), (0); +INSERT INTO t9 VALUES(100000000020), (0); +SELECT a AS `Expect 100000000021, 100000000020` FROM t9 ORDER BY a DESC LIMIT 4; +Expect 100000000021, 100000000020 +100000000021 +100000000020 +100000000011 +100000000010 +# Scenario 4: Update some values, to test the counters should be updated +# to the bigger value, but not smaller value. +INSERT INTO t1 VALUES(50), (55); +UPDATE t1 SET a = 105 WHERE a = 5; +UPDATE t1 SET a = 100 WHERE a = 55; +# This should insert 102, 106, 107, and make next counter 109. 
+INSERT INTO t1 VALUES(102), (0), (0); +SELECT a AS `Expect 107, 106` FROM t1 ORDER BY a DESC LIMIT 2; +Expect 107, 106 +107 +106 +DELETE FROM t1 WHERE a > 105; +INSERT INTO t1 VALUES(0); +SELECT MAX(a) AS `Expect 109` FROM t1; +Expect 109 +109 +# Test the same things on t3, t5, t7, t9, to test if DDTableBuffer would +# be updated accordingly +INSERT INTO t3 VALUES(60), (65); +UPDATE t3 SET a = 6005 WHERE a = 5; +UPDATE t3 SET a = 6000 WHERE a = 60; +# This should insert 6002, 6006, 6007, and make next counter 6009. +INSERT INTO t3 VALUES(6002), (0), (0); +SELECT a AS `Expect 6007, 6006` FROM t3 ORDER BY a DESC LIMIT 2; +Expect 6007, 6006 +6007 +6006 +DELETE FROM t3 WHERE a > 6005; +INSERT INTO t3 VALUES(0); +SELECT MAX(a) AS `Expect 6009` FROM t3; +Expect 6009 +6009 +INSERT INTO t5 VALUES(100), (200); +UPDATE t5 SET a = 1000105 WHERE a = 5; +UPDATE t5 SET a = 1000100 WHERE a = 100; +# This should insert 1000102, 1000106, 1000107, and make next counter +# 1000109. +INSERT INTO t5 VALUES(1000102), (0), (0); +SELECT a AS `Expect 1000107, 1000106` FROM t5 ORDER BY a DESC LIMIT 2; +Expect 1000107, 1000106 +1000107 +1000106 +DELETE FROM t5 WHERE a > 1000105; +INSERT INTO t5 VALUES(0); +SELECT MAX(a) AS `Expect 1000109` FROM t5; +Expect 1000109 +1000109 +INSERT INTO t7 VALUES(100), (200); +UPDATE t7 SET a = 100000105 WHERE a = 5; +UPDATE t7 SET a = 100000100 WHERE a = 100; +# This should insert 100000102, 1100000106, 100000107, and make next +# counter 100000109. 
+INSERT INTO t7 VALUES(100000102), (0), (0); +SELECT a AS `Expect 100000107, 100000106` FROM t7 ORDER BY a DESC LIMIT 2; +Expect 100000107, 100000106 +100000107 +100000106 +DELETE FROM t7 WHERE a > 100000105; +INSERT INTO t7 VALUES(0); +SELECT MAX(a) AS `Expect 100000109` FROM t7; +Expect 100000109 +100000109 +set global innodb_flush_log_at_trx_commit=1; +INSERT INTO t9 VALUES(100), (200); +UPDATE t9 SET a = 100000000105 WHERE a = 5; +UPDATE t9 SET a = 100000000100 WHERE a = 100; +# This should insert 100000000102, 100000000106, 100000000107, and make +# next counter 100000000109. +INSERT INTO t9 VALUES(100000000102), (0), (0); +SELECT a AS `Expect 100000000107, 100000000106` FROM t9 ORDER BY a DESC LIMIT 2; +Expect 100000000107, 100000000106 +100000000107 +100000000106 +DELETE FROM t9 WHERE a > 100000000105; +INSERT INTO t9 VALUES(0); +SELECT MAX(a) AS `Expect 100000000109` FROM t9; +Expect 100000000109 +100000000109 +# restart +INSERT INTO t1 VALUES(0), (0); +SELECT a AS `Expect 110, 111` FROM t1 ORDER BY a DESC LIMIT 2; +Expect 110, 111 +111 +110 +INSERT INTO t3 VALUES(0), (0); +SELECT a AS `Expect 6010, 6011` FROM t3 ORDER BY a DESC LIMIT 2; +Expect 6010, 6011 +6011 +6010 +INSERT INTO t5 VALUES(0), (0); +SELECT a AS `Expect 1100111, 1100110` FROM t5 ORDER BY a DESC LIMIT 2; +Expect 1100111, 1100110 +1000111 +1000110 +INSERT INTO t7 VALUES(0), (0); +SELECT a AS `Expect 100000111, 100000110` FROM t7 ORDER BY a DESC LIMIT 2; +Expect 100000111, 100000110 +100000111 +100000110 +INSERT INTO t9 VALUES(0), (0); +SELECT a AS `Expect 100000000111, 100000000110` FROM t9 ORDER BY a DESC LIMIT 2; +Expect 100000000111, 100000000110 +100000000111 +100000000110 +# Scenario 5: Test kill the server +INSERT INTO t1 VALUES(125); +DELETE FROM t1 WHERE a = 125; +INSERT INTO t3 VALUES(6100); +DELETE FROM t3 WHERE a = 6100; +INSERT INTO t5 VALUES(1100200); +DELETE FROM t5 WHERE a = 1100200; +INSERT INTO t7 VALUES(100000200); +DELETE FROM t7 WHERE a = 100000200; +set global 
innodb_flush_log_at_trx_commit=1; +INSERT INTO t9 VALUES(100000000200); +DELETE FROM t9 WHERE a = 100000000200; +# Kill and restart +INSERT INTO t1 VALUES(0); +SELECT a AS `Expect 126` FROM t1 ORDER BY a DESC LIMIT 1; +Expect 126 +126 +INSERT INTO t3 VALUES(0); +SELECT a AS `Expect 6101` FROM t3 ORDER BY a DESC LIMIT 1; +Expect 6101 +6101 +INSERT INTO t5 VALUES(0); +SELECT a AS `Expect 1100201` FROM t5 ORDER BY a DESC LIMIT 1; +Expect 1100201 +1100201 +INSERT INTO t7 VALUES(0); +SELECT a AS `Expect 100000201` FROM t7 ORDER BY a DESC LIMIT 1; +Expect 100000201 +100000201 +INSERT INTO t9 VALUES(0); +SELECT a AS `Expect 100000000201` FROM t9 ORDER BY a DESC LIMIT 1; +Expect 100000000201 +100000000201 +# Scenario 6: Test truncate will reset the counters to 0 +TRUNCATE TABLE t1; +TRUNCATE TABLE t3; +TRUNCATE TABLE t5; +TRUNCATE TABLE t7; +TRUNCATE TABLE t9; +INSERT INTO t1 VALUES(0), (0); +SELECT * FROM t1; +a +1 +2 +INSERT INTO t3 VALUES(0), (0); +SELECT * FROM t3; +a +1 +2 +INSERT INTO t5 VALUES(0), (0); +SELECT * FROM t5; +a +1 +2 +INSERT INTO t7 VALUES(0), (0); +SELECT * FROM t7; +a +1 +2 +INSERT INTO t9 VALUES(0), (0); +SELECT * FROM t9; +a +1 +2 +set global innodb_flush_log_at_trx_commit=1; +TRUNCATE TABLE t1; +TRUNCATE TABLE t3; +TRUNCATE TABLE t5; +TRUNCATE TABLE t7; +TRUNCATE TABLE t9; +# Kill and restart +INSERT INTO t1 VALUES(0), (0); +SELECT * FROM t1; +a +1 +2 +INSERT INTO t3 VALUES(0), (0); +SELECT * FROM t3; +a +1 +2 +INSERT INTO t5 VALUES(0), (0); +SELECT * FROM t5; +a +1 +2 +INSERT INTO t7 VALUES(0), (0); +SELECT * FROM t7; +a +1 +2 +INSERT INTO t9 VALUES(0), (0); +SELECT * FROM t9; +a +1 +2 +# Scenario 7: Test explicit rename table won't change the counter +set global innodb_flush_log_at_trx_commit=1; +RENAME TABLE t9 to t19; +INSERT INTO t19 VALUES(0), (0); +SELECT * FROM t19; +a +1 +2 +3 +4 +DELETE FROM t19 WHERE a = 4; +# Kill and restart +RENAME TABLE t19 to t9; +INSERT INTO t9 VALUES(0), (0); +SELECT * FROM t9; +a +1 +2 +3 +5 +6 +TRUNCATE TABLE 
t9; +INSERT INTO t9 VALUES(0), (0); +SELECT * FROM t9; +a +1 +2 +# Scenario 8: Test ALTER TABLE operations +INSERT INTO t3 VALUES(0), (0), (100), (200), (1000); +SELECT * FROM t3; +a +1 +2 +3 +4 +100 +200 +1000 +DELETE FROM t3 WHERE a > 300; +SELECT MAX(a) AS `Expect 200` FROM t3; +Expect 200 +200 +# This will not change the counter to 150, but to 201, which is the next +# of current max counter in the table +ALTER TABLE t3 AUTO_INCREMENT = 150; +SHOW CREATE TABLE t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=201 DEFAULT CHARSET=latin1 +INSERT INTO t3 VALUES(0); +SELECT MAX(a) AS `Expect 201` FROM t3; +Expect 201 +201 +# This will change the counter to 500, which is bigger than any counter +# in the table +ALTER TABLE t3 AUTO_INCREMENT = 500; +SHOW CREATE TABLE t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=500 DEFAULT CHARSET=latin1 +INSERT INTO t3 VALUES(0); +SELECT MAX(a) AS `Expect 500` FROM t3; +Expect 500 +500 +TRUNCATE TABLE t3; +ALTER TABLE t3 AUTO_INCREMENT = 100; +SHOW CREATE TABLE t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=100 DEFAULT CHARSET=latin1 +INSERT INTO t3 VALUES(0), (0); +SELECT * FROM t3; +a +100 +101 +INSERT INTO t3 VALUES(150), (180); +UPDATE t3 SET a = 200 WHERE a = 150; +INSERT INTO t3 VALUES(220); +# This still fails to set to 120, but just 221 +ALTER TABLE t3 AUTO_INCREMENT = 120; +SHOW CREATE TABLE t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=221 DEFAULT CHARSET=latin1 +INSERT INTO t3 VALUES(0); +SELECT MAX(a) AS `Expect 221` FROM t3; +Expect 221 +221 +DELETE FROM t3 WHERE a > 120; +ALTER TABLE t3 AUTO_INCREMENT = 120; +SHOW CREATE TABLE t3; +Table 
Create Table +t3 CREATE TABLE `t3` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=120 DEFAULT CHARSET=latin1 +# Kill and restart +INSERT INTO t3 VALUES(0); +SELECT MAX(a) AS `Expect 120` FROM t3; +Expect 120 +120 +set global innodb_flush_log_at_trx_commit=1; +INSERT INTO t3 VALUES(0), (0), (200), (210); +# Test the different algorithms in ALTER TABLE +CREATE TABLE t_inplace LIKE t3; +INSERT INTO t_inplace SELECT * FROM t3; +SELECT * FROM t_inplace; +a +100 +101 +120 +121 +122 +200 +210 +SHOW CREATE TABLE t_inplace; +Table Create Table +t_inplace CREATE TABLE `t_inplace` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=211 DEFAULT CHARSET=latin1 +# This will keep the autoinc counter +ALTER TABLE t_inplace AUTO_INCREMENT = 250, ALGORITHM = INPLACE; +# We expect the counter to be 250 +SHOW CREATE TABLE t_inplace; +Table Create Table +t_inplace CREATE TABLE `t_inplace` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=250 DEFAULT CHARSET=latin1 +# This should keep the autoinc counter as well +ALTER TABLE t_inplace ADD COLUMN b INT, ALGORITHM = INPLACE; +# We expect the counter to be 250 +SHOW CREATE TABLE t_inplace; +Table Create Table +t_inplace CREATE TABLE `t_inplace` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=250 DEFAULT CHARSET=latin1 +DELETE FROM t_inplace WHERE a > 150; +SELECT * FROM t_inplace; +a b +100 NULL +101 NULL +120 NULL +121 NULL +122 NULL +# This should reset the autoinc counter to the one specified +# Since it's smaller than current one but bigger than existing +# biggest counter in the table +ALTER TABLE t_inplace AUTO_INCREMENT = 180, ALGORITHM = INPLACE; +# We expect the counter to be 180 +SHOW CREATE TABLE t_inplace; +Table Create Table +t_inplace CREATE TABLE `t_inplace` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + `b` 
int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=180 DEFAULT CHARSET=latin1 +# This should reset the autoinc counter to the next value of +# current max counter in the table, since the specified value +# is smaller than the existing biggest value(50 < 123) +ALTER TABLE t_inplace DROP COLUMN b, AUTO_INCREMENT = 50, ALGORITHM = INPLACE; +# We expect the counter to be 123 +SHOW CREATE TABLE t_inplace; +Table Create Table +t_inplace CREATE TABLE `t_inplace` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=123 DEFAULT CHARSET=latin1 +INSERT INTO t_inplace VALUES(0), (0); +SELECT MAX(a) AS `Expect 124` FROM t_inplace; +Expect 124 +124 +OPTIMIZE TABLE t_inplace; +Table Op Msg_type Msg_text +test.t_inplace optimize note Table does not support optimize, doing recreate + analyze instead +test.t_inplace optimize status OK +SHOW CREATE TABLE t_inplace; +Table Create Table +t_inplace CREATE TABLE `t_inplace` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=125 DEFAULT CHARSET=latin1 +# restart +# We expect the counter to still be 125 +SHOW CREATE TABLE t_inplace; +Table Create Table +t_inplace CREATE TABLE `t_inplace` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=125 DEFAULT CHARSET=latin1 +DELETE FROM t_inplace WHERE a >= 123; +CREATE UNIQUE INDEX idx_aa ON t_inplace(a); +# restart +INSERT INTO t_inplace VALUES(0), (0); +SELECT MAX(a) AS `Expect 126` FROM t_inplace; +Expect 126 +126 +DROP TABLE t_inplace; +CREATE TABLE t_copy LIKE t3; +INSERT INTO t_copy SELECT * FROM t3; +SELECT * FROM t_copy; +a +100 +101 +120 +121 +122 +200 +210 +SHOW CREATE TABLE t_copy; +Table Create Table +t_copy CREATE TABLE `t_copy` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=211 DEFAULT CHARSET=latin1 +# This will keep the autoinc counter +ALTER TABLE t_copy AUTO_INCREMENT = 250, 
ALGORITHM = COPY; +# We expect the counter to be 250 +SHOW CREATE TABLE t_copy; +Table Create Table +t_copy CREATE TABLE `t_copy` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=250 DEFAULT CHARSET=latin1 +# This should keep the autoinc counter as well +ALTER TABLE t_copy ADD COLUMN b INT, ALGORITHM = COPY; +# We expect the counter to be 250 +SHOW CREATE TABLE t_copy; +Table Create Table +t_copy CREATE TABLE `t_copy` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=250 DEFAULT CHARSET=latin1 +DELETE FROM t_copy WHERE a > 150; +SELECT * FROM t_copy; +a b +100 NULL +101 NULL +120 NULL +121 NULL +122 NULL +# This should reset the autoinc counter to the one specified +# Since it's smaller than current one but bigger than existing +# biggest counter in the table +ALTER TABLE t_copy AUTO_INCREMENT = 180, ALGORITHM = COPY; +# We expect the counter to be 180 +SHOW CREATE TABLE t_copy; +Table Create Table +t_copy CREATE TABLE `t_copy` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=180 DEFAULT CHARSET=latin1 +# This should reset the autoinc counter to the next value of +# current max counter in the table, since the specified value +# is smaller than the existing biggest value(50 < 123) +ALTER TABLE t_copy DROP COLUMN b, AUTO_INCREMENT = 50, ALGORITHM = COPY; +# We expect the counter to be 123 +SHOW CREATE TABLE t_copy; +Table Create Table +t_copy CREATE TABLE `t_copy` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=123 DEFAULT CHARSET=latin1 +INSERT INTO t_copy VALUES(0), (0); +SELECT MAX(a) AS `Expect 124` FROM t_copy; +Expect 124 +124 +OPTIMIZE TABLE t_copy; +Table Op Msg_type Msg_text +test.t_copy optimize note Table does not support optimize, doing recreate + analyze instead +test.t_copy optimize status OK +SHOW CREATE 
TABLE t_copy; +Table Create Table +t_copy CREATE TABLE `t_copy` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=125 DEFAULT CHARSET=latin1 +# restart +# We expect the counter to still be 125 +SHOW CREATE TABLE t_copy; +Table Create Table +t_copy CREATE TABLE `t_copy` ( + `a` smallint(6) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=InnoDB AUTO_INCREMENT=125 DEFAULT CHARSET=latin1 +DELETE FROM t_copy WHERE a >= 123; +CREATE UNIQUE INDEX idx_aa ON t_copy(a); +# restart +INSERT INTO t_copy VALUES(0), (0); +SELECT MAX(a) AS `Expect 126` FROM t_copy; +Expect 126 +126 +DROP TABLE t_copy; +# Scenario 9: Test the sql_mode = NO_AUTO_VALUE_ON_ZERO +CREATE TABLE t30 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b INT, key(b)) ENGINE = InnoDB; +set SQL_MODE = NO_AUTO_VALUE_ON_ZERO; +Warnings: +Warning 3090 Changing sql mode 'NO_AUTO_CREATE_USER' is deprecated. It will be removed in a future release. +INSERT INTO t30 VALUES(NULL, 1), (200, 2), (0, 3); +INSERT INTO t30(b) VALUES(4), (5), (6), (7); +SELECT * FROM t30 ORDER BY b; +a b +1 1 +200 2 +0 3 +201 4 +202 5 +203 6 +204 7 +ALTER TABLE t30 MODIFY b MEDIUMINT; +SELECT * FROM t30 ORDER BY b; +a b +1 1 +200 2 +0 3 +201 4 +202 5 +203 6 +204 7 +set global innodb_flush_log_at_trx_commit=1; +CREATE TABLE t31 (a INT) ENGINE = InnoDB; +INSERT INTO t31 VALUES(1), (2); +ALTER TABLE t31 ADD b INT AUTO_INCREMENT PRIMARY KEY; +INSERT INTO t31 VALUES(3, 0), (4, NULL), (5, NULL); +INSERT INTO t31 VALUES(6, 0); +ERROR 23000: Duplicate entry '0' for key 'PRIMARY' +SELECT * FROM t31; +a b +3 0 +1 1 +2 2 +4 3 +5 4 +# Kill and restart +# This will not insert 0 +INSERT INTO t31(a) VALUES(6), (0); +SELECT * FROM t31; +a b +3 0 +1 1 +2 2 +4 3 +5 4 +6 5 +0 6 +DROP TABLE t31; +set SQL_MODE = NO_AUTO_VALUE_ON_ZERO; +Warnings: +Warning 3090 Changing sql mode 'NO_AUTO_CREATE_USER' is deprecated. It will be removed in a future release. 
+DELETE FROM t30 WHERE a = 0; +UPDATE t30 set a = 0 where b = 5; +SELECT * FROM t30 ORDER BY b; +a b +1 1 +200 2 +201 4 +0 5 +203 6 +204 7 +DELETE FROM t30 WHERE a = 0; +UPDATE t30 SET a = NULL WHERE b = 6; +Warnings: +Warning 1048 Column 'a' cannot be null +UPDATE t30 SET a = 300 WHERE b = 7; +SELECT * FROM t30 ORDER BY b; +a b +1 1 +200 2 +201 4 +0 6 +300 7 +SET SQL_MODE = 0; +# Scenario 10: Rollback would not rollback the counter +CREATE TABLE t32 ( +a BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB; +INSERT INTO t32 VALUES(0), (0); +set global innodb_flush_log_at_trx_commit=1; +START TRANSACTION; +INSERT INTO t32 VALUES(0), (0); +SELECT MAX(a) AS `Expect 4` FROM t32; +Expect 4 +4 +DELETE FROM t32 WHERE a >= 2; +ROLLBACK; +# Kill and restart +SELECT MAX(a) AS `Expect 2` FROM t32; +Expect 2 +2 +INSERT INTO t32 VALUES(0), (0); +SELECT MAX(a) AS `Expect 6` FROM t32; +Expect 6 +6 +# Scenario 11: Test duplicate primary key/secondary key will not stop +# increasing the counter +CREATE TABLE t33 ( +a BIGINT NOT NULL PRIMARY KEY, +b BIGINT NOT NULL AUTO_INCREMENT, +KEY(b)) ENGINE = InnoDB; +INSERT INTO t33 VALUES(1, NULL); +INSERT INTO t33 VALUES(2, NULL); +INSERT INTO t33 VALUES(2, NULL); +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +INSERT INTO t33 VALUES(3, NULL); +SELECT MAX(b) AS `Expect 4` FROM t33; +Expect 4 +4 +TRUNCATE TABLE t33; +INSERT INTO t33 VALUES(1, NULL); +INSERT INTO t33 VALUES(2, NULL); +# To make sure the redo logs for autoinc counter must have been flushed +# after commit, we start the transaction manually and do some other +# operations to generate some redo logs. 
Or else the redo logs in the +# single INSERT statement would not be flushed since the INSERT would fail +set global innodb_flush_log_at_trx_commit=1; +START TRANSACTION; +UPDATE t33 SET a = 10 WHERE a = 1; +INSERT INTO t33 VALUES(2, NULL); +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +COMMIT; +# Kill and restart +INSERT INTO t33 VALUES(3, NULL); +SELECT MAX(b) AS `Expect 4` FROM t33; +Expect 4 +4 +DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t30, t32, t33; diff --git a/mysql-test/suite/innodb/r/autoinc_persist_debug.result b/mysql-test/suite/innodb/r/autoinc_persist_debug.result new file mode 100644 index 000000000000..96b615aebaf1 --- /dev/null +++ b/mysql-test/suite/innodb/r/autoinc_persist_debug.result @@ -0,0 +1,240 @@ +# Pre-create some tables, so that DDTableBuffer is not empty +set global innodb_flush_log_at_trx_commit=1; +CREATE TABLE t1(a TINYINT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t1 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31); +SELECT * FROM t1; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +CREATE TABLE t2(a TINYINT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t2 VALUES(-5); +ERROR 22003: Out of range value for column 'a' at row 1 +INSERT INTO t2 VALUES(0), (0), (0), (0), (8), (10), (0), +(20), (30), (31); +SELECT * FROM t2; +a +1 +2 +3 +4 +8 +10 +11 +20 +30 +31 +CREATE TABLE t3(a SMALLINT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t3 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31), (1024), (4096); +SELECT * FROM t3; +a +-10 +-1 +1 +2 +3 +4 +5 +20 +30 +31 +1024 +4096 +# restart +set global innodb_flush_log_at_trx_commit=1; +# Scenario 1: Create two new tables, with simple DMLs on them, +# and force a checkpoint, then some other DMLs on them +CREATE TABLE t4(a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b INT, KEY b(b)) ENGINE = InnoDB; +CREATE TABLE t5(a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b INT, KEY b(b)) ENGINE = InnoDB; +SET @start_global_value = 
@@global.innodb_log_checkpoint_now; +SELECT @start_global_value; +@start_global_value +0 +# Invoke a checkpoint, which would persist the autoinc counters +# of t4 and t5 into DD Table Buffer +set global innodb_log_checkpoint_now=ON; +INSERT INTO t4 VALUES(0, 1), (0, 2), (10, 3), (12, 4), (0, 5); +SELECT * FROM t4; +a b +1 1 +2 2 +10 3 +12 4 +13 5 +SHOW CREATE TABLE t4; +Table Create Table +t4 CREATE TABLE `t4` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `b` (`b`) +) ENGINE=InnoDB AUTO_INCREMENT=14 DEFAULT CHARSET=latin1 +INSERT INTO t5 VALUES(0, 1), (100, 2), (50, 3); +INSERT INTO t5 VALUES(0, 4), (109, 5), (0, 6); +SELECT * FROM t5; +a b +1 1 +100 2 +50 3 +101 4 +109 5 +110 6 +SHOW CREATE TABLE t5; +Table Create Table +t5 CREATE TABLE `t5` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `b` (`b`) +) ENGINE=InnoDB AUTO_INCREMENT=111 DEFAULT CHARSET=latin1 +set global innodb_log_checkpoint_now=ON; +SELECT @start_global_value; +@start_global_value +0 +SET @@global.innodb_log_checkpoint_now = @start_global_value; +SELECT @@global.innodb_log_checkpoint_now; +@@global.innodb_log_checkpoint_now +0 +START TRANSACTION; +# This should insert 14, 15 +INSERT INTO t4 VALUES(0, 10), (0, 20); +# This should insert 111, 112 +INSERT INTO t5 VALUES(0, 10), (0, 20); +COMMIT; +START TRANSACTION; +# This should insert 16, 17 +INSERT INTO t4 VALUES(0, 10), (0, 20); +# This should insert 113, 114 +INSERT INTO t5 VALUES(0, 10), (0, 20); +# This will not rollback the counter +ROLLBACK; +# Kill and restart +SET GLOBAL innodb_flush_log_at_trx_commit=1; +SELECT MAX(a) AS `Expect 15` FROM t4; +Expect 15 +15 +SELECT MAX(a) AS `Expect 112` FROM t5; +Expect 112 +112 +SHOW CREATE TABLE t4; +Table Create Table +t4 CREATE TABLE `t4` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `b` (`b`) +) ENGINE=InnoDB AUTO_INCREMENT=18 DEFAULT CHARSET=latin1 +SHOW 
CREATE TABLE t5; +Table Create Table +t5 CREATE TABLE `t5` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `b` (`b`) +) ENGINE=InnoDB AUTO_INCREMENT=115 DEFAULT CHARSET=latin1 +# Scenario 2: Mix the persisted autoinc counter and corrupted bits +TRUNCATE TABLE t1; +TRUNCATE TABLE t2; +TRUNCATE TABLE t3; +set debug = "+d, dict_set_index_corrupted"; +CHECK TABLE t4; +Table Op Msg_type Msg_text +test.t4 check Warning InnoDB: The B-tree of index b is corrupted. +test.t4 check error Corrupt +set debug = "-d, dict_set_index_corrupted"; +SELECT b FROM t4 WHERE b > 3; +ERROR HY000: Index b is corrupted +# This should insert 18, 19 +INSERT INTO t4 VALUES(0, 6), (0, 7); +SELECT MAX(a) AS `Expect 19` FROM t4; +Expect 19 +19 +INSERT INTO t1 VALUES(0), (0); +INSERT INTO t2 VALUES(0), (0); +INSERT INTO t3 VALUES(0), (0); +# This should insert 115, 116 +INSERT INTO t5 VALUES(0, 10), (0, 11); +# Kill and restart +set global innodb_flush_log_at_trx_commit=1; +CHECK TABLE t4; +Table Op Msg_type Msg_text +test.t4 check Warning InnoDB: Index b is marked as corrupted +test.t4 check error Corrupt +SELECT b FROM t4 WHERE b > 3; +ERROR HY000: Index b is corrupted +# This should fix the corrupted index +DROP INDEX b ON t4; +CREATE INDEX b ON t4(b); +CHECK TABLE t4; +Table Op Msg_type Msg_text +test.t4 check status OK +INSERT INTO t4 VALUES(0, 8), (0, 9), (30, 10); +SET @start_global_value = @@global.innodb_log_checkpoint_now; +SELECT @start_global_value; +@start_global_value +0 +set global innodb_log_checkpoint_now=ON; +SELECT MAX(a) AS `Expect 116` FROM t5; +Expect 116 +116 +DELETE FROM t5; +set debug = "+d, dict_set_index_corrupted"; +CHECK TABLE t5; +Table Op Msg_type Msg_text +test.t5 check Warning InnoDB: The B-tree of index b is corrupted. 
+test.t5 check error Corrupt +set debug = "-d, dict_set_index_corrupted"; +SELECT @start_global_value; +@start_global_value +0 +SET @@global.innodb_log_checkpoint_now = @start_global_value; +SELECT @@global.innodb_log_checkpoint_now; +@@global.innodb_log_checkpoint_now +0 +# This should insert 117, 118 +INSERT INTO t5 VALUES(0, 10), (0, 20); +# Kill and restart +set global innodb_flush_log_at_trx_commit=1; +SELECT b FROM t5 WHERE b > 3; +ERROR HY000: Index b is corrupted +# This should fix the corrupted index +DROP INDEX b ON t5; +CREATE INDEX b ON t5(b); +CHECK TABLE t5; +Table Op Msg_type Msg_text +test.t5 check status OK +# This should insert 31, 32 +INSERT INTO t4 VALUES(0, 11), (0, 12); +SELECT MAX(a) AS `Expect 32` FROM t4; +Expect 32 +32 +# restart +set global innodb_flush_log_at_trx_commit=1; +# This should insert 119, 120 +INSERT INTO t5 VALUES(0, 12), (0, 13); +SELECT MAX(a) AS `Expect 120` FROM t5; +Expect 120 +120 +CREATE TABLE t6 ( +id INT AUTO_INCREMENT PRIMARY KEY, +name VARCHAR(100) +) ENGINE=InnoDB AUTO_INCREMENT=99; +SET GLOBAL debug="+d,innodb_evict_autoinc_table"; +# Evict table from dictionary cache +SET GLOBAL innodb_ft_aux_table="test/t6"; +ERROR 42000: Variable 'innodb_ft_aux_table' can't be set to the value of 'test/t6' +SET GLOBAL debug="-d,innodb_evict_autoinc_table"; +INSERT INTO t6(name) VALUES('mysql'); +SELECT * FROM t6; +id name +99 mysql +DROP TABLE t1, t2, t3, t4, t5, t6; diff --git a/mysql-test/suite/innodb/r/innodb-wl6742.result b/mysql-test/suite/innodb/r/innodb-wl6742.result index 835738dbe4ef..8e3ab44e2a20 100644 --- a/mysql-test/suite/innodb/r/innodb-wl6742.result +++ b/mysql-test/suite/innodb/r/innodb-wl6742.result @@ -1790,7 +1790,7 @@ a b aa bb 2 inserted by client 1 2 inserted by client 1 3 inserted by client 1 3 inserted by client 1 4 inserted by client 1 4 inserted by client 1 -6 inserted by client 1 NULL NULL +11 inserted by client 1 NULL NULL UPDATE t5 SET a = a + 100; SELECT * FROM t5; a b aa bb @@ -1799,8 +1799,8 @@ 
a b aa bb 102 updated by client 2 2 inserted by client 1 104 inserted by client 1 4 inserted by client 1 105 inserted by client 2 NULL NULL -106 inserted by client 1 NULL NULL 110 inserted by client 1 1 inserted by client 1 +111 inserted by client 1 NULL NULL SELECT COUNT(*) FROM t5; COUNT(*) 7 @@ -1830,8 +1830,8 @@ a b aa bb 102 updated by client 2 2 inserted by client 1 104 inserted by client 1 4 inserted by client 1 105 inserted by client 2 NULL NULL -106 inserted by client 1 NULL NULL 110 inserted by client 1 1 inserted by client 1 +111 inserted by client 1 NULL NULL SELECT COUNT(*) FROM t5; COUNT(*) 5 diff --git a/mysql-test/suite/innodb/r/innodb.result b/mysql-test/suite/innodb/r/innodb.result index b012f9141e90..26d89aed9d62 100644 --- a/mysql-test/suite/innodb/r/innodb.result +++ b/mysql-test/suite/innodb/r/innodb.result @@ -2587,11 +2587,17 @@ drop table t1; create table t1 (a int not null auto_increment primary key, val int) engine=InnoDB; insert into t1 (val) values (1); update t1 set a=2 where a=1; -insert into t1 (val) values (1); +insert into t1 (val) values (3); +select * from t1; +a val +2 1 +3 3 +insert into t1 values (2, 2); ERROR 23000: Duplicate entry '2' for key 'PRIMARY' select * from t1; a val 2 1 +3 3 drop table t1; CREATE TABLE t1 (GRADE DECIMAL(4) NOT NULL, PRIMARY KEY (GRADE)) ENGINE=INNODB; INSERT INTO t1 (GRADE) VALUES (151),(252),(343); diff --git a/mysql-test/suite/innodb/r/innodb_misc1.result b/mysql-test/suite/innodb/r/innodb_misc1.result index 813cd08e3db8..8054291a35e9 100644 --- a/mysql-test/suite/innodb/r/innodb_misc1.result +++ b/mysql-test/suite/innodb/r/innodb_misc1.result @@ -44,11 +44,13 @@ drop table t1; create table t1 (a int not null auto_increment primary key, val int) engine=InnoDB; insert into t1 (val) values (1); update t1 set a=2 where a=1; -insert into t1 (val) values (1); +insert into t1 values (2, 2); ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +insert into t1 (val) value (3); select * from t1; a val 2 1 
+3 3 drop table t1; CREATE TABLE t1 (GRADE DECIMAL(4) NOT NULL, PRIMARY KEY (GRADE)) ENGINE=INNODB; INSERT INTO t1 (GRADE) VALUES (151),(252),(343); diff --git a/mysql-test/suite/innodb/t/autoinc_persist.test b/mysql-test/suite/innodb/t/autoinc_persist.test new file mode 100644 index 000000000000..a26ccfc2fdfd --- /dev/null +++ b/mysql-test/suite/innodb/t/autoinc_persist.test @@ -0,0 +1,508 @@ +--source include/have_innodb.inc +--source include/not_embedded.inc + +--echo # This test case is introduced to test the persisted autoinc, basic +--echo # autoinc features are not the main part of this one. + +--echo # Pre-create several tables + +CREATE TABLE t1(a TINYINT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t1 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31); +SELECT * FROM t1; + +CREATE TABLE t2(a TINYINT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +--error ER_WARN_DATA_OUT_OF_RANGE +INSERT INTO t2 VALUES(-5); +INSERT INTO t2 VALUES(0), (0), (0), (0), (8), (10), (0), +(20), (30), (31); +SELECT * FROM t2; + +CREATE TABLE t3(a SMALLINT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t3 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31), (1024), (4096); +SELECT * FROM t3; + +CREATE TABLE t4(a SMALLINT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +--error ER_WARN_DATA_OUT_OF_RANGE +INSERT INTO t4 VALUES(-5); +INSERT INTO t4 VALUES(0), (0), (0), (0), (8), (10), (0), +(20), (30), (31), (1024), (4096); +SELECT * FROM t4; + +CREATE TABLE t5(a MEDIUMINT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t5 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31), (1000000), (1000005); +SELECT * FROM t5; + +CREATE TABLE t6(a MEDIUMINT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +--error ER_WARN_DATA_OUT_OF_RANGE +INSERT INTO t6 VALUES(-5); +INSERT INTO t6 VALUES(0), (0), (0), (0), (8), (10), (0), +(20), (30), (31), (1000000), (1000005); +SELECT * FROM t6; + +CREATE TABLE t7(a INT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t7 
VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31), (100000000), (100000008); +SELECT * FROM t7; + +CREATE TABLE t8(a INT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +--error ER_WARN_DATA_OUT_OF_RANGE +INSERT INTO t8 VALUES(-5); +INSERT INTO t8 VALUES(0), (0), (0), (0), (8), (10), (0), +(20), (30), (31), (100000000), (100000008); +SELECT * FROM t8; + +CREATE TABLE t9(a BIGINT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t9 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31), (100000000000), (100000000006); +SELECT * FROM t9; + +CREATE TABLE t10(a BIGINT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +--error ER_WARN_DATA_OUT_OF_RANGE +INSERT INTO t10 VALUES(-5); +INSERT INTO t10 VALUES(0), (0), (0), (0), (8), (10), (0), +(20), (30), (31), (100000000000), (100000000006); +SELECT * FROM t10; + +CREATE TABLE t11(a FLOAT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t11 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31); +SELECT * FROM t11; + +# Since autoinc counter is persisted by redo logs, we don't want to +# lose them on kill and restart, so to make the result after restart stable. 
+set global innodb_flush_log_at_trx_commit=1; + +CREATE TABLE t12(a DOUBLE AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t12 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31); +SELECT * FROM t12; + +CREATE TABLE t13(a INT AUTO_INCREMENT PRIMARY KEY) ENGINE = InnoDB, +AUTO_INCREMENT = 1234; + +--echo # Scenario 1: Normal restart, to test if the counters are persisted +--source include/restart_mysqld.inc + +--echo # We expect these results should be equal to above SELECTs +SELECT * FROM t1; +SELECT * FROM t2; +SELECT * FROM t3; +SELECT * FROM t4; +SELECT * FROM t5; +SELECT * FROM t6; +SELECT * FROM t7; +SELECT * FROM t8; +SELECT * FROM t9; +SELECT * FROM t10; +SELECT * FROM t11; +SELECT * FROM t12; + +SELECT * FROM t13; +SHOW CREATE TABLE t13; +INSERT INTO t13 VALUES(0); +SELECT a AS `Expect 1234` FROM t13; + +--echo # Scenario 2: Delete some values, to test the counters should not be the +--echo # one which is the largest in current table + +set global innodb_flush_log_at_trx_commit=1; + +DELETE FROM t1 WHERE a > 30; +SELECT MAX(a) AS `Expect 30` FROM t1; +DELETE FROM t3 WHERE a > 2000; +SELECT MAX(a) AS `Expect 2000` FROM t3; +DELETE FROM t5 WHERE a > 1000000; +SELECT MAX(a) AS `Expect 1000000` FROM t5; +DELETE FROM t7 WHERE a > 100000000; +SELECT MAX(a) AS `Expect 100000000` FROM t7; +DELETE FROM t9 WHERE a > 100000000000; +SELECT MAX(a) AS `Expect 100000000000` FROM t9; + +--source include/restart_mysqld.inc + +INSERT INTO t1 VALUES(0), (0); +SELECT MAX(a) AS `Expect 33` FROM t1; +INSERT INTO t3 VALUES(0), (0); +SELECT MAX(a) AS `Expect 4098` FROM t3; +INSERT INTO t5 VALUES(0), (0); +SELECT MAX(a) AS `Expect 1000007` FROM t5; +INSERT INTO t7 VALUES(0), (0); +SELECT MAX(a) AS `Expect 100000010` FROM t7; +INSERT INTO t9 VALUES(0), (0); +SELECT MAX(a) AS `Expect 100000000008` FROM t9; + +--echo # Scenario 3: Insert some bigger counters, the next counter should start +--echo # from there + +INSERT INTO t1 VALUES(40), (0); +INSERT INTO t1 VALUES(42), 
(0); +SELECT a AS `Expect 43, 42` FROM t1 ORDER BY a DESC LIMIT 4; +INSERT INTO t3 VALUES(5000), (0); +INSERT INTO t3 VALUES(5010), (0); +SELECT a AS `Expect 5011, 5010` FROM t3 ORDER BY a DESC LIMIT 4; +INSERT INTO t5 VALUES(1000010), (0); +INSERT INTO t5 VALUES(1000020), (0); +SELECT a AS `Expect 1000021, 1000020` FROM t5 ORDER BY a DESC LIMIT 4; +INSERT INTO t7 VALUES(100000020), (0); +INSERT INTO t7 VALUES(100000030), (0); +SELECT a AS `Expect 100000031, 100000030` FROM t7 ORDER BY a DESC LIMIT 4; +INSERT INTO t9 VALUES(100000000010), (0); +INSERT INTO t9 VALUES(100000000020), (0); +SELECT a AS `Expect 100000000021, 100000000020` FROM t9 ORDER BY a DESC LIMIT 4; + +--echo # Scenario 4: Update some values, to test the counters should be updated +--echo # to the bigger value, but not smaller value. + +INSERT INTO t1 VALUES(50), (55); +# Updating to bigger value will update the auto-increment counter +UPDATE t1 SET a = 105 WHERE a = 5; +# Updating to smaller value will not update the counter +UPDATE t1 SET a = 100 WHERE a = 55; +--echo # This should insert 102, 106, 107, and make next counter 109. +INSERT INTO t1 VALUES(102), (0), (0); +SELECT a AS `Expect 107, 106` FROM t1 ORDER BY a DESC LIMIT 2; +DELETE FROM t1 WHERE a > 105; +INSERT INTO t1 VALUES(0); +SELECT MAX(a) AS `Expect 109` FROM t1; + +--echo # Test the same things on t3, t5, t7, t9, to test if DDTableBuffer would +--echo # be updated accordingly + +INSERT INTO t3 VALUES(60), (65); +# Updating to bigger value will update the auto-increment counter +UPDATE t3 SET a = 6005 WHERE a = 5; +# Updating to smaller value will not update the counter +UPDATE t3 SET a = 6000 WHERE a = 60; +--echo # This should insert 6002, 6006, 6007, and make next counter 6009. 
+INSERT INTO t3 VALUES(6002), (0), (0); +SELECT a AS `Expect 6007, 6006` FROM t3 ORDER BY a DESC LIMIT 2; +DELETE FROM t3 WHERE a > 6005; +INSERT INTO t3 VALUES(0); +SELECT MAX(a) AS `Expect 6009` FROM t3; + +INSERT INTO t5 VALUES(100), (200); +# Updating to bigger value will update the auto-increment counter +UPDATE t5 SET a = 1000105 WHERE a = 5; +# Updating to smaller value will not update the counter +UPDATE t5 SET a = 1000100 WHERE a = 100; +--echo # This should insert 1000102, 1000106, 1000107, and make next counter +--echo # 1000109. +INSERT INTO t5 VALUES(1000102), (0), (0); +SELECT a AS `Expect 1000107, 1000106` FROM t5 ORDER BY a DESC LIMIT 2; +DELETE FROM t5 WHERE a > 1000105; +INSERT INTO t5 VALUES(0); +SELECT MAX(a) AS `Expect 1000109` FROM t5; + +INSERT INTO t7 VALUES(100), (200); +# Updating to bigger value will update the auto-increment counter +UPDATE t7 SET a = 100000105 WHERE a = 5; +# Updating to smaller value will not update the counter +UPDATE t7 SET a = 100000100 WHERE a = 100; +--echo # This should insert 100000102, 1100000106, 100000107, and make next +--echo # counter 100000109. +INSERT INTO t7 VALUES(100000102), (0), (0); +SELECT a AS `Expect 100000107, 100000106` FROM t7 ORDER BY a DESC LIMIT 2; +DELETE FROM t7 WHERE a > 100000105; +INSERT INTO t7 VALUES(0); +SELECT MAX(a) AS `Expect 100000109` FROM t7; + +set global innodb_flush_log_at_trx_commit=1; + +INSERT INTO t9 VALUES(100), (200); +# Updating to bigger value will update the auto-increment counter +UPDATE t9 SET a = 100000000105 WHERE a = 5; +# Updating to smaller value will not update the counter +UPDATE t9 SET a = 100000000100 WHERE a = 100; +--echo # This should insert 100000000102, 100000000106, 100000000107, and make +--echo # next counter 100000000109. 
+INSERT INTO t9 VALUES(100000000102), (0), (0); +SELECT a AS `Expect 100000000107, 100000000106` FROM t9 ORDER BY a DESC LIMIT 2; +DELETE FROM t9 WHERE a > 100000000105; +INSERT INTO t9 VALUES(0); +SELECT MAX(a) AS `Expect 100000000109` FROM t9; + +--source include/restart_mysqld.inc + +INSERT INTO t1 VALUES(0), (0); +SELECT a AS `Expect 110, 111` FROM t1 ORDER BY a DESC LIMIT 2; + +INSERT INTO t3 VALUES(0), (0); +SELECT a AS `Expect 6010, 6011` FROM t3 ORDER BY a DESC LIMIT 2; + +INSERT INTO t5 VALUES(0), (0); +SELECT a AS `Expect 1100111, 1100110` FROM t5 ORDER BY a DESC LIMIT 2; + +INSERT INTO t7 VALUES(0), (0); +SELECT a AS `Expect 100000111, 100000110` FROM t7 ORDER BY a DESC LIMIT 2; + +INSERT INTO t9 VALUES(0), (0); +SELECT a AS `Expect 100000000111, 100000000110` FROM t9 ORDER BY a DESC LIMIT 2; + +--echo # Scenario 5: Test kill the server + +INSERT INTO t1 VALUES(125); +DELETE FROM t1 WHERE a = 125; + +INSERT INTO t3 VALUES(6100); +DELETE FROM t3 WHERE a = 6100; + +INSERT INTO t5 VALUES(1100200); +DELETE FROM t5 WHERE a = 1100200; + +INSERT INTO t7 VALUES(100000200); +DELETE FROM t7 WHERE a = 100000200; + +set global innodb_flush_log_at_trx_commit=1; + +INSERT INTO t9 VALUES(100000000200); +DELETE FROM t9 WHERE a = 100000000200; + +--source include/kill_and_restart_mysqld.inc + +INSERT INTO t1 VALUES(0); +SELECT a AS `Expect 126` FROM t1 ORDER BY a DESC LIMIT 1; + +INSERT INTO t3 VALUES(0); +SELECT a AS `Expect 6101` FROM t3 ORDER BY a DESC LIMIT 1; + +INSERT INTO t5 VALUES(0); +SELECT a AS `Expect 1100201` FROM t5 ORDER BY a DESC LIMIT 1; + +INSERT INTO t7 VALUES(0); +SELECT a AS `Expect 100000201` FROM t7 ORDER BY a DESC LIMIT 1; + +INSERT INTO t9 VALUES(0); +SELECT a AS `Expect 100000000201` FROM t9 ORDER BY a DESC LIMIT 1; + +--echo # Scenario 6: Test truncate will reset the counters to 0 + +TRUNCATE TABLE t1; +TRUNCATE TABLE t3; +TRUNCATE TABLE t5; +TRUNCATE TABLE t7; +TRUNCATE TABLE t9; + +INSERT INTO t1 VALUES(0), (0); +SELECT * FROM t1; + +INSERT 
INTO t3 VALUES(0), (0); +SELECT * FROM t3; + +INSERT INTO t5 VALUES(0), (0); +SELECT * FROM t5; + +INSERT INTO t7 VALUES(0), (0); +SELECT * FROM t7; + +INSERT INTO t9 VALUES(0), (0); +SELECT * FROM t9; + +set global innodb_flush_log_at_trx_commit=1; + +TRUNCATE TABLE t1; +TRUNCATE TABLE t3; +TRUNCATE TABLE t5; +TRUNCATE TABLE t7; +TRUNCATE TABLE t9; + +--source include/kill_and_restart_mysqld.inc + +INSERT INTO t1 VALUES(0), (0); +SELECT * FROM t1; + +INSERT INTO t3 VALUES(0), (0); +SELECT * FROM t3; + +INSERT INTO t5 VALUES(0), (0); +SELECT * FROM t5; + +INSERT INTO t7 VALUES(0), (0); +SELECT * FROM t7; + +INSERT INTO t9 VALUES(0), (0); +SELECT * FROM t9; + +--echo # Scenario 7: Test explicit rename table won't change the counter + +set global innodb_flush_log_at_trx_commit=1; + +RENAME TABLE t9 to t19; +INSERT INTO t19 VALUES(0), (0); +SELECT * FROM t19; +DELETE FROM t19 WHERE a = 4; + +--source include/kill_and_restart_mysqld.inc + +RENAME TABLE t19 to t9; +INSERT INTO t9 VALUES(0), (0); +SELECT * FROM t9; + +TRUNCATE TABLE t9; + +INSERT INTO t9 VALUES(0), (0); +SELECT * FROM t9; + +--echo # Scenario 8: Test ALTER TABLE operations + +INSERT INTO t3 VALUES(0), (0), (100), (200), (1000); +SELECT * FROM t3; +DELETE FROM t3 WHERE a > 300; +SELECT MAX(a) AS `Expect 200` FROM t3; +--echo # This will not change the counter to 150, but to 201, which is the next +--echo # of current max counter in the table +ALTER TABLE t3 AUTO_INCREMENT = 150; +SHOW CREATE TABLE t3; +INSERT INTO t3 VALUES(0); +SELECT MAX(a) AS `Expect 201` FROM t3; +--echo # This will change the counter to 500, which is bigger than any counter +--echo # in the table +ALTER TABLE t3 AUTO_INCREMENT = 500; +SHOW CREATE TABLE t3; +INSERT INTO t3 VALUES(0); +SELECT MAX(a) AS `Expect 500` FROM t3; + +TRUNCATE TABLE t3; +ALTER TABLE t3 AUTO_INCREMENT = 100; +SHOW CREATE TABLE t3; +INSERT INTO t3 VALUES(0), (0); +SELECT * FROM t3; + +INSERT INTO t3 VALUES(150), (180); +UPDATE t3 SET a = 200 WHERE a = 150; 
+INSERT INTO t3 VALUES(220); +--echo # This still fails to set to 120, but just 221 +ALTER TABLE t3 AUTO_INCREMENT = 120; +SHOW CREATE TABLE t3; +INSERT INTO t3 VALUES(0); +SELECT MAX(a) AS `Expect 221` FROM t3; + +DELETE FROM t3 WHERE a > 120; + +ALTER TABLE t3 AUTO_INCREMENT = 120; +SHOW CREATE TABLE t3; + +--source include/kill_and_restart_mysqld.inc + +INSERT INTO t3 VALUES(0); +SELECT MAX(a) AS `Expect 120` FROM t3; + +set global innodb_flush_log_at_trx_commit=1; + +INSERT INTO t3 VALUES(0), (0), (200), (210); + +--echo # Test the different algorithms in ALTER TABLE + +--let $template = t3 +--let $algorithm = INPLACE +--let $table = t_inplace +--source suite/innodb/include/autoinc_persist_alter.inc +--let $algorithm = COPY +--let $table = t_copy +--source suite/innodb/include/autoinc_persist_alter.inc + +--echo # Scenario 9: Test the sql_mode = NO_AUTO_VALUE_ON_ZERO + +CREATE TABLE t30 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b INT, key(b)) ENGINE = InnoDB; + +set SQL_MODE = NO_AUTO_VALUE_ON_ZERO; + +INSERT INTO t30 VALUES(NULL, 1), (200, 2), (0, 3); +INSERT INTO t30(b) VALUES(4), (5), (6), (7); +SELECT * FROM t30 ORDER BY b; +ALTER TABLE t30 MODIFY b MEDIUMINT; +SELECT * FROM t30 ORDER BY b; + +set global innodb_flush_log_at_trx_commit=1; + +CREATE TABLE t31 (a INT) ENGINE = InnoDB; +INSERT INTO t31 VALUES(1), (2); +ALTER TABLE t31 ADD b INT AUTO_INCREMENT PRIMARY KEY; +INSERT INTO t31 VALUES(3, 0), (4, NULL), (5, NULL); +--error ER_DUP_ENTRY +INSERT INTO t31 VALUES(6, 0); +SELECT * FROM t31; + +--source include/kill_and_restart_mysqld.inc + +--echo # This will not insert 0 +INSERT INTO t31(a) VALUES(6), (0); +SELECT * FROM t31; +DROP TABLE t31; + +set SQL_MODE = NO_AUTO_VALUE_ON_ZERO; + +DELETE FROM t30 WHERE a = 0; +UPDATE t30 set a = 0 where b = 5; +SELECT * FROM t30 ORDER BY b; +DELETE FROM t30 WHERE a = 0; + +UPDATE t30 SET a = NULL WHERE b = 6; +UPDATE t30 SET a = 300 WHERE b = 7; + +SELECT * FROM t30 ORDER BY b; + +SET SQL_MODE = 0; + +--echo # 
Scenario 10: Rollback would not rollback the counter +CREATE TABLE t32 ( +a BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB; + +INSERT INTO t32 VALUES(0), (0); + +set global innodb_flush_log_at_trx_commit=1; + +START TRANSACTION; +INSERT INTO t32 VALUES(0), (0); +SELECT MAX(a) AS `Expect 4` FROM t32; +DELETE FROM t32 WHERE a >= 2; +ROLLBACK; + +--source include/kill_and_restart_mysqld.inc + +SELECT MAX(a) AS `Expect 2` FROM t32; +INSERT INTO t32 VALUES(0), (0); +SELECT MAX(a) AS `Expect 6` FROM t32; + +--echo # Scenario 11: Test duplicate primary key/secondary key will not stop +--echo # increasing the counter + +CREATE TABLE t33 ( +a BIGINT NOT NULL PRIMARY KEY, +b BIGINT NOT NULL AUTO_INCREMENT, +KEY(b)) ENGINE = InnoDB; + +INSERT INTO t33 VALUES(1, NULL); +INSERT INTO t33 VALUES(2, NULL); +--error ER_DUP_ENTRY +INSERT INTO t33 VALUES(2, NULL); + +INSERT INTO t33 VALUES(3, NULL); +SELECT MAX(b) AS `Expect 4` FROM t33; + +TRUNCATE TABLE t33; + +INSERT INTO t33 VALUES(1, NULL); +INSERT INTO t33 VALUES(2, NULL); + +--echo # To make sure the redo logs for autoinc counter must have been flushed +--echo # after commit, we start the transaction manually and do some other +--echo # operations to generate some redo logs. 
Or else the redo logs in the +--echo # single INSERT statement would not be flushed since the INSERT would fail + +set global innodb_flush_log_at_trx_commit=1; + +START TRANSACTION; +UPDATE t33 SET a = 10 WHERE a = 1; +--error ER_DUP_ENTRY +INSERT INTO t33 VALUES(2, NULL); +COMMIT; + +--source include/kill_and_restart_mysqld.inc + +INSERT INTO t33 VALUES(3, NULL); +SELECT MAX(b) AS `Expect 4` FROM t33; + +DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t30, t32, t33; diff --git a/mysql-test/suite/innodb/t/autoinc_persist_debug.test b/mysql-test/suite/innodb/t/autoinc_persist_debug.test new file mode 100644 index 000000000000..b72b8d6405a3 --- /dev/null +++ b/mysql-test/suite/innodb/t/autoinc_persist_debug.test @@ -0,0 +1,181 @@ +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/not_embedded.inc + +--echo # Pre-create some tables, so that DDTableBuffer is not empty + +set global innodb_flush_log_at_trx_commit=1; + +CREATE TABLE t1(a TINYINT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t1 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31); +SELECT * FROM t1; + +CREATE TABLE t2(a TINYINT UNSIGNED AUTO_INCREMENT KEY) ENGINE = InnoDB; +--error ER_WARN_DATA_OUT_OF_RANGE +INSERT INTO t2 VALUES(-5); +INSERT INTO t2 VALUES(0), (0), (0), (0), (8), (10), (0), +(20), (30), (31); +SELECT * FROM t2; + +CREATE TABLE t3(a SMALLINT AUTO_INCREMENT KEY) ENGINE = InnoDB; +INSERT INTO t3 VALUES(0), (0), (0), (0), (-1), (-10), (0), +(20), (30), (31), (1024), (4096); +SELECT * FROM t3; + +--source include/restart_mysqld.inc + +set global innodb_flush_log_at_trx_commit=1; + +--echo # Scenario 1: Create two new tables, with simple DMLs on them, +--echo # and force a checkpoint, then some other DMLs on them + +CREATE TABLE t4(a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b INT, KEY b(b)) ENGINE = InnoDB; +CREATE TABLE t5(a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b INT, KEY b(b)) ENGINE = InnoDB; + +SET @start_global_value = 
@@global.innodb_log_checkpoint_now; +SELECT @start_global_value; + +--echo # Invoke a checkpoint, which would persist the autoinc counters +--echo # of t4 and t5 into DD Table Buffer +set global innodb_log_checkpoint_now=ON; + +INSERT INTO t4 VALUES(0, 1), (0, 2), (10, 3), (12, 4), (0, 5); +SELECT * FROM t4; +SHOW CREATE TABLE t4; +INSERT INTO t5 VALUES(0, 1), (100, 2), (50, 3); +INSERT INTO t5 VALUES(0, 4), (109, 5), (0, 6); +SELECT * FROM t5; +SHOW CREATE TABLE t5; + +set global innodb_log_checkpoint_now=ON; +SELECT @start_global_value; +SET @@global.innodb_log_checkpoint_now = @start_global_value; +SELECT @@global.innodb_log_checkpoint_now; + +START TRANSACTION; +--echo # This should insert 14, 15 +INSERT INTO t4 VALUES(0, 10), (0, 20); +--echo # This should insert 111, 112 +INSERT INTO t5 VALUES(0, 10), (0, 20); +COMMIT; + +START TRANSACTION; +--echo # This should insert 16, 17 +INSERT INTO t4 VALUES(0, 10), (0, 20); +--echo # This should insert 113, 114 +INSERT INTO t5 VALUES(0, 10), (0, 20); +--echo # This will not rollback the counter +ROLLBACK; + +--source include/kill_and_restart_mysqld.inc + +SET GLOBAL innodb_flush_log_at_trx_commit=1; + +SELECT MAX(a) AS `Expect 15` FROM t4; +SELECT MAX(a) AS `Expect 112` FROM t5; + +SHOW CREATE TABLE t4; +SHOW CREATE TABLE t5; + +--echo # Scenario 2: Mix the persisted autoinc counter and corrupted bits + +TRUNCATE TABLE t1; +TRUNCATE TABLE t2; +TRUNCATE TABLE t3; + +set debug = "+d, dict_set_index_corrupted"; +CHECK TABLE t4; +set debug = "-d, dict_set_index_corrupted"; + +--error ER_INDEX_CORRUPT +SELECT b FROM t4 WHERE b > 3; + +--echo # This should insert 18, 19 +INSERT INTO t4 VALUES(0, 6), (0, 7); +SELECT MAX(a) AS `Expect 19` FROM t4; + +INSERT INTO t1 VALUES(0), (0); +INSERT INTO t2 VALUES(0), (0); +INSERT INTO t3 VALUES(0), (0); + +--echo # This should insert 115, 116 +INSERT INTO t5 VALUES(0, 10), (0, 11); + +--source include/kill_and_restart_mysqld.inc + +set global innodb_flush_log_at_trx_commit=1; + +CHECK 
TABLE t4; +--error ER_INDEX_CORRUPT +SELECT b FROM t4 WHERE b > 3; + +--echo # This should fix the corrupted index +DROP INDEX b ON t4; +CREATE INDEX b ON t4(b); +CHECK TABLE t4; + +INSERT INTO t4 VALUES(0, 8), (0, 9), (30, 10); + +SET @start_global_value = @@global.innodb_log_checkpoint_now; +SELECT @start_global_value; +set global innodb_log_checkpoint_now=ON; + +SELECT MAX(a) AS `Expect 116` FROM t5; +DELETE FROM t5; + +set debug = "+d, dict_set_index_corrupted"; +CHECK TABLE t5; +set debug = "-d, dict_set_index_corrupted"; + +SELECT @start_global_value; +SET @@global.innodb_log_checkpoint_now = @start_global_value; +SELECT @@global.innodb_log_checkpoint_now; + +--echo # This should insert 117, 118 +INSERT INTO t5 VALUES(0, 10), (0, 20); + +--source include/kill_and_restart_mysqld.inc + +set global innodb_flush_log_at_trx_commit=1; + +--error ER_INDEX_CORRUPT +SELECT b FROM t5 WHERE b > 3; + +--echo # This should fix the corrupted index +DROP INDEX b ON t5; +CREATE INDEX b ON t5(b); +CHECK TABLE t5; + +--echo # This should insert 31, 32 +INSERT INTO t4 VALUES(0, 11), (0, 12); +SELECT MAX(a) AS `Expect 32` FROM t4; + +--source include/restart_mysqld.inc + +set global innodb_flush_log_at_trx_commit=1; + +--echo # This should insert 119, 120 +INSERT INTO t5 VALUES(0, 12), (0, 13); +SELECT MAX(a) AS `Expect 120` FROM t5; + +# Scenario 3: Test if the specified autoinc counter would be persisted + +CREATE TABLE t6 ( + id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(100) +) ENGINE=InnoDB AUTO_INCREMENT=99; + +SET GLOBAL debug="+d,innodb_evict_autoinc_table"; + +--echo # Evict table from dictionary cache +--error ER_WRONG_VALUE_FOR_VAR +SET GLOBAL innodb_ft_aux_table="test/t6"; + +SET GLOBAL debug="-d,innodb_evict_autoinc_table"; + +INSERT INTO t6(name) VALUES('mysql'); + +SELECT * FROM t6; + +DROP TABLE t1, t2, t3, t4, t5, t6; diff --git a/mysql-test/suite/innodb/t/innodb.test b/mysql-test/suite/innodb/t/innodb.test index bf4de26ac418..e6647dd89a8c 100644 --- 
a/mysql-test/suite/innodb/t/innodb.test +++ b/mysql-test/suite/innodb/t/innodb.test @@ -1518,15 +1518,18 @@ select * from t1; drop table t1; # -# Test that update does not change internal auto-increment value +# Test that update does change internal auto-increment value # create table t1 (a int not null auto_increment primary key, val int) engine=InnoDB; insert into t1 (val) values (1); update t1 set a=2 where a=1; -# We should get the following error because InnoDB does not update the counter +# This should insert 3, since the counter has been updated to 2 already +insert into t1 (val) values (3); +select * from t1; +# We should get the following error because InnoDB does update the counter --error ER_DUP_ENTRY -insert into t1 (val) values (1); +insert into t1 values (2, 2); select * from t1; drop table t1; # diff --git a/mysql-test/suite/innodb/t/innodb_misc1.test b/mysql-test/suite/innodb/t/innodb_misc1.test index 8d63f3d6db06..a95898b769f8 100644 --- a/mysql-test/suite/innodb/t/innodb_misc1.test +++ b/mysql-test/suite/innodb/t/innodb_misc1.test @@ -63,15 +63,16 @@ select * from t1; drop table t1; # -# Test that update does not change internal auto-increment value +# Test that update does change internal auto-increment value # create table t1 (a int not null auto_increment primary key, val int) engine=InnoDB; insert into t1 (val) values (1); update t1 set a=2 where a=1; -# We should get the following error because InnoDB does not update the counter +# We should get the following error because InnoDB does update the counter --error ER_DUP_ENTRY -insert into t1 (val) values (1); +insert into t1 values (2, 2); +insert into t1 (val) value (3); select * from t1; drop table t1; # diff --git a/mysql-test/suite/innodb_fts/include/ngram_token_size.inc b/mysql-test/suite/innodb_fts/include/ngram_token_size.inc index ea5af219a2e5..0b61b2846afc 100644 --- a/mysql-test/suite/innodb_fts/include/ngram_token_size.inc +++ 
b/mysql-test/suite/innodb_fts/include/ngram_token_size.inc @@ -4,8 +4,8 @@ let $restart_parameters = restart: --ngram_token_size=$ngram_token_size; SELECT @@ngram_token_size; # New row inserted should be tokenized correctly by new ngram_token_size -INSERT INTO articles (title, body) VALUES - ('数据库管理系统','计算机是处理大量数据的理想工具'); +INSERT INTO articles VALUES + (5, '数据库管理系统','计算机是处理大量数据的理想工具'); -- echo # Some results are incorrect due to ngram_token_size mismatch diff --git a/mysql-test/suite/innodb_fts/r/misc_debug.result b/mysql-test/suite/innodb_fts/r/misc_debug.result index 7304a7f8a88c..8137bc69feea 100644 --- a/mysql-test/suite/innodb_fts/r/misc_debug.result +++ b/mysql-test/suite/innodb_fts/r/misc_debug.result @@ -16,8 +16,8 @@ BEGIN; INSERT INTO articles (title,body) VALUES ('MySQL Tutorial','DBMS stands for DataBase ...'); # Kill and restart -INSERT INTO articles (title,body) VALUES -('MySQL Tutorial','DBMS stands for DataBase ...'); +INSERT INTO articles VALUES +(8, 'MySQL Tutorial','DBMS stands for DataBase ...'); CREATE FULLTEXT INDEX idx ON articles (title,body); SELECT * FROM articles WHERE MATCH (title,body) @@ -25,7 +25,7 @@ AGAINST ('Database' IN NATURAL LANGUAGE MODE); id title body 1 MySQL Tutorial DBMS stands for DataBase ... 5 MySQL vs. YourSQL In the following database comparison ... -7 MySQL Tutorial DBMS stands for DataBase ... +8 MySQL Tutorial DBMS stands for DataBase ... 
INSERT INTO articles (title,body) VALUES ('MySQL Tutorial','DBMS stands for DataBase ...') , ('How To Use MySQL Well','After you went through a ...'), @@ -37,18 +37,18 @@ BEGIN; INSERT INTO articles (title,body) VALUES ('MySQL Tutorial','DBMS stands for DataBase ...'); # Kill and restart -INSERT INTO articles (title,body) VALUES -('MySQL Tutorial','DBMS stands for DataBase ...'); +INSERT INTO articles VALUES +(16, 'MySQL Tutorial','DBMS stands for DataBase ...'); SELECT * FROM articles WHERE MATCH (title,body) AGAINST ('Database' IN NATURAL LANGUAGE MODE); id title body 1 MySQL Tutorial DBMS stands for DataBase ... 5 MySQL vs. YourSQL In the following database comparison ... -7 MySQL Tutorial DBMS stands for DataBase ... 8 MySQL Tutorial DBMS stands for DataBase ... -12 MySQL vs. YourSQL In the following database comparison ... -14 MySQL Tutorial DBMS stands for DataBase ... +9 MySQL Tutorial DBMS stands for DataBase ... +13 MySQL vs. YourSQL In the following database comparison ... +16 MySQL Tutorial DBMS stands for DataBase ... 
DROP TABLE articles; CREATE TABLE articles ( id int PRIMARY KEY, diff --git a/mysql-test/suite/innodb_fts/r/ngram.result b/mysql-test/suite/innodb_fts/r/ngram.result index 83683b3e3ddb..a206ab2ba4d7 100644 --- a/mysql-test/suite/innodb_fts/r/ngram.result +++ b/mysql-test/suite/innodb_fts/r/ngram.result @@ -373,8 +373,8 @@ Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID SELECT @@ngram_token_size; @@ngram_token_size 1 -INSERT INTO articles (title, body) VALUES -('数据库管理系统','计算机是处理大量数据的理想工具'); +INSERT INTO articles VALUES +(5, '数据库管理系统','计算机是处理大量数据的理想工具'); # Some results are incorrect due to ngram_token_size mismatch SELECT * FROM articles WHERE MATCH(title, body) AGAINST('数' IN BOOLEAN MODE); @@ -432,8 +432,8 @@ id title body SELECT @@ngram_token_size; @@ngram_token_size 3 -INSERT INTO articles (title, body) VALUES -('数据库管理系统','计算机是处理大量数据的理想工具'); +INSERT INTO articles VALUES +(5, '数据库管理系统','计算机是处理大量数据的理想工具'); # Some results are incorrect due to ngram_token_size mismatch SELECT * FROM articles WHERE MATCH(title, body) AGAINST('数' IN BOOLEAN MODE); @@ -485,8 +485,8 @@ id title body SELECT @@ngram_token_size; @@ngram_token_size 5 -INSERT INTO articles (title, body) VALUES -('数据库管理系统','计算机是处理大量数据的理想工具'); +INSERT INTO articles VALUES +(5, '数据库管理系统','计算机是处理大量数据的理想工具'); # Some results are incorrect due to ngram_token_size mismatch SELECT * FROM articles WHERE MATCH(title, body) AGAINST('数' IN BOOLEAN MODE); @@ -533,8 +533,8 @@ id title body SELECT @@ngram_token_size; @@ngram_token_size 7 -INSERT INTO articles (title, body) VALUES -('数据库管理系统','计算机是处理大量数据的理想工具'); +INSERT INTO articles VALUES +(5, '数据库管理系统','计算机是处理大量数据的理想工具'); # Some results are incorrect due to ngram_token_size mismatch SELECT * FROM articles WHERE MATCH(title, body) AGAINST('数' IN BOOLEAN MODE); @@ -577,8 +577,8 @@ id title body SELECT @@ngram_token_size; @@ngram_token_size 2 -INSERT INTO articles (title, body) VALUES -('数据库管理系统','计算机是处理大量数据的理想工具'); +INSERT INTO articles VALUES +(5, 
'数据库管理系统','计算机是处理大量数据的理想工具'); # Some results are incorrect due to ngram_token_size mismatch SELECT * FROM articles WHERE MATCH(title, body) AGAINST('数' IN BOOLEAN MODE); diff --git a/mysql-test/suite/innodb_fts/r/plugin.result b/mysql-test/suite/innodb_fts/r/plugin.result index 303e7684e76d..567ddee36b8e 100644 --- a/mysql-test/suite/innodb_fts/r/plugin.result +++ b/mysql-test/suite/innodb_fts/r/plugin.result @@ -128,9 +128,9 @@ INSERT INTO articles (title, body) VALUES SELECT COUNT(*) FROM articles; COUNT(*) 0 -SELECT * FROM articles WHERE +SELECT title, body FROM articles WHERE MATCH(title, body) AGAINST('mysql'); -id title body +title body INSERT INTO articles (title, body) VALUES ('MySQL Tutorial','DBMS stands for MySQL DataBase ...'), ('How To Use MySQL Well','After you went through a ...'), @@ -138,11 +138,11 @@ INSERT INTO articles (title, body) VALUES ('1001 MySQL Tricks','How to use full-text search engine'), ('Go MySQL Tricks','How to use full text search engine'); # Kill and restart -SELECT * FROM articles WHERE +SELECT title, body FROM articles WHERE MATCH(title, body) AGAINST('Tricks'); -id title body -4 1001 MySQL Tricks How to use full-text search engine -5 Go MySQL Tricks How to use full text search engine +title body +1001 MySQL Tricks How to use full-text search engine +Go MySQL Tricks How to use full text search engine SELECT COUNT(*) FROM articles; COUNT(*) 5 diff --git a/mysql-test/suite/innodb_fts/t/misc_debug.test b/mysql-test/suite/innodb_fts/t/misc_debug.test index fd102f0ab1fa..098d8d00ad3a 100644 --- a/mysql-test/suite/innodb_fts/t/misc_debug.test +++ b/mysql-test/suite/innodb_fts/t/misc_debug.test @@ -47,8 +47,8 @@ INSERT INTO articles (title,body) VALUES --source include/kill_and_restart_mysqld.inc # This insert will re-initialize the Doc ID counter, it should not crash -INSERT INTO articles (title,body) VALUES - ('MySQL Tutorial','DBMS stands for DataBase ...'); +INSERT INTO articles VALUES + (8, 'MySQL Tutorial','DBMS stands for 
DataBase ...'); # Recreate fulltext index to see if everything is OK CREATE FULLTEXT INDEX idx ON articles (title,body); @@ -76,8 +76,8 @@ INSERT INTO articles (title,body) VALUES --source include/kill_and_restart_mysqld.inc # This insert will re-initialize the Doc ID counter, it should not crash -INSERT INTO articles (title,body) VALUES - ('MySQL Tutorial','DBMS stands for DataBase ...'); +INSERT INTO articles VALUES + (16, 'MySQL Tutorial','DBMS stands for DataBase ...'); # Should return 6 rows SELECT * FROM articles diff --git a/mysql-test/suite/innodb_fts/t/plugin.test b/mysql-test/suite/innodb_fts/t/plugin.test index e08cc6feaef8..ee3e14d0c881 100644 --- a/mysql-test/suite/innodb_fts/t/plugin.test +++ b/mysql-test/suite/innodb_fts/t/plugin.test @@ -138,7 +138,7 @@ INSERT INTO articles (title, body) VALUES SELECT COUNT(*) FROM articles; # Simple term search - no records expected -SELECT * FROM articles WHERE +SELECT title, body FROM articles WHERE MATCH(title, body) AGAINST('mysql'); INSERT INTO articles (title, body) VALUES @@ -151,7 +151,7 @@ INSERT INTO articles (title, body) VALUES --source include/kill_and_restart_mysqld.inc # Simple term search - 4 records expected -SELECT * FROM articles WHERE +SELECT title, body FROM articles WHERE MATCH(title, body) AGAINST('Tricks'); SELECT COUNT(*) FROM articles; DROP TABLE articles; diff --git a/mysql-test/suite/parts/r/partition_auto_increment_innodb.result b/mysql-test/suite/parts/r/partition_auto_increment_innodb.result index 4d50d516b828..ead7a1b0fd49 100644 --- a/mysql-test/suite/parts/r/partition_auto_increment_innodb.result +++ b/mysql-test/suite/parts/r/partition_auto_increment_innodb.result @@ -44,7 +44,7 @@ UPDATE t1 SET c1 = -1 WHERE c1 = 40; SELECT AUTO_INCREMENT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'; AUTO_INCREMENT -32 +52 UPDATE IGNORE t1 SET c1 = NULL WHERE c1 = 4; Warnings: Warning 1048 Column 'c1' cannot be null @@ -64,9 +64,9 @@ c1 25 30 31 -32 -33 51 +52 +53 
DROP TABLE t1; CREATE TABLE t1 ( c1 INT NOT NULL AUTO_INCREMENT, diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index ab2b018c5c92..6c5fd2b632bf 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -187,14 +187,6 @@ dict_table_remove_from_cache_low( dict_table_t* table, /*!< in, own: table */ ibool lru_evict); /*!< in: TRUE if evicting from LRU */ -/** Write back the dirty persistent dynamic metadata of the table -to DDTableBuffer -@param[in,out] table table object */ -static -void -dict_table_persist_to_dd_table_buffer( - dict_table_t* table); - #ifdef UNIV_DEBUG /**********************************************************************//** Validate the dictionary table LRU list. @@ -745,9 +737,19 @@ dict_table_autoinc_alloc( void* table_void) { dict_table_t* table = static_cast(table_void); + table->autoinc_mutex = UT_NEW_NOKEY(ib_mutex_t()); ut_a(table->autoinc_mutex != NULL); mutex_create(LATCH_ID_AUTOINC, table->autoinc_mutex); + + if (!srv_missing_dd_table_buffer) { + table->autoinc_persisted_mutex = UT_NEW_NOKEY(ib_mutex_t()); + ut_a(table->autoinc_persisted_mutex != NULL); + mutex_create(LATCH_ID_PERSIST_AUTOINC, + table->autoinc_persisted_mutex); + } else { + table->autoinc_persisted_mutex = NULL; + } } /** Allocate and init the zip_pad_mutex of a given index. @@ -793,7 +795,6 @@ dict_index_zip_pad_lock( mutex_enter(index->zip_pad.mutex); } - /********************************************************************//** Unconditionally set the autoinc counter. */ void @@ -873,6 +874,83 @@ dict_table_autoinc_unlock( { mutex_exit(table->autoinc_mutex); } + +/** Set the persisted autoinc value of the table to the new counter, +and write the table's dynamic metadata back to DDTableBuffer. This function +should only be used in DDL operation functions like +1. create_table_info_t::initialize_autoinc() +2. ha_innobase::commit_inplace_alter_table() +3. row_rename_table_for_mysql() +4. 
When we do TRUNCATE TABLE +TODO: In WL#7141, we should reconsider the solution here. Once the DDL +becomes crash-safe, we could not only reset the counter in DDTableBuffer +in this simple way. We also have to make sure the update on this table +is crash-safe, but not forget the old value because it doesn't support +UNDO logging. +@param[in,out] table table +@param[in] counter new autoinc counter +@param[in] log_reset if true, it means that the persisted + autoinc is updated to a smaller one, + an autoinc change log with value of 0 + would be written, otherwise nothing to do */ +void +dict_table_set_and_persist_autoinc( + dict_table_t* table, + ib_uint64_t counter, + bool log_reset) +{ + if (srv_missing_dd_table_buffer) { + return; + } + + ut_ad(dict_table_has_autoinc_col(table)); + + mutex_enter(table->autoinc_persisted_mutex); + + if (table->autoinc_persisted == counter + && table->dirty_status != METADATA_DIRTY) { + + /* If the counter has already been written back to + DDTableBuffer, we don't need to write back again. + This could happen during ALTER TABLE, which doesn't + change the AUTOINC counter value. */ + mutex_exit(table->autoinc_persisted_mutex); + + return; + } + + /* Even now, we could be not sure if the counter has already been + persisted. It could be some other dynamic persistent metadata makes + the dirty_status as METADATA_DIRTY. However, we still have to try + to write back. */ + table->autoinc_persisted = counter; + + dict_table_mark_dirty(table); + + mutex_exit(table->autoinc_persisted_mutex); + + /* Here the dirty_status would be set to METADATA_BUFFERED, but + this function is only called for DDL operations when there is no + any other DML. See comments in AutoIncLogMtr::log(). 
*/ + dict_table_persist_to_dd_table_buffer(table); + + if (log_reset) { + + mtr_t mtr; + AutoIncLogMtr autoinc_mtr(&mtr); + autoinc_mtr.start(); + + /* We write a redo log with counter value of 0 to indicate + that all redo logs logged before would be discarded, so that we + won't apply old bigger counter to the table during recovery, + which is incorrect if we update the counter to a smaller one. */ + autoinc_mtr.log(table, 0); + + autoinc_mtr.commit(); + } +} + + #endif /* !UNIV_HOTBACKUP */ /** Looks for column n in an index. @@ -2174,6 +2252,20 @@ dict_table_remove_from_cache( dict_table_remove_from_cache_low(table, FALSE); } +#ifndef DBUG_OFF +/** Removes a table object from the dictionary cache, for debug purpose +@param[in,out] table table object +@param[in] lru_evict true if table being evicted to make room + in the table LRU list */ +void +dict_table_remove_from_cache_debug( + dict_table_t* table, + bool lru_evict) +{ + dict_table_remove_from_cache_low(table, lru_evict); +} +#endif /* DBUG_OFF */ + /****************************************************************//** If the given column name is reserved for InnoDB system columns, return TRUE. 
@@ -5486,6 +5578,7 @@ dict_persist_init(void) dict_persist->persisters = UT_NEW_NOKEY(Persisters()); dict_persist->persisters->add(PM_INDEX_CORRUPTED); + dict_persist->persisters->add(PM_TABLE_AUTO_INC); } /** Clear the structure */ @@ -5513,13 +5606,15 @@ dict_persist_close(void) static void dict_init_dynamic_metadata( - const dict_table_t* table, + dict_table_t* table, PersistentTableMetadata*metadata) { if (srv_missing_dd_table_buffer) { return; } + ut_ad(mutex_own(&dict_persist->mutex)); + ut_ad(metadata->get_table_id() == table->id); metadata->reset(); @@ -5534,6 +5629,10 @@ dict_init_dynamic_metadata( } } + if (table->autoinc_persisted != 0) { + metadata->set_autoinc(table->autoinc_persisted); + } + /* Will initialize other metadata here */ } @@ -5591,6 +5690,17 @@ dict_table_apply_dynamic_metadata( } } + ib_uint64_t autoinc = metadata->get_autoinc(); + + /* This happens during recovery, so no locks are needed. */ + if (autoinc > table->autoinc_persisted) { + + get_dirty = true; + + table->autoinc = autoinc; + table->autoinc_persisted = autoinc; + } + /* Will apply other persistent metadata here */ return(get_dirty); @@ -5654,7 +5764,6 @@ dict_table_load_dynamic_metadata( ut_ad(dict_sys != NULL); ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(!dict_table_is_temporary(table)); - ut_ad(!is_system_tablespace(table->space)); table_buffer = dict_persist->table_buffer; @@ -5694,6 +5803,36 @@ dict_table_load_dynamic_metadata( UT_DELETE(readmeta); } +/** Mark the dirty_status of a table as METADATA_DIRTY, and add it to the +dirty_dict_tables list if necessary. 
+@param[in,out] table table */ +void +dict_table_mark_dirty( + dict_table_t* table) +{ + ut_ad(!srv_missing_dd_table_buffer); + + ut_ad(!dict_table_is_temporary(table)); + + mutex_enter(&dict_persist->mutex); + + switch (table->dirty_status) { + case METADATA_DIRTY: + break; + case METADATA_CLEAN: + /* Not in dirty_tables list, add it now */ + UT_LIST_ADD_LAST(dict_persist->dirty_dict_tables, table); + ut_d(table->in_dirty_dict_tables_list = true); + /* Fall through */ + case METADATA_BUFFERED: + table->dirty_status = METADATA_DIRTY; + } + + ut_ad(table->in_dirty_dict_tables_list); + + mutex_exit(&dict_persist->mutex); +} + /** Flags an index corrupted in the data dictionary cache only. This is used to mark a corrupted index when index's own dictionary is corrupted, and we would force to load such index for repair purpose. @@ -5753,25 +5892,7 @@ dict_set_corrupted( persisted in redo log */ log_write_up_to(mtr.commit_lsn(), true); - mutex_enter(&dict_persist->mutex); - - switch (table->dirty_status) { - case METADATA_DIRTY: - break; - case METADATA_CLEAN: - /* Not in dirty_tables list, add it now */ - UT_LIST_ADD_LAST( - dict_persist->dirty_dict_tables, - table); - ut_d(table->in_dirty_dict_tables_list = true); - /* Fall through */ - case METADATA_BUFFERED: - table->dirty_status = METADATA_DIRTY; - } - - ut_ad(table->in_dirty_dict_tables_list); - - mutex_exit(&dict_persist->mutex); + dict_table_mark_dirty(table); } } @@ -5785,7 +5906,7 @@ dict_set_corrupted( } /** Write the dirty persistent dynamic metadata for a table to -DD TABLE BUFFER table +DD TABLE BUFFER table. This is the low level function to write back. 
@param[in,out] table table to write */ static void @@ -5837,7 +5958,6 @@ dict_table_persist_to_dd_table_buffer_low( /** Write back the dirty persistent dynamic metadata of the table to DDTableBuffer @param[in,out] table table object */ -static void dict_table_persist_to_dd_table_buffer( dict_table_t* table) @@ -7208,9 +7328,10 @@ DDTableBuffer::close() } /** Prepare for a update on METADATA field -@param[in] entry entry to insert +@param[in] entry clustered index entry to replace rec @param[in] rec clustered index record -@return update vector of differing fields without system columns */ +@return update vector of differing fields without system columns, +or NULL if there isn't any different field */ upd_t* DDTableBuffer::update_set_metadata( const dtuple_t* entry, @@ -7222,16 +7343,19 @@ DDTableBuffer::update_set_metadata( ulint len; upd_t* update; + data = rec_get_nth_field_old(rec, 1, &len); + dfield = dtuple_get_nth_field(entry, 1); + + if (dfield_data_is_binary_equal(dfield, len, data)) { + + return(NULL); + } + update = upd_create(dtuple_get_n_fields(entry), m_replace_heap); /* There are only 2 fields in one row. Since the first field TABLE_ID should be equal, we can set the second METADATA field as diff directly */ - data = rec_get_nth_field_old(rec, 1, &len); - dfield = dtuple_get_nth_field(entry, 1); - /* There must be something different so we have to update */ - ut_a(!dfield_data_is_binary_equal(dfield, len, data)); - upd_field = upd_get_nth_field(update, 0); dfield_copy(&upd_field->new_val, dfield); upd_field_set_field_no(upd_field, 1, m_index, NULL); @@ -7306,6 +7430,15 @@ DDTableBuffer::replace( upd_t* update = update_set_metadata(entry, btr_pcur_get_rec(&pcur)); + if (update == NULL) { + + /* We don't need to update if all fields are equal. 
*/ + mtr.commit(); + mem_heap_empty(m_replace_heap); + + return(DB_SUCCESS); + } + big_rec_t* big_rec; static const ulint flags = (BTR_CREATE_FLAG | BTR_NO_LOCKING_FLAG @@ -7430,7 +7563,7 @@ void Persister::write_log( table_id_t id, const PersistentTableMetadata& metadata, - mtr_t* mtr) + mtr_t* mtr) const { byte* log_ptr; ulint size = get_write_size(metadata); @@ -7447,7 +7580,6 @@ Persister::write_log( MLOG_TABLE_DYNAMIC_META, id, log_ptr, mtr); ulint consumed = write(metadata, log_ptr, size); - ut_ad(consumed == size); log_ptr += consumed; mlog_close(mtr, log_ptr); @@ -7464,7 +7596,7 @@ ulint CorruptedIndexPersister::write( const PersistentTableMetadata& metadata, byte* buffer, - ulint size) + ulint size) const { ulint length = 0; corrupted_ids_t corrupted_ids = metadata.get_corrupted_indexes(); @@ -7501,7 +7633,7 @@ CorruptedIndexPersister::write( @return the size of metadata */ ulint CorruptedIndexPersister::get_write_size( - const PersistentTableMetadata& metadata) + const PersistentTableMetadata& metadata) const { ulint length = 0; corrupted_ids_t corrupted_ids = metadata.get_corrupted_indexes(); @@ -7534,7 +7666,7 @@ CorruptedIndexPersister::read( PersistentTableMetadata&metadata, const byte* buffer, ulint size, - bool* corrupt) + bool* corrupt) const { const byte* end = buffer + size; ulint consumed = 0; @@ -7582,6 +7714,175 @@ CorruptedIndexPersister::read( return(consumed); } +/** Write the autoinc counter of a table, we can pre-calculate +the size by calling get_write_size() +@param[in] metadata persistent metadata +@param[out] buffer write buffer +@param[in] size size of write buffer, should be + at least get_write_size() +@return the length of bytes written */ +ulint +AutoIncPersister::write( + const PersistentTableMetadata& metadata, + byte* buffer, + ulint size) const +{ + ulint length = 0; + ib_uint64_t autoinc = metadata.get_autoinc(); + + mach_write_to_1(buffer, static_cast(PM_TABLE_AUTO_INC)); + ++length; + ++buffer; + + ulint len = 
mach_u64_write_much_compressed(buffer, autoinc); + length += len; + buffer += len; + + ut_ad(length <= size); + return(length); +} + +/** Read the autoinc counter from buffer, and store them to +metadata object +@param[out] metadata metadata where we store the read data +@param[in] buffer buffer to read +@param[in] size size of buffer +@param[out] corrupt true if we found something wrong in + the buffer except incomplete buffer, + otherwise false +@return the bytes we read from the buffer if the buffer data +is complete and we get everything, 0 if the buffer is incomplete */ +ulint +AutoIncPersister::read( + PersistentTableMetadata& metadata, + const byte* buffer, + ulint size, + bool* corrupt) const +{ + const byte* end = buffer + size; + ulint consumed = 0; + byte type; + ib_uint64_t autoinc; + + *corrupt = false; + + /* It should contain PM_TABLE_AUTO_INC and the counter at least */ + if (size < 2) { + return(0); + } + + type = mach_read_from_1(buffer); + ++consumed; + ++buffer; + + if (type != PM_TABLE_AUTO_INC) { + *corrupt = true; + return(consumed); + } + + const byte* start = buffer; + autoinc = mach_parse_u64_much_compressed(&start, end); + + if (start == NULL) { + /* Just incomplete data, not corrupted */ + return(0); + } + + if (autoinc == 0) { + metadata.set_autoinc(autoinc); + } else { + metadata.set_autoinc_if_bigger(autoinc); + } + + consumed += start - buffer; + ut_ad(consumed <= size); + return(consumed); +} + +/** Write redo logs for autoinc counter that is to be inserted or to +update the existing one, if the counter is bigger than current one +or the counter is 0 as the special mark. 
+This function should be called only once at most per mtr, and work with +the commit() to finish the complete logging & commit +@param[in] table table +@param[in] counter counter to be logged */ +void +AutoIncLogMtr::log( + dict_table_t* table, + ib_uint64_t counter) +{ + ut_ad(!srv_missing_dd_table_buffer); + + ut_ad(!m_locked); + rw_lock_s_lock(&dict_persist->lock); + ut_d(m_locked = true); + + if (counter == 0) { + + /* We should always write the log for this special mark */ + m_logged = true; + + } else { + + mutex_enter(table->autoinc_persisted_mutex); + + if (table->autoinc_persisted < counter) { + dict_table_autoinc_persisted_update(table, counter); + + if (table->dirty_status == METADATA_DIRTY) { + + /* There are two cases when the dirty_status + would be changed from METADATA_DIRTY to + METADATA_BUFFERED. One is checkpoint, but + the rw-lock in dict_persist should prevent + checkpoint changing the status. The other + is dict_table_set_and_persist_autoinc(), + which would persist in-memory dynamic + metadata to DDTableBuffer. But it happens + only in DDL, when there should not be any + concurrent DML. */ + ut_ad(table->in_dirty_dict_tables_list); + } else { + + dict_table_mark_dirty(table); + } + + m_logged = true; + } + + mutex_exit(table->autoinc_persisted_mutex); + } + + if (m_logged) { + PersistentTableMetadata metadata(table->id); + metadata.set_autoinc(counter); + + Persister* persister = dict_persist->persisters->get( + PM_TABLE_AUTO_INC); + + /* No need to flush the logs now for performance reasons. 
*/ + persister->write_log(table->id, metadata, m_mtr); + } else { + + rw_lock_s_unlock(&dict_persist->lock); + ut_d(m_locked = false); + } +} + +/** Commit the internal mtr, and some cleanup if necessary */ +void +AutoIncLogMtr::commit() +{ + m_mtr->commit(); + + if (m_logged) { + ut_ad(m_locked); + ut_d(m_locked = false); + + rw_lock_s_unlock(&dict_persist->lock); + } +} + /** Destructor */ Persisters::~Persisters() { @@ -7629,6 +7930,9 @@ Persisters::add( case PM_INDEX_CORRUPTED: persister = UT_NEW_NOKEY(CorruptedIndexPersister()); break; + case PM_TABLE_AUTO_INC: + persister = UT_NEW_NOKEY(AutoIncPersister()); + break; default: ut_ad(0); break; diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc index a3d4e5cee0bd..1739eeb5832c 100644 --- a/storage/innobase/dict/dict0load.cc +++ b/storage/innobase/dict/dict0load.cc @@ -3020,18 +3020,13 @@ dict_load_table_one( : ignore_err; err = dict_load_indexes(table, heap, index_load_err); - /* Load the corrupted index bits from DDTableBuffer */ - if (!is_system_tablespace(table->space) - && !dict_table_is_temporary(table)) { + dict_table_load_dynamic_metadata(table); - dict_table_load_dynamic_metadata(table); - - /* Re-check like we do in dict_load_indexes() */ - if (!srv_load_corrupted - && !(index_load_err & DICT_ERR_IGNORE_CORRUPT) - && dict_table_is_corrupted(table)) { - err = DB_INDEX_CORRUPT; - } + /* Re-check like we do in dict_load_indexes() */ + if (!srv_load_corrupted + && !(index_load_err & DICT_ERR_IGNORE_CORRUPT) + && dict_table_is_corrupted(table)) { + err = DB_INDEX_CORRUPT; } if (err == DB_INDEX_CORRUPT) { diff --git a/storage/innobase/dict/mem.cc b/storage/innobase/dict/mem.cc index 08c228de009b..354bb36fbe8f 100644 --- a/storage/innobase/dict/mem.cc +++ b/storage/innobase/dict/mem.cc @@ -187,6 +187,8 @@ dict_mem_table_create( dict_table_autoinc_create_lazy(table); table->autoinc = 0; + table->autoinc_persisted = 0; + table->autoinc_field_no = ULINT_UNDEFINED; 
table->sess_row_id = 0; table->sess_trx_id = 0; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 04bff9f20e99..44fba7e7f2f7 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -119,6 +119,12 @@ this program; if not, write to the Free Software Foundation, Inc., /* for ha_innopart, Native InnoDB Partitioning. */ #include "ha_innopart.h" +/** TRUE if we don't have DDTableBuffer in the system tablespace, +this should be due to we run the server against old data files. +Please do NOT change this when server is running. +FIXME: This should be removed away once we can upgrade for new DD. */ +extern bool srv_missing_dd_table_buffer; + /** to protect innobase_open_files */ static mysql_mutex_t innobase_share_mutex; /** to force correct commit order in binlog */ @@ -457,6 +463,7 @@ performance schema instrumented if "UNIV_PFS_MUTEX" is defined */ static PSI_mutex_info all_innodb_mutexes[] = { PSI_KEY(autoinc_mutex), + PSI_KEY(autoinc_persisted_mutex), # ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK PSI_KEY(buffer_block_mutex), # endif /* !PFS_SKIP_BUFFER_MUTEX_RWLOCK */ @@ -5343,13 +5350,10 @@ innobase_index_lookup( return(share->idx_trans_tbl.index_mapping[keynr]); } -/************************************************************************ -Set the autoinc column max value. This should only be called once from -ha_innobase::open(). Therefore there's no need for a covering lock. */ - +/** Set the autoinc column max value. This should only be called from +ha_innobase::open, therefore there's no need for a covering lock. */ void ha_innobase::innobase_initialize_autoinc() -/*======================================*/ { ulonglong auto_inc; const Field* field = table->found_next_number_field; @@ -5385,7 +5389,7 @@ ha_innobase::innobase_initialize_autoinc() opening the table and return failure. 
*/ my_error(ER_AUTOINC_READ_FAILED, MYF(0)); } else { - dict_index_t* index; + dict_index_t* index = NULL; const char* col_name; ib_uint64_t read_auto_inc; ulint err; @@ -5394,22 +5398,33 @@ ha_innobase::innobase_initialize_autoinc() col_name = field->field_name; - /* For intrinsic table, name of field has to be prefixed with - table name to maintain column-name uniqueness. */ - if (m_prebuilt->table != NULL - && dict_table_is_intrinsic(m_prebuilt->table)) { + read_auto_inc = dict_table_autoinc_read(m_prebuilt->table); - ulint col_no = dict_col_get_no(dict_table_get_nth_col( - m_prebuilt->table, field->field_index)); + ut_ad(!srv_missing_dd_table_buffer || read_auto_inc == 0); - col_name = dict_table_get_col_name( - m_prebuilt->table, col_no); - } + if (read_auto_inc == 0) { + + index = innobase_get_index(table->s->next_number_index); - index = innobase_get_index(table->s->next_number_index); + /* Execute SELECT MAX(col_name) FROM TABLE; + This is necessary when an imported tablespace + doesn't have a correct cfg file so autoinc + has not been initialized, or the table is empty. 
*/ + err = row_search_max_autoinc( + index, col_name, &read_auto_inc); - /* Execute SELECT MAX(col_name) FROM TABLE; */ - err = row_search_max_autoinc(index, col_name, &read_auto_inc); + if (read_auto_inc > 0 && !srv_missing_dd_table_buffer) { + ib::warn() << "Reading max(auto_inc_col) = " + << read_auto_inc << " for table " + << index->table->name + << ", because there was an IMPORT" + << " without cfg file."; + } + + } else { + + err = DB_SUCCESS; + } switch (err) { case DB_SUCCESS: { @@ -5767,18 +5782,40 @@ ha_innobase::open(const char* name, int, uint) if (m_prebuilt->table != NULL && !m_prebuilt->table->ibd_file_missing && table->found_next_number_field != NULL) { - dict_table_autoinc_lock(m_prebuilt->table); + + dict_table_t* ib_table = m_prebuilt->table; + + dict_table_autoinc_lock(ib_table); + + ib_uint64_t autoinc = dict_table_autoinc_read(ib_table); + ib_uint64_t autoinc_persisted = 0; + + if (!srv_missing_dd_table_buffer) { + mutex_enter(ib_table->autoinc_persisted_mutex); + autoinc_persisted = ib_table->autoinc_persisted; + mutex_exit(ib_table->autoinc_persisted_mutex); + } /* Since a table can already be "open" in InnoDB's internal data dictionary, we only init the autoinc counter once, the first time the table is loaded. We can safely reuse the autoinc value from a previous MySQL open. */ - if (dict_table_autoinc_read(m_prebuilt->table) == 0) { - + if (autoinc == 0 || autoinc == autoinc_persisted) { + /* If autoinc is 0, it means the counter was never + used or imported from a tablespace without .cfg file. + We have to search the index to get proper counter. 
+ If only the second condition is true, it means it's + the first time open for the table, we just want to + calculate the next counter */ innobase_initialize_autoinc(); } - dict_table_autoinc_unlock(m_prebuilt->table); + dict_table_autoinc_set_col_pos( + ib_table, + table->found_next_number_field->field_index); + ut_ad(dict_table_has_autoinc_col(ib_table)); + + dict_table_autoinc_unlock(ib_table); } /* Set plugin parser for fulltext index */ @@ -7717,6 +7754,7 @@ ha_innobase::update_row( dberr_t error; trx_t* trx = thd_to_trx(m_user_thd); + ib_uint64_t new_counter = 0; DBUG_ENTER("ha_innobase::update_row"); @@ -7785,7 +7823,18 @@ ha_innobase::update_row( error = row_update_for_mysql((byte*) old_row, m_prebuilt); - /* We need to do some special AUTOINC handling for the following case: + if (dict_table_has_autoinc_col(m_prebuilt->table) + && !srv_missing_dd_table_buffer) { + new_counter = row_upd_get_new_autoinc_counter( + uvect, m_prebuilt->table->autoinc_field_no); + } else { + new_counter = 0; + } + + /* We should handle the case if the AUTOINC counter has been + updated, we want to update the counter accordingly. + + We need to do some special AUTOINC handling for the following case: INSERT INTO t (c1,c2) VALUES(x,y) ON DUPLICATE KEY UPDATE ... @@ -7794,20 +7843,32 @@ ha_innobase::update_row( value used in the INSERT statement. 
*/ if (error == DB_SUCCESS - && table->next_number_field - && new_row == table->record[0] - && thd_sql_command(m_user_thd) == SQLCOM_INSERT - && trx->duplicates) { + && (new_counter != 0 + || (table->next_number_field + && new_row == table->record[0] + && thd_sql_command(m_user_thd) == SQLCOM_INSERT + && trx->duplicates))) { ulonglong auto_inc; ulonglong col_max_value; - auto_inc = table->next_number_field->val_int(); + if (new_counter != 0) { + auto_inc = new_counter; + } else { + ut_ad(table->next_number_field != NULL); + auto_inc = table->next_number_field->val_int(); + } /* We need the upper limit of the col type to check for whether we update the table autoinc counter or not. */ col_max_value = - table->next_number_field->get_max_int_value(); + table->found_next_number_field->get_max_int_value(); + + /* TODO: To remove this whole 'if' in WL#7141 */ + if (srv_missing_dd_table_buffer) { + col_max_value = + table->next_number_field->get_max_int_value(); + } if (auto_inc <= col_max_value && auto_inc != 0) { @@ -11072,6 +11133,79 @@ create_table_info_t::initialize() DBUG_RETURN(0); } +/** Initialize the autoinc of this table if necessary, which should +be called before we flush logs, so autoinc counter can be persisted. 
*/ +void +create_table_info_t::initialize_autoinc() +{ + dict_table_t* innobase_table; + + const bool persist = !(m_create_info->options & HA_LEX_CREATE_TMP_TABLE) + && m_form->found_next_number_field + && !srv_missing_dd_table_buffer; + + if (!persist && m_create_info->auto_increment_value == 0) { + + return; + } + + innobase_table = thd_to_innodb_session(m_thd)->lookup_table_handler( + m_table_name); + + if (innobase_table == NULL) { + innobase_table = dict_table_open_on_name( + m_table_name, true, false, DICT_ERR_IGNORE_NONE); + } else { + innobase_table->acquire(); + ut_ad(dict_table_is_intrinsic(innobase_table)); + } + + DBUG_ASSERT(innobase_table != NULL); + + if (persist) { + dict_table_autoinc_set_col_pos( + innobase_table, + m_form->found_next_number_field->field_index); + ut_ad(dict_table_has_autoinc_col(innobase_table)); + } + + /* We need to copy the AUTOINC value from the old table if + this is an ALTER|OPTIMIZE TABLE or CREATE INDEX because CREATE INDEX + does a table copy too. If query was one of : + + CREATE TABLE ...AUTO_INCREMENT = x; or + ALTER TABLE...AUTO_INCREMENT = x; or + OPTIMIZE TABLE t; or + CREATE INDEX x on t(...); + + Find out a table definition from the dictionary and get + the current value of the auto increment field. Set a new + value to the auto increment field if the value is greater + than the maximum value in the column. 
*/ + + enum_sql_command cmd = static_cast( + thd_sql_command(m_thd)); + + if (m_create_info->auto_increment_value > 0 + && ((m_create_info->used_fields & HA_CREATE_USED_AUTO) + || cmd == SQLCOM_ALTER_TABLE + || cmd == SQLCOM_OPTIMIZE + || cmd == SQLCOM_CREATE_INDEX)) { + ib_uint64_t auto_inc_value; + + auto_inc_value = m_create_info->auto_increment_value; + + dict_table_autoinc_lock(innobase_table); + dict_table_autoinc_initialize(innobase_table, auto_inc_value); + if (persist) { + dict_table_set_and_persist_autoinc( + innobase_table, auto_inc_value - 1, false); + } + dict_table_autoinc_unlock(innobase_table); + } + + dict_table_close(innobase_table, true, false); +} /** Prepare to create a new table to an InnoDB database. @param[in] name Table name @@ -11236,14 +11370,22 @@ create_table_info_t::create_table() } } + initialize_autoinc(); + /* Cache all the FTS indexes on this table in the FTS specific structure. They are used for FTS indexed column update handling. */ if (m_flags2 & DICT_TF2_FTS) { + innobase_table = dict_table_open_on_name( + m_table_name, true, false, + DICT_ERR_IGNORE_NONE); + fts_t* fts = innobase_table->fts; ut_a(fts != NULL); dict_table_get_all_fts_indexes(innobase_table, fts->indexes); + + dict_table_close(innobase_table, true, false); } stmt = innobase_get_stmt_unsafe(m_thd, &stmt_len); @@ -11381,34 +11523,6 @@ create_table_info_t::create_table_update_dict() /* Note: We can't call update_thd() as m_prebuilt will not be setup at this stage and so we use thd. */ - /* We need to copy the AUTOINC value from the old table if - this is an ALTER|OPTIMIZE TABLE or CREATE INDEX because CREATE INDEX - does a table copy too. If query was one of : - - CREATE TABLE ...AUTO_INCREMENT = x; or - ALTER TABLE...AUTO_INCREMENT = x; or - OPTIMIZE TABLE t; or - CREATE INDEX x on t(...); - - Find out a table definition from the dictionary and get - the current value of the auto increment field. 
Set a new - value to the auto increment field if the value is greater - than the maximum value in the column. */ - - if (((m_create_info->used_fields & HA_CREATE_USED_AUTO) - || thd_sql_command(m_thd) == SQLCOM_ALTER_TABLE - || thd_sql_command(m_thd) == SQLCOM_OPTIMIZE - || thd_sql_command(m_thd) == SQLCOM_CREATE_INDEX) - && m_create_info->auto_increment_value > 0) { - ib_uint64_t auto_inc_value; - - auto_inc_value = m_create_info->auto_increment_value; - - dict_table_autoinc_lock(innobase_table); - dict_table_autoinc_initialize(innobase_table, auto_inc_value); - dict_table_autoinc_unlock(innobase_table); - } - dict_table_close(innobase_table, FALSE, FALSE); innobase_parse_hint_from_comment(m_thd, innobase_table, m_form->s); @@ -16731,6 +16845,12 @@ innodb_internal_table_validate( } dict_table_close(user_table, FALSE, TRUE); + + DBUG_EXECUTE_IF("innodb_evict_autoinc_table", + mutex_enter(&dict_sys->mutex); + dict_table_remove_from_cache_debug(user_table, true); + mutex_exit(&dict_sys->mutex); + ); } return(ret); diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index 96fbfe069619..d44952e2e086 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -729,6 +729,10 @@ class create_table_info_t /** Create the internal innodb table definition. */ int create_table_def(); + /** Initialize the autoinc of this table if necessary, which should + be called before we flush logs, so autoinc counter can be persisted. */ + void initialize_autoinc(); + /** Connection thread handle. 
*/ THD* m_thd; diff --git a/storage/innobase/handler/ha_innopart.cc b/storage/innobase/handler/ha_innopart.cc index feea32761d08..f016165b4e53 100644 --- a/storage/innobase/handler/ha_innopart.cc +++ b/storage/innobase/handler/ha_innopart.cc @@ -54,6 +54,12 @@ Created Nov 22, 2013 Mattias Jonsson */ #include "partition_info.h" #include "key.h" +/** TRUE if we don't have DDTableBuffer in the system tablespace, +this should be due to we run the server against old data files. +Please do NOT change this when server is running. +FIXME: This should be removed away once we can upgrade for new DD. */ +extern bool srv_missing_dd_table_buffer; + /* To be backwards compatible we also fold partition separator on windows. */ #ifdef _WIN32 static const char* part_sep = "#p#"; @@ -881,6 +887,7 @@ ha_innopart::initialize_auto_increment( dict_index_t* index; const char* col_name; ib_uint64_t read_auto_inc; + ib_uint64_t persisted_auto_inc; ib_uint64_t max_auto_inc = 0; ulint err; dict_table_t* ib_table; @@ -893,18 +900,53 @@ ha_innopart::initialize_auto_increment( col_name = field->field_name; for (uint part = 0; part < m_tot_parts; part++) { ib_table = m_part_share->get_table_part(part); + + dict_table_autoinc_set_col_pos( + ib_table, field->field_index); + dict_table_autoinc_lock(ib_table); read_auto_inc = dict_table_autoinc_read(ib_table); - if (read_auto_inc != 0) { - set_if_bigger(max_auto_inc, read_auto_inc); + + persisted_auto_inc = ib_table->autoinc_persisted; + + ut_ad(!srv_missing_dd_table_buffer + || persisted_auto_inc == 0); + + /* During startup, we may set both these two autoinc + to same value after recovery of the counter. In this + case, it's the first time we initialize the counter + here, and we have to calculate the next counter. + Otherwise, if they are not equal, we can use it + directly. 
*/ + if (read_auto_inc != 0 + && read_auto_inc != persisted_auto_inc) { + /* Sometimes, such as after UPDATE, + we may have the persisted counter bigger + than the in-memory one, because UPDATE in + partition tables still doesn't modify the + in-memory counter while persisted one could + be updated if it's updated to larger value. */ + set_if_bigger( + max_auto_inc, + ut_max(read_auto_inc, + persisted_auto_inc)); dict_table_autoinc_unlock(ib_table); continue; } - /* Execute SELECT MAX(col_name) FROM TABLE; */ - index = m_part_share->get_index( + + if (persisted_auto_inc == 0) { + /* Execute SELECT MAX(col_name) FROM TABLE; */ + index = m_part_share->get_index( part, table->s->next_number_index); - err = row_search_max_autoinc( - index, col_name, &read_auto_inc); + err = row_search_max_autoinc( + index, col_name, &read_auto_inc); + } else { + + /* We have the persisted AUTOINC counter, + have to calculate the next one. */ + ut_ad(read_auto_inc == persisted_auto_inc); + err = DB_SUCCESS; + } switch (err) { case DB_SUCCESS: { diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 1b4c377f2316..20933bcde710 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -6853,35 +6853,58 @@ commit_get_autoinc( rebuilt. Get the user-supplied value or the last value from the sequence. 
*/ ib_uint64_t max_value_table; - dberr_t err; Field* autoinc_field = old_table->found_next_number_field; - dict_index_t* index = dict_table_get_index_on_first_col( - ctx->old_table, autoinc_field->field_index); - max_autoinc = ha_alter_info->create_info->auto_increment_value; dict_table_autoinc_lock(ctx->old_table); - err = row_search_max_autoinc( - index, autoinc_field->field_name, &max_value_table); + max_value_table = ctx->old_table->autoinc_persisted; - if (err != DB_SUCCESS) { - ut_ad(0); - max_autoinc = 0; - } else if (max_autoinc <= max_value_table) { - ulonglong col_max_value; - ulonglong offset; - col_max_value = autoinc_field->get_max_int_value(); + /* We still have to search the index here when we want to + set the AUTO_INCREMENT value to a smaller or equal one. + + Here is an example: + Let's say we have a table t1 with one AUTOINC column, existing + rows (1), (2), (100), (200), (1000), after following SQLs: + DELETE FROM t1 WHERE a > 200; + ALTER TABLE t1 AUTO_INCREMENT = 150; + we expect the next value allocated from 201, but not 150. + + We could only search the tree to know current max counter + in the table and compare. 
*/ + if (max_autoinc <= max_value_table + || srv_missing_dd_table_buffer) { + dberr_t err; + dict_index_t* index; + + index = dict_table_get_index_on_first_col( + ctx->old_table, autoinc_field->field_index); + + err = row_search_max_autoinc( + index, autoinc_field->field_name, + &max_value_table); - offset = ctx->prebuilt->autoinc_offset; - max_autoinc = innobase_next_autoinc( - max_value_table, 1, 1, offset, - col_max_value); + if (err != DB_SUCCESS) { + ut_ad(0); + max_autoinc = 0; + } else if (max_autoinc <= max_value_table) { + + ulonglong col_max_value; + ulonglong offset; + + col_max_value = autoinc_field-> + get_max_int_value(); + offset = ctx->prebuilt->autoinc_offset; + max_autoinc = innobase_next_autoinc( + max_value_table, 1, 1, offset, + col_max_value); + } } + dict_table_autoinc_unlock(ctx->old_table); } else { /* An AUTO_INCREMENT value was not specified. @@ -8104,6 +8127,40 @@ ha_innobase::commit_inplace_alter_table( DBUG_SUICIDE();); } + /* Update the persistent autoinc counter if necessary, we should + do this before flushing logs. */ + if (altered_table->found_next_number_field + && !srv_missing_dd_table_buffer) { + for (inplace_alter_handler_ctx** pctx = ctx_array; + *pctx; pctx++) { + ha_innobase_inplace_ctx* ctx + = static_cast + (*pctx); + DBUG_ASSERT(ctx->need_rebuild() == new_clustered); + + dict_table_t* t = ctx->new_table; + Field* field = altered_table->found_next_number_field; + + dict_table_autoinc_lock(t); + + dict_table_autoinc_initialize(t, ctx->max_autoinc); + + dict_table_autoinc_set_col_pos(t, field->field_index); + + /* The same reason as comments on this function call + in row_rename_table_for_mysql(). Besides, we may write + redo logs here if we want to update the counter to a + smaller one. 
*/ + ib_uint64_t autoinc = dict_table_autoinc_read(t); + dict_table_set_and_persist_autoinc( + t, autoinc - 1, + autoinc - 1 < t->autoinc_persisted); + + dict_table_autoinc_unlock(t); + } + } + + /* Flush the log to reduce probability that the .frm files and the InnoDB data dictionary get out-of-sync if the user runs with innodb_flush_log_at_trx_commit = 0 */ @@ -8283,8 +8340,10 @@ ha_innobase::commit_inplace_alter_table( (*pctx); DBUG_ASSERT(ctx->need_rebuild() == new_clustered); - if (altered_table->found_next_number_field) { - dict_table_t* t = ctx->new_table; + /* TODO: To remove the whole 'if' in WL#7141 */ + if (altered_table->found_next_number_field + && srv_missing_dd_table_buffer) { + dict_table_t* t = ctx->new_table; dict_table_autoinc_lock(t); dict_table_autoinc_initialize(t, ctx->max_autoinc); diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h index fc0309ec69cb..ad529b4c037b 100644 --- a/storage/innobase/include/dict0dict.h +++ b/storage/innobase/include/dict0dict.h @@ -157,6 +157,13 @@ dict_persist_init(void); void dict_persist_close(void); +/** Write back the dirty persistent dynamic metadata of the table +to DDTableBuffer. +@param[in,out] table table object */ +void +dict_table_persist_to_dd_table_buffer( + dict_table_t* table); + /*********************************************************************//** Gets the minimum number of bytes per character. @return minimum multi-byte char size, in bytes */ @@ -318,6 +325,7 @@ void dict_table_autoinc_lock( /*====================*/ dict_table_t* table); /*!< in/out: table */ + /********************************************************************//** Unconditionally set the autoinc counter. */ void @@ -349,6 +357,53 @@ void dict_table_autoinc_unlock( /*======================*/ dict_table_t* table); /*!< in/out: table */ + +/** Update the persisted autoinc counter to specified one, we should hold +autoinc_persisted_mutex. 
+@param[in,out] table table +@param[in] counter set autoinc_persisted to this value */ +UNIV_INLINE +void +dict_table_autoinc_persisted_update( + dict_table_t* table, + ib_uint64_t autoinc); + +/** Set the column position of autoinc column in clustered index for a table. +@param[in] table table +@param[in] pos column position in table definition */ +UNIV_INLINE +void +dict_table_autoinc_set_col_pos( + dict_table_t* table, + ulint pos); + +/** Check if a table has an autoinc counter column. +@param[in] table table +@return true if there is an autoinc column in the table, otherwise false. */ +UNIV_INLINE +bool +dict_table_has_autoinc_col( + const dict_table_t* table); + +/** Set the persisted autoinc value of the table to the new counter, +and write the table's dynamic metadata back to DDTableBuffer. This function +should only be used in DDL operation functions like +1. create_table_info_t::initialize_autoinc() +2. ha_innobase::commit_inplace_alter_table() +3. row_rename_table_for_mysql() +4. When we do TRUNCATE TABLE +@param[in,out] table table +@param[in] counter new autoinc counter +@param[in] log_reset if true, it means that the persisted + autoinc is updated to a smaller one, + an autoinc change log with value of 0 + would be written, otherwise nothing to do */ +void +dict_table_set_and_persist_autoinc( + dict_table_t* table, + ib_uint64_t counter, + bool log_reset); + #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** Adds system columns to a table object. 
*/ @@ -381,6 +436,18 @@ void dict_table_remove_from_cache( /*=========================*/ dict_table_t* table); /*!< in, own: table */ + +#ifndef DBUG_OFF +/** Removes a table object from the dictionary cache, for debug purpose +@param[in,out] table table object +@param[in] lru_evict true if table being evicted to make room + in the table LRU list */ +void +dict_table_remove_from_cache_debug( + dict_table_t* table, + bool lru_evict); +#endif /* DBUG_OFF */ + /**********************************************************************//** Renames a table object. @return TRUE if success */ @@ -1861,9 +1928,10 @@ class DDTableBuffer { void close(); /** Prepare for a update on METADATA field - @param[in] entry entry to insert + @param[in] entry clustered index entry to replace rec @param[in] rec clustered index record - @return update vector of differing fields without system columns */ + @return update vector of differing fields without system columns, + or NULL if there isn't any different field */ upd_t* update_set_metadata( const dtuple_t* entry, const rec_t* rec); @@ -1890,6 +1958,13 @@ class DDTableBuffer { dtuple_t* m_replace_tuple; }; +/** Mark the dirty_status of a table as METADATA_DIRTY, and add it to the +dirty_dict_tables list if necessary. +@param[in,out] table table */ +void +dict_table_mark_dirty( + dict_table_t* table); + /** Flags an index corrupted in the data dictionary cache only. This is used to mark a corrupted index when index's own dictionary is corrupted, and we would force to load such index for repair purpose. diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic index d430e403dd68..1668968a58ee 100644 --- a/storage/innobase/include/dict0dict.ic +++ b/storage/innobase/include/dict0dict.ic @@ -1862,6 +1862,47 @@ dict_table_has_temp_general_tablespace_name( strlen(general_space_name)) == 0); } +/** Update the persisted autoinc counter to specified one, we should hold +autoinc_persisted_mutex. 
+@param[in,out] table table +@param[in] counter set autoinc_persisted to this value */ +UNIV_INLINE +void +dict_table_autoinc_persisted_update( + dict_table_t* table, + ib_uint64_t autoinc) +{ + ut_ad(dict_table_has_autoinc_col(table)); + ut_ad(mutex_own(table->autoinc_persisted_mutex)); + + table->autoinc_persisted = autoinc; +} + +/** Check if a table has an autoinc counter column. +@param[in] table table +@return true if there is an autoinc column in the table, otherwise false. */ +UNIV_INLINE +bool +dict_table_has_autoinc_col( + const dict_table_t* table) +{ + return(table->autoinc_field_no != ULINT_UNDEFINED); +} + +/** Set the column position of autoinc column in clustered index for a table. +@param[in] table table +@param[in] pos column position in table definition */ +UNIV_INLINE +void +dict_table_autoinc_set_col_pos( + dict_table_t* table, + ulint pos) +{ + ulint idx = dict_table_get_nth_col_pos(table, pos); + + table->autoinc_field_no = idx; +} + /** Encode the number of columns and number of virtual columns in a 4 bytes value. We could do this because the number of columns in InnoDB is limited to 1017 diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index 369e055d2fc9..7d366aaeeceb 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -1187,8 +1187,15 @@ struct dict_vcol_templ_t { dynamic metadata changed to be written back */ enum table_dirty_status { /** Some persistent metadata is now dirty in memory, need to be - written back to DDTableBuffer table and(or directly to) - DD table. There could be either one row or no row for this table in + written back to DDTableBuffer table and(or directly to) DD table. + There could be some exceptions, when it's marked as dirty, but + the metadata has already been written back to DDTableBuffer. + For example, if a corrupted index is found and marked as corrupted, + then it gets dropped. 
At this time, the dirty_status is still of + this dirty value. Also a concurrent checkpoint make this bit + out-of-date for other working threads, which still think the + status is dirty and write-back is necessary. + There could be either one row or no row for this table in DDTableBuffer table */ METADATA_DIRTY = 0, /** Some persistent metadata is buffered in DDTableBuffer table, @@ -1527,6 +1534,32 @@ struct dict_table_t { /** Autoinc counter value to give to the next inserted row. */ ib_uint64_t autoinc; + /** Mutex protecting the persisted autoincrement counter. */ + ib_mutex_t* autoinc_persisted_mutex; + + /** Autoinc counter value that has been persisted in redo logs or + DDTableBuffer. It's mainly used when we want to write counter back + to DDTableBuffer. + This is different from the 'autoinc' above, which could be bigger + than this one, because 'autoinc' will get updated right after + some counters are allocated, but we will write the counter to redo + logs and update this counter later. Once all allocated counters + have been written to redo logs, 'autoinc' should be exact the next + counter of this persisted one. + We want this counter because when we need to write the counter back + to DDTableBuffer, we had better keep it consistency with the counter + that has been written to redo logs. Besides, we can't read the 'autoinc' + directly easily, because the autoinc_lock is required and there could + be a deadlock. + This variable is protected by autoinc_persisted_mutex. */ + ib_uint64_t autoinc_persisted; + + /** The position of autoinc counter field in clustered index. This would + be set when CREATE/ALTER/OPEN TABLE and IMPORT TABLESPACE, and used in + modifications to clustered index, such as INSERT/UPDATE. There should + be no conflict to access it, so no protection is needed. */ + ulint autoinc_field_no; + /** This counter is used to track the number of granted and pending autoinc locks on this table. 
This value is set after acquiring the lock_sys_t::mutex but we peek the contents to determine whether other @@ -1608,8 +1641,10 @@ enum persistent_type_t { /** Persistent Metadata type for corrupted indexes */ PM_INDEX_CORRUPTED = 1, - /* TODO: Will add following types + /** Persistent Metadata type for autoinc counter */ PM_TABLE_AUTO_INC = 2, + + /* TODO: Will add following types PM_TABLE_UPDATE_TIME = 3, Maybe something tablespace related PM_TABLESPACE_SIZE = 4, @@ -1617,7 +1652,7 @@ enum persistent_type_t { /** The biggest type, which should be 1 bigger than the last true type */ - PM_BIGGEST_TYPE = 2 + PM_BIGGEST_TYPE = 3 }; typedef std::vector > @@ -1630,7 +1665,10 @@ class PersistentTableMetadata { @param[in] id table id */ explicit PersistentTableMetadata( table_id_t id) - : m_id(id), m_corrupted_ids() + : + m_id(id), + m_corrupted_ids(), + m_autoinc(0) {} /** Get the corrupted indexes' IDs @@ -1661,6 +1699,30 @@ class PersistentTableMetadata { return(m_id); } + /** Set the autoinc counter of the table if it's bigger + @param[in] autoinc autoinc counter */ + void set_autoinc_if_bigger( + ib_uint64_t autoinc) { + /* We only set the biggest autoinc counter. Callers don't + guarantee passing a bigger number in. */ + if (autoinc > m_autoinc) { + m_autoinc = autoinc; + } + } + + /** Set the autoinc counter of the table + @param[in] autoinc autoinc counter */ + void set_autoinc( + ib_uint64_t autoinc) { + m_autoinc = autoinc; + } + + /** Get the autoinc counter of the table + @return the autoinc counter */ + ib_uint64_t get_autoinc() const { + return(m_autoinc); + } + private: /** Table ID which this metadata belongs to */ table_id_t m_id; @@ -1668,8 +1730,10 @@ class PersistentTableMetadata { /** Storing the corrupted indexes' ID if exist, or else empty */ corrupted_ids_t m_corrupted_ids; - /* TODO: We will add update_time, auto_inc, etc. 
here and APIs - accordingly */ + /** Autoinc counter of the table */ + ib_uint64_t m_autoinc; + + /* TODO: We will add update_time, etc. here and APIs accordingly */ }; /** Interface for persistent dynamic table metadata. */ @@ -1688,13 +1752,13 @@ class Persister { virtual ulint write( const PersistentTableMetadata& metadata, byte* buffer, - ulint size) = 0; + ulint size) const = 0; /** Pre-calculate the size of metadata to be written @param[in] metadata metadata to be written @return the size of metadata */ virtual ulint get_write_size( - const PersistentTableMetadata& metadata) = 0; + const PersistentTableMetadata& metadata) const = 0; /** Read the dynamic metadata from buffer, and store them to metadata object @@ -1710,7 +1774,7 @@ class Persister { PersistentTableMetadata&metadata, const byte* buffer, ulint size, - bool* corrupt) = 0; + bool* corrupt) const = 0; /** Write MLOG_TABLE_DYNAMIC_META for persistent dynamic metadata of table @@ -1720,7 +1784,7 @@ class Persister { void write_log( table_id_t id, const PersistentTableMetadata& metadata, - mtr_t* mtr); + mtr_t* mtr) const ; }; /** Persister used for corrupted indexes */ @@ -1736,13 +1800,13 @@ class CorruptedIndexPersister : public Persister { ulint write( const PersistentTableMetadata& metadata, byte* buffer, - ulint size); + ulint size) const; /** Pre-calculate the size of metadata to be written @param[in] metadata metadata to be written @return the size of metadata */ ulint get_write_size( - const PersistentTableMetadata& metadata); + const PersistentTableMetadata& metadata) const; /** Read the corrupted indexes from buffer, and store them to metadata object @@ -1758,13 +1822,117 @@ class CorruptedIndexPersister : public Persister { PersistentTableMetadata&metadata, const byte* buffer, ulint size, - bool* corrupt); + bool* corrupt) const; private: /** The length of index_id_t we will write */ static const size_t INDEX_ID_LENGTH = 12; }; +/** Persister used for autoinc counters */ +class 
AutoIncPersister : public Persister { +public: + /** Write the autoinc counter of a table, we can pre-calculate + the size by calling get_write_size() + @param[in] metadata persistent metadata + @param[out] buffer write buffer + @param[in] size size of write buffer, should be + at least get_write_size() + @return the length of bytes written */ + ulint write( + const PersistentTableMetadata& metadata, + byte* buffer, + ulint size) const; + + /** Pre-calculate the size of metadata to be written + @param[in] metadata metadata to be written + @return the size of metadata */ + inline ulint + get_write_size( + const PersistentTableMetadata& metadata) const + { + /* We just return the max possible size that would be used + if the counter exists, so we don't calculate every time. + Here we need 1 byte for dynamic metadata type and 11 bytes + for the max possible size of counter. */ + return(12); + } + + /** Read the autoinc counter from buffer, and store them to + metadata object + @param[out] metadata metadata where we store the read data + @param[in] buffer buffer to read + @param[in] size size of buffer + @param[out] corrupt true if we found something wrong in + the buffer except incomplete buffer, + otherwise false + @return the bytes we read from the buffer if the buffer data + is complete and we get everything, 0 if the buffer is incomplete */ + ulint read( + PersistentTableMetadata&metadata, + const byte* buffer, + ulint size, + bool* corrupt) const; +}; + +/** We can use this class to log autoinc counter. Since we want to +acquire dict_persist_t::lock before logging and release the lock +after the mtr commits, so we wrap these details in this class. 
*/ +class AutoIncLogMtr { +public: + + /** Constructor + @param[in] mtr the mtr to be used */ + AutoIncLogMtr(mtr_t* mtr) + : m_mtr(mtr), +#ifdef UNIV_DEBUG + m_locked(), +#endif /* UNIV_DEBUG */ + m_logged() + {} + + /** Destructor */ + ~AutoIncLogMtr() { + ut_ad(!m_locked); + } + + /** Start the internal mtr */ + void start() { + m_mtr->start(); + } + + /** Get the internal mtr object, if we want to call functions in mtr_t + @return the mtr */ + mtr_t* get_mtr() { + return(m_mtr); + } + + /** Write redo logs for autoinc counter that is to be inserted or to + update the existing one, if the counter is bigger than current one + or the counter is 0 as the special mark. + This function should be called only once at most per mtr, and work with + the commit() to finish the complete logging & commit + @param[in] table table + @param[in] counter counter to be logged */ + void log(dict_table_t* table, ib_uint64_t counter); + + /** Commit the internal mtr, and some cleanup if necessary */ + void commit(); + +private: + + /** mtr to be used */ + mtr_t* m_mtr; + +#ifdef UNIV_DEBUG + /** True if dict_persist_t::lock is held now, otherwise false */ + bool m_locked; +#endif /* UNIV_DEBUG */ + + /** True if counter has been logged, otherwise false */ + bool m_logged; +}; + /** Container of persisters used in the system. Currently we don't need to protect this object since we only initialize it at very beginning and destroy it in the end. 
During the server running, we only get the persisters */ @@ -1838,10 +2006,16 @@ void dict_table_autoinc_destroy( dict_table_t* table) { - if (table->autoinc_mutex_created == os_once::DONE - && table->autoinc_mutex != NULL) { - mutex_free(table->autoinc_mutex); - UT_DELETE(table->autoinc_mutex); + if (table->autoinc_mutex_created == os_once::DONE) { + if (table->autoinc_mutex != NULL) { + mutex_free(table->autoinc_mutex); + UT_DELETE(table->autoinc_mutex); + } + + if (table->autoinc_persisted_mutex != NULL) { + mutex_free(table->autoinc_persisted_mutex); + UT_DELETE(table->autoinc_persisted_mutex); + } } } @@ -1855,6 +2029,7 @@ dict_table_autoinc_create_lazy( dict_table_t* table) { table->autoinc_mutex = NULL; + table->autoinc_persisted_mutex = NULL; table->autoinc_mutex_created = os_once::NEVER_DONE; } diff --git a/storage/innobase/include/row0row.h b/storage/innobase/include/row0row.h index fee00b57cddb..e1b207fb425b 100644 --- a/storage/innobase/include/row0row.h +++ b/storage/innobase/include/row0row.h @@ -314,6 +314,42 @@ row_get_clust_rec( mtr_t* mtr) /*!< in: mtr */ __attribute__((warn_unused_result)); +/** Parse the integer data from specified data, which could be +DATA_INT, DATA_FLOAT or DATA_DOUBLE. If the value is less than 0 +and the type is not unsigned then we reset the value to 0 +@param[in] data data to read +@param[in] len length of data +@param[in] mtype mtype of data +@param[in] unsigned_type if the data is unsigned +@return the integer value from the data */ +inline +ib_uint64_t +row_parse_int( + const byte* data, + ulint len, + ulint mtype, + bool unsigned_type); + +/** Parse the integer data from specified field, which could be +DATA_INT, DATA_FLOAT or DATA_DOUBLE. We could return 0 if +1) the value is less than 0 and the type is not unsigned +or 2) the field is null. 
+@param[in] field field to read the int value +@return the integer value read from the field, 0 for negative signed +int or NULL field */ +ib_uint64_t +row_parse_int_from_field( + const dfield_t* field); + +/** Read the autoinc counter from the clustered index row. +@param[in] row row to read the autoinc counter +@param[in] n autoinc counter is in the nth field +@return the autoinc counter read */ +ib_uint64_t +row_get_autoinc_counter( + const dtuple_t* row, + ulint n); + /** Result of row_search_index_entry */ enum row_search_result { ROW_FOUND = 0, /*!< the record was found */ diff --git a/storage/innobase/include/row0row.ic b/storage/innobase/include/row0row.ic index 08c0f18e95b9..9243fae839ae 100644 --- a/storage/innobase/include/row0row.ic +++ b/storage/innobase/include/row0row.ic @@ -173,3 +173,52 @@ row_build_row_ref_fast( } } } + +/** Parse the integer data from specified data, which could be +DATA_INT, DATA_FLOAT or DATA_DOUBLE. If the value is less than 0 +and the type is not unsigned then we reset the value to 0 +@param[in] data data to read +@param[in] len length of data +@param[in] mtype mtype of data +@param[in] unsigned_type if the data is unsigned +@return the integer value from the data */ +ib_uint64_t +row_parse_int( + const byte* data, + ulint len, + ulint mtype, + bool unsigned_type) +{ + ib_uint64_t value = 0; + + switch (mtype) { + case DATA_INT: + + ut_a(len <= sizeof value); + value = mach_read_int_type(data, len, unsigned_type); + break; + + case DATA_FLOAT: + + ut_a(len == sizeof(float)); + value = mach_float_read(data); + break; + + case DATA_DOUBLE: + + ut_a(len == sizeof(double)); + value = mach_double_read(data); + break; + + default: + ut_error; + + } + + if (!unsigned_type && static_cast(value) < 0) { + value = 0; + } + + return(value); +} + diff --git a/storage/innobase/include/row0upd.h b/storage/innobase/include/row0upd.h index 697ba8b6a733..7f65f7ebe812 100644 --- a/storage/innobase/include/row0upd.h +++ 
b/storage/innobase/include/row0upd.h @@ -425,6 +425,15 @@ row_upd_index_parse( built */ upd_t** update_out);/*!< out: update vector */ +/** Get the new autoinc counter from the update vector when there is +an autoinc field defined in this table. +@param[in] update update vector for the clustered index +@param[in] autoinc_field_no autoinc field's order in clustered index +@return the new counter if we find it in the update vector, otherwise 0 */ +ib_uint64_t +row_upd_get_new_autoinc_counter( + const upd_t* update, + ulint autoinc_field_no); /* Update vector field */ struct upd_field_t{ diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h index 7c07d8e173be..c11b36455e92 100644 --- a/storage/innobase/include/sync0sync.h +++ b/storage/innobase/include/sync0sync.h @@ -51,6 +51,7 @@ instrumentation due to their large number of instances. */ #ifdef UNIV_PFS_MUTEX /* Key defines to register InnoDB mutexes with performance schema */ extern mysql_pfs_key_t autoinc_mutex_key; +extern mysql_pfs_key_t autoinc_persisted_mutex_key; extern mysql_pfs_key_t buffer_block_mutex_key; extern mysql_pfs_key_t buf_pool_mutex_key; extern mysql_pfs_key_t buf_pool_zip_mutex_key; diff --git a/storage/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h index 8cfd44a37917..47f552d44b6f 100644 --- a/storage/innobase/include/sync0types.h +++ b/storage/innobase/include/sync0types.h @@ -276,6 +276,7 @@ enum latch_level_t { SYNC_PERSIST_METADATA_BUFFER, SYNC_PERSIST_DIRTY_TABLES, + SYNC_PERSIST_AUTOINC, SYNC_PERSIST_CHECKPOINT, SYNC_IBUF_PESS_INSERT_MUTEX, @@ -334,6 +335,7 @@ enum latch_id_t { LATCH_ID_LOG_FLUSH_ORDER, LATCH_ID_PERSIST_METADATA_BUFFER, LATCH_ID_DICT_PERSIST_DIRTY_TABLES, + LATCH_ID_PERSIST_AUTOINC, LATCH_ID_DICT_PERSIST_CHECKPOINT, LATCH_ID_PAGE_CLEANER, LATCH_ID_PURGE_SYS_PQ, diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 2ba841d41c12..bd7a3ae504c6 100644 --- 
a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -2128,7 +2128,14 @@ logs_empty_and_mark_files_at_shutdown(void) that we can recover all committed transactions in a crash recovery. We must not write the lsn stamps to the data files, since at a startup InnoDB deduces - from the stamps if the previous shutdown was clean. */ + from the stamps if the previous shutdown was clean. + + In this path, there is no checkpoint, so we have to + write back persistent metadata before flushing. + There should be no concurrent DML, so no need to + require dict_persist::lock. */ + + dict_persist_to_dd_table_buffer(); log_buffer_flush_to_disk(); diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 096c76f3024b..a0fa0c2f66c0 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -3717,6 +3717,9 @@ row_import_for_mysql( dict_table_autoinc_lock(table); dict_table_autoinc_initialize(table, autoinc); dict_table_autoinc_unlock(table); + /* This should be set later in handler level, where we know the + autoinc counter field index */ + table->autoinc_field_no = ULINT_UNDEFINED; ut_a(err == DB_SUCCESS); diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index 80928e310ca3..0384cfed3ab5 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -65,6 +65,12 @@ check. If you make a change in this module make sure that no codepath is introduced where a call to log_free_check() is bypassed. */ +/** TRUE if we don't have DDTableBuffer in the system tablespace, +this should be due to we run the server against old data files. +Please do NOT change this when server is running. +FIXME: This should be removed away once we can upgrade for new DD. */ +extern bool srv_missing_dd_table_buffer; + /*********************************************************************//** Creates an insert node struct. 
@return own: insert node struct */ @@ -2371,6 +2377,8 @@ row_ins_clust_index_entry_low( dberr_t err = DB_SUCCESS; big_rec_t* big_rec = NULL; mtr_t mtr; + AutoIncLogMtr autoinc_mtr(&mtr); + ib_uint64_t counter = 0; mem_heap_t* offsets_heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; ulint* offsets = offsets_; @@ -2386,8 +2394,7 @@ row_ins_clust_index_entry_low( || !thr_get_trx(thr)->in_rollback); ut_ad(thr != NULL || !dup_chk_only); - mtr_start(&mtr); - mtr.set_named_space(index->space); + autoinc_mtr.start(); if (dict_table_is_temporary(index->table)) { /* Disable REDO logging as the lifetime of temp-tables is @@ -2399,18 +2406,22 @@ row_ins_clust_index_entry_low( ut_ad(!dict_table_is_intrinsic(index->table) || (flags & BTR_NO_UNDO_LOG_FLAG)); - mtr.set_log_mode(MTR_LOG_NO_REDO); + autoinc_mtr.get_mtr()->set_log_mode(MTR_LOG_NO_REDO); + } else { + + autoinc_mtr.get_mtr()->set_named_space(index->space); } if (mode == BTR_MODIFY_LEAF && dict_index_is_online_ddl(index)) { mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED; - mtr_s_lock(dict_index_get_lock(index), &mtr); + mtr_s_lock(dict_index_get_lock(index), autoinc_mtr.get_mtr()); } /* Note that we use PAGE_CUR_LE as the search mode, because then the function will return in both low_match and up_match of the cursor sensible values */ - btr_pcur_open(index, entry, PAGE_CUR_LE, mode, &pcur, &mtr); + btr_pcur_open(index, entry, PAGE_CUR_LE, mode, &pcur, + autoinc_mtr.get_mtr()); cursor = btr_pcur_get_btr_cur(&pcur); cursor->thr = thr; @@ -2428,6 +2439,20 @@ row_ins_clust_index_entry_low( } #endif /* UNIV_DEBUG */ + /* Temporary tables don't require persistent counters. + But for 'ALTER TABLE ... ALGORITHM = COPY', MySQL temporary tables + need this, but we can set it only once when necessary to prevent + writing so many logs. 
This is why row_is_mysql_tmp_table_name() + is necessary here */ + if (dict_table_has_autoinc_col(index->table) + && !dict_table_is_temporary(index->table) + && !row_is_mysql_tmp_table_name(index->table->name.m_name) + && !srv_missing_dd_table_buffer) { + + counter = row_get_autoinc_counter( + entry, index->table->autoinc_field_no); + } + /* Allowing duplicates in clustered index is currently enabled only for intrinsic table and caller understand the limited operation that can be done in this case. */ @@ -2466,18 +2491,26 @@ row_ins_clust_index_entry_low( DB_LOCK_WAIT */ err = row_ins_duplicate_error_in_clust( - flags, cursor, entry, thr, &mtr); + flags, cursor, entry, thr, + autoinc_mtr.get_mtr()); } if (err != DB_SUCCESS) { err_exit: - mtr_commit(&mtr); + if (err == DB_DUPLICATE_KEY && counter != 0) { + /* Although it's duplicate in clustered index, + the counter could be still bigger and should + still be logged. */ + autoinc_mtr.log(index->table, counter); + } + + autoinc_mtr.commit(); goto func_exit; } } if (dup_chk_only) { - mtr_commit(&mtr); + autoinc_mtr.commit(); goto func_exit; } @@ -2499,14 +2532,17 @@ row_ins_clust_index_entry_low( ut_ad(thr != NULL); err = row_ins_clust_index_entry_by_modify( &pcur, flags, mode, &offsets, &offsets_heap, - entry_heap, entry, thr, &mtr); + entry_heap, entry, thr, autoinc_mtr.get_mtr()); if (err == DB_SUCCESS && dict_index_is_online_ddl(index)) { row_log_table_insert(btr_cur_get_rec(cursor), entry, index, offsets); } - mtr_commit(&mtr); + if (err == DB_SUCCESS && counter != 0) { + autoinc_mtr.log(index->table, counter); + } + autoinc_mtr.commit(); mem_heap_free(entry_heap); } else { rec_t* insert_rec; @@ -2517,7 +2553,7 @@ row_ins_clust_index_entry_low( err = btr_cur_optimistic_insert( flags, cursor, &offsets, &offsets_heap, entry, &insert_rec, &big_rec, - n_ext, thr, &mtr); + n_ext, thr, autoinc_mtr.get_mtr()); } else { if (buf_LRU_buf_pool_running_out()) { @@ -2531,19 +2567,23 @@ row_ins_clust_index_entry_low( flags, 
cursor, &offsets, &offsets_heap, entry, &insert_rec, &big_rec, - n_ext, thr, &mtr); + n_ext, thr, autoinc_mtr.get_mtr()); if (err == DB_FAIL) { err = btr_cur_pessimistic_insert( flags, cursor, &offsets, &offsets_heap, entry, &insert_rec, &big_rec, - n_ext, thr, &mtr); + n_ext, thr, autoinc_mtr.get_mtr()); } } if (big_rec != NULL) { - mtr_commit(&mtr); + if (err == DB_SUCCESS && counter != 0) { + autoinc_mtr.log(index->table, counter); + } + + autoinc_mtr.commit(); /* Online table rebuild could read (and ignore) the incomplete record at this point. @@ -2565,7 +2605,11 @@ row_ins_clust_index_entry_low( insert_rec, entry, index, offsets); } - mtr_commit(&mtr); + if (err == DB_SUCCESS && counter != 0) { + autoinc_mtr.log(index->table, counter); + } + + autoinc_mtr.commit(); } } diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 966adbca3e8a..49d57b784dd0 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -5410,6 +5410,31 @@ row_rename_table_for_mysql( trx_free_for_background(trx_bg); } + /* If this table has an autoinc column whose counter is non-zero, + and is renamed from mysql temporary table to normal table, we need + to write back the dynamic metadata of new table, since the table + id has been changed. */ + if (err == DB_SUCCESS && dict_table_has_autoinc_col(table) + && old_is_tmp && !new_is_tmp + && !srv_missing_dd_table_buffer) { + + dict_table_autoinc_lock(table); + ib_uint64_t autoinc = dict_table_autoinc_read(table); + dict_table_autoinc_unlock(table); + + if (autoinc != 0 && table->autoinc_persisted == 0) { + + /* Update autoinc_persisted to autoinc - 1 instead of + autoinc. The autoinc here is already the counter to + be used for next value, if we set them as equal, + when we open table to use the counter, we will + calculate the next counter, then the autoinc could be + set to a bigger one, which is unnecessary. 
*/
+			dict_table_set_and_persist_autoinc(
+				table, autoinc - 1, false);
+		}
+	}
+
 	if (table != NULL) {
 		dict_table_close(table, dict_locked, FALSE);
 	}
diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc
index 9ca416c9a55b..53643455a066 100644
--- a/storage/innobase/row/row0row.cc
+++ b/storage/innobase/row/row0row.cc
@@ -1046,6 +1046,44 @@ row_get_clust_rec(
 	return(clust_rec);
 }
 
+/** Parse the integer data from specified field, which could be
+DATA_INT, DATA_FLOAT or DATA_DOUBLE. We could return 0 if
+1) the value is less than 0 and the type is not unsigned
+or 2) the field is null.
+@param[in]	field	field to read the int value
+@return the integer value read from the field, 0 for negative signed
+int or NULL field */
+ib_uint64_t
+row_parse_int_from_field(
+	const dfield_t*	field)
+{
+	const dtype_t*	dtype = dfield_get_type(field);
+	ulint		len = dfield_get_len(field);
+	const byte*	data = static_cast<const byte*>(dfield_get_data(field));
+	ulint		mtype = dtype_get_mtype(dtype);
+	bool		unsigned_type = dtype->prtype & DATA_UNSIGNED;
+
+	if (dfield_is_null(field)) {
+		return(0);
+	} else {
+		return(row_parse_int(data, len, mtype, unsigned_type));
+	}
+}
+
+/** Read the autoinc counter from the clustered index row.
+@param[in]	row	row to read the autoinc counter
+@param[in]	n	autoinc counter is in the nth field
+@return the autoinc counter read */
+ib_uint64_t
+row_get_autoinc_counter(
+	const dtuple_t*	row,
+	ulint		n)
+{
+	const dfield_t*	field = dtuple_get_nth_field(row, n);
+
+	return(row_parse_int_from_field(field));
+}
+
 /***************************************************************//**
 Searches an index record.
@return whether the record was found or buffered */
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 42c65b61d140..135cc46316ce 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -6141,29 +6141,7 @@ row_search_autoinc_read_column
 
 	data = rec_get_nth_field(rec, offsets, col_no, &len);
 
-	switch (mtype) {
-	case DATA_INT:
-		ut_a(len <= sizeof value);
-		value = mach_read_int_type(data, len, unsigned_type);
-		break;
-
-	case DATA_FLOAT:
-		ut_a(len == sizeof(float));
-		value = (ib_uint64_t) mach_float_read(data);
-		break;
-
-	case DATA_DOUBLE:
-		ut_a(len == sizeof(double));
-		value = (ib_uint64_t) mach_double_read(data);
-		break;
-
-	default:
-		ut_error;
-	}
-
-	if (!unsigned_type && static_cast<ib_int64_t>(value) < 0) {
-		value = 0;
-	}
+	value = row_parse_int(data, len, mtype, unsigned_type);
 
 func_exit:
 	if (UNIV_LIKELY_NULL(heap)) {
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index a3e1833e3efd..5cdd88517aa1 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -108,6 +108,12 @@ check.
 If you make a change in this module make sure that no codepath is
 introduced where a call to log_free_check() is bypassed. */
 
+/** TRUE if we don't have DDTableBuffer in the system tablespace,
+this should be due to we run the server against old data files.
+Please do NOT change this when server is running.
+FIXME: This should be removed away once we can upgrade for new DD. */
+extern bool srv_missing_dd_table_buffer;
+
 /***********************************************************//**
 Checks if an update vector changes some of the first ordering fields of
 an index record. This is only used in foreign key checks and we can assume
@@ -2494,6 +2500,97 @@ row_upd_clust_rec_by_insert(
 	return(err);
 }
 
+/** Get the new autoinc counter from the update vector when there is
+an autoinc field defined in this table.
+@param[in] update update vector for the clustered index +@param[in] autoinc_field_no autoinc field's order in clustered index +@return the new counter if we find it in the update vector, otherwise 0. +We don't mind that the new counter happens to be 0, we just care about +non-zero counters. */ +ib_uint64_t +row_upd_get_new_autoinc_counter( + const upd_t* update, + ulint autoinc_field_no) +{ + ulint n_fields = update->n_fields; + dfield_t* field = NULL; + + for (ulint i = 0; i < n_fields; ++i) { + if (update->fields[i].field_no == autoinc_field_no) { + field = &update->fields[i].new_val; + break; + } + } + + if (field != NULL) { + + return(row_parse_int_from_field(field)); + } + + return(0); +} + +/** If the table has autoinc column and the counter is updated to +some bigger value, we need to log the new autoinc counter. We will +use the given mtr to do logging for performance reasons. +@param[in] node row update node +@param[in,out] mtr mtr */ +static +void +row_upd_check_autoinc_counter( + const upd_node_t* node, + AutoIncLogMtr* mtr) +{ + dict_table_t* table = node->table; + + if (srv_missing_dd_table_buffer) { + return; + } + + if (!dict_table_has_autoinc_col(table) + || dict_table_is_temporary(table) + || node->row == NULL) { + + return; + } + + /* If the node->row hasn't been prepared, there must + no order field change and autoinc field should keep + as is. Otherwise, we need to check if autoinc field + would be changed to a bigger number. */ + ib_uint64_t new_counter; + + new_counter = row_upd_get_new_autoinc_counter( + node->update, table->autoinc_field_no); + + if (new_counter == 0) { + + return; + } + + ib_uint64_t old_counter; + const dict_index_t* index; + + index = dict_table_get_first_index(table); + + /* The autoinc field order in row is not the + same as in clustered index, we need to get + the column number in the table instead. 
*/ + old_counter = row_get_autoinc_counter( + node->row, + dict_index_get_nth_col_no( + index, table->autoinc_field_no)); + + /* We just check if the updated counter is bigger than + the old one, which may result in more redo logs, since + this is safer than checking with the counter in table + object. */ + if (new_counter > old_counter) { + + mtr->log(table, new_counter); + } +} + /***********************************************************//** Updates a clustered index record of a row when the ordering fields do not change. @@ -2518,6 +2615,7 @@ row_upd_clust_rec( btr_cur_t* btr_cur; dberr_t err; const dtuple_t* rebuilt_old_pk = NULL; + AutoIncLogMtr autoinc_mtr(mtr); ut_ad(node); ut_ad(dict_index_is_clust(index)); @@ -2544,19 +2642,21 @@ row_upd_clust_rec( err = btr_cur_update_in_place( flags | BTR_NO_LOCKING_FLAG, btr_cur, offsets, node->update, - node->cmpl_info, thr, thr_get_trx(thr)->id, mtr); + node->cmpl_info, thr, thr_get_trx(thr)->id, + autoinc_mtr.get_mtr()); } else { err = btr_cur_optimistic_update( flags | BTR_NO_LOCKING_FLAG, btr_cur, &offsets, offsets_heap, node->update, - node->cmpl_info, thr, thr_get_trx(thr)->id, mtr); + node->cmpl_info, thr, thr_get_trx(thr)->id, + autoinc_mtr.get_mtr()); } if (err == DB_SUCCESS) { goto success; } - mtr_commit(mtr); + autoinc_mtr.commit(); if (buf_LRU_buf_pool_running_out()) { @@ -2566,8 +2666,8 @@ row_upd_clust_rec( /* We may have to modify the tree structure: do a pessimistic descent down the index tree */ - mtr_start(mtr); - mtr->set_named_space(index->space); + autoinc_mtr.start(); + autoinc_mtr.get_mtr()->set_named_space(index->space); /* Disable REDO logging as lifetime of temp-tables is limited to server or connection lifetime and so REDO information is not needed @@ -2575,7 +2675,7 @@ row_upd_clust_rec( Disable locking as temp-tables are not shared across connection. 
*/ if (dict_table_is_temporary(index->table)) { flags |= BTR_NO_LOCKING_FLAG; - mtr->set_log_mode(MTR_LOG_NO_REDO); + autoinc_mtr.get_mtr()->set_log_mode(MTR_LOG_NO_REDO); if (dict_table_is_intrinsic(index->table)) { flags |= BTR_NO_UNDO_LOG_FLAG; @@ -2588,7 +2688,8 @@ row_upd_clust_rec( the same transaction do not modify the record in the meantime. Therefore we can assert that the restoration of the cursor succeeds. */ - ut_a(btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, mtr)); + ut_a(btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, + autoinc_mtr.get_mtr())); ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur), dict_table_is_comp(index->table))); @@ -2601,13 +2702,14 @@ row_upd_clust_rec( flags | BTR_NO_LOCKING_FLAG | BTR_KEEP_POS_FLAG, btr_cur, &offsets, offsets_heap, heap, &big_rec, node->update, node->cmpl_info, - thr, thr_get_trx(thr)->id, mtr); + thr, thr_get_trx(thr)->id, autoinc_mtr.get_mtr()); if (big_rec) { ut_a(err == DB_SUCCESS); DEBUG_SYNC_C("before_row_upd_extern"); err = btr_store_big_rec_extern_fields( - pcur, node->update, offsets, big_rec, mtr, + pcur, node->update, offsets, big_rec, + autoinc_mtr.get_mtr(), BTR_STORE_UPDATE); DEBUG_SYNC_C("after_row_upd_extern"); } @@ -2628,9 +2730,12 @@ row_upd_clust_rec( index, offsets, rebuilt_old_pk, new_v_row, old_v_row); } + + row_upd_check_autoinc_counter(node, &autoinc_mtr); } - mtr_commit(mtr); + autoinc_mtr.commit(); + func_exit: if (heap) { mem_heap_free(heap); diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index d4e6a7607f0c..72b1ce01fc58 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -1352,6 +1352,15 @@ srv_prepare_to_delete_redo_log_files( ulint count = 0; do { + /* Write back all dirty metadata first. 
To resize the logs
+	files to smaller ones, we will do the checkpoint at last,
+	if we write back there, it could be found that the new log
+	group was not big enough for the new redo logs, thus a
+	cascade checkpoint would be invoked, which is unexpected.
+	There should be no concurrent DML, so no need to require
+	dict_persist::lock. */
+	dict_persist_to_dd_table_buffer();
+
 	/* Clean the buffer pool. */
 	buf_flush_sync_all_buf_pools();
diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc
index 88d9427c880e..797f328e8abf 100644
--- a/storage/innobase/sync/sync0debug.cc
+++ b/storage/innobase/sync/sync0debug.cc
@@ -521,6 +521,7 @@ LatchDebug::LatchDebug()
 	LEVEL_MAP_INSERT(SYNC_INDEX_TREE);
 	LEVEL_MAP_INSERT(SYNC_PERSIST_METADATA_BUFFER);
 	LEVEL_MAP_INSERT(SYNC_PERSIST_DIRTY_TABLES);
+	LEVEL_MAP_INSERT(SYNC_PERSIST_AUTOINC);
 	LEVEL_MAP_INSERT(SYNC_PERSIST_CHECKPOINT);
 	LEVEL_MAP_INSERT(SYNC_IBUF_PESS_INSERT_MUTEX);
 	LEVEL_MAP_INSERT(SYNC_IBUF_HEADER);
@@ -1005,11 +1006,19 @@ LatchDebug::check_order(
 		ut_a(find(latches, SYNC_PERSIST_METADATA_BUFFER) == NULL);
 		break;
 
+	case SYNC_PERSIST_AUTOINC:
+
+		basic_check(latches, level, SYNC_LOG);
+		ut_a(find(latches, SYNC_PERSIST_METADATA_BUFFER) == NULL);
+		ut_a(find(latches, SYNC_PERSIST_DIRTY_TABLES) == NULL);
+		break;
+
 	case SYNC_PERSIST_CHECKPOINT:
 
 		basic_check(latches, level, SYNC_LOG);
 		ut_a(find(latches, SYNC_PERSIST_METADATA_BUFFER) == NULL);
 		ut_a(find(latches, SYNC_PERSIST_DIRTY_TABLES) == NULL);
+		ut_a(find(latches, SYNC_PERSIST_AUTOINC) == NULL);
 		break;
 
 	case SYNC_DICT:
@@ -1392,6 +1400,10 @@ sync_latch_meta_init()
 		  SYNC_PERSIST_DIRTY_TABLES,
 		  dict_persist_dirty_tables_mutex_key);
 
+	LATCH_ADD(PERSIST_AUTOINC,
+		  SYNC_PERSIST_AUTOINC,
+		  autoinc_persisted_mutex_key);
+
 	LATCH_ADD(DICT_PERSIST_CHECKPOINT,
 		  SYNC_PERSIST_CHECKPOINT,
 		  dict_persist_checkpoint_key);
diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc
index a1a5224af748..29f065a94bc3 100644
---
a/storage/innobase/sync/sync0sync.cc +++ b/storage/innobase/sync/sync0sync.cc @@ -37,6 +37,7 @@ Created 9/5/1995 Heikki Tuuri #ifdef UNIV_PFS_MUTEX /* Key to register autoinc_mutex with performance schema */ mysql_pfs_key_t autoinc_mutex_key; +mysql_pfs_key_t autoinc_persisted_mutex_key; mysql_pfs_key_t buffer_block_mutex_key; mysql_pfs_key_t buf_pool_mutex_key; mysql_pfs_key_t buf_pool_zip_mutex_key;