Skip to content
This repository was archived by the owner on Jun 12, 2020. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 36 additions & 0 deletions mysql-test/suite/tokudb/r/data_file_length.result
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
CREATE TABLE t1 (c1 INT, c2 VARCHAR(10)) ENGINE=TOKUDB;
INSERT INTO t1 values (1, '12345678');
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
FLUSH TABLES;
SELECT DATA_LENGTH from INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1';
DATA_LENGTH
262144
262144
CREATE TABLE t2 (c1 INT PRIMARY KEY, c2 VARCHAR(1400)) ENGINE=TOKUDB;
alter table t2 add clustering index clstr_key(c1,c2);
INSERT INTO t2 values (1, repeat('1', 1300));
INSERT INTO t2 values (2, repeat('2', 1300));
INSERT INTO t2 values (3, repeat('3', 1300));
INSERT INTO t2 values (4, repeat('4', 1300));
FLUSH TABLES;
SELECT DATA_LENGTH,INDEX_LENGTH from INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t2';
DATA_LENGTH INDEX_LENGTH
32768 40960
32768
40960
DROP TABLE t1;
DROP TABLE t2;
42 changes: 42 additions & 0 deletions mysql-test/suite/tokudb/t/data_file_length.test
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
--source include/have_tokudb.inc

# data file length test
# Purpose: verify that INFORMATION_SCHEMA.TABLES.DATA_LENGTH for a TokuDB
# table reports the on-disk file size (bt_fsize) by comparing it against the
# actual size of the fractal-tree data file in the datadir.
CREATE TABLE t1 (c1 INT, c2 VARCHAR(10)) ENGINE=TOKUDB;
INSERT INTO t1 values (1, '12345678');
# Double the row count 15 times -> 2^15 = 32768 rows, enough volume to force
# the data file well past its initial allocation.
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
# Flush so the table's stats (and its file) reflect the inserts before we
# compare the reported length against the file on disk.
FLUSH TABLES;

SELECT DATA_LENGTH from INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1';
--let $_mysqld_datadir= `SELECT @@datadir`
# Expected to print the same byte count as DATA_LENGTH above.
# NOTE(review): `du -b` is a GNU coreutils extension (not available on
# BSD/macOS du) — confirm the test suite only runs on platforms with GNU du.
--exec du -b $_mysqld_datadir/_test_t1_main* |cut -f1

# cluster index data length test
# A clustering index stores full row copies, so its file size is reported in
# INDEX_LENGTH; check both DATA_LENGTH (main file) and INDEX_LENGTH (key file)
# against the corresponding files on disk.
CREATE TABLE t2 (c1 INT PRIMARY KEY, c2 VARCHAR(1400)) ENGINE=TOKUDB;
alter table t2 add clustering index clstr_key(c1,c2);
INSERT INTO t2 values (1, repeat('1', 1300));
INSERT INTO t2 values (2, repeat('2', 1300));
INSERT INTO t2 values (3, repeat('3', 1300));
INSERT INTO t2 values (4, repeat('4', 1300));
FLUSH TABLES;

SELECT DATA_LENGTH,INDEX_LENGTH from INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t2';
# Main (primary key) file size should match DATA_LENGTH; the clustering key
# file size should match INDEX_LENGTH.
--exec du -b $_mysqld_datadir/_test_t2_main* |cut -f1
--exec du -b $_mysqld_datadir/_test_t2_key* |cut -f1

# cleanup
DROP TABLE t1;
DROP TABLE t2;
19 changes: 2 additions & 17 deletions storage/tokudb/ha_tokudb.cc
Original file line number Diff line number Diff line change
Expand Up @@ -5822,22 +5822,7 @@ int ha_tokudb::info(uint flag) {
stats.create_time = dict_stats.bt_create_time_sec;
stats.update_time = dict_stats.bt_modify_time_sec;
stats.check_time = dict_stats.bt_verify_time_sec;
stats.data_file_length = dict_stats.bt_dsize;
if (hidden_primary_key) {
//
// in this case, we have a hidden primary key, do not
// want to report space taken up by the hidden primary key to the user
//
uint64_t hpk_space = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH*dict_stats.bt_ndata;
stats.data_file_length = (hpk_space > stats.data_file_length) ? 0 : stats.data_file_length - hpk_space;
}
else {
//
// one infinity byte per key needs to be subtracted
//
uint64_t inf_byte_space = dict_stats.bt_ndata;
stats.data_file_length = (inf_byte_space > stats.data_file_length) ? 0 : stats.data_file_length - inf_byte_space;
}
stats.data_file_length = dict_stats.bt_fsize;

stats.mean_rec_length = stats.records ? (ulong)(stats.data_file_length/stats.records) : 0;
stats.index_file_length = 0;
Expand All @@ -5864,7 +5849,7 @@ int ha_tokudb::info(uint flag) {
&dict_stats
);
if (error) { goto cleanup; }
stats.index_file_length += dict_stats.bt_dsize;
stats.index_file_length += dict_stats.bt_fsize;

error = share->file->get_fragmentation(
share->file,
Expand Down