KickJava   Java API By Example, From Geeks To Geeks.

Java > Open Source Codes > org > apache > derbyTesting > functionTests > tests > store > OnlineCompressTest


1 /*
2
3    Derby - Class org.apache.derbyTesting.functionTests.tests.store.OnlineCompressTest
4
5    Licensed to the Apache Software Foundation (ASF) under one or more
6    contributor license agreements. See the NOTICE file distributed with
7    this work for additional information regarding copyright ownership.
8    The ASF licenses this file to You under the Apache License, Version 2.0
9    (the "License"); you may not use this file except in compliance with
10    the License. You may obtain a copy of the License at
11
12       http://www.apache.org/licenses/LICENSE-2.0
13
14    Unless required by applicable law or agreed to in writing, software
15    distributed under the License is distributed on an "AS IS" BASIS,
16    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17    See the License for the specific language governing permissions and
18    limitations under the License.
19
20  */

21
22 package org.apache.derbyTesting.functionTests.tests.store;
23
24 import org.apache.derby.iapi.db.OnlineCompress;
25
26 import org.apache.derby.iapi.services.sanity.SanityManager;
27
28 import java.sql.CallableStatement JavaDoc;
29 import java.sql.Connection JavaDoc;
30 import java.sql.PreparedStatement JavaDoc;
31 import java.sql.ResultSet JavaDoc;
32 import java.sql.SQLException JavaDoc;
33 import java.sql.Statement JavaDoc;
34
35 import org.apache.derby.tools.ij;
36
37
38 public class OnlineCompressTest extends BaseTest
39 {
    // When true, the test methods emit extra progress/trace output.
    boolean verbose = false;

    public OnlineCompressTest()
    {
        // No per-instance setup; all state is created inside each test method.
    }
45
46     /**
47      * call SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE() system procedure.
48      * <p>
49      * Utility test function to call the system procedure.
50      *
51      **/

52     protected void callCompress(
53     Connection JavaDoc conn,
54     String JavaDoc schemaName,
55     String JavaDoc tableName,
56     boolean purgeRows,
57     boolean defragmentRows,
58     boolean truncateEnd,
59     boolean commit_operation)
60         throws SQLException JavaDoc
61     {
62         CallableStatement JavaDoc cstmt =
63             conn.prepareCall(
64                 "call SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE(?, ?, ?, ?, ?)");
65         cstmt.setString(1, schemaName);
66         cstmt.setString(2, tableName);
67         cstmt.setInt (3, purgeRows ? 1 : 0);
68         cstmt.setInt (4, defragmentRows ? 1 : 0);
69         cstmt.setInt (5, truncateEnd ? 1 : 0);
70
71         cstmt.execute();
72
73         if (commit_operation)
74             conn.commit();
75     }
76
77     /**
78      * Create and load a table.
79      * <p>
80      * If create_table is set creates a test data table with indexes.
81      * Loads num_rows into the table. This table defaults to 32k page size.
82      * This schema fits 25 rows per page
83      * <p>
84      *
85      *
86      * @param conn Connection to use for sql execution.
87      * @param create_table If true, create new table - otherwise load into
88      * existing table.
89      * @param tblname table to use.
90      * @param num_rows number of rows to add to the table.
91      *
92      * @exception StandardException Standard exception policy.
93      **/

94     protected void createAndLoadTable(
95     Connection JavaDoc conn,
96     boolean create_table,
97     String JavaDoc tblname,
98     int num_rows,
99     int start_value)
100         throws SQLException JavaDoc
101     {
102         if (create_table)
103         {
104             Statement JavaDoc s = conn.createStatement();
105
106             s.execute(
107                 "create table " + tblname +
108                     "(keycol int, indcol1 int, indcol2 int, indcol3 int, data1 varchar(2000), data2 varchar(2000))");
109             s.close();
110         }
111
112         PreparedStatement JavaDoc insert_stmt =
113             conn.prepareStatement(
114                 "insert into " + tblname + " values(?, ?, ?, ?, ?, ?)");
115
116         char[] data1_data = new char[500];
117         char[] data2_data = new char[500];
118
119         for (int i = 0; i < data1_data.length; i++)
120         {
121             data1_data[i] = 'a';
122             data2_data[i] = 'b';
123         }
124
125         String JavaDoc data1_str = new String JavaDoc(data1_data);
126         String JavaDoc data2_str = new String JavaDoc(data2_data);
127
128         int row_count = 0;
129         try
130         {
131             for (int i = start_value; row_count < num_rows; row_count++, i++)
132             {
133                 insert_stmt.setInt(1, i); // keycol
134
insert_stmt.setInt(2, i * 10); // indcol1
135
insert_stmt.setInt(3, i * 100); // indcol2
136
insert_stmt.setInt(4, -i); // indcol3
137
insert_stmt.setString(5, data1_str); // data1_data
138
insert_stmt.setString(6, data2_str); // data2_data
139

140                 insert_stmt.execute();
141             }
142         }
143         catch (SQLException JavaDoc sqle)
144         {
145             System.out.println(
146                 "Exception while trying to insert row number: " + row_count);
147             throw sqle;
148         }
149
150         if (create_table)
151         {
152             Statement JavaDoc s = conn.createStatement();
153
154             s.execute(
155                 "create index " + tblname + "_idx_keycol on " + tblname +
156                     "(keycol)");
157             s.execute(
158                 "create index " + tblname + "_idx_indcol1 on " + tblname +
159                     "(indcol1)");
160             s.execute(
161                 "create index " + tblname + "_idx_indcol2 on " + tblname +
162                     "(indcol2)");
163             s.execute(
164                 "create unique index " + tblname + "_idx_indcol3 on " + tblname +
165                     "(indcol3)");
166             s.close();
167         }
168
169         conn.commit();
170     }
171
172     /**
173      * Create and load a table with long columns and long rows.
174      * <p>
175      * If create_table is set creates a test data table with indexes.
176      * Loads num_rows into the table. This table defaults to 32k page size.
177      * <p>
178      * schema of table:
179      * keycol int,
180      * longcol1 clob(200k),
181      * longrow1 varchar(10000),
182      * longrow2 varchar(10000),
183      * longrow3 varchar(10000),
184      * longrow4 varchar(10000),
185      * indcol1 int,
186      * indcol2 int,
187      * indcol3 int,
188      * data1 varchar(2000),
189      * data2 varchar(2000)
190      * longrow5 varchar(10000),
191      * longrow6 varchar(10000),
192      * longrow7 varchar(10000),
193      * longrow8 varchar(10000),
194      * longcol2 clob(200k),
195      *
196      *
197      * @param conn Connection to use for sql execution.
198      * @param create_table If true, create new table - otherwise load into
199      * existing table.
200      * @param tblname table to use.
201      * @param num_rows number of rows to add to the table.
202      *
203      * @exception StandardException Standard exception policy.
204      **/

205     private void createAndLoadLongTable(
206     Connection JavaDoc conn,
207     boolean create_table,
208     String JavaDoc tblname,
209     int num_rows)
210         throws SQLException JavaDoc
211     {
212         if (create_table)
213         {
214             Statement JavaDoc s = conn.createStatement();
215
216             s.execute(
217                 "create table " + tblname +
218                 " (keycol int, longcol1 clob(200k), longrow1 varchar(10000), longrow2 varchar(10000), longrow3 varchar(10000), longrow4 varchar(10000), indcol1 int, indcol2 int, indcol3 int, data1 varchar(2000), data2 varchar(2000), longrow5 varchar(10000), longrow6 varchar(10000), longrow7 varchar(10000), longrow8 varchar(10000), longcol2 clob(200k))");
219             s.close();
220         }
221
222         PreparedStatement JavaDoc insert_stmt =
223             conn.prepareStatement(
224                 "insert into " + tblname + " values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
225
226         char[] data1_data = new char[500];
227         char[] data2_data = new char[500];
228
229         for (int i = 0; i < data1_data.length; i++)
230         {
231             data1_data[i] = 'a';
232             data2_data[i] = 'b';
233         }
234         String JavaDoc data1_str = new String JavaDoc(data1_data);
235         String JavaDoc data2_str = new String JavaDoc(data2_data);
236
237         // some data to force row to be bigger than a page, ie. long row
238
char[] data3_data = new char[10000];
239         char[] data4_data = new char[10000];
240
241         for (int i = 0; i < data3_data.length; i++)
242         {
243             data3_data[i] = 'c';
244             data4_data[i] = 'd';
245         }
246         String JavaDoc data3_str = new String JavaDoc(data3_data);
247         String JavaDoc data4_str = new String JavaDoc(data4_data);
248
249         // some data for the long columns
250
char[] data5_data = new char[200000];
251         char[] data6_data = new char[200000];
252
253         for (int i = 0; i < data5_data.length; i++)
254         {
255             data5_data[i] = 'e';
256             data6_data[i] = 'f';
257         }
258
259         String JavaDoc data5_str = new String JavaDoc(data5_data);
260         String JavaDoc data6_str = new String JavaDoc(data6_data);
261
262         for (int i = 0; i < num_rows; i++)
263         {
264             insert_stmt.setInt(1, i); // keycol
265
insert_stmt.setString(2, data5_str); // longcol1
266
insert_stmt.setString(3, data3_str); // longrow1
267
insert_stmt.setString(4, data3_str); // longrow2
268
insert_stmt.setString(5, data3_str); // longrow3
269
insert_stmt.setString(6, data3_str); // longrow4
270
insert_stmt.setInt(7, i * 10); // indcol1
271
insert_stmt.setInt(8, i * 100); // indcol2
272
insert_stmt.setInt(9, -i); // indcol3
273
insert_stmt.setString(10, data1_str); // data1_data
274
insert_stmt.setString(11, data2_str); // data2_data
275
insert_stmt.setString(12, data4_str); // longrow5
276
insert_stmt.setString(13, data4_str); // longrow6
277
insert_stmt.setString(14, data4_str); // longrow7
278
insert_stmt.setString(15, data4_str); // longrow8
279
insert_stmt.setString(16, data5_str); // longcol2
280

281             insert_stmt.execute();
282         }
283
284         if (create_table)
285         {
286             Statement JavaDoc s = conn.createStatement();
287
288             s.execute(
289                 "create index " + tblname + "_idx_keycol on " + tblname +
290                     "(keycol)");
291             s.execute(
292                 "create index " + tblname + "_idx_indcol1 on " + tblname +
293                     "(indcol1)");
294             s.execute(
295                 "create index " + tblname + "_idx_indcol2 on " + tblname +
296                     "(indcol2)");
297             s.execute(
298                 "create unique index " + tblname + "_idx_indcol3 on " + tblname +
299                     "(indcol3)");
300             s.close();
301         }
302
303         conn.commit();
304     }
305
306     private void log_wrong_count(
307     String JavaDoc error_msg,
308     String JavaDoc table_name,
309     int num_rows,
310     int expected_val,
311     int actual_val,
312     int[] before_info,
313     int[] after_info)
314     {
315         System.out.println(error_msg);
316         System.out.println("ERROR: for " + num_rows + " row test. Expected " + expected_val + ", but got " + actual_val );
317         System.out.println("before_info:");
318         System.out.println(
319         " IS_INDEX =" + before_info[SPACE_INFO_IS_INDEX] +
320         "\n NUM_ALLOC =" + before_info[SPACE_INFO_NUM_ALLOC] +
321         "\n NUM_FREE =" + before_info[SPACE_INFO_NUM_FREE] +
322         "\n NUM_UNFILLED =" + before_info[SPACE_INFO_NUM_UNFILLED] +
323         "\n PAGE_SIZE =" + before_info[SPACE_INFO_PAGE_SIZE] +
324         "\n ESTIMSPACESAVING =" + before_info[SPACE_INFO_ESTIMSPACESAVING]);
325         System.out.println("after_info:");
326         System.out.println(
327         " IS_INDEX =" + after_info[SPACE_INFO_IS_INDEX] +
328         "\n NUM_ALLOC =" + after_info[SPACE_INFO_NUM_ALLOC] +
329         "\n NUM_FREE =" + after_info[SPACE_INFO_NUM_FREE] +
330         "\n NUM_UNFILLED =" + after_info[SPACE_INFO_NUM_UNFILLED] +
331         "\n PAGE_SIZE =" + after_info[SPACE_INFO_PAGE_SIZE] +
332         "\n ESTIMSPACESAVING =" + after_info[SPACE_INFO_ESTIMSPACESAVING]);
333     }
334
335
    /**
     * Load a table, verify compress is a no-op, then delete every row and
     * verify compress shrinks the table back to a single user page.
     * <p>
     * Runs consistency checks after each compress. Space info and compress
     * calls hard-code the "APP" schema while the consistency check uses
     * schemaName — NOTE(review): presumably callers always pass "APP";
     * confirm against callers.
     *
     * @param conn         Connection to use for sql execution.
     * @param create_table If true, create new table - otherwise load into
     *                     existing table.
     * @param long_table   if true, use the long-row/long-column table shape.
     * @param schemaName   schema used for the consistency check.
     * @param table_name   table to use.
     * @param num_rows     number of rows to load.
     *
     * @exception SQLException if any database operation fails.
     **/
    private void deleteAllRows(
    Connection   conn,
    boolean      create_table,
    boolean      long_table,
    String       schemaName,
    String       table_name,
    int          num_rows)
        throws SQLException
    {
        testProgress(
            "begin deleteAllRows," + num_rows + " row test, create = " +
                create_table + ".");

        if (long_table)
            createAndLoadLongTable(conn, create_table, table_name, num_rows);
        else
            createAndLoadTable(conn, create_table, table_name, num_rows, 0);

        if (verbose)
            testProgress("Calling compress.");

        // compress with no deletes should not affect size
        int[] ret_before = getSpaceInfo(conn, "APP", table_name, true);
        callCompress(conn, "APP", table_name, true, true, true, true);
        int[] ret_after  = getSpaceInfo(conn, "APP", table_name, true);

        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
        {
            log_wrong_count(
                "Expected no alloc page change.",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_ALLOC],
                ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        if (verbose)
            testProgress("calling consistency checker.");

        if (!checkConsistency(conn, schemaName, table_name))
        {
            logError("conistency check failed.");
        }

        testProgress("no delete case complete.");

        // delete all the rows.
        ret_before = getSpaceInfo(conn, "APP", table_name, true);
        executeQuery(conn, "delete from " + table_name, true);

        if (verbose)
            testProgress("deleted all rows, now calling compress.");

        callCompress(conn, "APP", table_name, true, true, true, true);
        ret_after = getSpaceInfo(conn, "APP", table_name, true);

        // An empty table has 2 pages, one allocation page and the 1st page
        // which will have a system row in it. The space vti only reports
        // a count of the user pages so the count is 1.
        if (ret_after[SPACE_INFO_NUM_ALLOC] != 1)
        {
            log_wrong_count(
                "Expected all pages to be truncated.",
                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        if (verbose)
            testProgress("calling consistency checker.");

        if (!checkConsistency(conn, schemaName, table_name))
        {
            logError("conistency check failed.");
        }

        testProgress("delete all rows case succeeded.");

        conn.commit();

        testProgress("end deleteAllRows," + num_rows + " row test.");
    }
418
    /**
     * Stripped-down variant of deleteAllRows(): same load / compress /
     * delete-all / compress sequence, but without the consistency checks,
     * so it can be run many times cheaply (see test2()).
     *
     * @param conn         Connection to use for sql execution.
     * @param create_table If true, create new table - otherwise load into
     *                     existing table.
     * @param long_table   if true, use the long-row/long-column table shape.
     * @param schemaName   schema name (unused here; kept for signature
     *                     parity with deleteAllRows()).
     * @param table_name   table to use.
     * @param num_rows     number of rows to load.
     *
     * @exception SQLException if any database operation fails.
     **/
    private void simpleDeleteAllRows(
    Connection   conn,
    boolean      create_table,
    boolean      long_table,
    String       schemaName,
    String       table_name,
    int          num_rows)
        throws SQLException
    {
        testProgress(
            "begin simpleDeleteAllRows," + num_rows + " row test, create = " +
                create_table + ".");

        if (long_table)
            createAndLoadLongTable(conn, create_table, table_name, num_rows);
        else
            createAndLoadTable(conn, create_table, table_name, num_rows, 0);

        if (verbose)
            testProgress("Calling compress.");

        // compress with no deletes should not affect size
        int[] ret_before = getSpaceInfo(conn, "APP", table_name, true);
        callCompress(conn, "APP", table_name, true, true, true, true);
        int[] ret_after  = getSpaceInfo(conn, "APP", table_name, true);

        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
        {
            log_wrong_count(
                "Expected no alloc page change.",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_ALLOC],
                ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        testProgress("no delete case complete.");

        // delete all the rows.
        ret_before = getSpaceInfo(conn, "APP", table_name, true);
        executeQuery(conn, "delete from " + table_name, true);

        if (verbose)
            testProgress("deleted all rows, now calling compress.");

        callCompress(conn, "APP", table_name, true, true, true, true);
        ret_after = getSpaceInfo(conn, "APP", table_name, true);

        // An empty table has 2 pages, one allocation page and the 1st page
        // which will have a system row in it. The space vti only reports
        // a count of the user pages so the count is 1.
        if (ret_after[SPACE_INFO_NUM_ALLOC] != 1)
        {
            log_wrong_count(
                "Expected all pages to be truncated.",
                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        testProgress("delete all rows case succeeded.");

        conn.commit();

        testProgress("end simple deleteAllRows," + num_rows + " row test.");
    }
485
    /**
     * Check/exercise purge pass phase.
     * <p>
     * Assumes that either test creates the table, or called on an empty
     * table with no committed deleted rows or free pages in the middle of
     * the table in it.
     * <p>
     * The sequence is intentionally order-dependent: rows are deleted
     * WITHOUT commit so the purge pass must not reclaim them, then a
     * defragment pass is expected to fail with a lock timeout (which
     * aborts the transaction), then the whole cycle is redone committed
     * and each phase is run individually against the emptied table.
     *
     * @param conn         Connection to use for sql execution.
     * @param create_table If true, create new table - otherwise load into
     *                     existing table.
     * @param long_table   if true, use the long-row/long-column table shape.
     * @param schemaName   schema used for the consistency check.
     * @param table_name   table to use.
     * @param num_rows     number of rows to load.
     *
     * @exception SQLException if any database operation fails.
     **/
    private void checkPurgePhase(
    Connection   conn,
    boolean      create_table,
    boolean      long_table,
    String       schemaName,
    String       table_name,
    int          num_rows)
        throws SQLException
    {
        testProgress(
            "begin checkPurgePhase" + num_rows + " row test, create = " +
                create_table + ".");

        if (long_table)
            createAndLoadLongTable(conn, create_table, table_name, num_rows);
        else
            createAndLoadTable(conn, create_table, table_name, num_rows, 0);

        // dump_table(conn, schemaName, table_name, false);

        // delete all the rows, but don't commit the delete
        int[] ret_before = getSpaceInfo(conn, "APP", table_name, false);
        executeQuery(conn, "delete from " + table_name, false);

        // dump_table(conn, schemaName, table_name, false);

        // Purge pass on non-committed deleted rows should do nothing.

        // System.out.println("lock info before compress call:\n " + get_lock_info(conn, true));

        // Calling compress with just the "purge" pass option, no commit called.
        callCompress(conn, "APP", table_name, true, false, false, false);

        int[] ret_after = getSpaceInfo(conn, "APP", table_name, false);

        // expect no change in the number of allocated pages!
        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
        {
            log_wrong_count(
                "Expected no alloc page change(1).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_ALLOC],
                ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        // expect no change in the number of free pages, if there are there
        // is a problem with purge locking recognizing committed deleted rows.
        if (ret_after[SPACE_INFO_NUM_FREE] != ret_before[SPACE_INFO_NUM_FREE])
        {
            log_wrong_count(
                "Expected no free page change(1).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_FREE],
                ret_after[SPACE_INFO_NUM_FREE],
                ret_before, ret_after);
        }

        // Test that it is ok to call multiple purge passes in single xact.

        // Calling compress with just the "purge" pass option, no commit called.
        callCompress(conn, "APP", table_name, true, false, false, false);
        ret_after = getSpaceInfo(conn, "APP", table_name, false);

        // expect no change in the number of allocated pages!
        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
        {
            log_wrong_count(
                "Expected no alloc page change(2).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_ALLOC],
                ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        // expect no change in the number of free pages, if there are there
        // is a problem with purge locking recognizing committed deleted rows.
        if (ret_after[SPACE_INFO_NUM_FREE] != ret_before[SPACE_INFO_NUM_FREE])
        {
            log_wrong_count(
                "Expected no free page change(2).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_FREE],
                ret_after[SPACE_INFO_NUM_FREE],
                ret_before, ret_after);
        }

        // since table was just loaded a defragment pass also should
        // not find anything to do.

        // Calling compress with just the "defragment" option, no commit called.

        // currently the defragment option requires a table level lock in
        // the nested user transaction, which will conflict and cause a
        // lock timeout.
        try
        {
            callCompress(conn, "APP", table_name, false, true, false, false);

            // Reaching this line means the expected lock conflict did not
            // happen - record it as a test error.
            logError("Defragment pass did not get a lock timeout.");
        }
        catch (SQLException sqle)
        {
            // ignore exception - the lock timeout is the expected outcome
            // here; it also aborts the enclosing transaction (see below).
        }

        ret_after = getSpaceInfo(conn, "APP", table_name, false);

        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
        {
            log_wrong_count(
                "Expected no alloc page change(3).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_ALLOC],
                ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }
        if (ret_after[SPACE_INFO_NUM_FREE] != ret_before[SPACE_INFO_NUM_FREE])
        {
            log_wrong_count(
                "Expected no free page change(3).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_FREE],
                ret_after[SPACE_INFO_NUM_FREE],
                ret_before, ret_after);
        }

        // make sure table is back to all deleted row state. lock timeout
        // will abort transaction.

        // delete all rows and commit.
        executeQuery(conn, "delete from " + table_name, true);

        // compress all space and commit.
        callCompress(conn, "APP", table_name, true, true, true, true);

        // add back all rows and commit.
        if (long_table)
            createAndLoadLongTable(conn, create_table, table_name, num_rows);
        else
            createAndLoadTable(conn, create_table, table_name, num_rows, 0);
        conn.commit();

        // delete all rows, and NO commit.
        executeQuery(conn, "delete from " + table_name, false);

        // Calling compress with just the truncate option, may change allocated
        // and free page count as they system may have preallocated pages to
        // the end of the file as part of the load. The file can't shrink
        // any more than the free page count before the compress.

        // running the truncate pass only. If it compresses anything it is
        // just the preallocated pages at end of the file.

        // currently the defragment option requires a table level lock in
        // the nested user transaction, which will conflict and cause a
        // lock timeout.

        ret_before = getSpaceInfo(conn, "APP", table_name, false);
        callCompress(conn, "APP", table_name, false, false, true, false);
        ret_after = getSpaceInfo(conn, "APP", table_name, false);

        // expect no change in the number of allocated pages!
        if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
        {
            log_wrong_count(
                "Expected no alloc page change(4).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_ALLOC],
                ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        // The only space that truncate only pass can free are free pages
        // located at end of file, so after free space can be anywhere from
        // what it was before to 0 pages.
        if (ret_after[SPACE_INFO_NUM_FREE] > ret_before[SPACE_INFO_NUM_FREE])
        {
            log_wrong_count(
                "Expected no increase in free pages(4).",
                table_name, num_rows,
                ret_before[SPACE_INFO_NUM_FREE],
                ret_after[SPACE_INFO_NUM_FREE],
                ret_before, ret_after);
        }

        // now commit the deletes, run all phases and make sure empty table
        // results.
        conn.commit();

        // check the table. Note that this will accumulate locks and
        // will commit the transaction.
        if (!checkConsistency(conn, schemaName, table_name))
        {
            logError("conistency check failed.");
        }

        // test running each phase in order.
        callCompress(conn, "APP", table_name, true, false, false, false);
        callCompress(conn, "APP", table_name, false, true, false, false);
        callCompress(conn, "APP", table_name, false, false, true, false);
        ret_after = getSpaceInfo(conn, "APP", table_name, false);

        // An empty table has 2 pages, one allocation page and the 1st page
        // which will have a system row in it. The space vti only reports
        // a count of the user pages so the count is 1.
        if (ret_after[SPACE_INFO_NUM_ALLOC] != 1)
        {
            log_wrong_count(
                "Expected all pages to be truncated.",
                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }
        if (ret_after[SPACE_INFO_NUM_FREE] != 0)
        {
            log_wrong_count(
                "Expected no free page after all pages truncated.",
                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
                ret_before, ret_after);
        }

        if (verbose)
            testProgress("calling consistency checker.");

        if (!checkConsistency(conn, schemaName, table_name))
        {
            logError("conistency check failed.");
        }

        testProgress("end checkPurgePhase" + num_rows + " row test.");
    }
732
733     /**
734      * Test 1 - various # page tests, regular row/columns
735      * <p>
736      * perform a number of insert/delete/compress operations on a variety
737      * of sized tables, use space allocation information to verify that
738      * compression is happening and use consistency checker to verify that
739      * tables and indexes are all valid following the operations.
740      * <p>
741      * loop through testing interesting row count cases. The cases are
742      * 0 rows - basic edge case, 2 page table: 1 alloc, 1 user page
743      * 1 row - another edge case, 2 page table: 1 alloc, 1 user page
744      * 50 rows - 3 page table case: 1 alloc, 1 user page, 1 user page freed
745      * 4000 rows - reasonable number of pages to test out, still 1 alloc page
746      *
747      * note that row numbers greater than 4000 may lead to lock escalation
748      * issues, if queries like "delete from x" are used to delete all the
749      * rows.
750      *
751      * <p>
752      *
753      **/

754     private void test1(
755     Connection JavaDoc conn,
756     String JavaDoc test_name,
757     String JavaDoc table_name)
758         throws SQLException JavaDoc
759     {
760         beginTest(conn, test_name);
761
762         int[] test_cases = {0, 1, 50, 4000};
763
764         for (int i = 0; i < test_cases.length; i++)
765         {
766             // first create new table and run the tests.
767
deleteAllRows(
768                 conn, true, false, "APP", table_name, test_cases[i]);
769
770             // now rerun tests on existing table, which had all rows deleted
771
// and truncated.
772
deleteAllRows(
773                 conn, false, false, "APP", table_name, test_cases[i]);
774
775             checkPurgePhase(
776                 conn, false, false, "APP", table_name, test_cases[i]);
777
778             executeQuery(conn, "drop table " + table_name, true);
779         }
780
781         endTest(conn, test_name);
782     }
783
784     /**
785      * Test 2 - check repeated delete tests.
786      * <p>
787      * There was a timing error where test1 would usually pass, but
788      * repeated execution of this test found a timing problem with
789      * allocation using an "unallocated" page and getting an I/O error.
790      *
791      **/

792     private void test2(
793     Connection JavaDoc conn,
794     String JavaDoc test_name,
795     String JavaDoc table_name)
796         throws SQLException JavaDoc
797     {
798         beginTest(conn, test_name);
799
800         int[] test_cases = {4000};
801
802         for (int i = 0; i < test_cases.length; i++)
803         {
804             // first create new table and run the tests.
805
simpleDeleteAllRows(
806                 conn, true, false, "APP", table_name, test_cases[i]);
807
808             for (int j = 0; j < 100; j++)
809             {
810
811                 // now rerun tests on existing table, which had all rows deleted
812
// and truncated.
813
deleteAllRows(
814                     conn, false, false, "APP", table_name, test_cases[i]);
815             }
816
817             executeQuery(conn, "drop table " + table_name, true);
818         }
819
820         endTest(conn, test_name);
821     }
822
823
824
825     /**
826      * Test 3 - various # page tests, long row and long columns
827      * <p>
828      * perform a number of insert/delete/compress operations on a variety
829      * of sized tables, use space allocation information to verify that
830      * compression is happening and use consistency checker to verify that
831      * tables and indexes are all valid following the operations.
832      * <p>
833      * loop through testing interesting row count cases. The cases are
834      * 0 rows - basic edge case
835      * 1 row - another edge case
836      * 100 rows - ~50 meg table
837      * 4000 rows - ~2 gig table
838      *
839      * note that row numbers greater than 4000 may lead to lock escalation
840      * issues, if queries like "delete from x" are used to delete all the
841      * rows.
842      *
843      * <p>
844      *
845      **/

846     private void test3(
847     Connection JavaDoc conn,
848     String JavaDoc test_name,
849     String JavaDoc table_name)
850         throws SQLException JavaDoc
851     {
852         beginTest(conn, test_name);
853
854         // note that 500 rows took 30 minutes on a ~1.5 ghz laptop
855
int[] test_cases = {1, 2, 50};
856
857         for (int i = 0; i < test_cases.length; i++)
858         {
859             // first create new table and run the tests.
860
deleteAllRows(
861                 conn, true, true, "APP", table_name, test_cases[i]);
862
863             // now rerun tests on existing table, which had all rows deleted
864
// and truncated.
865
deleteAllRows(
866                 conn, false, true, "APP", table_name, test_cases[i]);
867
868             checkPurgePhase(
869                 conn, false, true, "APP", table_name, test_cases[i]);
870
871             executeQuery(conn, "drop table " + table_name, true);
872         }
873
874         endTest(conn, test_name);
875     }
876
877     /**
878      * Test 4 - check repeated delete tests.
879      * <p>
880      * There was a timing error where test1 would usually pass, but
881      * repeated execution of this test found a timing problem with
882      * allocation using an "unallocated" page and getting an I/O error.
883      *
884      **/

885     private void test4(
886     Connection JavaDoc conn,
887     String JavaDoc test_name,
888     String JavaDoc table_name)
889         throws SQLException JavaDoc
890     {
891         beginTest(conn, test_name);
892
893         int[] test_cases = {4000};
894
895         for (int i = 0; i < test_cases.length; i++)
896         {
897
898             for (int j = 0; j < 100; j++)
899             {
900                 // first create new table and run the tests.
901
simpleDeleteAllRows(
902                     conn, true, false, "APP", table_name, test_cases[i]);
903
904                 // now rerun tests on existing table, which had all rows deleted
905
// and truncated.
906
deleteAllRows(
907                     conn, false, false, "APP", table_name, test_cases[i]);
908
909                 executeQuery(conn, "drop table " + table_name, true);
910             }
911
912         }
913
914         endTest(conn, test_name);
915     }
916
917     /**
918      * Create and load table for test5.
919      * <p>
920      * schema of table:
921      * keycol int,
922      * onehalf int,
923      * onethird int,
924      * c varchar(300)
925      *
926      * @param conn Connection to use for sql execution.
927      * @param create_table If true, create new table - otherwise load into
928      * existing table.
929      * @param tblname table to use.
930      * @param num_rows number of rows to add to the table.
931      *
932      * @exception StandardException Standard exception policy.
933      **/

934     private void test5_load(
935     Connection JavaDoc conn,
936     String JavaDoc schemaName,
937     String JavaDoc table_name,
938     int num_rows)
939         throws SQLException JavaDoc
940     {
941         Statement JavaDoc s = conn.createStatement();
942
943         s.execute(
944             "create table " + table_name +
945             " (keycol integer primary key, onehalf integer, onethird integer, c varchar(300))");
946         s.close();
947
948         PreparedStatement JavaDoc insert_stmt =
949             conn.prepareStatement(
950                 "insert into " + table_name + " values(?, ?, ?, ?)");
951
952         char[] data1_data = new char[200];
953
954         for (int i = 0; i < data1_data.length; i++)
955         {
956             data1_data[i] = 'b';
957         }
958         String JavaDoc data1_str = new String JavaDoc(data1_data);
959
960         for (int i = 0; i < num_rows; i++)
961         {
962             insert_stmt.setInt(1, i); // keycol
963
insert_stmt.setInt(2, i % 2); // onehalf: 0 or 1
964
insert_stmt.setInt(3, i % 3); // onethird: 0, 1, or 3
965
insert_stmt.setString(4, data1_str); // c
966
insert_stmt.execute();
967         }
968
969         conn.commit();
970     }
971     
972     /**
973      * Execute test5, simple defragement test.
974      * <p>
975      * o delete every other row, defragment
976      * o delete every third row, defragment
977      * o delete last 1000 rows, defragment
978      * o delete first 512 rows, defragment.
979      * <p>
980      * run test with at least 2000 rows.
981      **/

982     private void test5_run(
983     Connection JavaDoc conn,
984     String JavaDoc schemaName,
985     String JavaDoc table_name,
986     int num_rows)
987         throws SQLException JavaDoc
988     {
989         testProgress("begin test5: " + num_rows + " row test.");
990
991         if (verbose)
992             testProgress("Calling compress.");
993
994         // compress with no deletes should not affect size
995
int[] ret_before = getSpaceInfo(conn, "APP", table_name, true);
996         callCompress(conn, "APP", table_name, true, true, true, true);
997         int[] ret_after = getSpaceInfo(conn, "APP", table_name, true);
998
999         if (ret_after[SPACE_INFO_NUM_ALLOC] != ret_before[SPACE_INFO_NUM_ALLOC])
1000        {
1001            log_wrong_count(
1002                "Expected no alloc page change.",
1003                table_name, num_rows,
1004                ret_before[SPACE_INFO_NUM_ALLOC],
1005                ret_after[SPACE_INFO_NUM_ALLOC],
1006                ret_before, ret_after);
1007        }
1008
1009        if (verbose)
1010            testProgress("calling consistency checker.");
1011
1012        if (!checkConsistency(conn, schemaName, table_name))
1013        {
1014            logError("conistency check failed.");
1015        }
1016
1017        // DELETE EVERY OTHER ROW, COMPRESS, CHECK
1018
//
1019
//
1020

1021        // delete all the rows every other row.
1022
ret_before = getSpaceInfo(conn, "APP", table_name, true);
1023        executeQuery(
1024            conn, "delete from " + table_name + " where onehalf = 0", true);
1025
1026        if (verbose)
1027            testProgress("deleted every other row, now calling compress.");
1028
1029        callCompress(conn, "APP", table_name, true, true, true, true);
1030        ret_after = getSpaceInfo(conn, "APP", table_name, true);
1031
1032        if (total_pages(ret_after) != total_pages(ret_before))
1033        {
1034            // currently deleting every other row does not add free or unfilled
1035
// pages to the container so defragment has nowhere to put the rows.
1036

1037            log_wrong_count(
1038                "Expected no truncation.",
1039                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
1040                ret_before, ret_after);
1041        }
1042
1043        if (verbose)
1044            testProgress("calling consistency checker.");
1045
1046        if (!checkConsistency(conn, schemaName, table_name))
1047        {
1048            logError("conistency check failed.");
1049        }
1050
1051        // DELETE EVERY THIRD ROW in original dataset, COMPRESS, CHECK
1052
//
1053
//
1054

1055        // delete every third row
1056
ret_before = getSpaceInfo(conn, "APP", table_name, true);
1057        executeQuery(
1058            conn, "delete from " + table_name + " where onethird = 0", true);
1059
1060        if (verbose)
1061            testProgress("deleted every third row, now calling compress.");
1062
1063        callCompress(conn, "APP", table_name, true, true, true, true);
1064        ret_after = getSpaceInfo(conn, "APP", table_name, true);
1065
1066        if (total_pages(ret_after) != total_pages(ret_before))
1067        {
1068            // currently deleting every third row does not create any free
1069
// or unfilled pages so defragment has no place to move rows.
1070
log_wrong_count(
1071                "Expected no truncation.",
1072                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
1073                ret_before, ret_after);
1074        }
1075
1076        if (verbose)
1077            testProgress("calling consistency checker.");
1078
1079        if (!checkConsistency(conn, schemaName, table_name))
1080        {
1081            logError("conistency check failed.");
1082        }
1083
1084        // DELETE top "half" of rows in original dataset, COMPRESS, CHECK
1085
//
1086
//
1087

1088        // delete top "half" of the rows in the original dataset.
1089
ret_before = getSpaceInfo(conn, "APP", table_name, true);
1090        executeQuery(
1091            conn, "delete from " + table_name + " where keycol > " +
1092            (num_rows / 2), true);
1093
1094        if (verbose)
1095            testProgress("deleted top half of the rows, now calling compress.");
1096
1097        callCompress(conn, "APP", table_name, true, true, true, true);
1098        ret_after = getSpaceInfo(conn, "APP", table_name, true);
1099
1100        // compress should be able to clean up about 1/2 of the pages.
1101
if (verbose)
1102        {
1103            log_wrong_count(
1104                "deleted top half keys, spaceinfo:",
1105                table_name, num_rows,
1106                ((total_pages(ret_before) / 2) + 2),
1107                ret_after[SPACE_INFO_NUM_ALLOC],
1108                ret_before, ret_after);
1109        }
1110
1111        if (total_pages(ret_after) > ((total_pages(ret_before) / 2) + 2))
1112        {
1113            log_wrong_count(
1114                "Expected at least " +
1115                (ret_before[SPACE_INFO_NUM_ALLOC] / 2 + 2) +
1116                " pages to be truncated.",
1117                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
1118                ret_before, ret_after);
1119        }
1120
1121        if (verbose)
1122            testProgress("calling consistency checker.");
1123
1124        if (!checkConsistency(conn, schemaName, table_name))
1125        {
1126            logError("conistency check failed.");
1127        }
1128
1129        // DELETE 1st 500 rows in original dataset, COMPRESS, CHECK
1130
//
1131
//
1132

1133        // delete keys less than 500
1134
ret_before = getSpaceInfo(conn, "APP", table_name, true);
1135        executeQuery(
1136            conn, "delete from " + table_name + " where keycol < 500 ", true);
1137
1138        if (verbose)
1139            testProgress("deleted keys < 500, now calling compress.");
1140
1141        callCompress(conn, "APP", table_name, true, true, true, true);
1142        ret_after = getSpaceInfo(conn, "APP", table_name, true);
1143
1144        if (verbose)
1145        {
1146            log_wrong_count(
1147                "deleted bottom 500 keys, spaceinfo:",
1148                table_name, num_rows,
1149                (total_pages(ret_before) - 33),
1150                ret_after[SPACE_INFO_NUM_ALLOC],
1151                ret_before, ret_after);
1152        }
1153
1154        // The bottom 500 keys, assuming 4k pages, takes about 33 pages
1155
if (total_pages(ret_after) > (total_pages(ret_before) - 33))
1156        {
1157            log_wrong_count(
1158                "Expected at least 33 pages reclaimed.",
1159                table_name, num_rows, 1, ret_after[SPACE_INFO_NUM_ALLOC],
1160                ret_before, ret_after);
1161        }
1162
1163        if (verbose)
1164            testProgress("calling consistency checker.");
1165
1166        if (!checkConsistency(conn, schemaName, table_name))
1167        {
1168            logError("conistency check failed.");
1169        }
1170
1171
1172        conn.commit();
1173
1174        testProgress("end test5: " + num_rows + " row test.");
1175    }
1176
1177    /**
1178     * Cleanup after test5_run
1179     **/

1180    private void test5_cleanup(
1181    Connection JavaDoc conn,
1182    String JavaDoc schemaName,
1183    String JavaDoc table_name,
1184    int num_rows)
1185        throws SQLException JavaDoc
1186    {
1187        executeQuery(conn, "drop table " + table_name, true);
1188    }
1189
1190    /**
1191     * Test 5 - simple defragment test.
1192     * <p>
1193     * Create dataset and then:
1194     * o delete every other row, defragment
1195     * o delete every third row, defragment
1196     * o delete last 1000 rows, defragment
1197     * o delete first 512 rows, defragment.
1198     * <p>
1199     * run test with at least 2000 rows.
1200     *
1201     **/

1202    private void test5(
1203    Connection JavaDoc conn,
1204    String JavaDoc test_name,
1205    String JavaDoc table_name)
1206        throws SQLException JavaDoc
1207    {
1208        beginTest(conn, test_name);
1209
1210        int[] test_cases = {2000, 10000};
1211
1212        for (int i = 0; i < test_cases.length; i++)
1213        {
1214            test5_load(conn, "APP", table_name, test_cases[i]);
1215            test5_run(conn, "APP", table_name, test_cases[i]);
1216            test5_cleanup(conn, "APP", table_name, test_cases[i]);
1217        }
1218
1219        endTest(conn, test_name);
1220    }
1221
1222
1223
1224    public void testList(Connection JavaDoc conn)
1225        throws SQLException JavaDoc
1226    {
1227        test1(conn, "test1", "TEST1");
1228        // test2(conn, "test2", "TEST2");
1229
test3(conn, "test3", "TEST3");
1230        // test4(conn, "test4", "TEST4");
1231
test5(conn, "test5", "TEST5");
1232    }
1233
1234    public static void main(String JavaDoc[] argv)
1235        throws Throwable JavaDoc
1236    {
1237        OnlineCompressTest test = new OnlineCompressTest();
1238
1239        ij.getPropertyArg(argv);
1240        Connection JavaDoc conn = ij.startJBMS();
1241        conn.setAutoCommit(false);
1242
1243        try
1244        {
1245            test.testList(conn);
1246        }
1247        catch (SQLException JavaDoc sqle)
1248        {
1249            org.apache.derby.tools.JDBCDisplayUtil.ShowSQLException(
1250                System.out, sqle);
1251            sqle.printStackTrace(System.out);
1252        }
1253    }
1254}
1255
Popular Tags