File: BlobMemTest.java

/**
 * Derby - Class org.apache.derbyTesting.functionTests.tests.memory.BlobMemTest
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.derbyTesting.functionTests.tests.memory;

import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.sql.Blob;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;
import java.util.Properties;
import junit.framework.Test;
import org.apache.derbyTesting.functionTests.util.streams.LoopingAlphabetStream;
import org.apache.derbyTesting.junit.BaseJDBCTestCase;
import org.apache.derbyTesting.junit.BaseTestSuite;
import org.apache.derbyTesting.junit.JDBC;
import org.apache.derbyTesting.junit.SystemPropertyTestSetup;
import org.apache.derbyTesting.junit.TestConfiguration;

public class BlobMemTest extends BaseJDBCTestCase {

    private static final int LONG_BLOB_LENGTH = 18000000;
    private static final String LONG_BLOB_LENGTH_STRING= "18000000";
    private static final byte[] SHORT_BLOB_BYTES = new byte[] {0x01,0x02,0x03};

    public BlobMemTest(String name) {
        super(name);
    }

    /**
     * Insert a blob and test length.    
     * 
     * @param lengthless  if true use the lengthless setBinaryStream api
     * @param extraLen  number of extra bytes beyond the declared column
     *                  length; a value greater than zero forces a
     *                  truncation error
     * 
     * @throws SQLException
     * @throws IOException 
     * @throws InvocationTargetException 
     * @throws IllegalAccessException 
     * @throws IllegalArgumentException 
     */
    private void testBlobLength(boolean lengthless, int extraLen) throws SQLException, IOException, IllegalArgumentException, IllegalAccessException, InvocationTargetException {
        setAutoCommit(false);
        Statement s = createStatement();
        s.executeUpdate("CREATE TABLE BLOBTAB (K INT CONSTRAINT PK PRIMARY KEY, B BLOB(" + LONG_BLOB_LENGTH + "))");
        
        PreparedStatement ps = prepareStatement("INSERT INTO BLOBTAB VALUES(?,?)");
        // We allocate 16MB for the test so use something bigger than that.
        ps.setInt(1,1);
        int blobLen = LONG_BLOB_LENGTH + extraLen;
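        // LoopingAlphabetStream produces a stream of the requested length
        // that cycles through the lowercase letters a-z; the verification
        // loop further down relies on that pattern.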
        LoopingAlphabetStream stream = new LoopingAlphabetStream(blobLen);
        if (lengthless) {
            Method m = null;
            try {
                Class<?> c = ps.getClass();
                m = c.getMethod("setBinaryStream",new Class[] {Integer.TYPE,
                            InputStream.class});                
            } catch (NoSuchMethodException e) {
                // Ignore method not found, as the method may not be present
                // on JDKs older than 1.6.
                println("Skipping lengthless insert because method is not available");
                return;                
            }
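            // Invoke ps.setBinaryStream(2, stream) reflectively; the int
            // column index is autoboxed into the Object[] argument array.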
            m.invoke(ps, new Object[] {2,stream});
        }
        else
            ps.setBinaryStream(2, stream,blobLen);
        if (extraLen == 0)
        {
            ps.executeUpdate();
        }
        else
        {
            try
            {
                ps.executeUpdate();
                fail("Expected truncation error for blob too large");
            }
            catch (SQLException sqlE)
            {
                assertSQLState("Wrong SQL State for truncation", "22001", sqlE);
            }
            // extraLen > 0 is just a way to force the truncation error. Once
            // we've forced that error, we're done testing, so return.
            return;
        }
        // insert a zero length blob.
        ps.setInt(1, 2);
        ps.setBytes(2, new byte[] {});
        ps.executeUpdate();
        // insert a null blob.
        ps.setInt(1, 3);
        ps.setBytes(2,null);
        ps.executeUpdate();
        // insert a short blob
        ps.setInt(1, 4);
        ps.setBytes(2, SHORT_BLOB_BYTES);
        ps.executeUpdate();
        // Currently need to use optimizer override to force use of the index.
        // Derby should use sort avoidance and do it automatically, but there
        // appears to be a bug.
        ResultSet rs = s.executeQuery("SELECT K, LENGTH(B), B FROM BLOBTAB" +
                "-- DERBY-PROPERTIES constraint=pk\n ORDER BY K"); 
        rs.next();
        assertEquals(LONG_BLOB_LENGTH_STRING,rs.getString(2));
        // make sure we can still access the blob after getting length.
        // It should be ok because we reset the stream
        InputStream rsstream = rs.getBinaryStream(3);
        int len= 0;
        byte[] buf = new byte[32672];
        for (;;)  {
                int size = rsstream.read(buf);
                if (size == -1)
                        break;
                len += size;
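                // The stream cycles a-z, so the last byte read so far (at
                // 0-based position len - 1) should be 'a' + ((len - 1) % 26).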
                int expectedValue = ((len -1) % 26) + 'a';
                if (size != 0)
                    assertEquals(expectedValue,buf[size -1]);      
        }

        assertEquals(LONG_BLOB_LENGTH,len);
        // empty blob
        rs.next();
        assertEquals("0",rs.getString(2));
        byte[] bytes = rs.getBytes(3);
        assertEquals(0, bytes.length);
        // null blob
        rs.next();
        assertEquals(null,rs.getString(2));
        bytes = rs.getBytes(3);
        assertEquals(null,bytes);
        // short blob
        rs.next();
        assertEquals("3",rs.getString(2));
        bytes = rs.getBytes(3);
        assertTrue(Arrays.equals(SHORT_BLOB_BYTES, bytes));
        rs.close();         
        
        // Select just length without selecting the blob.
        rs = s.executeQuery("SELECT K, LENGTH(B)  FROM BLOBTAB " +
                "ORDER BY K");
        JDBC.assertFullResultSet(rs, new String [][] {{"1",LONG_BLOB_LENGTH_STRING},{"2","0"},
                {"3",null},{"4","3"}});
    }
    
    /**
     * Test the length after inserting with the setBinaryStream api 
     * that takes length.  In this case the length will be encoded at the
     * beginning of the stream and the call should be fairly low overhead.
     * 
     * @throws SQLException
     * @throws IOException 
     * @throws InvocationTargetException 
     * @throws IllegalAccessException 
     * @throws IllegalArgumentException 
     */
    public void testBlobLength() throws SQLException, IOException, IllegalArgumentException, IllegalAccessException, InvocationTargetException {
        testBlobLength(false, 0);
    }
    
    /**
     * Test the length after inserting the blob value with the lengthless
     * setBinaryStream api. In this case we will have to read the whole 
     * stream to get the length.
     * 
     * @throws SQLException
     * @throws IOException 
     * @throws InvocationTargetException 
     * @throws IllegalAccessException 
     * @throws IllegalArgumentException 
     */
    public void testBlobLengthWithLengthlessInsert() throws SQLException, IOException, IllegalArgumentException, IllegalAccessException, InvocationTargetException {        
        testBlobLength(true, 0);  
    }

    /**
     * Simple test to exercise message 22001 as described in DERBY-961.
     */
    public void testBlobLengthTooLongDerby961() throws SQLException, IOException, IllegalArgumentException, IllegalAccessException, InvocationTargetException {        
        testBlobLength(false, 10000);  
    }

    public static Test suite() {
        BaseTestSuite suite =  new BaseTestSuite();
        // Add the DERBY-6096 test for embedded only, as it takes time to run.
        suite.addTest(new BlobMemTest("xtestderby6096BlobhashJoin"));
        suite.addTest(TestConfiguration.defaultSuite(BlobMemTest.class));
        
        Properties p = new Properties();
        // use small pageCacheSize so we don't run out of memory on the insert.
        p.setProperty("derby.storage.pageCacheSize", "100");
        return new SystemPropertyTestSetup(suite,p);
    }

    /**
     * Tests that a blob can safely occur multiple times in a SQL select, and
     * that large object streams are not materialized when cloned.
     * <p/>
     * See DERBY-4477.
     * @see org.apache.derbyTesting.functionTests.tests.jdbcapi.BLOBTest#testDerby4477_3645_3646_Repro
     * @see ClobMemTest#testDerby4477_3645_3646_Repro_lowmem_clob
     */
    public void testDerby4477_3645_3646_Repro_lowmem()
            throws SQLException, IOException {

        setAutoCommit(false);

        Statement s = createStatement();
        int blobsize = LONG_BLOB_LENGTH;

        s.executeUpdate(
            "CREATE TABLE T_MAIN(" +
            "ID INT  GENERATED ALWAYS AS IDENTITY PRIMARY KEY, " +
            "V BLOB(" + blobsize + ") )");

        PreparedStatement ps = prepareStatement(
            "INSERT INTO T_MAIN(V) VALUES (?)");

        int blobLen = blobsize;
        LoopingAlphabetStream stream = new LoopingAlphabetStream(blobLen);
        ps.setBinaryStream(1, stream, blobLen);

        ps.executeUpdate();
        ps.close();

        s.executeUpdate("CREATE TABLE T_COPY ( V1 BLOB(" + blobsize +
                        "), V2 BLOB(" + blobsize + "))");

        // This failed in the repro for DERBY-3645 solved as part of
        // DERBY-4477:
        s.executeUpdate("INSERT INTO T_COPY SELECT  V, V FROM T_MAIN");

        // Check that the two results are identical:
        ResultSet rs = s.executeQuery("SELECT * FROM T_COPY");
        rs.next();
        InputStream is = rs.getBinaryStream(1);

        stream.reset();
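        // The assertEquals(InputStream, InputStream) helper from the test
        // framework compares the two streams' contents; the looping source
        // stream is reset before each comparison so it starts from 'a' again.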
        assertEquals(stream, is);

        is = rs.getBinaryStream(2);

        stream.reset();
        assertEquals(stream, is);
        rs.close();

        // This failed in the repro for DERBY-3646, solved as part of
        // DERBY-4477 (repro slightly reworked here):
        rs = s.executeQuery("SELECT 'I', V, ID, V from T_MAIN");
        rs.next();

        is = rs.getBinaryStream(2);
        stream.reset();
        assertEquals(stream, is);

        is = rs.getBinaryStream(4);
        stream.reset();
        assertEquals(stream, is);

        // clean up
        stream.close();
        is.close();
        s.close();
        rs.close();

        rollback();
    }

    /**
     * Test that a BLOB that goes through the sorter does not get materialized
     * twice in memory. It will still be materialized as part of the sorting,
     * but the fix for DERBY-5752 prevents the creation of a second copy when
     * accessing the BLOB after the sorting.
     */
    public void testDerby5752DoubleMaterialization() throws Exception {
        setAutoCommit(false);

        Statement s = createStatement();
        s.execute("create table d5752(id int, b blob)");

        int lobSize = 1000000;

        // Insert a single BLOB in the table.
        PreparedStatement insert =
                prepareStatement("insert into d5752 values (1,?)");
        insert.setBinaryStream(1, new LoopingAlphabetStream(lobSize), lobSize);
        insert.execute();
        closeStatement(insert);

        Blob[] blobs = new Blob[15];

        // Repeatedly sort the table and keep a reference to the BLOB.
        for (int i = 0; i < blobs.length; i++) {
            ResultSet rs = s.executeQuery("select * from d5752 order by id");
            rs.next();
            // Used to get an OutOfMemoryError here because a new copy of the
            // BLOB was created in memory.
            blobs[i] = rs.getBlob(2);
            rs.close();
        }

        // Access the BLOBs here to make sure they are not garbage collected
        // earlier (in which case we wouldn't see the OOME in the loop above).
        for (int i = 0; i < blobs.length; i++) {
            assertEquals(lobSize, blobs[i].length());
        }
    }
    
    /**
     * 
     * DERBY-6096: Make sure a blob hash join does not run out of memory.
     * Prior to the fix, blobs were estimated at size 0. We test with
     * 32K blobs even though the estimatedUsage is 10K. The default
     * maximum memory per table is only 1MB.
     * 
     * @throws SQLException
     */
    public void xtestderby6096BlobhashJoin() throws SQLException {
        byte[] b = new byte[32000];
        Arrays.fill(b, (byte) 'a'); 
        Statement s = createStatement();
        s.execute("create table d6096(i int, b blob)");
        PreparedStatement ps = prepareStatement("insert into d6096 values (?, ?)");
        ps.setBytes(2, b);
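        // The blob parameter bound above is retained across executions, so
        // only the int column needs to be set for each of the 2000 rows.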
        for (int i = 0; i < 2000; i++) {
            ps.setInt(1, i);
            ps.execute();
        }
        ResultSet rs = s.executeQuery("select * from d6096 t1, d6096 t2 where t1.i=t2.i");
        // just a single fetch will build the hash table and consume the memory.
        assertTrue(rs.next());
        // When derby.tests.debug is set (verbose mode), print memory usage.
        if (TestConfiguration.getCurrent().isVerbose()) {
            System.gc();
            println("TotalMemory:" + Runtime.getRuntime().totalMemory()
                    + " " + "Free Memory:"
                    + Runtime.getRuntime().freeMemory());
        }
        rs.close();
    }

}