/* KInterbasDB Python Package - Implementation of Materialized Blob Conversion
*
* Version 3.3
*
* The following contributors hold Copyright (C) over their respective
* portions of code (see license.txt for details):
*
* [Original Author (maintained through version 2.0-0.3.1):]
* 1998-2001 [alex] Alexander Kuznetsov <alexan@users.sourceforge.net>
* [Maintainers (after version 2.0-0.3.1):]
* 2001-2002 [maz] Marek Isalski <kinterbasdb@maz.nu>
* 2002-2007 [dsr] David Rushby <woodsplitter@rocketmail.com>
* [Contributors:]
* 2001 [eac] Evgeny A. Cherkashin <eugeneai@icc.ru>
* 2001-2002 [janez] Janez Jere <janez.jere@void.si>
*/
/* This source file is designed to be directly included in _kiconversion.c,
* without the involvement of a header file. */
/******************** FUNCTION PROTOTYPES:BEGIN ********************/
static int _blob_info_total_size_and_max_segment_size(
ISC_STATUS *status_vector,
isc_blob_handle *blob_handle_ptr,
ISC_LONG *total_size, unsigned short *max_segment_size
);
/******************** FUNCTION PROTOTYPES:END ********************/
/******************** INPUT FUNCTIONS:BEGIN ********************/
/* This implementation of conv_in_blob_from_pybuffer uses the Python raw buffer
* interface rather than slicing and converting each segment to a string before
* passing it to isc_put_segment, as the previous implementation did. */
static InputStatus conv_in_blob_from_pybuffer(
PyObject *py_buf,
ISC_QUAD *blob_id,
ISC_STATUS *status_vector, isc_db_handle db_handle, isc_tr_handle trans_handle
)
{
isc_blob_handle blob_handle = NULL_BLOB_HANDLE;
isc_blob_handle *blob_handle_ptr = &blob_handle;
PyBufferProcs *bufferProcs;
char *py_buf_start_ptr;
int bytes_written_so_far;
unsigned short bytes_to_write_this_time;
int total_size;
char *err_preamble = "";
boolean err_should_cancel_blob = TRUE;
/* This function is only called by kinterbasdb's internals, so it's
* acceptable to apply the type check only in non-production builds. */
assert (PyBuffer_Check(py_buf));
{
const Py_ssize_t total_size_ss = PySequence_Length(py_buf);
if (total_size_ss == -1) {
return INPUT_ERROR;
} else if (total_size_ss > INT_MAX) {
raise_exception(NotSupportedError, "The database API does not yet"
" officially support blobs larger than 2 GB."
);
return INPUT_ERROR;
}
total_size = (int) total_size_ss;
}
/* Get a pointer to the PyBufferObject's getreadbuffer method, then call
* that method, which will make py_buf_start_ptr point to the start of
* the PyBufferObject's raw data buffer. */
bufferProcs = py_buf->ob_type->tp_as_buffer;
/* Since this function is only called by kinterbasdb's internals, it's
* acceptable to check for a NULL bf_getreadbuffer only in non-production
* builds. */
assert (bufferProcs->bf_getreadbuffer != NULL);
/* bf_getreadbuffer returns the length of the requested segment, or -1 (with
 * a Python exception set) on failure. */
if ((*bufferProcs->bf_getreadbuffer)(py_buf, 0, (void **) &py_buf_start_ptr)
    == -1
  )
{ return INPUT_ERROR; }
/* Within this ENTER/LEAVE_GDAL block, a Python object (py_buf) is manipulated,
* even though the GIL is not held. However, no Python API calls are made;
* in fact, py_buf is only manipulated in the sense that its internal binary
* buffer (pointed to by py_buf_start_ptr) is read. Since the code
* surrounding the ENTER/LEAVE_GDAL block holds a reference to py_buf, and
* thereby ensures that py_buf will not be destroyed prematurely, this code
* should be safe. */
/* Create a blob and retrieve its handle into blob_handle. */
ENTER_GDAL
isc_create_blob2(status_vector,
&db_handle, &trans_handle,
blob_handle_ptr, blob_id,
/* Last two params indicate "no blob parameter buffer supplied". */
0, NULL
);
if (DB_API_ERROR(status_vector)) {
LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK
err_preamble = "conv_in_blob_from_pybuffer.isc_create_blob2: ";
err_should_cancel_blob = FALSE;
goto fail;
}
/* Copy the data from py_buf's internal byte buffer into the database in
* chunks of size MAX_BLOB_SEGMENT_SIZE (all but the last chunk, which may be
* smaller). */
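/* Worked example (assuming MAX_BLOB_SEGMENT_SIZE is 65535, a typical value,
 * since isc_put_segment carries the segment length in an unsigned short):
 * a 150000-byte buffer would be written by three calls to isc_put_segment,
 * with segment lengths 65535, 65535, and 18930. */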
bytes_written_so_far = 0;
bytes_to_write_this_time = MAX_BLOB_SEGMENT_SIZE;
while (bytes_written_so_far < total_size) {
if (total_size - bytes_written_so_far < MAX_BLOB_SEGMENT_SIZE) {
bytes_to_write_this_time = (unsigned short)
(total_size - bytes_written_so_far);
}
isc_put_segment(status_vector,
blob_handle_ptr,
bytes_to_write_this_time,
py_buf_start_ptr + bytes_written_so_far
);
if (DB_API_ERROR(status_vector)) {
LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK
err_preamble = "conv_in_blob_from_pybuffer.isc_put_segment: ";
goto fail;
}
bytes_written_so_far += bytes_to_write_this_time;
}
isc_close_blob(status_vector, blob_handle_ptr);
if (DB_API_ERROR(status_vector)) {
LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK
err_preamble = "conv_in_blob_from_pybuffer.isc_close_blob: ";
goto fail;
}
LEAVE_GDAL
return INPUT_OK;
fail:
assert (DB_API_ERROR(status_vector));
raise_sql_exception(OperationalError, err_preamble, status_vector);
if (err_should_cancel_blob) {
ENTER_GDAL
isc_cancel_blob(status_vector, blob_handle_ptr);
LEAVE_GDAL
}
return INPUT_ERROR;
} /* conv_in_blob_from_pybuffer */
/* DSR created this version of conv_in_blob_from_pystring on 2002.02.23 to
* replace the previous implementation, which broke with strings of length
* >= 2^16.
 * This function just creates a Python buffer object from the Python str
 * object it receives. This "conversion" is very cheap: it involves no memory
 * copying, because it simply creates a read-only view of the string's
 * existing character buffer. */
static InputStatus conv_in_blob_from_pystring(
PyObject *str,
ISC_QUAD *blob_id,
ISC_STATUS *status_vector, isc_db_handle db_handle, isc_tr_handle trans_handle
)
{
PyObject *pyBuffer;
InputStatus result;
/* This function is only called by kinterbasdb's internals, so it's
* acceptable to apply the type check only in non-production builds. */
assert (PyString_Check(str));
pyBuffer = PyBuffer_FromObject(str, 0, PyString_GET_SIZE(str));
if (pyBuffer == NULL) { return INPUT_ERROR; }
result = conv_in_blob_from_pybuffer(pyBuffer, blob_id, status_vector,
db_handle, trans_handle
);
/* *Must* DECREF the buffer we've created; even though its creation doesn't
* involve copying the string's internal buffer, the string will never be
* garbage collected if the buffer is not DECREFed. */
Py_DECREF(pyBuffer);
/* conv_in_blob_from_pybuffer will take care of raising an exception if it
* must; we'll just pass its return value upward. */
return result;
} /* conv_in_blob_from_pystring */
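/* Illustrative sketch only (not compiled): how a hypothetical internal caller
 * might persist a Python str parameter as a blob. The variable names here
 * (param, blob_id, status, ...) are assumptions for illustration, not this
 * module's actual call sites. */
#if 0
{
  ISC_QUAD blob_id;
  InputStatus status = conv_in_blob_from_pystring(param, &blob_id,
      status_vector, db_handle, trans_handle
    );
  if (status != INPUT_OK) {
    /* A Python exception has already been raised by the conversion code. */
  }
  /* On success, blob_id identifies the new blob and could be bound to a BLOB
   * parameter in the statement's XSQLDA. */
}
#endif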
/******************** INPUT FUNCTIONS:END ********************/
/******************** OUTPUT FUNCTIONS:BEGIN ********************/
static PyObject *conv_out_blob_materialized_in_single_chunk(
ISC_STATUS *status_vector,
isc_blob_handle *blob_handle_ptr, const unsigned short max_segment_size,
const int bytes_requested, boolean allow_incomplete_segment_read
)
{
ISC_LONG bytes_read_so_far = 0;
unsigned short bytes_actually_read;
ISC_STATUS blob_stat; /* 2007.02.10: FB 2.1 fix: int->ISC_STATUS */
char *py_str_start_ptr;
/* Create an empty PyStringObject large enough to hold the entire chunk. */
PyObject *py_str = PyString_FromStringAndSize(NULL, bytes_requested);
if (py_str == NULL) { return NULL; }
/* Set py_str_start_ptr to point to the beginning of py_str's internal buffer. */
py_str_start_ptr = PyString_AS_STRING(py_str);
/* DSR documented his concerns about this GIL-handling scheme in a lengthy
* comment in function conv_in_blob_from_pybuffer. */
ENTER_GDAL
/* Now, transfer the blob's contents from the database into the preallocated
* Python string named py_str. Use repeated calls to isc_get_segment to
* effect the transfer. */
assert (bytes_read_so_far == 0);
while (bytes_read_so_far < bytes_requested) {
blob_stat = isc_get_segment(status_vector,
blob_handle_ptr,
&bytes_actually_read,
(unsigned short) MIN( (long)max_segment_size, bytes_requested - bytes_read_so_far ),
py_str_start_ptr + bytes_read_so_far
);
/* Since clients of this function are required to refrain from submitting
* requests for more bytes than are available, it is not necessary to check
* for isc_segstr_eof.
*
 * But the isc_segment status code can arise under normal circumstances; it
 * simply means that the requested number of bytes did not consume the last
 * processed segment entirely. The database API's retrieval function,
 * isc_get_segment, is smart enough to pick up where it left off during the
 * next call. */
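/* Worked example (figures assumed for illustration): if the blob's segments
 * are each 4096 bytes and the caller requests only 6000 bytes, the second
 * isc_get_segment call reads 1904 bytes of the second segment and returns
 * isc_segment; a later call would resume within that same segment. */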
if (blob_stat != 0) {
if (blob_stat == isc_segment && allow_incomplete_segment_read) {
/* Record the success (from our perspective) of the most recent read,
* then exit the read loop. */
bytes_read_so_far += bytes_actually_read;
break;
}
LEAVE_GDAL_WITHOUT_ENDING_CODE_BLOCK
raise_sql_exception(OperationalError,
"conv_out_blob_materialized_in_single_chunk.isc_get_segment: segment"
" retrieval error: ", status_vector
);
Py_DECREF(py_str);
return NULL;
}
bytes_read_so_far += bytes_actually_read;
}
LEAVE_GDAL
/* Unless an incomplete segment read was permitted (and occurred), the entire
 * request must have been satisfied. */
assert (allow_incomplete_segment_read || bytes_read_so_far == bytes_requested);
return py_str;
} /* conv_out_blob_materialized_in_single_chunk */
static PyObject *conv_out_blob_materialized(
ISC_QUAD *blob_id, ISC_STATUS *status_vector,
isc_db_handle db_handle, isc_tr_handle trans_handle
)
{
isc_blob_handle blob_handle = NULL_BLOB_HANDLE;
ISC_LONG total_size = -1;
unsigned short max_segment_size = 0;
/* Based on the blob's ID, open a handle to it. */
ENTER_GDAL
isc_open_blob2(status_vector,
&db_handle, &trans_handle,
&blob_handle, blob_id,
/* Last two params indicate "no blob parameter buffer supplied". */
0, NULL
);
LEAVE_GDAL
if (DB_API_ERROR(status_vector)) {
raise_sql_exception(OperationalError,
"conv_out_blob_materialized.isc_open_blob2: ",
status_vector
);
return NULL;
}
/* Before actually reading any of the blob's contents, determine the total
* size of the blob and the size of its largest segment. */
if (_blob_info_total_size_and_max_segment_size(
status_vector, &blob_handle,
&total_size,
&max_segment_size
) != 0
)
{ goto fail_close; }
/* Handle the very remote possibility that passing an ISC_LONG to
* PyString_FromStringAndSize would cause an overflow (on most current
* platforms, ISC_LONG and int are identical, so no overflow is possible). */
if (total_size > INT_MAX) {
raise_exception(InternalError, "conv_out_blob_materialized:"
" The size of the requested blob exceeds the capacity of a Python str"
" object; use chunked retrieval instead."
);
goto fail_close;
}
{
PyObject *py_str = conv_out_blob_materialized_in_single_chunk(status_vector,
&blob_handle, max_segment_size, (int) total_size, FALSE
);
/* Close the blob regardless of whether an exception arose while reading
* it. Don't check to see whether the close op succeeds; reading was the
* important part, and it's already finished. */
ENTER_GDAL
isc_close_blob(status_vector, &blob_handle);
LEAVE_GDAL
return py_str;
}
fail_close:
/* An exception has already been raised; close the blob so its handle is not
 * leaked. Any secondary error from the close operation is ignored. */
ENTER_GDAL
isc_close_blob(status_vector, &blob_handle);
LEAVE_GDAL
return NULL;
} /* conv_out_blob_materialized */
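/* Illustrative sketch only (not compiled): how a hypothetical fetch path
 * might materialize a BLOB column into a Python str. The variable names
 * (sqlvar, py_value, ...) are assumptions for illustration. */
#if 0
{
  ISC_QUAD *blob_id = (ISC_QUAD *) sqlvar->sqldata;
  PyObject *py_value = conv_out_blob_materialized(blob_id, status_vector,
      db_handle, trans_handle
    );
  if (py_value == NULL) {
    /* A Python exception has already been raised by the conversion code. */
  }
}
#endif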
/******************** OUTPUT FUNCTIONS:END ********************/
/******************** UTILITY FUNCTIONS:BEGIN ********************/
/* _blob_info_total_size_and_max_segment_size stores the specified blob's
* total size and maximum segment size in its output arguments total_size
* and max_segment_size, respectively.
* Returns 0 if successful, otherwise -1.
*
* See IB6 API Guide chapter entitled "Working with Blob Data". */
static int _blob_info_total_size_and_max_segment_size(
ISC_STATUS *status_vector,
isc_blob_handle *blob_handle_ptr,
ISC_LONG *total_size,
unsigned short *max_segment_size
)
{
char blob_info_items[] = {
isc_info_blob_total_length,
isc_info_blob_max_segment
};
char result_buffer[ISC_INFO_BUFFER_SIZE];
short length;
char *ptr;
char item;
ENTER_GDAL
isc_blob_info(status_vector,
blob_handle_ptr,
sizeof(blob_info_items),
blob_info_items,
sizeof(result_buffer),
result_buffer
);
LEAVE_GDAL
if (DB_API_ERROR(status_vector)) {
raise_sql_exception(InternalError,
"_blob_info_total_size_and_max_segment_size.isc_blob_info: ",
status_vector
);
return -1;
}
/* Extract the values returned in the result buffer. */
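/* Each cluster in the result buffer has the form (per the standard
 * isc_*_info result layout):
 *   1 byte:  item code (e.g. isc_info_blob_total_length)
 *   2 bytes: length of the value, as a little-endian ("VAX") integer
 *   N bytes: the value itself, also little-endian
 * and the sequence is terminated by a single isc_info_end byte. For example,
 * a total length of 150000 (0x000249F0) carried in a 4-byte value would
 * appear as: <item code> 04 00 F0 49 02 00. */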
ptr = result_buffer;
while (*ptr != isc_info_end) {
item = *ptr++;
ENTER_GDAL
length = (short) isc_vax_integer(ptr, sizeof(short));
LEAVE_GDAL
ptr += sizeof(short);
switch (item) {
case isc_info_blob_total_length:
ENTER_GDAL
*total_size = isc_vax_integer(ptr, length);
LEAVE_GDAL
break;
case isc_info_blob_max_segment:
ENTER_GDAL
*max_segment_size = (unsigned short) isc_vax_integer(ptr, length);
LEAVE_GDAL
break;
case isc_info_truncated:
raise_sql_exception(InternalError,
"_blob_info_total_size_and_max_segment_size: isc_blob_info return"
" truncated: ",
status_vector
);
return -1;
}
ptr += length;
}
return 0;
} /* _blob_info_total_size_and_max_segment_size */
/******************** UTILITY FUNCTIONS:END ********************/