Subject | Optimizing ResultSet.getBytes() |
---|---|
Author | Alexey Panchenko |
Post date | 2006-03-24T11:26:10Z |
Hi,
I want to continue optimizing the ResultSet.getBytes() method for Blobs
started in the end of December.
This is more optimized version - the Blob.length() method is partially
inlined, so the Blob is opened only once.
This version sends only one more packet to the server.
My opinion is that for small blobs everything is fast enough, so
nobody will notice that one more packet. And for large Blobs my
version has more effective memory usage.
I really need effective Blob operations. We keep images and other
binary data in the database, and slow operations are annoying our users.
Using
Blob blob = rs.getBlob(...);
blob.getBytes(1,blob.length());
is worse than my version because it executes additional openBlob() &
closeBlob() during length().
Also I would like to discuss one more possible optimization - could an
additional parameter be added to getBlobSegment() so that a single
buffer can be reused across calls?
What do you think ?
Index: jdbc/FBBlob.java
===================================================================
RCS file: /cvsroot/firebird/client-java/src/main/org/firebirdsql/jdbc/FBBlob.java,v
retrieving revision 1.34
diff -u -u -U6 -r1.34 FBBlob.java
--- jdbc/FBBlob.java 7 Sep 2005 22:08:45 -0000 1.34
+++ jdbc/FBBlob.java 24 Mar 2006 11:01:19 -0000
@@ -55,13 +55,13 @@
* This class is new in the JDBC 2.0 API.
* @since 1.2
*/
public class FBBlob implements FirebirdBlob, Synchronizable {
- private static final boolean SEGMENTED = true;
+ public static final boolean SEGMENTED = true;
public static final int READ_FULLY_BUFFER_SIZE = 16 * 1024;
/**
* bufferlength is the size of the buffer for blob input and output streams,
* also used for the BufferedInputStream/BufferedOutputStream wrappers.
*
@@ -185,25 +185,27 @@
} finally {
if (blobListener != null)
blobListener.executionCompleted(this);
}
}
}
+
+ public static final byte[] BLOB_LENGTH_REQUEST = new byte[]{ISCConstants.isc_info_blob_total_length};
/**
* Returns the number of bytes in the <code>BLOB</code> value
* designated by this <code>Blob</code> object.
* @return length of the <code>BLOB</code> in bytes
* @exception SQLException if there is an error accessing the
* length of the <code>BLOB</code>
* @since 1.2
* @see <a href="package-summary.html#2.0 API">What Is in the JDBC 2.0 API</a>
*/
public long length() throws SQLException {
byte[] info = getInfo(
- new byte[]{ISCConstants.isc_info_blob_total_length}, 20);
+ BLOB_LENGTH_REQUEST, 20);
return interpretLength(info, 0);
}
/**
* Interpret BLOB length from buffer.
@@ -212,23 +214,37 @@
* @param position where to start interpreting.
*
* @return length of the blob.
*
* @throws SQLException if length cannot be interpreted.
*/
- private long interpretLength(byte[] info, int position) throws SQLException {
+ public static long interpretLength(GDS gds, byte[] info, int position) throws SQLException {
if (info[position] != ISCConstants.isc_info_blob_total_length)
throw new FBSQLException("Length is not available.");
int dataLength =
- gdsHelper.getInternalAPIHandler().iscVaxInteger(info, position + 1, 2);
+ gds.iscVaxInteger(info, position + 1, 2);
- return gdsHelper.getInternalAPIHandler().iscVaxInteger(
+ return gds.iscVaxInteger(
info, position + 3, dataLength);
}
+
+ /**
+ * Interpret BLOB length from buffer.
+ *
+ * @param info server response.
+ * @param position where to start interpreting.
+ *
+ * @return length of the blob.
+ *
+ * @throws SQLException if length cannot be interpreted.
+ */
+ private long interpretLength(byte[] info, int position) throws SQLException {
+ return interpretLength(gdsHelper.getInternalAPIHandler(), info, position);
+ }
/**
* Check if blob is segmented.
*
* @return <code>true</code> if this blob is segmented,
* otherwise <code>false</code>
Index: jdbc/field/FBBlobField.java
===================================================================
RCS file: /cvsroot/firebird/client-java/src/main/org/firebirdsql/jdbc/field/FBBlobField.java,v
retrieving revision 1.12
diff -u -u -U6 -r1.12 FBBlobField.java
--- jdbc/field/FBBlobField.java 20 Jan 2006 04:09:34 -0000 1.12
+++ jdbc/field/FBBlobField.java 24 Mar 2006 10:45:21 -0000
@@ -23,12 +23,16 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.sql.SQLException;
import java.sql.Blob;
+import org.firebirdsql.gds.GDS;
+import org.firebirdsql.gds.GDSException;
+import org.firebirdsql.gds.ISCConstants;
+import org.firebirdsql.gds.IscBlobHandle;
import org.firebirdsql.gds.XSQLVAR;
import org.firebirdsql.jdbc.*;
/**
* Describe class <code>FBBlobField</code> here.
*
@@ -111,49 +115,45 @@
// BYTES_CONVERSION_ERROR);
return getBytesInternal();
}
public byte[] getBytesInternal() throws SQLException {
- Blob blob = getBlob();
-
- if (blob == BLOB_NULL_VALUE)
- return BYTES_NULL_VALUE;
-
- InputStream in = blob.getBinaryStream();
-
- if (in == STREAM_NULL_VALUE)
- return BYTES_NULL_VALUE;
-
- ByteArrayOutputStream bout = new ByteArrayOutputStream();
-
- // copy stream data
- byte[] buff = new byte[BUFF_SIZE];
- int counter = 0;
+ final byte[] blobIdBuffer = getFieldData();
+ if (blobIdBuffer == null) return BYTES_NULL_VALUE;
+ final long blobId = field.decodeLong( blobIdBuffer );
+ synchronized (gdsHelper) {
try {
- while((counter = in.read(buff)) != -1) {
- bout.write(buff, 0, counter);
- }
- } catch(IOException ioex) {
- throw (SQLException)createException(
- BYTES_CONVERSION_ERROR + " " + ioex.getMessage());
- } finally {
- try {
- in.close();
- } catch(IOException ioex) {
- throw new FBSQLException(ioex);
- }
-
- try {
- bout.close();
- } catch(IOException ioex) {
- throw new FBSQLException(ioex);
+ final IscBlobHandle blob = gdsHelper.openBlob(blobId, FBBlob.SEGMENTED);
+ try {
+ final GDS gds = gdsHelper.getInternalAPIHandler();
+ final byte[] lengthBuffer = gds.iscBlobInfo(blob, FBBlob.BLOB_LENGTH_REQUEST, 20);
+ final int blobLength =(int) FBBlob.interpretLength(gds, lengthBuffer, 0);
+ //
+ final int bufferLength = gdsHelper.getBlobBufferLength();
+ final byte[] resultBuffer = new byte[blobLength];
+ int offset = 0;
+ while (offset < blobLength) {
+ final byte[] segementBuffer = gdsHelper.getBlobSegment(blob, bufferLength);
+ if (segementBuffer.length == 0) {
+ // unexpected EOF
+ throw (SQLException) createException(BYTES_CONVERSION_ERROR);
+ }
+ System.arraycopy(segementBuffer, 0, resultBuffer, offset, segementBuffer.length);
+ offset += segementBuffer.length;
}
+ return resultBuffer;
+ }
+ finally {
+ gdsHelper.closeBlob(blob);
+ }
}
-
- return bout.toByteArray();
+ catch (GDSException e) {
+ throw new FBSQLException(e);
+ }
+ }
}
public byte[] getCachedObject() throws SQLException {
if (getFieldData()==null)
return BYTES_NULL_VALUE;
The patch is also available at
http://alex.olmisoft.com/files/ResultSet_Blob_getBytes.patch
--
Best regards,
Alexey mailto:alex+news@...
I want to continue optimizing the ResultSet.getBytes() method for Blobs
started in the end of December.
This is more optimized version - the Blob.length() method is partially
inlined, so the Blob is opened only once.
This version sends only one more packet to the server.
My opinion is that for small blobs everything is fast enough, so
nobody will notice that one more packet. And for large Blobs my
version has more effective memory usage.
I really need effective Blob operations. We keep images and other
binary data in the database, and slow operations are annoying our users.
Using
Blob blob = rs.getBlob(...);
blob.getBytes(1,blob.length());
is worse than my version because it executes additional openBlob() &
closeBlob() during length().
Also I would like to discuss one more possible optimization - could an
additional parameter be added to getBlobSegment() so that a single
buffer can be reused across calls?
What do you think ?
Index: jdbc/FBBlob.java
===================================================================
RCS file: /cvsroot/firebird/client-java/src/main/org/firebirdsql/jdbc/FBBlob.java,v
retrieving revision 1.34
diff -u -u -U6 -r1.34 FBBlob.java
--- jdbc/FBBlob.java 7 Sep 2005 22:08:45 -0000 1.34
+++ jdbc/FBBlob.java 24 Mar 2006 11:01:19 -0000
@@ -55,13 +55,13 @@
* This class is new in the JDBC 2.0 API.
* @since 1.2
*/
public class FBBlob implements FirebirdBlob, Synchronizable {
- private static final boolean SEGMENTED = true;
+ public static final boolean SEGMENTED = true;
public static final int READ_FULLY_BUFFER_SIZE = 16 * 1024;
/**
* bufferlength is the size of the buffer for blob input and output streams,
* also used for the BufferedInputStream/BufferedOutputStream wrappers.
*
@@ -185,25 +185,27 @@
} finally {
if (blobListener != null)
blobListener.executionCompleted(this);
}
}
}
+
+ public static final byte[] BLOB_LENGTH_REQUEST = new byte[]{ISCConstants.isc_info_blob_total_length};
/**
* Returns the number of bytes in the <code>BLOB</code> value
* designated by this <code>Blob</code> object.
* @return length of the <code>BLOB</code> in bytes
* @exception SQLException if there is an error accessing the
* length of the <code>BLOB</code>
* @since 1.2
* @see <a href="package-summary.html#2.0 API">What Is in the JDBC 2.0 API</a>
*/
public long length() throws SQLException {
byte[] info = getInfo(
- new byte[]{ISCConstants.isc_info_blob_total_length}, 20);
+ BLOB_LENGTH_REQUEST, 20);
return interpretLength(info, 0);
}
/**
* Interpret BLOB length from buffer.
@@ -212,23 +214,37 @@
* @param position where to start interpreting.
*
* @return length of the blob.
*
* @throws SQLException if length cannot be interpreted.
*/
- private long interpretLength(byte[] info, int position) throws SQLException {
+ public static long interpretLength(GDS gds, byte[] info, int position) throws SQLException {
if (info[position] != ISCConstants.isc_info_blob_total_length)
throw new FBSQLException("Length is not available.");
int dataLength =
- gdsHelper.getInternalAPIHandler().iscVaxInteger(info, position + 1, 2);
+ gds.iscVaxInteger(info, position + 1, 2);
- return gdsHelper.getInternalAPIHandler().iscVaxInteger(
+ return gds.iscVaxInteger(
info, position + 3, dataLength);
}
+
+ /**
+ * Interpret BLOB length from buffer.
+ *
+ * @param info server response.
+ * @param position where to start interpreting.
+ *
+ * @return length of the blob.
+ *
+ * @throws SQLException if length cannot be interpreted.
+ */
+ private long interpretLength(byte[] info, int position) throws SQLException {
+ return interpretLength(gdsHelper.getInternalAPIHandler(), info, position);
+ }
/**
* Check if blob is segmented.
*
* @return <code>true</code> if this blob is segmented,
* otherwise <code>false</code>
Index: jdbc/field/FBBlobField.java
===================================================================
RCS file: /cvsroot/firebird/client-java/src/main/org/firebirdsql/jdbc/field/FBBlobField.java,v
retrieving revision 1.12
diff -u -u -U6 -r1.12 FBBlobField.java
--- jdbc/field/FBBlobField.java 20 Jan 2006 04:09:34 -0000 1.12
+++ jdbc/field/FBBlobField.java 24 Mar 2006 10:45:21 -0000
@@ -23,12 +23,16 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.sql.SQLException;
import java.sql.Blob;
+import org.firebirdsql.gds.GDS;
+import org.firebirdsql.gds.GDSException;
+import org.firebirdsql.gds.ISCConstants;
+import org.firebirdsql.gds.IscBlobHandle;
import org.firebirdsql.gds.XSQLVAR;
import org.firebirdsql.jdbc.*;
/**
* Describe class <code>FBBlobField</code> here.
*
@@ -111,49 +115,45 @@
// BYTES_CONVERSION_ERROR);
return getBytesInternal();
}
public byte[] getBytesInternal() throws SQLException {
- Blob blob = getBlob();
-
- if (blob == BLOB_NULL_VALUE)
- return BYTES_NULL_VALUE;
-
- InputStream in = blob.getBinaryStream();
-
- if (in == STREAM_NULL_VALUE)
- return BYTES_NULL_VALUE;
-
- ByteArrayOutputStream bout = new ByteArrayOutputStream();
-
- // copy stream data
- byte[] buff = new byte[BUFF_SIZE];
- int counter = 0;
+ final byte[] blobIdBuffer = getFieldData();
+ if (blobIdBuffer == null) return BYTES_NULL_VALUE;
+ final long blobId = field.decodeLong( blobIdBuffer );
+ synchronized (gdsHelper) {
try {
- while((counter = in.read(buff)) != -1) {
- bout.write(buff, 0, counter);
- }
- } catch(IOException ioex) {
- throw (SQLException)createException(
- BYTES_CONVERSION_ERROR + " " + ioex.getMessage());
- } finally {
- try {
- in.close();
- } catch(IOException ioex) {
- throw new FBSQLException(ioex);
- }
-
- try {
- bout.close();
- } catch(IOException ioex) {
- throw new FBSQLException(ioex);
+ final IscBlobHandle blob = gdsHelper.openBlob(blobId, FBBlob.SEGMENTED);
+ try {
+ final GDS gds = gdsHelper.getInternalAPIHandler();
+ final byte[] lengthBuffer = gds.iscBlobInfo(blob, FBBlob.BLOB_LENGTH_REQUEST, 20);
+ final int blobLength =(int) FBBlob.interpretLength(gds, lengthBuffer, 0);
+ //
+ final int bufferLength = gdsHelper.getBlobBufferLength();
+ final byte[] resultBuffer = new byte[blobLength];
+ int offset = 0;
+ while (offset < blobLength) {
+ final byte[] segementBuffer = gdsHelper.getBlobSegment(blob, bufferLength);
+ if (segementBuffer.length == 0) {
+ // unexpected EOF
+ throw (SQLException) createException(BYTES_CONVERSION_ERROR);
+ }
+ System.arraycopy(segementBuffer, 0, resultBuffer, offset, segementBuffer.length);
+ offset += segementBuffer.length;
}
+ return resultBuffer;
+ }
+ finally {
+ gdsHelper.closeBlob(blob);
+ }
}
-
- return bout.toByteArray();
+ catch (GDSException e) {
+ throw new FBSQLException(e);
+ }
+ }
}
public byte[] getCachedObject() throws SQLException {
if (getFieldData()==null)
return BYTES_NULL_VALUE;
The patch is also available at
http://alex.olmisoft.com/files/ResultSet_Blob_getBytes.patch
--
Best regards,
Alexey mailto:alex+news@...