// /src/main/com/mongodb/gridfs/GridFSDBFile.java
// Source: https://github.com/jorgeortiz85/mongo-java-driver (Java, 205 lines)

// GridFSDBFile.java

/**
 * Copyright (C) 2008 10gen Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.mongodb.gridfs;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import com.mongodb.BasicDBObject;
import com.mongodb.BasicDBObjectBuilder;
import com.mongodb.DBObject;
import com.mongodb.MongoException;
/**
 * This class enables retrieving a GridFS file's metadata and content.
 * Operations include:
 * - writing the data to a file on disk or to an OutputStream
 * - getting each chunk as a byte array
 * - getting an InputStream to stream the data from
 *
 * @author antoine
 */
  35. public class GridFSDBFile extends GridFSFile {
  36. /**
  37. * Returns an InputStream from which data can be read
  38. * @return
  39. */
  40. public InputStream getInputStream(){
  41. return new MyInputStream();
  42. }
  43. /**
  44. * Writes the file's data to a file on disk
  45. * @param filename the file name on disk
  46. * @return
  47. * @throws IOException
  48. */
  49. public long writeTo( String filename ) throws IOException {
  50. return writeTo( new File( filename ) );
  51. }
  52. /**
  53. * Writes the file's data to a file on disk
  54. * @param f the File object
  55. * @return
  56. * @throws IOException
  57. */
  58. public long writeTo( File f ) throws IOException {
  59. FileOutputStream out = null;
  60. try{
  61. out = new FileOutputStream( f );
  62. return writeTo( out);
  63. }finally{
  64. if(out != null)
  65. out.close();
  66. }
  67. }
  68. /**
  69. * Writes the file's data to an OutputStream
  70. * @param out the OutputStream
  71. * @return
  72. * @throws IOException
  73. */
  74. public long writeTo( OutputStream out )
  75. throws IOException {
  76. final int nc = numChunks();
  77. for ( int i=0; i<nc; i++ ){
  78. out.write( getChunk( i ) );
  79. }
  80. return _length;
  81. }
  82. byte[] getChunk( int i ){
  83. if ( _fs == null )
  84. throw new RuntimeException( "no gridfs!" );
  85. DBObject chunk = _fs._chunkCollection.findOne( BasicDBObjectBuilder.start( "files_id" , _id )
  86. .add( "n" , i ).get() );
  87. if ( chunk == null )
  88. throw new MongoException( "can't find a chunk! file id: " + _id + " chunk: " + i );
  89. return (byte[])chunk.get( "data" );
  90. }
  91. class MyInputStream extends InputStream {
  92. MyInputStream(){
  93. _numChunks = numChunks();
  94. }
  95. public int available(){
  96. if ( _data == null )
  97. return 0;
  98. return _data.length - _offset;
  99. }
  100. public void close(){
  101. }
  102. public void mark(int readlimit){
  103. throw new RuntimeException( "mark not supported" );
  104. }
  105. public void reset(){
  106. throw new RuntimeException( "mark not supported" );
  107. }
  108. public boolean markSupported(){
  109. return false;
  110. }
  111. public int read(){
  112. byte b[] = new byte[1];
  113. int res = read( b );
  114. if ( res < 0 )
  115. return -1;
  116. return b[0] & 0xFF;
  117. }
  118. public int read(byte[] b){
  119. return read( b , 0 , b.length );
  120. }
  121. public int read(byte[] b, int off, int len){
  122. if ( _data == null || _offset >= _data.length ){
  123. if ( _currentChunkIdx + 1 >= _numChunks )
  124. return -1;
  125. _data = getChunk( ++_currentChunkIdx );
  126. _offset = 0;
  127. }
  128. int r = Math.min( len , _data.length - _offset );
  129. System.arraycopy( _data , _offset , b , off , r );
  130. _offset += r;
  131. return r;
  132. }
  133. /**
  134. * Will smartly skips over chunks without fetching them if possible.
  135. */
  136. public long skip(long numBytesToSkip) throws IOException {
  137. if (numBytesToSkip <= 0)
  138. return 0;
  139. if (_currentChunkIdx == _numChunks)
  140. //We're actually skipping over the back end of the file, short-circuit here
  141. //Don't count those extra bytes to skip in with the return value
  142. return 0;
  143. if (_offset + numBytesToSkip <= _chunkSize) {
  144. //We're skipping over bytes in the current chunk, adjust the offset accordingly
  145. _offset += numBytesToSkip;
  146. if (_data == null && _currentChunkIdx < _numChunks)
  147. _data = getChunk(_currentChunkIdx);
  148. return numBytesToSkip;
  149. }
  150. //We skipping over the remainder of this chunk, could do this less recursively...
  151. ++_currentChunkIdx;
  152. long skippedBytes = 0;
  153. if (_currentChunkIdx < _numChunks)
  154. skippedBytes = _chunkSize - _offset;
  155. else
  156. skippedBytes = _lastChunkSize;
  157. _offset = 0;
  158. _data = null;
  159. return skippedBytes + skip(numBytesToSkip - skippedBytes);
  160. }
  161. final int _numChunks;
  162. //Math trick to ensure the _lastChunkSize is between 1 and _chunkSize
  163. final long _lastChunkSize = ((_length - 1) % _chunkSize) + 1;
  164. int _currentChunkIdx = -1;
  165. int _offset;
  166. byte[] _data = null;
  167. }
  168. void remove(){
  169. _fs._filesCollection.remove( new BasicDBObject( "_id" , _id ) );
  170. _fs._chunkCollection.remove( new BasicDBObject( "files_id" , _id ) );
  171. }
  172. }