/driver-legacy/src/main/com/mongodb/gridfs/GridFSDBFile.java

http://github.com/mongodb/mongo-java-driver · Java

/*
 * Copyright 2008-present MongoDB, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.mongodb.gridfs;

import com.mongodb.BasicDBObject;
import com.mongodb.DBObject;
import com.mongodb.MongoException;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

/**
 * This class enables retrieving a GridFS file's metadata and content. Operations include:
 * <ul>
 *     <li>Writing the data to a file on disk or an {@code OutputStream}</li>
 *     <li>Creating an {@code InputStream} from which the data can be read</li>
 * </ul>
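 *
 * <p>A minimal usage sketch (the {@code db} handle and the file/path names below are placeholders, not part of this class):</p>
 * <pre>{@code
 * GridFS gridFs = new GridFS(db);                     // db is an existing com.mongodb.DB handle
 * GridFSDBFile file = gridFs.findOne("example.txt");  // look up a previously stored file by name
 * if (file != null) {
 *     file.writeTo("/tmp/example.txt");               // copy the stored content to disk
 * }
 * }</pre>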
 *
 * @mongodb.driver.manual core/gridfs/ GridFS
 */
public class GridFSDBFile extends GridFSFile {

    /**
     * Returns an InputStream from which data can be read.
     *
     * @return the input stream
     */
    public InputStream getInputStream() {
        return new GridFSInputStream();
    }

    /**
     * Writes the file's data to a file on disk.
     *
     * @param filename the file name on disk
     * @return number of bytes written
     * @throws IOException if there are problems writing to the file
     */
    public long writeTo(final String filename) throws IOException {
        return writeTo(new File(filename));
    }

    /**
     * Writes the file's data to a file on disk.
     *
     * @param file the File object
     * @return number of bytes written
     * @throws IOException if there are problems writing to the {@code file}
     */
    public long writeTo(final File file) throws IOException {
        FileOutputStream out = null;
        try {
            out = new FileOutputStream(file);
            return writeTo(out);
        } finally {
            if (out != null) {
                out.close();
            }
        }
    }

    /**
     * Writes the file's data to an OutputStream.
     *
     * @param out the OutputStream
     * @return number of bytes written
     * @throws IOException if there are problems writing to {@code out}
     */
    public long writeTo(final OutputStream out) throws IOException {
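        // Stream the content chunk by chunk; only one chunk is held in memory at a time.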
        int nc = numChunks();
        for (int i = 0; i < nc; i++) {
            out.write(getChunk(i));
        }
        return length;
    }

    private byte[] getChunk(final int chunkNumber) {
        if (fs == null) {
            throw new IllegalStateException("No GridFS instance defined!");
        }
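        // Chunk documents live in the bucket's chunks collection, keyed by the parent file's
        // "files_id" and the zero-based chunk sequence number "n"; the payload is in "data".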
        DBObject chunk = fs.getChunksCollection().findOne(new BasicDBObject("files_id", id).append("n", chunkNumber));
        if (chunk == null) {
            throw new MongoException("Can't find a chunk! file id: " + id + " chunk: " + chunkNumber);
        }
        return (byte[]) chunk.get("data");
    }

    /**
     * Removes file from GridFS i.e. removes documents from files and chunks collections.
     */
    void remove() {
        fs.getFilesCollection().remove(new BasicDBObject("_id", id));
        fs.getChunksCollection().remove(new BasicDBObject("files_id", id));
    }

    private class GridFSInputStream extends InputStream {
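        // Buffers one chunk at a time: currentChunkId is the chunk currently held in buffer
        // (-1 means nothing has been fetched yet) and offset is the read position within that buffer.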
        private final int numberOfChunks;
        private int currentChunkId = -1;
        private int offset = 0;
        private byte[] buffer = null;

        GridFSInputStream() {
            this.numberOfChunks = numChunks();
        }
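
        // Reports only the unread bytes remaining in the currently buffered chunk, not in the whole file.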
        @Override
        public int available() {
            if (buffer == null) {
                return 0;
            }
            return buffer.length - offset;
        }
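
        // Single-byte read: delegates to read(byte[]) and masks the result to an unsigned value.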
        @Override
        public int read() {
            byte[] b = new byte[1];
            int res = read(b);
            if (res < 0) {
                return -1;
            }
            return b[0] & 0xFF;
        }

        @Override
        public int read(final byte[] b) {
            return read(b, 0, b.length);
        }

        @Override
        public int read(final byte[] b, final int off, final int len) {
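            // Fetch the next chunk lazily once the current buffer is exhausted; a single call
            // returns at most the bytes remaining in the currently buffered chunk.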
            if (buffer == null || offset >= buffer.length) {
                if (currentChunkId + 1 >= numberOfChunks) {
                    return -1;
                }
                buffer = getChunk(++currentChunkId);
                offset = 0;
            }
            int r = Math.min(len, buffer.length - offset);
            System.arraycopy(buffer, offset, b, off, r);
            offset += r;
            return r;
        }

        /**
         * Will smartly skip over chunks without fetching them if possible.
         */
        @Override
        public long skip(final long bytesToSkip) throws IOException {
            if (bytesToSkip <= 0) {
                return 0;
            }
            if (currentChunkId == numberOfChunks) {
                //We're actually skipping over the back end of the file, short-circuit here
                //Don't count those extra bytes to skip in with the return value
                return 0;
            }
            // offset in the whole file
            long offsetInFile = 0;
            if (currentChunkId >= 0) {
                offsetInFile = currentChunkId * chunkSize + offset;
            }
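            // Skipping to or beyond the end of the file: mark the stream as exhausted and
            // report only the bytes that were actually skipped.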
            if (bytesToSkip + offsetInFile >= length) {
                currentChunkId = numberOfChunks;
                buffer = null;
                return length - offsetInFile;
            }
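            // Jump straight to the chunk containing the target position; intermediate chunks
            // are never fetched from the server.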
            int temp = currentChunkId;
            currentChunkId = (int) ((bytesToSkip + offsetInFile) / chunkSize);
            if (temp != currentChunkId) {
                buffer = getChunk(currentChunkId);
            }
            offset = (int) ((bytesToSkip + offsetInFile) % chunkSize);
            return bytesToSkip;
        }
    }
}