forked from maxmind/MaxMind-DB-Reader-java
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathBufferHolder.java
More file actions
113 lines (104 loc) · 4.24 KB
/
BufferHolder.java
File metadata and controls
113 lines (104 loc) · 4.24 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
package com.maxmind.db;
import com.maxmind.db.Reader.FileMode;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.List;
final class BufferHolder {
    // DO NOT PASS OUTSIDE THIS CLASS. Doing so will remove thread safety.
    private final Buffer buffer;

    /**
     * Constructs a holder backed by the given database file, using the
     * default chunk size to decide between single- and multi-buffer layouts.
     *
     * @param database the MaxMind DB file to open.
     * @param mode     {@code MEMORY} to read the file fully into memory,
     *                 otherwise the file is memory-mapped.
     * @throws IOException if the file cannot be opened or fully read.
     */
    BufferHolder(File database, FileMode mode) throws IOException {
        this(database, mode, MultiBuffer.DEFAULT_CHUNK_SIZE);
    }

    /**
     * Constructs a holder backed by the given database file.
     *
     * @param database  the MaxMind DB file to open.
     * @param mode      {@code MEMORY} to read the file fully into memory,
     *                  otherwise the file is memory-mapped.
     * @param chunkSize files no larger than this use a single buffer;
     *                  larger files are split across multiple buffers.
     * @throws IOException if the file cannot be opened or fully read.
     */
    BufferHolder(File database, FileMode mode, int chunkSize) throws IOException {
        try (RandomAccessFile file = new RandomAccessFile(database, "r");
                FileChannel channel = file.getChannel()) {
            long size = channel.size();
            if (mode == FileMode.MEMORY) {
                Buffer buf = size <= chunkSize
                    ? new SingleBuffer(size)
                    : new MultiBuffer(size);
                // A short read here means the file shrank or the channel
                // misbehaved; the database would be unusable, so fail fast.
                if (buf.readFrom(channel) != buf.capacity()) {
                    throw new IOException("Unable to read "
                        + database.getName()
                        + " into memory. Unexpected end of stream.");
                }
                this.buffer = buf;
            } else {
                this.buffer = size <= chunkSize
                    ? SingleBuffer.mapFromChannel(channel)
                    : MultiBuffer.mapFromChannel(channel);
            }
        }
    }

    /**
     * Construct a BufferHolder from the provided InputStream.
     *
     * @param stream the source of my bytes.
     * @throws IOException if unable to read from your source.
     * @throws NullPointerException if you provide a NULL InputStream
     */
    BufferHolder(InputStream stream) throws IOException {
        this(stream, MultiBuffer.DEFAULT_CHUNK_SIZE);
    }

    /**
     * Construct a BufferHolder from the provided InputStream.
     *
     * @param stream    the source of my bytes.
     * @param chunkSize the size of each intermediate chunk; inputs no larger
     *                  than this are held in a single buffer.
     * @throws IOException if unable to read from your source.
     * @throws NullPointerException if you provide a NULL InputStream
     */
    BufferHolder(InputStream stream, int chunkSize) throws IOException {
        if (null == stream) {
            throw new NullPointerException("Unable to use a NULL InputStream");
        }
        List<ByteBuffer> chunks = new ArrayList<>();
        long total = 0;
        byte[] tmp = new byte[chunkSize];
        boolean eof = false;
        while (!eof) {
            // InputStream.read may return fewer bytes than requested, so fill
            // the chunk completely before wrapping it. MultiBuffer is given a
            // fixed chunkSize, which presumably requires every chunk except
            // the last to be exactly chunkSize bytes; a partial read midway
            // through the stream must not produce a short chunk.
            int filled = 0;
            while (filled < chunkSize) {
                int read = stream.read(tmp, filled, chunkSize - filled);
                if (read == -1) {
                    eof = true;
                    break;
                }
                filled += read;
            }
            if (filled > 0) {
                ByteBuffer chunk = ByteBuffer.allocate(filled);
                chunk.put(tmp, 0, filled);
                chunk.flip();
                chunks.add(chunk);
                total += filled;
            }
        }
        if (total <= chunkSize) {
            // Small input: flatten the (at most one) chunk into a plain
            // byte array. The cast is safe because total <= chunkSize (int).
            byte[] data = new byte[(int) total];
            int pos = 0;
            for (ByteBuffer chunk : chunks) {
                System.arraycopy(chunk.array(), 0, data, pos, chunk.capacity());
                pos += chunk.capacity();
            }
            this.buffer = SingleBuffer.wrap(data);
        } else {
            this.buffer = new MultiBuffer(chunks.toArray(new ByteBuffer[0]), chunkSize);
        }
    }

    /**
     * Returns a duplicate of the underlying Buffer. The returned Buffer
     * should not be shared between threads.
     *
     * @return a duplicate view of the underlying buffer for use by a
     *     single thread.
     */
    Buffer get() {
        // The Java API docs for buffer state:
        //
        //     Buffers are not safe for use by multiple concurrent threads. If a buffer is to be
        //     used by more than one thread then access to the buffer should be controlled by
        //     appropriate synchronization.
        //
        // As such, you may think that this should be synchronized. This used to be the case, but
        // we had several complaints about the synchronization causing contention, e.g.:
        //
        // * https://github.com/maxmind/MaxMind-DB-Reader-java/issues/65
        // * https://github.com/maxmind/MaxMind-DB-Reader-java/pull/69
        //
        // Given that we are not modifying the original Buffer in any way and all currently
        // known and most reasonably imaginable implementations of duplicate() only do read
        // operations on the original buffer object, the risk of not synchronizing this call seems
        // relatively low and worth taking for the performance benefit when lookups are being done
        // from many threads.
        return this.buffer.duplicate();
    }
}