mac 10.14
2.2 GHz Intel Core i7
APPLE SSD AP0512M (底下有小伙伴测出相反的结果,跟硬盘有关系)
看RocketMQ源码的时候看到数据写到MappedFile有两种方式:
我的问题是为什么不直接采用第二种方法?于是我通过下面的代码验证两种方式的写性能。
import java.io.*;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
public class MMapTest {
    // Shared write target for both benchmark paths.
    static File file = new File("./test.txt");
    // Direct buffer used by the FileChannel path; (re)allocated per chunk size via init().
    static ByteBuffer buffer;
    // Total bytes written per benchmark run (8 MiB).
    static int fileSize = 8 * 1024 * 1024;
    // Delete the target file after each run so runs don't interfere.
    static boolean del = true;

    public static void main(String[] args) {
        init(1);
        deleteFile();
        int[] sizes = {128, 256, 512, 4096, 8192, 1024 * 16, 1024 * 32, 1024 * 128, 1024 * 512};
        try {
            for (int size : sizes) {
                testDBChannel(size);
                testMappedByteBuffer(size);
                System.out.println();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Allocates a fresh direct buffer of the given capacity into the static field. */
    private static void init(int size) {
        buffer = ByteBuffer.allocateDirect(size);
    }

    /** Best-effort delete of the benchmark file; the return value is intentionally ignored. */
    private static void deleteFile() {
        file.delete();
    }

    /**
     * Writes {@link #fileSize} bytes via DirectByteBuffer + FileChannel, issuing a
     * synchronous flush ({@code force(false)}) after every {@code size}-byte chunk,
     * then prints the elapsed wall-clock time.
     *
     * @param size bytes written (and flushed) per iteration
     * @throws IOException on any channel failure
     */
    private static void testDBChannel(int size) throws IOException {
        init(size);
        // Hoisted out of the loop: the original allocated a new byte[size] per
        // iteration, charging GC pressure to the code path being measured.
        byte[] chunk = new byte[size];
        // try-with-resources: the original leaked the RandomAccessFile/FileChannel.
        try (RandomAccessFile rw = new RandomAccessFile(file, "rw");
             FileChannel channel = rw.getChannel()) {
            int writeSize = 0;
            long start = System.currentTimeMillis(); // primitive long; no boxing
            while (writeSize < fileSize) {
                buffer.clear();
                buffer.put(chunk);
                buffer.flip();
                channel.position(writeSize);
                channel.write(buffer);
                channel.force(false); // sync-flush scenario under test
                writeSize += size;
            }
            System.out.println("DirectBuffer + FileChannel write " + size + " bytes every time cost: " + (System.currentTimeMillis() - start) + "ms");
        }
        if (del)
            deleteFile();
    }

    /**
     * Writes {@link #fileSize} bytes via a MappedByteBuffer, issuing a synchronous
     * {@code force()} (msync) after every {@code size}-byte chunk, then prints the
     * elapsed wall-clock time.
     *
     * @param size bytes written (and forced) per iteration
     * @throws IOException on mapping or channel failure
     */
    private static void testMappedByteBuffer(int size) throws IOException {
        // NOTE: the original called init(size) here, allocating a direct buffer
        // this path never uses; that dead allocation has been removed.
        byte[] chunk = new byte[size];
        try (RandomAccessFile rw = new RandomAccessFile(file, "rw");
             FileChannel channel = rw.getChannel()) {
            MappedByteBuffer map = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileSize);
            int writeSize = 0;
            long start = System.currentTimeMillis();
            while (writeSize < fileSize) {
                map.put(chunk);
                map.force(); // sync-flush scenario under test
                writeSize += size;
            }
            System.out.println("MappedByteBuffer write " + size + " bytes every time cost: " + (System.currentTimeMillis() - start) + "ms");
        }
        if (del)
            deleteFile();
    }
}
输出:
DirectBuffer + FileChannel write 128 bytes every time cost: 3577ms
MappedByteBuffer write 128 bytes every time cost: 13518ms
DirectBuffer + FileChannel write 256 bytes every time cost: 1968ms
MappedByteBuffer write 256 bytes every time cost: 7044ms
DirectBuffer + FileChannel write 512 bytes every time cost: 1001ms
MappedByteBuffer write 512 bytes every time cost: 3037ms
DirectBuffer + FileChannel write 1024 bytes every time cost: 659ms
MappedByteBuffer write 1024 bytes every time cost: 1274ms
DirectBuffer + FileChannel write 4096 bytes every time cost: 214ms
MappedByteBuffer write 4096 bytes every time cost: 331ms
DirectBuffer + FileChannel write 8192 bytes every time cost: 137ms
MappedByteBuffer write 8192 bytes every time cost: 168ms
DirectBuffer + FileChannel write 16384 bytes every time cost: 77ms
MappedByteBuffer write 16384 bytes every time cost: 86ms
DirectBuffer + FileChannel write 32768 bytes every time cost: 44ms
MappedByteBuffer write 32768 bytes every time cost: 58ms
DirectBuffer + FileChannel write 131072 bytes every time cost: 16ms
MappedByteBuffer write 131072 bytes every time cost: 25ms
DirectBuffer + FileChannel write 524288 bytes every time cost: 10ms
MappedByteBuffer write 524288 bytes every time cost: 21ms
我的理解是两种方式都是将数据写入到pageCache中再刷盘的,为什么耗时差这么多,具体两种方式的实现原理是什么?
一般情况下使用RocketMQ 都是异步刷盘,会利用OS的pageCache机制达到很高的性能;上面描述的这个问题是针对同步刷盘情况,按照 @Tyrael 第一种测试,SATA盘情况下,mbb 的性能 是要高于 db+fc 的,更加让我怀疑为什么不直接用mbb。
噜噜哒
相关分类