手记

netty自定义Decoder用于自定义协议

在使用netty时由于自定义协议的分割符是在数据包体的头部,netty提供的DelimiterBasedFrameDecoder并不能满足我们的需求。


自定义的decode如下

package com.llvision.netty.decoder;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;

/**
 * @author yd
 * 2017/04/08
 */
public class CustomFrameDecoder  extends ByteToMessageDecoder {
    private final Logger logger= LoggerFactory.getLogger(CustomFrameDecoder.class);
    private static int HEADER_SIZE = 4;
    private final ByteBuf delimiter;
    private final int maxFrameLength;
    private static ByteBuf buf = Unpooled.buffer();

    public CustomFrameDecoder(int maxFrameLength,ByteBuf delimiter ) {
        this.delimiter = delimiter;
        this.maxFrameLength = maxFrameLength;
    }

    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
        Object decoded = this.decode(ctx, in);
        if(decoded != null) {
            out.add(decoded);
        }
    }

    protected Object decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception {
        in.markReaderIndex();
        int headerOffset = indexOf(in, delimiter);
        if (headerOffset < 0) {
            in.resetReaderIndex();
            return null;
        } else {
            in.skipBytes(headerOffset + 5);
        }
       int bodyLength=bodyLenght(headerOffset,in);
        if(in.readableBytes()<bodyLength){
            in.resetReaderIndex();
            return null;
        }else {
            in.readBytes(buf, bodyLength);
            ByteBuf frame = ctx.alloc().buffer(bodyLength);
            frame.writeBytes(buf);
            buf.clear();
            return frame;
        }
    }

    private static int indexOf(ByteBuf haystack, ByteBuf needle) {
        for(int i = haystack.readerIndex(); i < haystack.writerIndex(); ++i) {
            int haystackIndex = i;
            int needleIndex;
            for(needleIndex = 0; needleIndex < needle.capacity() && haystack.getByte(haystackIndex) == needle.getByte(needleIndex); ++needleIndex) {
                ++haystackIndex;
                if(haystackIndex == haystack.writerIndex() && needleIndex != needle.capacity() - 1) {
                    return -1;
                }
            }
            if(needleIndex == needle.capacity()) {
                return i - haystack.readerIndex();
            }
        }

        return -1;
    }

    /**
    * 功能:获取信息头中的长度
    * */
    private  int bodyLenght(int header,ByteBuf buf){
        int bodyLength=0;
        int headerIndex=header+1,headerEnd=header+4;
        for(;headerIndex<=headerEnd;headerIndex++){
            bodyLength*=10;
            bodyLength+=(int)buf.getByte(headerIndex)-48;
        }
        return bodyLength;
    }
}

但在使用该解码器用客户端施加压力时,可达到1s 2000个的数据并发量。当大于这个量时会方式解析崩溃的问题,猜测是Tcp粘包的问题。也可能是机器性能问题。还请解答

0人推荐
随时随地看视频
慕课网APP