TCP concurrency (180k connections)

I've been studying Netty recently and put together a concurrency test.

Environment:

    One laptop (4 cores, 8 GB RAM) running four VMware VMs (CentOS 7.3):

    192.168.1.125 (4 cores, 2 GB, server)

    192.168.1.121 (2 cores, 1 GB, client)

    192.168.1.122 (2 cores, 1 GB, client)

    192.168.1.123 (2 cores, 1 GB, client)

    Server code: Java + Netty. Client code: Go.

Tuning kernel parameters

Run this on all four VMs:

# chmod +x setup.sh
# ./setup.sh

setup.sh

#!/bin/sh
# Raise the per-process open-file limit (the server will hold 180,000+ sockets).
ulimit -n 300000
# Widen the ephemeral port range (each client VM needs ~60,000 local ports).
echo 1024 65535 > /proc/sys/net/ipv4/ip_local_port_range
# Enlarge the accept-queue limit.
echo 32768 > /proc/sys/net/core/somaxconn
# Cap TIME_WAIT sockets and allow reusing/recycling them.
echo 10000 > /proc/sys/net/ipv4/tcp_max_tw_buckets
echo 1 > /proc/sys/net/ipv4/tcp_tw_reuse
echo 1 > /proc/sys/net/ipv4/tcp_tw_recycle
# Let conntrack track up to 300,000 connections.
echo 300000 > /proc/sys/net/netfilter/nf_conntrack_max
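Two caveats worth noting. The /proc writes take effect immediately but do not survive a reboot, so the script has to be re-run after each boot (or the values moved into /etc/sysctl.conf). And ulimit -n only affects the shell that runs it, so start the server from a shell where the limit was actually raised. To double-check the values on a VM, they can simply be read back; the CheckTuning class below is a small sketch of mine, not part of the original test code:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

// Hypothetical helper (not from the original post): reads back the /proc
// entries that setup.sh writes, so the tuning can be verified on each VM.
public class CheckTuning {
        public static void main(String[] args) throws IOException {
                String[] procFiles = {
                        "/proc/sys/net/ipv4/ip_local_port_range",
                        "/proc/sys/net/core/somaxconn",
                        "/proc/sys/net/ipv4/tcp_max_tw_buckets",
                        "/proc/sys/net/ipv4/tcp_tw_reuse",
                        "/proc/sys/net/ipv4/tcp_tw_recycle",
                        "/proc/sys/net/netfilter/nf_conntrack_max",
                };
                for (String f : procFiles) {
                        System.out.println(f + " = "
                                + new String(Files.readAllBytes(Paths.get(f))).trim());
                }
        }
}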

Compiling the server:

I compiled the code on the 192.168.1.125 VM. The directory layout:

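(The layout below is inferred from the compile and run commands that follow.)

    netty/
    ├── bin/
    ├── lib/
    │   └── netty-all-4.1.5.Final.jar
    └── src/
        └── myserver/
            ├── NioServer.java
            └── ClientHandler.java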

From the netty directory:

javac -cp .:lib/netty-all-4.1.5.Final.jar -d bin src/myserver/*.java

and the build is done.

To run it, cd into the bin directory:

[root@dockerx bin]# java -cp .:../lib/netty-all-4.1.5.Final.jar myserver.NioServer
server started

Compiling the client: install Go, then run the following in the directory containing client.go:

# go build client.go

This produces an executable named client; copy it to the three client VMs.

I put the binary under /opt on all three client VMs.

Run it directly from /opt on all three clients; each VM opens 60,000 connections, so the three VMs together account for 180,000:

[root@node1 opt]# ./client

Once all three clients are connected, the periodic counter printed by printClientCount reaches 180,000:

[root@dockerx bin]# java -cp .:../lib/netty-all-4.1.5.Final.jar myserver.NioServer
server started
...
...
=> 180000
=> 180000

Source code:

NioServer.java

package myserver;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public class NioServer {
        // Live connections, keyed by remote address; the mapped value is unused.
        public static ConcurrentHashMap<Object, Integer> clients = new ConcurrentHashMap<Object, Integer>();
        public void start() throws IOException, InterruptedException {
                final ClientHandler clientHandler = new ClientHandler();
                EventLoopGroup acceptGroup = new NioEventLoopGroup(4);
                EventLoopGroup clientGroup = new NioEventLoopGroup(4);

                try {
                        ServerBootstrap strap = new ServerBootstrap();
                        strap.group(acceptGroup, clientGroup);
                        strap.channel(NioServerSocketChannel.class);
                        strap.localAddress(new InetSocketAddress(5000));
                        strap.option(ChannelOption.SO_BACKLOG, 1024);
                        strap.childHandler(new ChannelInitializer<SocketChannel>() {
                                 @Override
                                 public void initChannel(SocketChannel ch) throws Exception {
                                         clients.put(ch.remoteAddress(), 1);
                                         ch.pipeline().addLast(clientHandler);
                                 }
                         });
                        strap.childOption(ChannelOption.SO_REUSEADDR, true);

                        ChannelFuture future = strap.bind().sync();
                        System.out.println("server started");
                        future.channel().closeFuture().sync();
                }finally{
                        acceptGroup.shutdownGracefully();
                        clientGroup.shutdownGracefully();
                }
        }

        // Prints the live connection count every 20 seconds (first print after 10).
        public static void printClientCount() {
                Runnable task = new Runnable() {
                        public void run() {
                                System.out.println("=> " + clients.size());
                        }
                };

                ScheduledExecutorService service = Executors.newSingleThreadScheduledExecutor();
                service.scheduleAtFixedRate(task, 10, 20, TimeUnit.SECONDS);
        }

        public static void main(String[] args) throws Exception{
                printClientCount();
                new NioServer().start();
        }
}
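A few notes on this setup: the first NioEventLoopGroup accepts new connections and the second handles I/O on the accepted channels (Netty's boss/worker split), each with 4 threads to match the server VM's 4 cores. ChannelOption.SO_BACKLOG asks the kernel for an accept queue of 1024; on Linux the effective queue length is the smaller of this value and net.core.somaxconn, which setup.sh raised to 32768, so the requested 1024 is what actually applies.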

ClientHandler.java

package myserver;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

@Sharable
public class ClientHandler extends ChannelInboundHandlerAdapter {

        // Echo the received bytes back; the write is flushed in channelReadComplete.
        @Override
        public void channelRead(ChannelHandlerContext ctx, Object msg) {
                ByteBuf in = (ByteBuf) msg;
                ctx.write(in);
        }

        // Flush the pending echo, then close the connection.
        @Override
        public void channelReadComplete(ChannelHandlerContext ctx) {
                ctx.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE);
        }

        // On error, drop the client from the count and close the channel.
        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
                NioServer.clients.remove(ctx.channel().remoteAddress());
                ctx.close();
        }

}
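One caveat in the counting logic: entries are removed from NioServer.clients only in exceptionCaught, so a connection that closes cleanly (for instance via the close in channelReadComplete) is never subtracted from the printed count. A variant like the hypothetical CountingClientHandler below (my sketch, not part of the original post) would also decrement on clean disconnects; to use it, initChannel in NioServer would register it in place of ClientHandler:

package myserver;

import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;

// Hypothetical variant (not in the original post): also removes the client
// from the shared map when the channel goes inactive, so cleanly closed
// connections are reflected in the printed count.
@Sharable
public class CountingClientHandler extends ClientHandler {

        @Override
        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
                NioServer.clients.remove(ctx.channel().remoteAddress());
                super.channelInactive(ctx);
        }
}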

client.go

package main

import (
        "log"
        "net"
)


func main() {
        // Open 60,000 TCP connections to the Netty server and keep them alive.
        for i := 0; i < 60000; i++ {
                conn, err := net.Dial("tcp", "192.168.1.125:5000")
                if err != nil {
                        log.Println(err)
                        return
                }

                go Read(conn)
        }

        log.Println("ok")
        select {}
}

// Read blocks on the connection so it stays open; it returns only on error.
func Read(conn net.Conn) {
        buf := make([]byte, 64)
        for {
                _, err := conn.Read(buf)
                if err != nil {
                        log.Println(err)
                        return
                }
        }
}
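Why 60,000 connections per client VM: every outbound connection consumes one local ephemeral port, and the 1024-65535 range set by setup.sh provides 64,512 usable ports, so 60,000 fits with some headroom. On the server side, all 180,000 sockets share the single listening port 5000, but each one still costs a file descriptor, which is what the ulimit -n 300000 covers.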


Original post: https://www.cnblogs.com/bear129/p/8629812.html