From 9bf029dc2b63d4fe401894d721917dffac2b19a0 Mon Sep 17 00:00:00 2001 From: Jon Chiappetta Date: Wed, 6 Aug 2025 16:33:18 -0400 Subject: [PATCH 1/2] bulk mode --- src/openvpn/forward.c | 285 ++++++++++++++++++++++++++++++++++------- src/openvpn/forward.h | 7 + src/openvpn/init.c | 64 +++++++++ src/openvpn/mtu.c | 11 +- src/openvpn/mtu.h | 15 +++ src/openvpn/mudp.c | 10 +- src/openvpn/multi.c | 191 +++++++++++++++++++++++---- src/openvpn/multi.h | 22 +++- src/openvpn/multi_io.c | 88 ++++++------- src/openvpn/multi_io.h | 1 + src/openvpn/openvpn.h | 11 ++ src/openvpn/options.c | 10 ++ src/openvpn/options.h | 3 + 13 files changed, 590 insertions(+), 128 deletions(-) diff --git a/src/openvpn/forward.c b/src/openvpn/forward.c index 6f1bc0cb1a8..a8805608bc7 100644 --- a/src/openvpn/forward.c +++ b/src/openvpn/forward.c @@ -45,6 +45,9 @@ #include "memdbg.h" +#include +#include + counter_type link_read_bytes_global; /* GLOBAL */ counter_type link_write_bytes_global; /* GLOBAL */ @@ -612,6 +615,21 @@ buffer_turnover(const uint8_t *orig_buf, struct buffer *dest_stub, struct buffer } } +uint8_t *buff_prepsize(uint8_t *buff, int *size) +{ + buff[0] = (uint8_t)((*size >> 8) & 0xff); + buff[1] = (uint8_t)((*size >> 0) & 0xff); + buff += 2; + return buff; +} + +uint8_t *buff_postsize(uint8_t *buff, int *size) +{ + *size = ((buff[0] << 8) + (buff[1] << 0)); + buff += 2; + return buff; +} + /* * Compress, fragment, encrypt and HMAC-sign an outgoing packet. 
* Input: c->c2.buf @@ -889,7 +907,7 @@ socks_postprocess_incoming_link(struct context *c, struct link_socket *sock) { if (sock->socks_proxy && sock->info.proto == PROTO_UDP) { - socks_process_incoming_udp(&c->c2.buf, &c->c2.from); + socks_process_incoming_udp(&c->c2.buf2, &c->c2.from); } } @@ -919,7 +937,7 @@ link_socket_write_post_size_adjust(int *size, int size_delta, struct buffer *buf } /* - * Output: c->c2.buf + * Output: c->c2.buf2 */ void @@ -933,10 +951,10 @@ read_incoming_link(struct context *c, struct link_socket *sock) /*ASSERT (!c->c2.to_tun.len);*/ - c->c2.buf = c->c2.buffers->read_link_buf; - ASSERT(buf_init(&c->c2.buf, c->c2.frame.buf.headroom)); + c->c2.buf2 = c->c2.buffers->read_link_buf; + ASSERT(buf_init(&c->c2.buf2, c->c2.frame.buf.headroom)); - status = link_socket_read(sock, &c->c2.buf, &c->c2.from); + status = link_socket_read(sock, &c->c2.buf2, &c->c2.from); if (socket_connection_reset(sock, status)) { @@ -989,11 +1007,11 @@ process_incoming_link_part1(struct context *c, struct link_socket_info *lsi, boo struct gc_arena gc = gc_new(); bool decrypt_status = false; - if (c->c2.buf.len > 0) + if (c->c2.buf2.len > 0) { - c->c2.link_read_bytes += c->c2.buf.len; - link_read_bytes_global += c->c2.buf.len; - c->c2.original_recv_size = c->c2.buf.len; + c->c2.link_read_bytes += c->c2.buf2.len; + link_read_bytes_global += c->c2.buf2.len; + c->c2.original_recv_size = c->c2.buf2.len; } else { @@ -1006,21 +1024,22 @@ process_incoming_link_part1(struct context *c, struct link_socket_info *lsi, boo { if (!ask_gremlin(c->options.gremlin)) { - c->c2.buf.len = 0; + c->c2.buf2.len = 0; } - corrupt_gremlin(&c->c2.buf, c->options.gremlin); + corrupt_gremlin(&c->c2.buf2, c->options.gremlin); } #endif /* log incoming packet */ #ifdef LOG_RW - if (c->c2.log_rw && c->c2.buf.len > 0) + if (c->c2.log_rw && c->c2.buf2.len > 0) { fprintf(stderr, "R"); } #endif + msg(D_LINK_RW, "%s READ [%d] from %s: %s", proto2ascii(lsi->proto, lsi->af, true), - BLEN(&c->c2.buf), 
print_link_socket_actual(&c->c2.from, &gc), PROTO_DUMP(&c->c2.buf, &gc)); + BLEN(&c->c2.buf2), print_link_socket_actual(&c->c2.from, &gc), PROTO_DUMP(&c->c2.buf2, &gc)); /* * Good, non-zero length packet received. @@ -1029,18 +1048,18 @@ process_incoming_link_part1(struct context *c, struct link_socket_info *lsi, boo * If any stage fails, it sets buf.len to 0 or -1, * telling downstream stages to ignore the packet. */ - if (c->c2.buf.len > 0) + if (c->c2.buf2.len > 0) { struct crypto_options *co = NULL; const uint8_t *ad_start = NULL; - if (!link_socket_verify_incoming_addr(&c->c2.buf, lsi, &c->c2.from)) + if (!link_socket_verify_incoming_addr(&c->c2.buf2, lsi, &c->c2.from)) { - link_socket_bad_incoming_addr(&c->c2.buf, lsi, &c->c2.from); + link_socket_bad_incoming_addr(&c->c2.buf2, lsi, &c->c2.from); } if (c->c2.tls_multi) { - uint8_t opcode = *BPTR(&c->c2.buf) >> P_OPCODE_SHIFT; + uint8_t opcode = *BPTR(&c->c2.buf2) >> P_OPCODE_SHIFT; /* * If DCO is enabled, the kernel drivers require that the @@ -1054,7 +1073,7 @@ process_incoming_link_part1(struct context *c, struct link_socket_info *lsi, boo { msg(D_LINK_ERRORS, "Data Channel Offload doesn't support DATA_V1 packets. " "Upgrade your server to 2.4.5 or newer."); - c->c2.buf.len = 0; + c->c2.buf2.len = 0; } /* @@ -1067,7 +1086,7 @@ process_incoming_link_part1(struct context *c, struct link_socket_info *lsi, boo * will load crypto_options with the correct encryption key * and return false. 
*/ - if (tls_pre_decrypt(c->c2.tls_multi, &c->c2.from, &c->c2.buf, &co, floated, &ad_start)) + if (tls_pre_decrypt(c->c2.tls_multi, &c->c2.from, &c->c2.buf2, &co, floated, &ad_start)) { interval_action(&c->c2.tmp_int); @@ -1090,12 +1109,12 @@ process_incoming_link_part1(struct context *c, struct link_socket_info *lsi, boo */ if (c->c2.tls_multi && c->c2.tls_multi->multi_state < CAS_CONNECT_DONE) { - c->c2.buf.len = 0; + c->c2.buf2.len = 0; } /* authenticate and decrypt the incoming packet */ decrypt_status = - openvpn_decrypt(&c->c2.buf, c->c2.buffers->decrypt_buf, co, &c->c2.frame, ad_start); + openvpn_decrypt(&c->c2.buf2, c->c2.buffers->decrypt_buf, co, &c->c2.frame, ad_start); if (!decrypt_status /* on the instance context we have only one socket, so just check the first one */ @@ -1120,12 +1139,12 @@ void process_incoming_link_part2(struct context *c, struct link_socket_info *lsi, const uint8_t *orig_buf) { - if (c->c2.buf.len > 0) + if (c->c2.buf2.len > 0) { #ifdef ENABLE_FRAGMENT if (c->c2.fragment) { - fragment_incoming(c->c2.fragment, &c->c2.buf, &c->c2.frame_fragment); + fragment_incoming(c->c2.fragment, &c->c2.buf2, &c->c2.frame_fragment); } #endif @@ -1133,14 +1152,14 @@ process_incoming_link_part2(struct context *c, struct link_socket_info *lsi, /* decompress the incoming packet */ if (c->c2.comp_context) { - (*c->c2.comp_context->alg.decompress)(&c->c2.buf, c->c2.buffers->decompress_buf, + (*c->c2.comp_context->alg.decompress)(&c->c2.buf2, c->c2.buffers->decompress_buf, c->c2.comp_context, &c->c2.frame); } #endif #ifdef PACKET_TRUNCATION_CHECK - /* if (c->c2.buf.len > 1) --c->c2.buf.len; */ - ipv4_packet_size_verify(BPTR(&c->c2.buf), BLEN(&c->c2.buf), TUNNEL_TYPE(c->c1.tuntap), + /* if (c->c2.buf2.len > 1) --c->c2.buf2.len; */ + ipv4_packet_size_verify(BPTR(&c->c2.buf2), BLEN(&c->c2.buf2), TUNNEL_TYPE(c->c1.tuntap), "POST_DECRYPT", &c->c2.n_trunc_post_decrypt); #endif @@ -1153,39 +1172,39 @@ process_incoming_link_part2(struct context *c, struct 
link_socket_info *lsi, * * Also, update the persisted version of our packet-id. */ - if (!TLS_MODE(c) && c->c2.buf.len > 0) + if (!TLS_MODE(c) && c->c2.buf2.len > 0) { link_socket_set_outgoing_addr(lsi, &c->c2.from, NULL, c->c2.es); } /* reset packet received timer */ - if (c->options.ping_rec_timeout && c->c2.buf.len > 0) + if (c->options.ping_rec_timeout && c->c2.buf2.len > 0) { event_timeout_reset(&c->c2.ping_rec_interval); } /* increment authenticated receive byte count */ - if (c->c2.buf.len > 0) + if (c->c2.buf2.len > 0) { - c->c2.link_read_bytes_auth += c->c2.buf.len; + c->c2.link_read_bytes_auth += c->c2.buf2.len; c->c2.max_recv_size_local = max_int(c->c2.original_recv_size, c->c2.max_recv_size_local); } /* Did we just receive an openvpn ping packet? */ - if (is_ping_msg(&c->c2.buf)) + if (is_ping_msg(&c->c2.buf2)) { dmsg(D_PING, "RECEIVED PING PACKET"); - c->c2.buf.len = 0; /* drop packet */ + c->c2.buf2.len = 0; /* drop packet */ } /* Did we just receive an OCC packet? */ - if (is_occ_msg(&c->c2.buf)) + if (is_occ_msg(&c->c2.buf2)) { process_received_occ_msg(c); } - buffer_turnover(orig_buf, &c->c2.to_tun, &c->c2.buf, &c->c2.buffers->read_link_buf); + buffer_turnover(orig_buf, &c->c2.to_tun, &c->c2.buf2, &c->c2.buffers->read_link_buf); /* to_tun defined + unopened tuntap can cause deadlock */ if (!tuntap_defined(c->c1.tuntap)) @@ -1199,14 +1218,31 @@ process_incoming_link_part2(struct context *c, struct link_socket_info *lsi, } } +void process_incoming_link_part3(struct context *c) +{ + if (BULK_MODE(c)) + { + if (c->c2.buf2.len > 0) + { + c->c2.to_tun.offset += 2; + c->c2.buf2.offset += 2; + } + else + { + buf_reset(&c->c2.to_tun); + } + } +} + static void process_incoming_link(struct context *c, struct link_socket *sock) { struct link_socket_info *lsi = &sock->info; - const uint8_t *orig_buf = c->c2.buf.data; + const uint8_t *orig_buf = c->c2.buf2.data; process_incoming_link_part1(c, lsi, false); process_incoming_link_part2(c, lsi, orig_buf); + 
process_incoming_link_part3(c); } void @@ -1302,7 +1338,7 @@ process_incoming_dco(dco_context_t *dco) */ void -read_incoming_tun(struct context *c) +read_incoming_tun_part2(struct context *c) { /* * Setup for read() call on TUN/TAP device. @@ -1357,6 +1393,63 @@ read_incoming_tun(struct context *c) check_status(c->c2.buf.len, "read from TUN/TAP", NULL, c->c1.tuntap); } +void read_incoming_tun_part3(struct context *c) +{ + fd_set rfds; + struct timeval timo; + int plen = 0, pidx = -1; + int fdno = c->c1.tuntap->fd; + for (int x = 0; x < TUN_BAT_MIN; ++x) + { + int leng = plen, indx = (pidx + 1); + if (leng < 1) + { + FD_ZERO(&rfds); + FD_SET(fdno, &rfds); + timo.tv_sec = 0; + timo.tv_usec = 0; + select(fdno+1, &rfds, NULL, NULL, &timo); + if (FD_ISSET(fdno, &rfds)) + { + read_incoming_tun_part2(c); + plen = BLEN(&c->c2.buf); + } + else + { + break; + } + } + leng = plen; + if (leng > 0) + { + c->c2.buffers->read_tun_bufs[indx].offset = TUN_BAT_OFF; + c->c2.buffers->read_tun_bufs[indx].len = leng; + bcopy(BPTR(&c->c2.buf), BPTR(&c->c2.buffers->read_tun_bufs[indx]), leng); + c->c2.bufs[indx] = c->c2.buffers->read_tun_bufs[indx]; + pidx = indx; + } + else + { + break; + } + plen = 0; + } + c->c2.buffers->bulk_indx = 0; + c->c2.buffers->bulk_leng = (pidx + 1); +} + +void read_incoming_tun(struct context *c) +{ + if (!BULK_MODE(c)) + { + read_incoming_tun_part2(c); + } + else + { + read_incoming_tun_part3(c); + } +} + /** * Drops UDP packets which OS decided to route via tun. 
* @@ -1481,7 +1574,7 @@ drop_if_recursive_routing(struct context *c, struct buffer *buf) */ void -process_incoming_tun(struct context *c, struct link_socket *out_sock) +process_incoming_tun_part2(struct context *c, struct link_socket *out_sock) { struct gc_arena gc = gc_new(); @@ -1498,7 +1591,7 @@ process_incoming_tun(struct context *c, struct link_socket *out_sock) #endif /* Show packet content */ - dmsg(D_TUN_RW, "TUN READ [%d]", BLEN(&c->c2.buf)); + dmsg(D_TUN_RW, "TUN READ [%d] [%d]", BLEN(&c->c2.buf), c->c2.frame.buf.payload_size); if (c->c2.buf.len > 0) { @@ -1522,7 +1615,10 @@ process_incoming_tun(struct context *c, struct link_socket *out_sock) } if (c->c2.buf.len > 0) { - encrypt_sign(c, true); + if (!BULK_MODE(c)) + { + encrypt_sign(c, true); + } } else { @@ -1531,6 +1627,60 @@ process_incoming_tun(struct context *c, struct link_socket *out_sock) gc_free(&gc); } +void process_incoming_tun_part3(struct context *c, struct link_socket *out_sock) +{ + if (BULK_DATA(c->c2.buffers)) + { + c->c2.buffers->read_tun_max.offset = TUN_BAT_OFF; + c->c2.buffers->read_tun_max.len = 0; + uint8_t *temp = BPTR(&c->c2.buffers->read_tun_max); + int leng = c->c2.buffers->bulk_leng; + int plen = 0, maxl = 0; + for (int x = 0; x < leng; ++x) + { + c->c2.buf = c->c2.bufs[x]; + process_incoming_tun_part2(c, out_sock); + plen = BLEN(&c->c2.buf); + if (plen > 0) + { + temp = buff_prepsize(temp, &plen); + bcopy(BPTR(&c->c2.buf), temp, plen); + temp += plen; maxl += (plen + 2); + } + c->c2.bufs[x].len = 0; + } + if (maxl > 0) + { + c->c2.buffers->read_tun_max.offset = TUN_BAT_OFF; + c->c2.buffers->read_tun_max.len = maxl; + c->c2.buf = c->c2.buffers->read_tun_max; + encrypt_sign(c, true); + } + else + { + buf_reset(&c->c2.to_link); + } + } + else + { + buf_reset(&c->c2.to_link); + } + c->c2.buffers->bulk_indx = -1; + c->c2.buffers->bulk_leng = -1; +} + +void process_incoming_tun(struct context *c, struct link_socket *out_sock) +{ + if (!BULK_MODE(c)) + { + 
process_incoming_tun_part2(c, out_sock); + } + else + { + process_incoming_tun_part3(c, out_sock); + } +} + /** * Forges a IPv6 ICMP packet with a no route to host error code from the * IPv6 packet in buf and sends it directly back to the client via the tun @@ -1759,7 +1909,7 @@ process_outgoing_link(struct context *c, struct link_socket *sock) struct gc_arena gc = gc_new(); int error_code = 0; - if (c->c2.to_link.len > 0 && c->c2.to_link.len <= c->c2.frame.buf.payload_size) + if (c->c2.to_link.len > 0 && (c->c2.to_link.len <= c->c2.frame.buf.payload_size || c->c2.frame.bulk_size > 0)) { /* * Setup for call to send/sendto which will send @@ -1804,6 +1954,7 @@ process_outgoing_link(struct context *c, struct link_socket *sock) fprintf(stderr, "W"); } #endif + msg(D_LINK_RW, "%s WRITE [%d] to %s: %s", proto2ascii(sock->info.proto, sock->info.af, true), BLEN(&c->c2.to_link), print_link_socket_actual(c->c2.to_link_addr, &gc), PROTO_DUMP(&c->c2.to_link, &gc)); @@ -1888,7 +2039,7 @@ process_outgoing_link(struct context *c, struct link_socket *sock) */ void -process_outgoing_tun(struct context *c, struct link_socket *in_sock) +process_outgoing_tun_part2(struct context *c, struct link_socket *in_sock) { /* * Set up for write() call to TUN/TAP @@ -1919,7 +2070,8 @@ process_outgoing_tun(struct context *c, struct link_socket *in_sock) fprintf(stderr, "w"); } #endif - dmsg(D_TUN_RW, "TUN WRITE [%d]", BLEN(&c->c2.to_tun)); + + dmsg(D_TUN_RW, "TUN WRITE [%d] [%d]", BLEN(&c->c2.to_tun), c->c2.frame.buf.payload_size); #ifdef PACKET_TRUNCATION_CHECK ipv4_packet_size_verify(BPTR(&c->c2.to_tun), BLEN(&c->c2.to_tun), TUNNEL_TYPE(c->c1.tuntap), @@ -1973,6 +2125,49 @@ process_outgoing_tun(struct context *c, struct link_socket *in_sock) buf_reset(&c->c2.to_tun); } +void process_outgoing_tun_part3(struct context *c, struct link_socket *in_sock) +{ + if ((c->c2.to_tun.len > 0) && (c->c2.to_tun.offset > 1)) + { + c->c2.to_tun.offset -= 2; + buf_init(&c->c2.buffers->send_tun_max, 
TUN_BAT_OFF); + buf_copy(&c->c2.buffers->send_tun_max, &c->c2.to_tun); + int maxl = 0, plen = 0; + int leng = BLEN(&c->c2.buffers->send_tun_max); + uint8_t *temp = BPTR(&c->c2.buffers->send_tun_max); + for (int x = 0; x < TUN_BAT_MIN; ++x) + { + temp = buff_postsize(temp, &plen); + if ((leng > 0) && (plen > 0) && ((maxl + plen) < leng)) + { + c->c2.to_tun = c->c2.buffers->to_tun_max; + c->c2.to_tun.offset = TUN_BAT_OFF; + c->c2.to_tun.len = plen; + bcopy(temp, BPTR(&c->c2.to_tun), plen); + temp += plen; maxl += (plen + 2); + process_outgoing_tun_part2(c, in_sock); + } + else + { + break; + } + } + } + buf_reset(&c->c2.to_tun); +} + +void process_outgoing_tun(struct context *c, struct link_socket *in_sock) +{ + if (!BULK_MODE(c)) + { + process_outgoing_tun_part2(c, in_sock); + } + else + { + process_outgoing_tun_part3(c, in_sock); + } +} + void pre_select(struct context *c) { @@ -2246,7 +2441,7 @@ io_wait_dowork(struct context *c, const unsigned int flags) if (!c->sig->signal_received) { - if (!(flags & IOW_CHECK_RESIDUAL) || !sockets_read_residual(c)) + if (true) { int status; @@ -2298,9 +2493,9 @@ io_wait_dowork(struct context *c, const unsigned int flags) c->c2.event_set_status = ES_TIMEOUT; } } - else + if (sockets_read_residual(c)) { - c->c2.event_set_status = SOCKET_READ; + c->c2.event_set_status |= (SOCKET_READ << SOCKET_SHIFT); } } diff --git a/src/openvpn/forward.h b/src/openvpn/forward.h index 06808b93b3d..31ca497c685 100644 --- a/src/openvpn/forward.h +++ b/src/openvpn/forward.h @@ -34,6 +34,11 @@ * file */ +#define BULK_MODE(c) (c && c->c2.frame.bulk_size > 0) +#define BULK_DATA(b) (b && (b->bulk_leng > 0) && (b->bulk_indx < b->bulk_leng)) +#define INST_LENG(a) (a && (a->inst_leng > 0) && (a->inst_indx < a->inst_leng)) +#define LINK_LEFT(i) (i && sockets_read_residual(i)) + #define TUN_OUT(c) (BLEN(&(c)->c2.to_tun) > 0) #define LINK_OUT(c) (BLEN(&(c)->c2.to_link) > 0) #define ANY_OUT(c) (TUN_OUT(c) || LINK_OUT(c)) @@ -196,6 +201,8 @@ bool 
process_incoming_link_part1(struct context *c, struct link_socket_info *lsi void process_incoming_link_part2(struct context *c, struct link_socket_info *lsi, const uint8_t *orig_buf); +void process_incoming_link_part3(struct context *c); + /** * Transfers \c float_sa data extracted from an incoming DCO * PEER_FLOAT_NTF to \c out_osaddr for later processing. diff --git a/src/openvpn/init.c b/src/openvpn/init.c index fc079e1124a..d796ea57f7c 100644 --- a/src/openvpn/init.c +++ b/src/openvpn/init.c @@ -2952,6 +2952,11 @@ frame_finalize_options(struct context *c, const struct options *o) tailroom += COMP_EXTRA_BUFFER(payload_size); #endif + if (frame->bulk_size > 0) + { + payload_size = BAT_SIZE(TUN_BAT_ONE, frame->tun_mtu, TUN_BAT_OFF); + } + frame->buf.payload_size = payload_size; frame->buf.headroom = headroom; frame->buf.tailroom = tailroom; @@ -3455,6 +3460,10 @@ do_init_frame_tls(struct context *c) if (c->c2.tls_multi) { tls_multi_init_finalize(c->c2.tls_multi, c->options.ce.tls_mtu); + if (c->c2.frame.bulk_size > 0) + { + c->c2.tls_multi->opt.frame.buf.payload_size = c->c2.frame.tun_mtu; + } ASSERT(c->c2.tls_multi->opt.frame.buf.payload_size <= c->c2.frame.buf.payload_size); frame_print(&c->c2.tls_multi->opt.frame, D_MTU_INFO, "Control Channel MTU parms"); @@ -3522,6 +3531,14 @@ do_init_frame(struct context *c) c->c2.frame.extra_tun += c->options.ce.tun_mtu_extra; } + /* + * Adjust bulk size based on the --bulk-mode parameter. + */ + if (c->options.ce.bulk_mode) + { + c->c2.frame.bulk_size = c->options.ce.tun_mtu; + } + /* * Fill in the blanks in the frame parameters structure, * make sure values are rational, etc. 
@@ -3662,9 +3679,45 @@ init_context_buffers(const struct frame *frame) size_t buf_size = BUF_SIZE(frame); + if (frame->bulk_size > 0) + { + size_t off_size = (frame->buf.headroom + TUN_BAT_OFF + frame->buf.tailroom); + buf_size = BAT_SIZE(TUN_BAT_MAX, frame->tun_mtu, off_size); + } + + dmsg(M_INFO, "BULK bufs [%ld] [%d+%d+%d]", buf_size, frame->buf.headroom, frame->buf.payload_size, frame->buf.tailroom); + b->read_link_buf = alloc_buf(buf_size); b->read_tun_buf = alloc_buf(buf_size); + if (frame->bulk_size > 0) + { + size_t off_size = (frame->buf.headroom + TUN_BAT_OFF + frame->buf.tailroom); + size_t one_size = BAT_SIZE(TUN_BAT_ONE, frame->tun_mtu, off_size); + + for (int x = 0; x < TUN_BAT_MAX; ++x) + { + b->read_tun_bufs[x] = alloc_buf(one_size); + b->read_tun_bufs[x].offset = TUN_BAT_OFF; + b->read_tun_bufs[x].len = 0; + } + + b->read_tun_max = alloc_buf(buf_size); + b->read_tun_max.offset = TUN_BAT_OFF; + b->read_tun_max.len = 0; + + b->send_tun_max = alloc_buf(buf_size); + b->send_tun_max.offset = TUN_BAT_OFF; + b->send_tun_max.len = 0; + + b->to_tun_max = alloc_buf(buf_size); + b->to_tun_max.offset = TUN_BAT_OFF; + b->to_tun_max.len = 0; + } + + b->bulk_indx = -1; + b->bulk_leng = -1; + b->aux_buf = alloc_buf(buf_size); b->encrypt_buf = alloc_buf(buf_size); @@ -3687,6 +3740,17 @@ free_context_buffers(struct context_buffers *b) free_buf(&b->read_tun_buf); free_buf(&b->aux_buf); + if (b->to_tun_max.data) + { + free_buf(&b->to_tun_max); + free_buf(&b->send_tun_max); + free_buf(&b->read_tun_max); + for (int x = 0; x < TUN_BAT_MAX; ++x) + { + free_buf(&b->read_tun_bufs[x]); + } + } + #ifdef USE_COMP free_buf(&b->compress_buf); free_buf(&b->decompress_buf); diff --git a/src/openvpn/mtu.c b/src/openvpn/mtu.c index e080ea9eb09..6b3d521fb46 100644 --- a/src/openvpn/mtu.c +++ b/src/openvpn/mtu.c @@ -41,9 +41,16 @@ void alloc_buf_sock_tun(struct buffer *buf, const struct frame *frame) { /* allocate buffer for overlapped I/O */ - *buf = alloc_buf(BUF_SIZE(frame)); + int 
alen = BUF_SIZE(frame); + int blen = frame->buf.payload_size; + if (frame->bulk_size > 0) + { + alen = BAT_SIZE(TUN_BAT_MAX, frame->tun_mtu, TUN_BAT_OFF); + blen = BAT_SIZE(TUN_BAT_MAX, frame->tun_mtu, TUN_BAT_NOP); + } + *buf = alloc_buf(alen); ASSERT(buf_init(buf, frame->buf.headroom)); - buf->len = frame->buf.payload_size; + buf->len = blen; ASSERT(buf_safe(buf, 0)); } diff --git a/src/openvpn/mtu.h b/src/openvpn/mtu.h index b6901491e14..1a610034c7f 100644 --- a/src/openvpn/mtu.h +++ b/src/openvpn/mtu.h @@ -58,6 +58,15 @@ */ #define TUN_MTU_MIN 100 +/* + * Bulk mode static define values. + */ +#define TUN_BAT_MIN 6 +#define TUN_BAT_MAX 9 +#define TUN_BAT_OFF 250 +#define TUN_BAT_NOP 0 +#define TUN_BAT_ONE 1 + /* * Default MTU of network over which tunnel data will pass by TCP/UDP. */ @@ -157,6 +166,11 @@ struct frame * which defaults to 0 for tun and 32 * (\c TAP_MTU_EXTRA_DEFAULT) for tap. * */ + + int bulk_size; /**< Configure and setup in the init library + * frame function to signal and inform the various + * related functions to process bulk mode data transfers. + * */ }; /* Forward declarations, to prevent includes */ @@ -176,6 +190,7 @@ struct options; * larger than the headroom. */ #define BUF_SIZE(f) ((f)->buf.headroom + (f)->buf.payload_size + (f)->buf.tailroom) +#define BAT_SIZE(a, b, c) ((a * b) + c) /* * Function prototypes. 
diff --git a/src/openvpn/mudp.c b/src/openvpn/mudp.c index b03e165e45e..c2f820ed834 100644 --- a/src/openvpn/mudp.c +++ b/src/openvpn/mudp.c @@ -369,15 +369,15 @@ unsigned int p2mp_iow_flags(const struct multi_context *m) { unsigned int flags = IOW_WAIT_SIGNAL; - if (m->pending) + if (m->pending || m->pending2) { - if (TUN_OUT(&m->pending->context)) + if (m->pending && LINK_OUT(&m->pending->context)) { - flags |= IOW_TO_TUN; + flags |= IOW_TO_LINK; } - if (LINK_OUT(&m->pending->context)) + if (m->pending2 && TUN_OUT(&m->pending2->context)) { - flags |= IOW_TO_LINK; + flags |= IOW_TO_TUN; } } else if (mbuf_defined(m->mbuf)) diff --git a/src/openvpn/multi.c b/src/openvpn/multi.c index 153695c49b4..cf5b422804e 100644 --- a/src/openvpn/multi.c +++ b/src/openvpn/multi.c @@ -424,6 +424,10 @@ multi_init(struct context *t) } m->deferred_shutdown_signal.signal_received = 0; + + m->inst_indx = -1; + m->inst_leng = -1; + m->inst_list = calloc(TUN_BAT_MAX, sizeof(struct multi_instance *)); } const char * @@ -584,6 +588,10 @@ multi_close_instance(struct multi_context *m, struct multi_instance *mi, bool sh { multi_set_pending(m, NULL); } + if (m->pending2 == mi) + { + multi_set_pending2(m, NULL); + } if (m->earliest_wakeup == mi) { m->earliest_wakeup = NULL; @@ -705,6 +713,10 @@ multi_uninit(struct multi_context *m) multi_reap_free(m->reaper); mroute_helper_free(m->route_helper); multi_io_free(m->multi_io); + + m->inst_indx = -1; + m->inst_leng = -1; + free(m->inst_list); } } @@ -3009,7 +3021,7 @@ multi_process_post(struct multi_context *m, struct multi_instance *mi, const uns if (!IS_SIG(&mi->context) && ((flags & MPP_PRE_SELECT) - || ((flags & MPP_CONDITIONAL_PRE_SELECT) && !ANY_OUT(&mi->context)))) + || ((flags & MPP_CONDITIONAL_PRE_SELECT) && !LINK_OUT(&mi->context)))) { #if defined(ENABLE_ASYNC_PUSH) bool was_unauthenticated = true; @@ -3076,10 +3088,11 @@ multi_process_post(struct multi_context *m, struct multi_instance *mi, const uns else { /* continue to pend on output? 
*/ - multi_set_pending(m, ANY_OUT(&mi->context) ? mi : NULL); + multi_set_pending(m, LINK_OUT(&mi->context) ? mi : NULL); + multi_set_pending2(m, TUN_OUT(&mi->context) ? mi : NULL); #ifdef MULTI_DEBUG_EVENT_LOOP - printf("POST %s[%d] to=%d lo=%d/%d w=%" PRIi64 "/%ld\n", id(mi), (int)(mi == m->pending), + printf("POST %s[%d][%d] to=%d lo=%d/%d w=%" PRIi64 "/%ld\n", id(mi), (int)(mi == m->pending), (int)(mi == m->pending2), mi ? mi->context.c2.to_tun.len : -1, mi ? mi->context.c2.to_link.len : -1, (mi && mi->context.c2.fragment) ? mi->context.c2.fragment->outgoing.len : -1, (int64_t)mi->context.c2.timeval.tv_sec, (long)mi->context.c2.timeval.tv_usec); @@ -3138,7 +3151,7 @@ multi_process_float(struct multi_context *m, struct multi_instance *mi, struct l msg(D_MULTI_LOW, "Disallow float to an address taken by another client %s", multi_instance_string(ex_mi, false, &gc)); - mi->context.c2.buf.len = 0; + mi->context.c2.buf2.len = 0; goto done; } @@ -3343,7 +3356,7 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst bool ret = true; bool floated = false; - if (m->pending) + if (m->pending2) { return true; } @@ -3351,26 +3364,26 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst if (!instance) { #ifdef MULTI_DEBUG_EVENT_LOOP - printf("TCP/UDP -> TUN [%d]\n", BLEN(&m->top.c2.buf)); + printf("TCP/UDP -> TUN [%d]\n", BLEN(&m->top.c2.buf2)); #endif - multi_set_pending(m, multi_get_create_instance_udp(m, &floated, sock)); + multi_set_pending2(m, multi_get_create_instance_udp(m, &floated, sock)); } else { - multi_set_pending(m, instance); + multi_set_pending2(m, instance); } - if (m->pending) + if (m->pending2) { - set_prefix(m->pending); + set_prefix(m->pending2); /* get instance context */ - c = &m->pending->context; + c = &m->pending2->context; if (!instance) { /* transfer packet pointer from top-level context buffer to instance */ - c->c2.buf = m->top.c2.buf; + c->c2.buf2 = m->top.c2.buf2; /* transfer from-addr 
from top-level context buffer to instance */ if (!floated) @@ -3379,7 +3392,7 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst } } - if (BLEN(&c->c2.buf) > 0) + if (BLEN(&c->c2.buf2) > 0) { struct link_socket_info *lsi; const uint8_t *orig_buf; @@ -3387,16 +3400,17 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst /* decrypt in instance context */ lsi = &sock->info; - orig_buf = c->c2.buf.data; + orig_buf = c->c2.buf2.data; if (process_incoming_link_part1(c, lsi, floated)) { /* nonzero length means that we have a valid, decrypted packed */ - if (floated && c->c2.buf.len > 0) + if (floated && c->c2.buf2.len > 0) { - multi_process_float(m, m->pending, sock); + multi_process_float(m, m->pending2, sock); } process_incoming_link_part2(c, lsi, orig_buf); + process_incoming_link_part3(c); } if (TUNNEL_TYPE(m->top.c1.tuntap) == DEV_TYPE_TUN) @@ -3411,7 +3425,7 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst c->c2.to_tun.len = 0; } /* make sure that source address is associated with this client */ - else if (multi_get_instance_by_virtual_addr(m, &src, true) != m->pending) + else if (multi_get_instance_by_virtual_addr(m, &src, true) != m->pending2) { /* IPv6 link-local address (fe80::xxx)? 
*/ if ((src.type & MR_ADDR_MASK) == MR_ADDR_IPV6 @@ -3434,7 +3448,7 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst if (mroute_flags & MROUTE_EXTRACT_MCAST) { /* for now, treat multicast as broadcast */ - multi_bcast(m, &c->c2.to_tun, m->pending, 0); + multi_bcast(m, &c->c2.to_tun, m->pending2, 0); } else /* possible client to client routing */ { @@ -3476,14 +3490,14 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst if (mroute_flags & MROUTE_EXTRACT_SUCCEEDED) { - if (multi_learn_addr(m, m->pending, &src, 0) == m->pending) + if (multi_learn_addr(m, m->pending2, &src, 0) == m->pending2) { /* check for broadcast */ if (m->enable_c2c) { if (mroute_flags & (MROUTE_EXTRACT_BCAST | MROUTE_EXTRACT_MCAST)) { - multi_bcast(m, &c->c2.to_tun, m->pending, vid); + multi_bcast(m, &c->c2.to_tun, m->pending2, vid); } else /* try client-to-client routing */ { @@ -3515,7 +3529,7 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst } /* postprocess and set wakeup */ - ret = multi_process_post(m, m->pending, mpp_flags); + ret = multi_process_post(m, m->pending2, mpp_flags); clear_prefix(); } @@ -3524,12 +3538,19 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst return ret; } +int min_max(int a, int b, int c) +{ + if (a > c) { return c; } + if (a < b) { return b; } + return a; +} + /* * Process packets in the TUN/TAP interface -> TCP/UDP socket direction, * i.e. server -> client direction. 
*/ bool -multi_process_incoming_tun(struct multi_context *m, const unsigned int mpp_flags) +multi_process_incoming_tun_part2(struct multi_context *m, const unsigned int mpp_flags) { bool ret = true; @@ -3575,10 +3596,30 @@ multi_process_incoming_tun(struct multi_context *m, const unsigned int mpp_flags /* for now, treat multicast as broadcast */ multi_bcast(m, &m->top.c2.buf, NULL, vid); } + else if (m->inst_indx == -9) + { + struct multi_instance *inst = multi_get_instance_by_virtual_addr(m, &dest, dev_type == DEV_TYPE_TUN); + if (inst) + { + int leng = m->inst_leng; + for (int x = 0; x < leng; ++x) + { + if (m->inst_list[x] == inst) + { + m->inst_indx = x; + return true; + } + } + leng = min_max(leng, 0, TUN_BAT_MIN - 1); + m->inst_list[leng] = inst; + m->inst_indx = leng; + m->inst_leng = (leng + 1); + } + return true; + } else { - multi_set_pending( - m, multi_get_instance_by_virtual_addr(m, &dest, dev_type == DEV_TYPE_TUN)); + multi_set_pending(m, multi_get_instance_by_virtual_addr(m, &dest, dev_type == DEV_TYPE_TUN)); if (m->pending) { @@ -3616,6 +3657,106 @@ multi_process_incoming_tun(struct multi_context *m, const unsigned int mpp_flags return ret; } +bool multi_process_inp_tun_post(struct multi_context *m, const unsigned int mpp_flags) +{ + if (!INST_LENG(m)) + { + return false; + } + if (m->pending) + { + return false; + } + while (m->inst_indx < m->inst_leng) + { + struct multi_instance *i = m->inst_list[m->inst_indx]; + if (i) + { + if (!(multi_output_queue_ready(m, i))) + { + return false; + } + multi_set_pending(m, i); + set_prefix(m->pending); + multi_process_post(m, m->pending, mpp_flags); + clear_prefix(); + m->inst_list[m->inst_indx] = NULL; + } + m->inst_indx += 1; + break; + } + return true; +} + +bool multi_process_incoming_tun_part3(struct multi_context *m, const unsigned int mpp_flags) +{ + struct context *c, *b = &(m->top); + struct multi_instance *i; + int leng = b->c2.buffers->bulk_leng; + m->inst_indx = -1; + m->inst_leng = -1; + for (int 
x = 0; x < leng; ++x) + { + m->inst_indx = -9; + m->top.c2.buf = b->c2.bufs[x]; + multi_process_incoming_tun_part2(m, mpp_flags); + if (m->inst_indx > -1) + { + i = m->inst_list[m->inst_indx]; + c = &(i->context); + int y = min_max(c->c2.buffers->bulk_leng, 0, TUN_BAT_MIN - 1); + c->c2.buffers->read_tun_bufs[y].offset = TUN_BAT_OFF; + c->c2.buffers->read_tun_bufs[y].len = BLEN(&b->c2.bufs[x]); + bcopy(BPTR(&b->c2.bufs[x]), BPTR(&c->c2.buffers->read_tun_bufs[y]), BLEN(&b->c2.bufs[x])); + c->c2.bufs[y] = c->c2.buffers->read_tun_bufs[y]; + c->c2.buffers->bulk_indx = 0; + c->c2.buffers->bulk_leng = (y + 1); + } + } + b->c2.buffers->bulk_indx = -1; + b->c2.buffers->bulk_leng = -1; + for (int x = 0; x < m->inst_leng; ++x) + { + i = m->inst_list[x]; + c = &(i->context); + c->c2.buf = c->c2.bufs[0]; + process_incoming_tun(c, c->c2.link_sockets[0]); + } + m->inst_indx = 0; + return multi_process_inp_tun_post(m, mpp_flags); +} + +bool multi_process_incoming_tun(struct multi_context *m, const unsigned int mpp_flags) +{ + if (!(m->top.options.ce.bulk_mode)) + { + return multi_process_incoming_tun_part2(m, mpp_flags); + } + else + { + return multi_process_incoming_tun_part3(m, mpp_flags); + } +} + +bool multi_in_tun(struct multi_context *m, const unsigned int mpp_flags) +{ + if (INST_LENG(m)) + { + multi_process_inp_tun_post(m, mpp_flags); + } + else + { + struct context *c = &(m->top); + read_incoming_tun(c); + if (!IS_SIG(c)) + { + multi_process_incoming_tun(m, mpp_flags); + } + return true; + } + return false; +} + /* * Process a possible client-to-client/bcast/mcast message in the * queue. 
@@ -3691,7 +3832,7 @@ multi_process_timeout(struct multi_context *m, const unsigned int mpp_flags) void multi_process_drop_outgoing_tun(struct multi_context *m, const unsigned int mpp_flags) { - struct multi_instance *mi = m->pending; + struct multi_instance *mi = m->pending2; ASSERT(mi); diff --git a/src/openvpn/multi.h b/src/openvpn/multi.h index a44f9f25b04..d3c7fca1ebe 100644 --- a/src/openvpn/multi.h +++ b/src/openvpn/multi.h @@ -194,6 +194,7 @@ struct multi_context #endif struct multi_instance *pending; + struct multi_instance *pending2; struct multi_instance *earliest_wakeup; struct multi_instance **mpp_touched; struct context_buffers *context_buffers; @@ -217,6 +218,10 @@ struct multi_context #endif struct deferred_signal_schedule_entry deferred_shutdown_signal; + + int inst_indx; + int inst_leng; + struct multi_instance **inst_list; }; /** @@ -256,6 +261,7 @@ struct multi_route */ void tunnel_server(struct context *top); +int min_max(int a, int b, int c); const char *multi_instance_string(const struct multi_instance *mi, bool null, struct gc_arena *gc); @@ -354,6 +360,9 @@ bool multi_process_incoming_link(struct multi_context *m, struct multi_instance */ bool multi_process_incoming_tun(struct multi_context *m, const unsigned int mpp_flags); +bool multi_process_inp_tun_post(struct multi_context *m, const unsigned int mpp_flags); + +bool multi_in_tun(struct multi_context *m, const unsigned int mpp_flags); void multi_process_drop_outgoing_tun(struct multi_context *m, const unsigned int mpp_flags); @@ -406,7 +415,6 @@ static inline struct multi_instance * multi_process_outgoing_link_pre(struct multi_context *m) { struct multi_instance *mi = NULL; - if (m->pending) { mi = m->pending; @@ -631,7 +639,7 @@ multi_get_timeout_instance(struct multi_context *m, struct timeval *dest) static inline bool multi_process_outgoing_tun(struct multi_context *m, const unsigned int mpp_flags) { - struct multi_instance *mi = m->pending; + struct multi_instance *mi = m->pending2; 
/**
 * Record @mi as the instance with pending work on the secondary pending
 * slot (mirrors multi_set_pending() which manages m->pending).
 * Passing NULL clears the slot.
 */
static inline void
multi_set_pending2(struct multi_context *m, struct multi_instance *mi)
{
    m->pending2 = mi;
}
- case MTP_TUN_OUT: - newaction = TA_TUN_WRITE; - break; - - case MTP_LINK_OUT: - newaction = TA_SOCKET_WRITE; - break; - - case MTP_NONE: - if (mi && sockets_read_residual(c)) - { - newaction = TA_SOCKET_READ_RESIDUAL; - } - else - { - multi_io_set_global_rw_flags(m, mi); - } - break; - - default: - { - struct gc_arena gc = gc_new(); - msg(M_FATAL, "MULTI IO: multi_io_post bad state, mi=%s flags=%d", - multi_instance_string(mi, false, &gc), flags); - gc_free(&gc); - break; - } + multi_io_set_global_rw_flags(m, mi); } +last: dmsg(D_MULTI_DEBUG, "MULTI IO: multi_io_post %s -> %s", pract(action), pract(newaction)); return newaction; @@ -462,19 +441,17 @@ multi_io_process_io(struct multi_context *m) { socket_reset_listen_persistent(ev_arg->u.sock); mi = multi_create_instance_tcp(m, ev_arg->u.sock); + if (mi) { multi_io_action(m, mi, TA_INITIAL, false); } } else { multi_process_io_udp(m, ev_arg->u.sock); - mi = m->pending; + if (m->pending) { multi_io_action(m, m->pending, TA_INITIAL, false); } + if (m->pending2) { multi_io_action(m, m->pending2, TA_INITIAL, false); } } /* monitor and/or handle events that are * triggered in succession by the first one * before returning to the main loop. */ - if (mi) - { - multi_io_action(m, mi, TA_INITIAL, false); - } break; } } @@ -559,7 +536,7 @@ multi_io_action(struct multi_context *m, struct multi_instance *mi, int action, * On our first pass, poll will be false because we already know * that input is available, and to call io_wait would be redundant. */ - if (poll && action != TA_SOCKET_READ_RESIDUAL) + if (poll && action != TA_SOCKET_READ_RESIDUAL && action != TA_INST_LENG) { const int orig_action = action; action = multi_io_wait_lite(m, mi, action, &tun_input_pending); @@ -594,10 +571,22 @@ multi_io_action(struct multi_context *m, struct multi_instance *mi, int action, * for a particular instance, point to * that instance. 
*/ + int retry_undef = 0; if (m->pending) { mi = m->pending; } + if (m->pending2) + { + if (!m->pending) + { + mi = m->pending2; + } + else + { + retry_undef = 1; + } + } /* * Based on the effects of the action, @@ -605,6 +594,11 @@ multi_io_action(struct multi_context *m, struct multi_instance *mi, int action, * possibly transition to a new action state. */ action = multi_io_post(m, mi, action); + if ((action == TA_UNDEF) && (retry_undef == 1)) + { + mi = m->pending2; + action = multi_io_post(m, mi, action); + } /* * If we are finished processing the original action, diff --git a/src/openvpn/multi_io.h b/src/openvpn/multi_io.h index 4a3c60d236d..daf43b0e0f6 100644 --- a/src/openvpn/multi_io.h +++ b/src/openvpn/multi_io.h @@ -44,6 +44,7 @@ #define TA_INITIAL 8 #define TA_TIMEOUT 9 #define TA_TUN_WRITE_TIMEOUT 10 +#define TA_INST_LENG 11 /* * I/O state and events tracker diff --git a/src/openvpn/openvpn.h b/src/openvpn/openvpn.h index a198fcf6d84..30110d818d9 100644 --- a/src/openvpn/openvpn.h +++ b/src/openvpn/openvpn.h @@ -112,6 +112,14 @@ struct context_buffers */ struct buffer read_link_buf; struct buffer read_tun_buf; + + struct buffer read_tun_bufs[TUN_BAT_MAX]; + struct buffer read_tun_max; + struct buffer send_tun_max; + struct buffer to_tun_max; + + int bulk_indx; + int bulk_leng; }; /* @@ -373,9 +381,12 @@ struct context_2 * struct context_buffers. */ struct buffer buf; + struct buffer buf2; struct buffer to_tun; struct buffer to_link; + struct buffer bufs[TUN_BAT_MAX]; + /* should we print R|W|r|w to console on packet transfers? 
*/ bool log_rw; diff --git a/src/openvpn/options.c b/src/openvpn/options.c index a3fc19d6c79..8d93dec4820 100644 --- a/src/openvpn/options.c +++ b/src/openvpn/options.c @@ -306,6 +306,7 @@ static const char usage_message[] = " 'maybe' -- Use per-route hints\n" " 'yes' -- Always DF (Don't Fragment)\n" "--mtu-test : Empirically measure and report MTU.\n" + "--bulk-mode : Use bulk TUN/TCP reads/writes.\n" #ifdef ENABLE_FRAGMENT "--fragment max : Enable internal datagram fragmentation so that no UDP\n" " datagrams are sent which are larger than max bytes.\n" @@ -3298,6 +3299,11 @@ options_postprocess_mutate_invariant(struct options *options) options->pkcs11_providers[0] = DEFAULT_PKCS11_MODULE; } #endif + + if ((options->ce.proto != PROTO_TCP) && (options->ce.proto != PROTO_TCP_SERVER) && (options->ce.proto != PROTO_TCP_CLIENT)) + { + options->ce.bulk_mode = false; + } } static void @@ -9298,6 +9304,10 @@ add_option(struct options *options, char *p[], bool is_inline, const char *file, } options->vlan_pvid = (uint16_t)vlan_pvid; } + else if (streq(p[0], "bulk-mode")) + { + options->ce.bulk_mode = true; + } else { int i; diff --git a/src/openvpn/options.h b/src/openvpn/options.h index 42db9caec6b..954dcc3b975 100644 --- a/src/openvpn/options.h +++ b/src/openvpn/options.h @@ -177,6 +177,9 @@ struct connection_entry /* Allow only client that support resending the wrapped client key */ bool tls_crypt_v2_force_cookie; + + /* Bulk mode allows for multiple tun reads + larger tcp writes */ + bool bulk_mode; }; struct remote_entry From 88786316f003bd73d069b1be039181977ef2293a Mon Sep 17 00:00:00 2001 From: Jon Chiappetta Date: Tue, 12 Aug 2025 14:44:05 -0400 Subject: [PATCH 2/2] mtio mode --- src/openvpn/forward.c | 33 ++- src/openvpn/forward.h | 2 +- src/openvpn/init.c | 5 + src/openvpn/mtcp.c | 7 +- src/openvpn/mtcp.h | 2 +- src/openvpn/mudp.c | 5 +- src/openvpn/multi.c | 512 ++++++++++++++++++++++++++++++++++++----- src/openvpn/multi.h | 23 +- src/openvpn/multi_io.c | 28 ++- 
/*
 * Threaded replacement for the TUN_READ branch of process_io().
 *
 * In mtio mode the tun fd visible to this context is one end of a
 * socketpair (set up in tunnel_server_loop()); a reader thread writes a
 * single token byte there to signal "a tun packet is ready".  This
 * function consumes the token, processes the queued tun packet, and then
 * writes a token to the companion fd (tuntap->fz) to hand the turn back.
 *
 * NOTE(review): gated on b->p->h == b->p->n -- presumably "all worker
 * threads are up", so the socketpair plumbing exists; confirm against
 * tunnel_server_loop().
 * NOTE(review): read()/write() results are deliberately ignored (the
 * byte is only a token), and a short/failed read still falls through to
 * process_incoming_tun() -- verify this is intended.
 */
void threaded_fwd_inp_intf(struct context *c, struct link_socket *sock, struct thread_pointer *b)
{
    if (b->p->h == b->p->n)
    {
        ssize_t size;
        uint8_t temp[1];
        /* consume the "packet ready" token from the signalling fd */
        size = read(c->c1.tuntap->fd, temp, 1);
        if (size < 1) { /* no-op */ }
        if (!IS_SIG(c))
        {
            if (!BULK_MODE(c))
            {
                /* non-bulk path: point c2.buf at the shared tun read buffer */
                c->c2.buf = c->c2.buffers->read_tun_buf;
            }
            process_incoming_tun(c, sock);
        }
        /* return the token so the tun reader thread may continue */
        size = write(c->c1.tuntap->fz, temp, 1);
    }
}
read_incoming_tun(c); - if (!IS_SIG(c)) - { - process_incoming_tun(c, sock); - } + threaded_fwd_inp_intf(c, sock, b); } else if (status & DCO_READ) { diff --git a/src/openvpn/forward.h b/src/openvpn/forward.h index 31ca497c685..0a2797ec2f8 100644 --- a/src/openvpn/forward.h +++ b/src/openvpn/forward.h @@ -82,7 +82,7 @@ void io_wait_dowork(struct context *c, const unsigned int flags); void pre_select(struct context *c); -void process_io(struct context *c, struct link_socket *sock); +void process_io(struct context *c, struct link_socket *sock, struct thread_pointer *b); /**********************************************************************/ diff --git a/src/openvpn/init.c b/src/openvpn/init.c index d796ea57f7c..3d6a2099193 100644 --- a/src/openvpn/init.c +++ b/src/openvpn/init.c @@ -1896,6 +1896,7 @@ do_open_tun(struct context *c, int *error_flags) } /* do ifconfig */ + c->c1.tuntap->skip_bind = c->skip_bind; if (!ifconfig_noexec_enabled(c) && ifconfig_order(c->c1.tuntap) == IFCONFIG_BEFORE_TUN_OPEN) { /* guess actual tun/tap unit number that will be returned @@ -1989,6 +1990,10 @@ do_open_tun(struct context *c, int *error_flags) add_wfp_block(c); } + if (c->c1.tuntap) + { + c->c1.tuntap->fe = c->c1.tuntap->fd; + } gc_free(&gc); return ret; } diff --git a/src/openvpn/mtcp.c b/src/openvpn/mtcp.c index 81310a2bb5a..33207aa0ef9 100644 --- a/src/openvpn/mtcp.c +++ b/src/openvpn/mtcp.c @@ -46,15 +46,18 @@ struct ta_iow_flags }; struct multi_instance * -multi_create_instance_tcp(struct multi_context *m, struct link_socket *sock) +multi_create_instance_tcp(struct thread_pointer *b, struct link_socket *sock) { struct gc_arena gc = gc_new(); + struct multi_context *m = b->p->m[b->i-1]; struct multi_instance *mi = NULL; struct hash *hash = m->hash; - mi = multi_create_instance(m, NULL, sock); + mi = multi_create_instance(b, NULL, sock); if (mi) { + m = b->p->p; + hash = m->hash; mi->real.proto = sock->info.proto; struct hash_element *he; const uint32_t hv = hash_value(hash, 
&mi->real); diff --git a/src/openvpn/mtcp.h b/src/openvpn/mtcp.h index f4b806ccf6c..3ca6f2a33b0 100644 --- a/src/openvpn/mtcp.h +++ b/src/openvpn/mtcp.h @@ -45,7 +45,7 @@ bool multi_tcp_process_outgoing_link(struct multi_context *m, bool defer, bool multi_tcp_process_outgoing_link_ready(struct multi_context *m, struct multi_instance *mi, const unsigned int mpp_flags); -struct multi_instance *multi_create_instance_tcp(struct multi_context *m, struct link_socket *sock); +struct multi_instance *multi_create_instance_tcp(struct thread_pointer *b, struct link_socket *sock); void multi_tcp_link_out_deferred(struct multi_context *m, struct multi_instance *mi); diff --git a/src/openvpn/mudp.c b/src/openvpn/mudp.c index c2f820ed834..68ff638a623 100644 --- a/src/openvpn/mudp.c +++ b/src/openvpn/mudp.c @@ -193,6 +193,8 @@ multi_get_create_instance_udp(struct multi_context *m, bool *floated, struct lin struct mroute_addr real = { 0 }; struct multi_instance *mi = NULL; struct hash *hash = m->hash; + struct context_pointer p = { 0 }; + struct thread_pointer b = { 0 }; real.proto = sock->info.proto; m->hmac_reply_ls = sock; @@ -266,7 +268,8 @@ multi_get_create_instance_udp(struct multi_context *m, bool *floated, struct lin * connect-freq but not against connect-freq-initial */ reflect_filter_rate_limit_decrease(m->initial_rate_limiter); - mi = multi_create_instance(m, &real, sock); + p.p = m; b.p = &p; b.i = -1; + mi = multi_create_instance(&b, &real, sock); if (mi) { hash_add_fast(hash, bucket, &mi->real, hv, mi); diff --git a/src/openvpn/multi.c b/src/openvpn/multi.c index cf5b422804e..d2cf7c13008 100644 --- a/src/openvpn/multi.c +++ b/src/openvpn/multi.c @@ -160,10 +160,15 @@ multi_ifconfig_pool_persist(struct multi_context *m, bool force) static void multi_reap_range(const struct multi_context *m, uint32_t start_bucket, uint32_t end_bucket) { - struct gc_arena gc = gc_new(); struct hash_iterator hi; struct hash_element *he; + /*if (m->top.options.ce.mtio_conf) + { + return; + 
}*/ + + struct gc_arena gc = gc_new(); dmsg(D_MULTI_DEBUG, "MULTI: REAP range %d -> %d", start_bucket, end_bucket); hash_iterator_init_range(m->vhash, &hi, start_bucket, end_bucket); while ((he = hash_iterator_next(&hi)) != NULL) @@ -171,12 +176,13 @@ multi_reap_range(const struct multi_context *m, uint32_t start_bucket, uint32_t struct multi_route *r = (struct multi_route *)he->value; if (!multi_route_defined(m, r)) { - dmsg(D_MULTI_DEBUG, "MULTI: REAP DEL %s", mroute_addr_print(&r->addr, &gc)); + msg(M_INFO, "MULTI: REAP DEL %s", mroute_addr_print(&r->addr, &gc)); learn_address_script(m, NULL, "delete", &r->addr); multi_route_del(r); hash_iterator_delete_element(&hi); } } + hash_iterator_free(&hi); gc_free(&gc); } @@ -428,6 +434,10 @@ multi_init(struct context *t) m->inst_indx = -1; m->inst_leng = -1; m->inst_list = calloc(TUN_BAT_MAX, sizeof(struct multi_instance *)); + + m->mtio_stat = 1; + m->mtio_idno = 1; + bzero(&(m->mtio_info), sizeof(struct multi_info)); } const char * @@ -629,7 +639,7 @@ multi_close_instance(struct multi_context *m, struct multi_instance *mi, bool sh schedule_remove_entry(m->schedule, (struct schedule_entry *)mi); - ifconfig_pool_release(m->ifconfig_pool, mi->vaddr_handle, false); + ifconfig_pool_release(m->mtio_info.pool, mi->vaddr_handle, false); if (mi->did_iroutes) { @@ -645,6 +655,10 @@ multi_close_instance(struct multi_context *m, struct multi_instance *mi, bool sh mbuf_dereference_instance(m->mbuf, mi); } + mi->mtio_stat = 1; + mi->mtio_idno = m->mtio_idno; + bzero(&(mi->mtio_addr), sizeof(struct multi_address)); + #ifdef ENABLE_MANAGEMENT set_cc_config(mi, NULL); #endif @@ -717,18 +731,172 @@ multi_uninit(struct multi_context *m) m->inst_indx = -1; m->inst_leng = -1; free(m->inst_list); + + m->mtio_stat = 1; + m->mtio_idno = 1; + bzero(&(m->mtio_info), sizeof(struct multi_info)); + } +} + +bool multi_context_switch_addr(struct multi_context *m, struct multi_instance *i, bool s, bool l) +{ + struct gc_arena g = gc_new(); + + 
in_addr_t ladr_objc = i->context.c2.push_ifconfig_local; + struct sockaddr_in *wadr_objc = (struct sockaddr_in *)&i->context.c2.link_sockets[0]->info.lsa->actual.dest.addr.sa; + + const char *ladr = print_in_addr_t(ladr_objc, IA_EMPTY_IF_UNDEF, &g); + if ((strcmp(i->mtio_addr.ladr, "") == 0) && ladr) + { + bzero(i->mtio_addr.ladr, MAX_STRLENG * sizeof(char)); + strncpy(i->mtio_addr.ladr, ladr, MAX_STRLENG-5); + } + + const char *wadr = inet_ntoa(wadr_objc->sin_addr); + if ((strcmp(i->mtio_addr.wadr, "") == 0) && wadr) + { + bzero(i->mtio_addr.wadr, MAX_STRLENG * sizeof(char)); + strncpy(i->mtio_addr.wadr, wadr, MAX_STRLENG-5); + } + + const char *comm = tls_common_name(i->context.c2.tls_multi, true); + if ((strcmp(i->mtio_addr.comm, "") == 0) && comm) + { + bzero(i->mtio_addr.comm, MAX_STRLENG * sizeof(char)); + strncpy(i->mtio_addr.comm, comm, MAX_STRLENG-5); + } + + const char *conn = tls_username(i->context.c2.tls_multi, true); + if ((strcmp(i->mtio_addr.user, "") == 0) && conn) + { + bzero(i->mtio_addr.user, MAX_STRLENG * sizeof(char)); + strncpy(i->mtio_addr.user, conn, MAX_STRLENG-5); + } + + const char *uniq = i->mtio_addr.uniq; + if ((strcmp(i->mtio_addr.uniq, "") == 0) && wadr) + { + bzero(i->mtio_addr.uniq, MAX_STRLENG * sizeof(char)); + snprintf(i->mtio_addr.uniq, MAX_STRLENG-5, "%s", wadr); + } + + i->mtio_addr.addr = ladr_objc; + + if (strcmp(uniq, "") == 0) + { + goto last; + } + + if (m) + { + pthread_mutex_lock(m->mtio_info.lock); + for (int x = 0; x < m->mtio_info.maxc; ++x) + { + struct multi_link *l = &(m->mtio_info.link[x]); + if (strcmp(l->uniq, uniq) == 0) + { + if (s) + { + int indx = (i->mtio_idno % MAX_THREADS); + l->adrs[indx] = i->mtio_addr; + } + if (l) + { + for (int y = 0; y < MAX_THREADS; ++y) + { + struct multi_address *a = &(l->adrs[y]); + if (strcmp(a->uniq, "") != 0) + { + multi_learn_in_addr_t(m, i, a->addr, -1, true); + } + } + } + } + } + pthread_mutex_unlock(m->mtio_info.lock); + } + +last: + gc_free(&g); + + return true; +} + 
/*
 * Select (or create) the per-peer "link" slot keyed by the client's WAN
 * address and use it to round-robin this new connection across the
 * worker multi_contexts (b->p->m[]).  Returns the chosen context and
 * records it as the current one in b->p->p.
 *
 * Slot reuse policy: if no slot matches the peer key, the slot with the
 * oldest 'last' timestamp is recycled.
 *
 * NOTE(review): only the slot-insert is protected by m->mtio_info.lock;
 * the search loop before it and the link->indx/link->last update after
 * it run unlocked -- confirm concurrent accepts cannot race here.
 * NOTE(review): the 'last' label and the 'time_t last' variable share a
 * name; legal in C (separate namespaces) but easy to misread.
 */
struct multi_context *multi_context_switch_conn(struct thread_pointer *b, struct multi_context *m, struct multi_instance *i)
{
    /* main/non-threaded caller: just publish m as current and return */
    if (b->i <= 0)
    {
        b->p->p = m;
        return m;
    }

    int indx = -1, fidx = 0;
    time_t secs = time(NULL);
    time_t last = b->p->k[fidx].last;
    struct multi_link *link;

    /* peer key = printable WAN address of the accepting link socket */
    struct sockaddr_in *wadr_objc = (struct sockaddr_in *)&i->context.c2.link_sockets[0]->info.lsa->actual.dest.addr.sa;
    const char *wadr = inet_ntoa(wadr_objc->sin_addr);

    char uniq[MAX_STRLENG];
    bzero(uniq, MAX_STRLENG * sizeof(char));
    if (strcmp(wadr, "") != 0)
    {
        snprintf(uniq, MAX_STRLENG-5, "%s", wadr);
    }

    if (strcmp(uniq, "") == 0)
    {
        goto last;
    }

    /* find a matching slot; track the least-recently-used as fallback */
    for (int x = 0; x < b->p->x; ++x)
    {
        link = &(b->p->k[x]);
        if ((link->last < 1) || (link->last < last))
        {
            fidx = x;
            last = link->last;
        }
        if (strcmp(link->uniq, uniq) == 0)
        {
            indx = x;
            break;
        }
    }

    /* no match: recycle the LRU slot for this peer (insert is locked) */
    pthread_mutex_lock(m->mtio_info.lock);
    if (indx < 0)
    {
        indx = fidx;
        link = &(b->p->k[indx]);
        bzero(link, sizeof(struct multi_link));
        strncpy(link->uniq, uniq, MAX_STRLENG-5);
    }
    pthread_mutex_unlock(m->mtio_info.lock);

    /* round-robin: take the slot's current worker, then advance it */
    link = &(b->p->k[indx]);
    m = b->p->m[link->indx];
    b->p->p = b->p->m[link->indx];
    i->mtio_idno = m->mtio_idno;
    link->indx = ((link->indx + 1) % b->p->n);
    link->last = secs;

last:
    msg(M_INFO, "TCPv4_SERVER MTIO conn [%s][%p] [%d][%d] {%d}{%d}", uniq, m, indx, fidx, i->mtio_idno, m->mtio_idno);

    return m;
}
b->p->m[b->i-1] : b->p->p; msg(D_MULTI_MEDIUM, "MULTI: multi_create_instance called"); @@ -751,6 +919,7 @@ multi_create_instance(struct multi_context *m, const struct mroute_addr *real, { goto err; } + m = multi_context_switch_conn(b, m, mi); mi->context.c2.tls_multi->multi_state = CAS_NOT_CONNECTED; @@ -801,6 +970,10 @@ multi_create_instance(struct multi_context *m, const struct mroute_addr *real, mi->ev_arg.type = EVENT_ARG_MULTI_INSTANCE; mi->ev_arg.u.mi = mi; + mi->mtio_stat = 1; + mi->mtio_idno = m->mtio_idno; + bzero(&(mi->mtio_addr), sizeof(struct multi_address)); + gc_free(&gc); return mi; @@ -1041,8 +1214,7 @@ multi_print_status(struct multi_context *m, struct status_output *so, const int * or NULL if none. */ static struct multi_instance * -multi_learn_addr(struct multi_context *m, struct multi_instance *mi, const struct mroute_addr *addr, - const unsigned int flags) +multi_learn_addr(struct multi_context *m, struct multi_instance *mi, const struct mroute_addr *addr, const unsigned int flags) { struct hash_element *he; const uint32_t hv = hash_value(m->vhash, addr); @@ -1051,6 +1223,18 @@ multi_learn_addr(struct multi_context *m, struct multi_instance *mi, const struc struct multi_instance *owner = NULL; struct gc_arena gc = gc_new(); + struct sockaddr_in *wadr_objc = (struct sockaddr_in *)&mi->context.c2.link_sockets[0]->info.lsa->actual.dest.addr.sa; + const char *ladr = print_in_addr_t(mi->context.c2.push_ifconfig_local, IA_EMPTY_IF_UNDEF, &gc); + const char *madr = mroute_addr_print(addr, &gc); + const char *wadr = inet_ntoa(wadr_objc->sin_addr); + + if (strcmp(ladr, "") == 0) + { + goto last; + } + + msg(M_INFO, "TCPv4_SERVER MTIO addr [%s][%s] [%s][%d] {%d}{%d}", ladr, madr, wadr, mi->mtio_stat, m->mtio_idno, mi->mtio_idno); + /* if route currently exists, get the instance which owns it */ he = hash_lookup_fast(m->vhash, bucket, addr, hv); if (he) @@ -1121,6 +1305,8 @@ multi_learn_addr(struct multi_context *m, struct multi_instance *mi, const struc 
free(newroute); } } + +last: gc_free(&gc); return owner; @@ -1172,7 +1358,10 @@ multi_get_instance_by_virtual_addr(struct multi_context *m, const struct mroute_ { /* found an applicable route, cache host route */ struct multi_instance *mi = route->instance; - multi_learn_addr(m, mi, addr, MULTI_ROUTE_CACHE | MULTI_ROUTE_AGEABLE); + if (!m->top.options.ce.mtio_conf) + { + multi_learn_addr(m, mi, addr, MULTI_ROUTE_CACHE | MULTI_ROUTE_AGEABLE); + } ret = mi; break; } @@ -1203,11 +1392,10 @@ multi_get_instance_by_virtual_addr(struct multi_context *m, const struct mroute_ /* * Helper function to multi_learn_addr(). + * netbits: -1 if host route, otherwise # of network bits in address */ -static struct multi_instance * -multi_learn_in_addr_t(struct multi_context *m, struct multi_instance *mi, in_addr_t a, - int netbits, /* -1 if host route, otherwise # of network bits in address */ - bool primary) +struct multi_instance * +multi_learn_in_addr_t(struct multi_context *m, struct multi_instance *mi, in_addr_t a, int netbits, bool primary) { struct openvpn_sockaddr remote_si; struct mroute_addr addr = { 0 }; @@ -1327,7 +1515,10 @@ multi_add_iroutes(struct multi_context *m, struct multi_instance *mi) mroute_helper_add_iroute46(m->route_helper, ir->netbits); - multi_learn_in_addr_t(m, mi, ir->network, ir->netbits, false); + if (!m->top.options.ce.mtio_conf) + { + multi_learn_in_addr_t(m, mi, ir->network, ir->netbits, false); + } } for (ir6 = mi->context.options.iroutes_ipv6; ir6 != NULL; ir6 = ir6->next) { @@ -1350,6 +1541,11 @@ multi_add_iroutes(struct multi_context *m, struct multi_instance *mi) static void multi_delete_dup(struct multi_context *m, struct multi_instance *new_mi) { + if (m->top.options.ce.mtio_conf) + { + return; + } + if (new_mi) { const char *new_cn = tls_common_name(new_mi->context.c2.tls_multi, true); @@ -1368,6 +1564,7 @@ multi_delete_dup(struct multi_context *m, struct multi_instance *new_mi) const char *cn = tls_common_name(mi->context.c2.tls_multi, 
true); if (cn && !strcmp(cn, new_cn)) { + msg(M_INFO, "MULTI: DEL DUP %s -> %s", cn, new_cn); mi->did_iter = false; multi_close_instance(m, mi, false); hash_iterator_delete_element(&hi); @@ -1394,6 +1591,11 @@ check_stale_routes(struct multi_context *m) struct hash_iterator hi; struct hash_element *he; + if (m->top.options.ce.mtio_conf) + { + return; + } + dmsg(D_MULTI_DEBUG, "MULTI: Checking stale routes"); hash_iterator_init_range(m->vhash, &hi, 0, hash_n_buckets(m->vhash)); while ((he = hash_iterator_next(&hi)) != NULL) @@ -1402,13 +1604,13 @@ check_stale_routes(struct multi_context *m) if (multi_route_defined(m, r) && difftime(now, r->last_reference) >= m->top.options.stale_routes_ageing_time) { - dmsg(D_MULTI_DEBUG, "MULTI: Deleting stale route for address '%s'", - mroute_addr_print(&r->addr, &gc)); + msg(M_INFO, "MULTI: Deleting stale route for address '%s'", mroute_addr_print(&r->addr, &gc)); learn_address_script(m, NULL, "delete", &r->addr); multi_route_del(r); hash_iterator_delete_element(&hi); } } + hash_iterator_free(&hi); gc_free(&gc); } @@ -1452,7 +1654,7 @@ multi_select_virtual_addr(struct multi_context *m, struct multi_instance *mi) * release dynamic allocation */ if (mi->vaddr_handle >= 0) { - ifconfig_pool_release(m->ifconfig_pool, mi->vaddr_handle, true); + ifconfig_pool_release(m->mtio_info.pool, mi->vaddr_handle, true); mi->vaddr_handle = -1; } @@ -1473,20 +1675,21 @@ multi_select_virtual_addr(struct multi_context *m, struct multi_instance *mi) "MULTI_sva: WARNING: if --ifconfig-push is used for IPv4, automatic IPv6 assignment from --ifconfig-ipv6-pool does not work. 
Use --ifconfig-ipv6-push for IPv6 then."); } } - else if (m->ifconfig_pool && mi->vaddr_handle < 0) /* otherwise, choose a pool address */ + else if (m->mtio_info.pool && mi->vaddr_handle < 0) /* otherwise, choose a pool address */ { in_addr_t local = 0, remote = 0; struct in6_addr remote_ipv6; const char *cn = NULL; - if (!mi->context.options.duplicate_cn) + if ((!mi->context.options.duplicate_cn) && (!(m->top.options.ce.mtio_mode))) { cn = tls_common_name(mi->context.c2.tls_multi, true); } CLEAR(remote_ipv6); - mi->vaddr_handle = - ifconfig_pool_acquire(m->ifconfig_pool, &local, &remote, &remote_ipv6, cn); + pthread_mutex_lock(m->mtio_info.lock); + mi->vaddr_handle = ifconfig_pool_acquire(m->mtio_info.pool, &local, &remote, &remote_ipv6, cn); + pthread_mutex_unlock(m->mtio_info.lock); if (mi->vaddr_handle >= 0) { const int tunnel_type = TUNNEL_TYPE(mi->context.c1.tuntap); @@ -2391,6 +2594,12 @@ multi_client_connect_late_setup(struct multi_context *m, struct multi_instance * mi->reporting_addr = mi->context.c2.push_ifconfig_local; mi->reporting_addr_ipv6 = mi->context.c2.push_ifconfig_ipv6_local; + if (mi->mtio_stat <= 1) + { + mi->mtio_stat = 3; + m->mtio_stat = 3; + } + /* set context-level authentication flag */ mi->context.c2.tls_multi->multi_state = CAS_CONNECT_DONE; @@ -2422,7 +2631,7 @@ multi_client_connect_late_setup(struct multi_context *m, struct multi_instance * */ if (TUNNEL_TYPE(mi->context.c1.tuntap) == DEV_TYPE_TUN) { - if (mi->context.c2.push_ifconfig_defined) + if (mi->context.c2.push_ifconfig_defined && !m->top.options.ce.mtio_conf) { multi_learn_in_addr_t(m, mi, mi->context.c2.push_ifconfig_local, -1, true); msg(D_MULTI_LOW, "MULTI: primary virtual IP for %s: %s", @@ -2442,7 +2651,11 @@ multi_client_connect_late_setup(struct multi_context *m, struct multi_instance * /* add routes locally, pointing to new client, if * --iroute options have been specified */ - multi_add_iroutes(m, mi); + if (!mi->did_iroutes) + { + multi_add_iroutes(m, mi); + 
/*
 * Re-learn routes for the peer owning packet address @p.
 *
 * If a candidate instance @i is given, cross-learn its addresses via
 * multi_context_switch_addr() and return it.  Otherwise scan every
 * instance and every shared per-link address slot for an entry matching
 * the packet's IPv4 address and cross-learn the owning instance.
 *
 * NOTE(review): when i == NULL the return value is always NULL, even if
 * a matching instance was found and learned -- callers compare the
 * result against an instance pointer, so confirm this is intended.
 * NOTE(review): the inner 'break' exits only the y-loop; the x- and
 * z-loops keep scanning (and may call switch_addr on the same j again).
 */
struct multi_instance *multi_learn_peer_addr(struct multi_context *m, struct multi_instance *i, struct mroute_addr *p)
{
    struct multi_instance *r = NULL;
    /* packet address, host byte order, for comparison below */
    in_addr_t b = ntohl(p->v4.addr);
    if (i)
    {
        multi_context_switch_addr(m, i, false, true);
        r = i;
    }
    else
    {
        for (int z = 0; z < m->max_clients; ++z)
        {
            struct multi_instance *j = m->instances[z];
            if (!j) { continue; }
            for (int x = 0; x < m->mtio_info.maxc; ++x)
            {
                struct multi_link *l = &(m->mtio_info.link[x]);
                if (strcmp(l->uniq, "") != 0)
                {
                    for (int y = 0; y < MAX_THREADS; ++y)
                    {
                        struct multi_address *a = &(l->adrs[y]);
                        if (a->addr == b)
                        {
                            multi_context_switch_addr(m, j, false, true);
                            break;
                        }
                    }
                }
            }
        }
    }
    return r;
}
*/ if ((src.type & MR_ADDR_MASK) == MR_ADDR_IPV6 @@ -3435,7 +3683,7 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst } else { - msg(D_MULTI_DROPPED, + msg(D_MULTI_ERRORS, "MULTI: bad source address from client [%s], packet dropped", mroute_addr_print(&src, &gc)); } @@ -3485,8 +3733,7 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst } } /* extract packet source and dest addresses */ - mroute_flags = - mroute_extract_addr_from_packet(&src, &dest, vid, &c->c2.to_tun, DEV_TYPE_TAP); + mroute_flags = mroute_extract_addr_from_packet(&src, &dest, vid, &c->c2.to_tun, DEV_TYPE_TAP); if (mroute_flags & MROUTE_EXTRACT_SUCCEEDED) { @@ -3515,7 +3762,7 @@ multi_process_incoming_link(struct multi_context *m, struct multi_instance *inst } else { - msg(D_MULTI_DROPPED, + msg(D_MULTI_ERRORS, "MULTI: bad source address from client [%s], packet dropped", mroute_addr_print(&src, &gc)); c->c2.to_tun.len = 0; @@ -3589,6 +3836,13 @@ multi_process_incoming_tun_part2(struct multi_context *m, const unsigned int mpp if (mroute_flags & MROUTE_EXTRACT_SUCCEEDED) { struct context *c; + struct multi_instance *i = multi_get_instance_by_virtual_addr(m, &dest, dev_type == DEV_TYPE_TUN); + + if (!i) + { + multi_learn_peer_addr(m, m->pending, &dest); + i = multi_get_instance_by_virtual_addr(m, &dest, dev_type == DEV_TYPE_TUN); + } /* broadcast or multicast dest addr? 
*/ if (mroute_flags & (MROUTE_EXTRACT_BCAST | MROUTE_EXTRACT_MCAST)) @@ -3598,20 +3852,19 @@ multi_process_incoming_tun_part2(struct multi_context *m, const unsigned int mpp } else if (m->inst_indx == -9) { - struct multi_instance *inst = multi_get_instance_by_virtual_addr(m, &dest, dev_type == DEV_TYPE_TUN); - if (inst) + if (i) { int leng = m->inst_leng; for (int x = 0; x < leng; ++x) { - if (m->inst_list[x] == inst) + if (m->inst_list[x] == i) { m->inst_indx = x; return true; } } leng = min_max(leng, 0, TUN_BAT_MIN - 1); - m->inst_list[leng] = inst; + m->inst_list[leng] = i; m->inst_indx = leng; m->inst_leng = (leng + 1); } @@ -3619,7 +3872,7 @@ multi_process_incoming_tun_part2(struct multi_context *m, const unsigned int mpp } else { - multi_set_pending(m, multi_get_instance_by_virtual_addr(m, &dest, dev_type == DEV_TYPE_TUN)); + multi_set_pending(m, i); if (m->pending) { @@ -3738,7 +3991,7 @@ bool multi_process_incoming_tun(struct multi_context *m, const unsigned int mpp_ } } -bool multi_in_tun(struct multi_context *m, const unsigned int mpp_flags) +bool threaded_multi_inp_tun(struct multi_context *m, const unsigned int mpp_flags) { if (INST_LENG(m)) { @@ -3747,12 +4000,19 @@ bool multi_in_tun(struct multi_context *m, const unsigned int mpp_flags) else { struct context *c = &(m->top); - read_incoming_tun(c); - if (!IS_SIG(c)) + if (*(m->mtio_info.hold) == m->mtio_info.maxt) { - multi_process_incoming_tun(m, mpp_flags); + int size; + uint8_t temp[1]; + size = read(c->c1.tuntap->fd, temp, 1); + if (size < 1) { /* no-op */ } + if (!IS_SIG(c)) + { + multi_process_incoming_tun(m, mpp_flags); + } + size = write(c->c1.tuntap->fz, temp, 1); + return true; } - return true; } return false; } @@ -4315,13 +4575,72 @@ multi_get_timeout(struct multi_context *multi, struct timeval *timeval) * * @param multi context structure */ -static void -tunnel_server_loop(struct multi_context *multi) +static void tunnel_server_loop(struct thread_pointer *b) { int status; + struct 
context_pointer *p = b->p; + + status = 0; + while (status == 0) + { + status = 1; + for (int x = 0; x < p->n; ++x) + { + if (p->m[x] == NULL) + { + status = 0; + } + } + sleep(1); + } + + struct multi_context *multi = p->m[b->i-1]; + struct context *c = &(p->m[b->i-1]->top); + struct context *d = &(p->m[0]->top); + + multi->mtio_idno = b->i; + multi->mtio_info.maxt = b->n; + multi->mtio_info.maxc = p->x; + multi->mtio_info.link = p->k; + multi->mtio_info.lock = p->l; + multi->mtio_info.indx = &(p->i); + multi->mtio_info.hold = &(p->h); + multi->mtio_info.pool = p->m[0]->ifconfig_pool; + + if (b->i == 1) + { + while (p->h < p->n) + { + if (p->z == -1) { break; } else { sleep(1); } + } + p->z = 1; + } + else + { + b->h += 1; p->h += 1; + while ((p->z != 1) || (!(d->c1.tuntap)) || (d->c1.tuntap->ff <= 1)) + { + if (p->z == -1) { break; } else { sleep(1); } + } + } + + msg(M_INFO, "TCPv4_SERVER MTIO init [%d][%d] [%d][%d] {%d}{%d}", b->h, b->n, p->h, p->n, p->z, b->i); while (true) { + if (p->z != 1) { break; } + if (c->c1.tuntap && (c->c1.tuntap->fd > 1) && (c->c1.tuntap->ff <= 1)) + { + socketpair(AF_UNIX, SOCK_DGRAM, 0, p->s[b->i-1]); + socketpair(AF_UNIX, SOCK_DGRAM, 0, p->r[b->i-1]); + c->c1.tuntap->ff = c->c1.tuntap->fd; + c->c1.tuntap->fe = (b->i == 1) ? c->c1.tuntap->ff : d->c1.tuntap->ff; + //c->c1.tuntap->fd = (b->i == 1) ? 
c->c1.tuntap->ff : d->c1.tuntap->ff; + c->c1.tuntap->fd = p->s[b->i-1][0]; + c->c1.tuntap->fz = p->r[b->i-1][1]; + msg(M_INFO, "TCPv4_SERVER MTIO fdno [%d][%d][%d][%d] {%d}", c->c1.tuntap->fd, c->c1.tuntap->fe, c->c1.tuntap->ff, c->c1.tuntap->fz, b->i); + } + /* wait on tun/socket list */ multi_get_timeout(multi, &multi->top.c2.timeval); status = multi_io_wait(multi); @@ -4334,7 +4653,7 @@ tunnel_server_loop(struct multi_context *multi) if (status > 0) { /* process the I/O which triggered select */ - multi_io_process_io(multi); + multi_io_process_io(b); } else if (status == 0) { @@ -4343,18 +4662,34 @@ tunnel_server_loop(struct multi_context *multi) MULTI_CHECK_SIG(multi); } + + msg(M_INFO, "TCPv4_SERVER MTIO fins [%d][%d] [%d][%d] {%d}{%d}", b->h, b->n, p->h, p->n, p->z, b->i); + + p->z = -1; + + if (c->c1.tuntap && (c->c1.tuntap->ff > 1)) + { + close(p->s[b->i-1][0]); close(p->s[b->i-1][1]); + close(p->r[b->i-1][0]); close(p->r[b->i-1][1]); + c->c1.tuntap->fd = c->c1.tuntap->ff; + c->c1.tuntap->ff = -1; + } } /* * Top level event loop. */ -void -tunnel_server(struct context *top) +void *tunnel_server(void *args) { - ASSERT(top->options.mode == MODE_SERVER); - + struct thread_pointer *arg = (struct thread_pointer *)args; + struct context_pointer *ptr = arg->p; + struct context *top = (arg->i == 1) ? 
ptr->c : arg->c; struct multi_context multi; + if (arg->i == 1) { sleep(1); } + + ASSERT(top->options.mode == MODE_SERVER); + top->mode = CM_TOP; top->multi = &multi; context_clear_2(top); @@ -4363,7 +4698,7 @@ tunnel_server(struct context *top) init_instance_handle_signals(top, top->es, CC_HARD_USR1_TO_HUP); if (IS_SIG(top)) { - return; + return NULL; } /* initialize global multi_context object */ @@ -4386,7 +4721,10 @@ tunnel_server(struct context *top) } #endif - tunnel_server_loop(&multi); + bzero(&(multi.mtio_info), sizeof(struct multi_info)); + ptr->m[arg->i-1] = &multi; + + tunnel_server_loop(arg); #ifdef ENABLE_ASYNC_PUSH close(top->c2.inotify_fd); @@ -4402,6 +4740,54 @@ tunnel_server(struct context *top) multi_uninit(&multi); multi_top_free(&multi); close_instance(top); + + return NULL; +} + +void threaded_tunnel_server(struct context *c, struct context *d) +{ + int maxt = (c->options.ce.mtio_mode) ? MAX_THREADS : 1; + int maxc = c->options.max_clients; + struct thread_pointer b[MAX_THREADS]; + struct context_pointer p; + struct multi_link k[maxc]; + pthread_mutex_t lock; + pthread_t thrm, thrd[MAX_THREADS]; + + bzero(&(p), sizeof(struct context_pointer)); + p.i = 1; p.h = 1; p.n = maxt; p.x = maxc; p.z = 0; + p.c = c; p.k = k; p.l = &(lock); p.p = NULL; + p.m = calloc(MAX_THREADS, sizeof(struct multi_context *)); + bzero(p.k, maxc * sizeof(struct multi_link)); + bzero(p.l, sizeof(pthread_mutex_t)); + pthread_mutex_init(p.l, NULL); + + c->skip_bind = 0; + b[0].p = &(p); b[0].c = c; b[0].i = 1; b[0].n = p.n; b[0].h = 0; + bzero(&(thrd[0]), sizeof(pthread_t)); + pthread_create(&(thrd[0]), NULL, tunnel_server, &(b[0])); + + bzero(&(thrm), sizeof(pthread_t)); + pthread_create(&(thrm), NULL, threaded_io_management, &(b[0])); + + for (int x = 1; x < p.n; ++x) + { + d[x].skip_bind = -1; + b[x].p = &(p); b[x].c = &(d[x]); b[x].i = (x + 1); b[x].n = p.n; b[x].h = 1; + bzero(&(thrd[x]), sizeof(pthread_t)); + pthread_create(&(thrd[x]), NULL, tunnel_server, &(b[x])); 
+ } + + pthread_join(thrd[0], NULL); + + for (int x = 1; x < p.n; ++x) + { + pthread_join(thrd[x], NULL); + } + + pthread_join(thrm, NULL); + + free(p.m); } /* Searches for the address and deletes it if it is owned by the multi_instance */ @@ -4413,6 +4799,11 @@ multi_unlearn_addr(struct multi_context *m, struct multi_instance *mi, const str struct hash_bucket *bucket = hash_bucket(m->vhash, hv); struct multi_route *r = NULL; + if (m->top.options.ce.mtio_conf) + { + return; + } + /* if route currently exists, get the instance which owns it */ he = hash_lookup_fast(m->vhash, bucket, addr, hv); if (he) @@ -4427,7 +4818,7 @@ multi_unlearn_addr(struct multi_context *m, struct multi_instance *mi, const str } struct gc_arena gc = gc_new(); - msg(D_MULTI_LOW, "MULTI: Unlearn: %s -> %s", mroute_addr_print(&r->addr, &gc), multi_instance_string(mi, false, &gc)); + msg(M_INFO, "MULTI: Unlearn: %s -> %s", mroute_addr_print(&r->addr, &gc), multi_instance_string(mi, false, &gc)); learn_address_script(m, NULL, "delete", &r->addr); hash_remove_by_value(m->vhash, r); multi_route_del(r); @@ -4509,22 +4900,33 @@ unlearn_ifconfig_ipv6(struct multi_context *m, struct multi_instance *mi) void update_vhash(struct multi_context *m, struct multi_instance *mi, const char *new_ip, const char *new_ipv6) { + if (m->top.options.ce.mtio_conf) + { + return; + } + if (new_ip) { + in_addr_t old_addr_t = mi->context.c2.push_ifconfig_local; + + struct in_addr new_addr; + CLEAR(new_addr); + int addr_stat = inet_pton(AF_INET, new_ip, &new_addr); + in_addr_t new_addr_t = ntohl(new_addr.s_addr); + /* Remove old IP */ - if (mi->context.c2.push_ifconfig_defined) + if (addr_stat == 1 && new_addr_t != old_addr_t + && mi->context.c2.push_ifconfig_defined) { unlearn_ifconfig(m, mi); } /* Add new IP */ - struct in_addr new_addr; - CLEAR(new_addr); - if (inet_pton(AF_INET, new_ip, &new_addr) == 1 - && multi_learn_in_addr_t(m, mi, ntohl(new_addr.s_addr), -1, true)) + if (addr_stat == 1 && new_addr_t != old_addr_t 
+ && multi_learn_in_addr_t(m, mi, new_addr_t, -1, true)) { mi->context.c2.push_ifconfig_defined = true; - mi->context.c2.push_ifconfig_local = ntohl(new_addr.s_addr); + mi->context.c2.push_ifconfig_local = new_addr_t; /* set our client's VPN endpoint for status reporting purposes */ mi->reporting_addr = mi->context.c2.push_ifconfig_local; } diff --git a/src/openvpn/multi.h b/src/openvpn/multi.h index d3c7fca1ebe..686d1376a91 100644 --- a/src/openvpn/multi.h +++ b/src/openvpn/multi.h @@ -146,6 +146,10 @@ struct multi_instance #ifdef ENABLE_ASYNC_PUSH int inotify_watch; /* watch descriptor for acf */ #endif + + int mtio_stat; + int mtio_idno; + struct multi_address mtio_addr; }; @@ -222,8 +226,13 @@ struct multi_context int inst_indx; int inst_leng; struct multi_instance **inst_list; + + int mtio_stat; + int mtio_idno; + struct multi_info mtio_info; }; + /** * Return values used by the client connect call-back functions. */ @@ -259,19 +268,23 @@ struct multi_route * * @param top - Top-level context structure. 
*/ -void tunnel_server(struct context *top); +void threaded_tunnel_server(struct context *c, struct context *d); int min_max(int a, int b, int c); +bool multi_context_switch_addr(struct multi_context *m, struct multi_instance *i, bool s, bool l); + +struct multi_context *multi_context_switch_conn(struct thread_pointer *b, struct multi_context *m, struct multi_instance *i); + +struct multi_instance *multi_learn_in_addr_t(struct multi_context *m, struct multi_instance *mi, in_addr_t a, int netbits, bool primary); + const char *multi_instance_string(const struct multi_instance *mi, bool null, struct gc_arena *gc); /* * Called by mtcp.c, mudp.c, or other (to be written) protocol drivers */ -struct multi_instance *multi_create_instance(struct multi_context *m, - const struct mroute_addr *real, - struct link_socket *sock); +struct multi_instance *multi_create_instance(struct thread_pointer *b, const struct mroute_addr *real, struct link_socket *sock); void multi_close_instance(struct multi_context *m, struct multi_instance *mi, bool shutdown); @@ -362,7 +375,7 @@ bool multi_process_incoming_tun(struct multi_context *m, const unsigned int mpp_ bool multi_process_inp_tun_post(struct multi_context *m, const unsigned int mpp_flags); -bool multi_in_tun(struct multi_context *m, const unsigned int mpp_flags); +bool threaded_multi_inp_tun(struct multi_context *m, const unsigned int mpp_flags); void multi_process_drop_outgoing_tun(struct multi_context *m, const unsigned int mpp_flags); diff --git a/src/openvpn/multi_io.c b/src/openvpn/multi_io.c index f5c5039fc9c..8a9603bbbba 100644 --- a/src/openvpn/multi_io.c +++ b/src/openvpn/multi_io.c @@ -294,7 +294,7 @@ multi_io_dispatch(struct multi_context *m, struct multi_instance *mi, const int { case TA_INST_LENG: case TA_TUN_READ: - multi_in_tun(m, mpp_flags); + threaded_multi_inp_tun(m, mpp_flags); break; case TA_SOCKET_READ: @@ -394,8 +394,9 @@ multi_io_post(struct multi_context *m, struct multi_instance *mi, const int acti } void 
-multi_io_process_io(struct multi_context *m) +multi_io_process_io(struct thread_pointer *b) { + struct multi_context *m = b->p->m[b->i-1]; struct multi_io *multi_io = m->multi_io; int i; @@ -440,8 +441,8 @@ multi_io_process_io(struct multi_context *m) if (!proto_is_dgram(ev_arg->u.sock->info.proto)) { socket_reset_listen_persistent(ev_arg->u.sock); - mi = multi_create_instance_tcp(m, ev_arg->u.sock); - if (mi) { multi_io_action(m, mi, TA_INITIAL, false); } + mi = multi_create_instance_tcp(b, ev_arg->u.sock); + if (mi) { multi_io_action(b->p->p, mi, TA_INITIAL, false); } } else { @@ -449,9 +450,6 @@ multi_io_process_io(struct multi_context *m) if (m->pending) { multi_io_action(m, m->pending, TA_INITIAL, false); } if (m->pending2) { multi_io_action(m, m->pending2, TA_INITIAL, false); } } - /* monitor and/or handle events that are - * triggered in succession by the first one - * before returning to the main loop. */ break; } } @@ -477,7 +475,6 @@ multi_io_process_io(struct multi_context *m) multi_io_action(m, NULL, TA_TUN_READ, false); } } - #if defined(ENABLE_DCO) /* incoming data on DCO? 
*/ else if (e->arg == MULTI_IO_DCO) @@ -514,6 +511,21 @@ multi_io_process_io(struct multi_context *m) multi_io_action(m, mi, TA_SOCKET_WRITE, true); } } + + if (m->mtio_stat == 3) + { + for (int x = 0; x < m->max_clients; ++x) + { + struct multi_instance *j = m->instances[x]; + if (!j) { continue; } + if (j->mtio_stat == 3) + { + multi_context_switch_addr(m, j, true, true); + j->mtio_stat = 5; + } + } + m->mtio_stat = 5; + } } void diff --git a/src/openvpn/multi_io.h b/src/openvpn/multi_io.h index daf43b0e0f6..631b29629ad 100644 --- a/src/openvpn/multi_io.h +++ b/src/openvpn/multi_io.h @@ -68,7 +68,7 @@ void multi_io_free(struct multi_io *multi_io); int multi_io_wait(struct multi_context *m); -void multi_io_process_io(struct multi_context *m); +void multi_io_process_io(struct thread_pointer *b); void multi_io_set_global_rw_flags(struct multi_context *m, struct multi_instance *mi); diff --git a/src/openvpn/openvpn.c b/src/openvpn/openvpn.c index eaaa59b92bd..9c52009d00f 100644 --- a/src/openvpn/openvpn.c +++ b/src/openvpn/openvpn.c @@ -45,6 +45,77 @@ process_signal_p2p(struct context *c) } +int proc_addr_cons_maps(struct mtio_args *args, struct mtio_cons *cons, uint8_t *buff_data, int buff_size, bool mtio_mode) +{ + int thid = *(args->thid), expr = args->expr; + struct mroute_addr srca = { 0 }, dsta = { 0 }; + struct buffer buff; + buff.data = buff_data; buff.offset = 0; buff.len = buff_size; + if (mtio_mode && (mroute_extract_addr_ip(&srca, &dsta, &buff) == MROUTE_EXTRACT_SUCCEEDED)) + { + int flag = 1; + for (int y = 0; y < args->notl; ++y) + { + if (TEST_ADRS_CONN_NOTS(srca, dsta, args->nots[y])) + { + flag = 0; + } + } + for (int y = 0; y < args->mskl; ++y) + { + if (TEST_ADRS_CONN_MSKS(srca, dsta, args->msks[y])) + { + flag = 0; + } + } + if (flag == 1) + { + int indx = -1, scan = 0, memr = 4, logs = -1; + time_t secs = time(NULL); + uint32_t srcn = srca.v4.addr, dstn = dsta.v4.addr; + uint32_t srch = (HASH_PART(srcn, 24, 11, 103) + HASH_PART(srcn, 16, 13, 107) 
+ HASH_PART(srcn, 8, 17, 109) + HASH_PART(srcn, 0, 19, 113)); + uint32_t dsth = (HASH_PART(dstn, 24, 31, 131) + HASH_PART(dstn, 16, 53, 137) + HASH_PART(dstn, 8, 67, 139) + HASH_PART(dstn, 0, 79, 151)); + uint32_t hidx = (((srch * 163) + (dsth * 167)) % MAX_CSTATES); + for (int y = 0; y < MAX_CSTATES; ++y) + { + if (TEST_ADRS_CONN_MAPS(srca, dsta, cons[hidx])) + { + indx = hidx; + if ((secs - cons[hidx].last) >= expr) { indx = (-1 * (hidx + 11)); } + break; + } + else if ((secs - cons[hidx].last) >= expr) + { + if (indx == -1) { indx = (-1 * (hidx + 11)); } + } + if ((scan >= memr) && (indx != -1)) { break; } + hidx = ((hidx + 1) % MAX_CSTATES); + scan += 1; + } + if (indx < 0) + { + indx = (indx == -1) ? hidx : ((indx * -1) - 11); + cons[indx].srca = srca.v4.addr; + cons[indx].dsta = dsta.v4.addr; + cons[indx].thid = thid; + logs = 1; + } + if (logs == 1) + { + char a[28], b[28]; + struct in_addr t; + t.s_addr = srca.v4.addr; bzero(a, 28); snprintf(a, 24, "%s", inet_ntoa(t)); + t.s_addr = dsta.v4.addr; bzero(b, 28); snprintf(b, 24, "%s", inet_ntoa(t)); + msg(M_INFO, "%s MTIO maps <%d>(%d) [%s][%s] {%d}{%d}", args->pref, indx, expr, a, b, thid, cons[indx].thid); + } + thid = cons[indx].thid; + cons[indx].last = secs; + } + } + return thid; +} + + /**************************************************************************/ /** * Main event loop for OpenVPN in client mode, where only one VPN tunnel @@ -53,9 +124,13 @@ process_signal_p2p(struct context *c) * * @param c - The context structure of the single active VPN tunnel. */ -static void -tunnel_point_to_point(struct context *c) +void *tunnel_point_to_point(void *a) { + struct thread_pointer *b = (struct thread_pointer *)a; + struct context_pointer *p = b->p; + struct context *c = (b->n == 1) ? p->c : b->c; + struct context *d = (b->n == 1) ? 
b->c : p->c; + context_clear_2(c); /* set point-to-point mode */ @@ -66,12 +141,44 @@ tunnel_point_to_point(struct context *c) init_instance_handle_signals(c, c->es, stdin_config ? 0 : CC_HARD_USR1_TO_HUP); if (IS_SIG(c)) { - return; + return NULL; } + if (b->i == 1) + { + while (p->h < p->n) + { + if (p->z == -1) { break; } else { sleep(1); } + } + p->z = 1; + } + else + { + b->h += 1; p->h += 1; + while ((p->z != 1) || (!(d->c1.tuntap)) || (d->c1.tuntap->ff <= 1)) + { + if (p->z == -1) { break; } else { sleep(1); } + } + } + + msg(M_INFO, "TCPv4_CLIENT MTIO init [%d][%d] [%d][%d] {%d}{%d}", b->h, b->n, p->h, p->n, p->z, b->i); + /* main event loop */ while (true) { + if (p->z != 1) { break; } + if (c->c1.tuntap && (c->c1.tuntap->fd > 1) && (c->c1.tuntap->ff <= 1)) + { + socketpair(AF_UNIX, SOCK_DGRAM, 0, p->s[b->i-1]); + socketpair(AF_UNIX, SOCK_DGRAM, 0, p->r[b->i-1]); + c->c1.tuntap->ff = c->c1.tuntap->fd; + c->c1.tuntap->fe = (b->i == 1) ? c->c1.tuntap->ff : d->c1.tuntap->ff; + //c->c1.tuntap->fd = (b->i == 1) ? c->c1.tuntap->ff : d->c1.tuntap->ff; + c->c1.tuntap->fd = p->s[b->i-1][0]; + c->c1.tuntap->fz = p->r[b->i-1][1]; + msg(M_INFO, "TCPv4_CLIENT MTIO fdno [%d][%d][%d][%d] {%d}", c->c1.tuntap->fd, c->c1.tuntap->fe, c->c1.tuntap->ff, c->c1.tuntap->fz, b->i); + } + /* process timers, TLS, etc. 
*/ pre_select(c); P2P_CHECK_SIG(); @@ -87,16 +194,282 @@ tunnel_point_to_point(struct context *c) } /* process the I/O which triggered select */ - process_io(c, c->c2.link_sockets[0]); + process_io(c, c->c2.link_sockets[0], b); P2P_CHECK_SIG(); } + msg(M_INFO, "TCPv4_CLIENT MTIO fins [%d][%d] [%d][%d] {%d}{%d}", b->h, b->n, p->h, p->n, p->z, b->i); + + p->z = -1; + + if (c->c1.tuntap && (c->c1.tuntap->ff > 1)) + { + close(p->s[b->i-1][0]); close(p->s[b->i-1][1]); + close(p->r[b->i-1][0]); close(p->r[b->i-1][1]); + c->c1.tuntap->fd = c->c1.tuntap->ff; + c->c1.tuntap->ff = -1; + } + persist_client_stats(c); uninit_management_callback(); /* tear down tunnel instance (unless --persist-tun) */ close_instance(c); + + return NULL; +} + +void *threaded_io_management(void *a) +{ + struct thread_pointer *b = (struct thread_pointer *)a; + struct context_pointer *p = b->p; + struct context *c, *d; + int maxt = p->n, maxf = 0, maxl = 0; + int maxz = TUN_BAT_MIN, maxx = (MAX_THREADS * TUN_BAT_MIN); + int fdno = 0, indx = 0, size = 0, selw = 0; + int thid = 0, tidx = 0, tbeg = 0, tend = 0; + ssize_t leng = 0; + uint8_t *ptra; + uint8_t busy[maxx], proc[maxx], sels[maxx]; + fd_set rfds; + struct timeval timo; + struct mtio_args marg; + struct mtio_cons cons[MAX_CSTATES]; + + while (true) + { + if (p->z == -1) { break; } + if ((p->z == 1) && (p->h == p->n)) { break; } + sleep(1); + } + d = (p->m && p->m[0]) ? 
&(p->m[0]->top) : b[0].c; + size = d->c2.frame.buf.payload_size; + if (size < 1) { size = 1; } + while (true) + { + if (p->z != 1) { break; } + int stat = 0; + for (int x = 0; x < maxt; ++x) + { + if (p->m) { stat += 1; } + else + { + struct tls_multi *m = b[x].c->c2.tls_multi; + if (m && (m->multi_state == CAS_CONNECT_DONE)) { stat += 1; } + } + } + if (stat == maxt) { break; } + sleep(1); + } + + bool mtmo = d->options.ce.mtio_mode; + int expr = d->options.ce.mtio_time; + in_addr_t nots[] = { inet_addr("0.0.0.0"), inet_addr("255.255.255.255") }; + in_addr_t msks[] = { inet_addr("10.0.0.0") }; + int sizs[maxx], idxs[maxx], lens[maxt]; + uint8_t bufl[maxx][size]; + uint8_t *bufs[maxx], *swap[maxx]; + + for (int x = 0; x < maxx; ++x) + { + sizs[x] = 0; idxs[x] = 0; + bufs[x] = bufl[x]; + } + + msg(M_INFO, "%s MTIO mgmt [%d] {%d}", (p->m) ? "TCPv4_SERVER" : "TCPv4_CLIENT", size, BULK_MODE(d)); + + marg.pref = (p->m) ? "TCPv4_SERVER" : "TCPv4_CLIENT"; + marg.thid = &(thid); marg.expr = expr; marg.busy = busy; + marg.nots = nots; marg.notl = (sizeof(nots) / sizeof(nots[0])); + marg.msks = msks; marg.mskl = (sizeof(msks) / sizeof(msks[0])); + + bzero(busy, maxt * sizeof(uint8_t)); + bzero(cons, MAX_CSTATES * sizeof(struct mtio_cons)); + while (true) + { + if (p->z != 1) { break; } + indx = -1; maxf = 0; maxl = 0; + FD_ZERO(&rfds); + for (int x = 0; x < maxt; ++x) + { + if (busy[x] != 1) { indx = x; } + if (p->r[x][0] > 1) { FD_SET(p->r[x][0], &rfds); maxl += 1; } + if (p->r[x][0] > maxf) { maxf = p->r[x][0]; } + } + if (maxl != maxt) + { + sleep(1); + continue; + } + timo.tv_sec = 0; timo.tv_usec = 0; + select(maxf+1, &rfds, NULL, NULL, (indx < 0) ? 
NULL : &timo); + for (int x = 0; x < maxt; ++x) + { + if (FD_ISSET(p->r[x][0], &rfds) || ((busy[x] == 1) && (sels[x] == 1) && (selw == 1))) + { + leng = read(p->r[x][0], &(busy[maxt+1]), 1); + busy[x] = 0; + } + if (busy[x] != 1) + { + thid = x; + lens[x] = 0; + } + proc[x] = 0; sels[x] = 0; + } + tbeg = 0; tend = 0; selw = 0; + for (int x = 0; x < maxx; ++x) + { + if (sizs[x] > 0) { tend = (x + 1); } + else if (sizs[tbeg] > 0) { tbeg = x; } + } + d = (p->m) ? &(p->m[0]->top) : b[0].c; + fdno = d->c1.tuntap->ff; + FD_ZERO(&rfds); FD_SET(fdno, &rfds); + timo.tv_sec = 1; timo.tv_usec = 750000; + if (tend > 0) { timo.tv_sec = 0; timo.tv_usec = 0; } + if (BULK_MODE(d)) + { + for (int x = 0; (fdno > 1) && (x < maxx); ++x) + { + if (x >= tend) + { + tend = min_max(tend, 0, maxx); + select(fdno+1, &rfds, NULL, NULL, &timo); + if ((p->z == 1) && FD_ISSET(fdno, &rfds) && (tend < maxx)) + { + leng = read(fdno, bufs[tend], size); + maxl = (int)leng; + tidx = proc_addr_cons_maps(&(marg), cons, bufs[tend], maxl, mtmo); + sizs[tend] = maxl; idxs[tend] = tidx; + tend = (tend + 1); + thid = ((thid + 1) % maxt); + } + FD_ZERO(&rfds); FD_SET(fdno, &rfds); + timo.tv_sec = 0; timo.tv_usec = 0; + } + if (sizs[x] > 0) + { + tidx = idxs[x]; sels[tidx] = 1; selw = 1; + c = (p->m) ? 
&(p->m[tidx]->top) : b[tidx].c; + indx = min_max(lens[tidx], 0, maxz); + if ((p->z == 1) && (busy[tidx] != 1) && (indx < maxz)) + { + c->c2.buffers->read_tun_bufs[indx].len = sizs[x]; + c->c2.buffers->read_tun_bufs[indx].offset = TUN_BAT_OFF; + ptra = BPTR(&c->c2.buffers->read_tun_bufs[indx]); + bcopy(bufs[x], ptra, sizs[x]); + c->c2.bufs[indx] = c->c2.buffers->read_tun_bufs[indx]; + c->c2.buffers->bulk_indx = 0; + c->c2.buffers->bulk_leng = (indx + 1); + sizs[x] = 0; proc[tidx] = 1; lens[tidx] += 1; + } + } + if ((sizs[x] < 1) && ((x < tbeg) || (sizs[tbeg] > 0))) + { + tbeg = x; + } + if ((sizs[x] > 0) && ((x > tbeg) && (sizs[tbeg] < 1))) + { + swap[tbeg] = bufs[tbeg]; bufs[tbeg] = bufs[x]; bufs[x] = swap[tbeg]; + sizs[tbeg] = sizs[x]; idxs[tbeg] = idxs[x]; sizs[x] = 0; + for (int z = (tbeg + 1); z <= x; ++z) + { + if (sizs[z] < 1) { tbeg = z; break; } + } + } + if ((sizs[x] < 1) && ((x < tbeg) || (sizs[tbeg] > 0))) + { + tbeg = x; + } + } + } + else + { + if (fdno > 1) + { + tend = (maxx - 1); + if (sizs[tend] < 1) + { + tend = min_max(tend, 0, maxx); + select(fdno+1, &rfds, NULL, NULL, &timo); + if ((p->z == 1) && FD_ISSET(fdno, &rfds)) + { + leng = read(fdno, bufs[tend], size); + maxl = (int)leng; + tidx = proc_addr_cons_maps(&(marg), cons, bufs[tend], maxl, mtmo); + sizs[tend] = maxl; idxs[tend] = tidx; + thid = ((thid + 1) % maxt); + } + } + if (sizs[tend] > 0) + { + tidx = idxs[tend]; + if ((p->z == 1) && (busy[tidx] != 1)) + { + c = (p->m) ? &(p->m[tidx]->top) : b[tidx].c; + c->c2.buffers->read_tun_buf.len = sizs[tend]; + ptra = BPTR(&c->c2.buffers->read_tun_buf); + bcopy(bufs[tend], ptra, sizs[tend]); + sizs[tend] = 0; proc[tidx] = 1; + } + } + } + } + for (int x = 0; x < maxt; ++x) + { + if (proc[x] == 1) + { + leng = write(p->s[x][1], busy, 1); + busy[x] = 1; sels[x] = 0; selw = 0; + } + } + } + + p->z = -1; + + return NULL; +} + +void threaded_tunnel_point_to_point(struct context *c, struct context *d) +{ + int maxt = (c->options.ce.mtio_mode) ? 
MAX_THREADS : 1; + struct context_pointer p; + struct thread_pointer b[MAX_THREADS]; + pthread_t thrm, thrd[MAX_THREADS]; + pthread_mutex_t lock; + + bzero(&(p), sizeof(struct context_pointer)); + p.c = c; p.i = 1; p.n = maxt; p.h = 1; p.z = 0; + p.l = &(lock); + bzero(p.l, sizeof(pthread_mutex_t)); + pthread_mutex_init(p.l, NULL); + + c->skip_bind = 0; + b[0].p = &(p); b[0].c = c; b[0].i = 1; b[0].n = p.n; b[0].h = 0; + bzero(&(thrd[0]), sizeof(pthread_t)); + pthread_create(&(thrd[0]), NULL, tunnel_point_to_point, &(b[0])); + + bzero(&(thrm), sizeof(pthread_t)); + pthread_create(&(thrm), NULL, threaded_io_management, &(b[0])); + + for (int x = 1; x < p.n; ++x) + { + d[x].skip_bind = -1; + b[x].p = &(p); b[x].c = &(d[x]); b[x].i = (x + 1); b[x].n = p.n; b[x].h = 1; + bzero(&(thrd[x]), sizeof(pthread_t)); + pthread_create(&(thrd[x]), NULL, tunnel_point_to_point, &(b[x])); + } + + pthread_join(thrd[0], NULL); + + for (int x = 1; x < p.n; ++x) + { + pthread_join(thrd[x], NULL); + } + + pthread_join(thrm, NULL); } #undef PROCESS_SIGNAL_P2P @@ -153,6 +526,9 @@ static int openvpn_main(int argc, char *argv[]) { struct context c; + struct context d[MAX_THREADS]; + char devs[MAX_THREADS][MAX_STRLENG]; + char fils[MAX_THREADS][MAX_STRLENG]; #if PEDANTIC fprintf(stderr, "Sorry, I was built with --enable-pedantic and I am incapable of doing any real work!\n"); @@ -296,17 +672,44 @@ openvpn_main(int argc, char *argv[]) /* finish context init */ context_init_1(&c); + if (c.options.ce.mtio_mode) + { + for (int x = 0; x < MAX_THREADS; ++x) + { + struct context *b = &(d[x]); + + bcopy(&c, b, sizeof(struct context)); + context_init_1(b); + + if (c.options.dev) + { + bzero(devs[x], MAX_STRLENG * sizeof(char)); + snprintf(devs[x], MAX_STRLENG-5, "%st%02d", c.options.dev, x); + b->options.dev = devs[x]; + } + + if (c.options.status_file) + { + bzero(fils[x], MAX_STRLENG * sizeof(char)); + snprintf(fils[x], MAX_STRLENG-5, "%st%02d", c.options.status_file, x); + b->options.status_file = 
fils[x]; + } + + msg(M_INFO, "INFO MTIO init [%d] [%s][%s]", x, b->options.dev, b->options.status_file); + } + } + do { /* run tunnel depending on mode */ switch (c.options.mode) { case MODE_POINT_TO_POINT: - tunnel_point_to_point(&c); + threaded_tunnel_point_to_point(&c, d); break; case MODE_SERVER: - tunnel_server(&c); + threaded_tunnel_server(&c, d); break; default: @@ -324,12 +727,32 @@ openvpn_main(int argc, char *argv[]) /* pass restart status to management subsystem */ signal_restart_status(c.sig); + + if (c.options.ce.mtio_mode) + { + for (int x = 0; x < MAX_THREADS; ++x) + { + d[x].first_time = false; + signal_restart_status(d[x].sig); + } + } } while (signal_reset(c.sig, SIGUSR1) == SIGUSR1); env_set_destroy(c.es); uninit_options(&c.options); gc_reset(&c.gc); uninit_early(&c); + + /*if (c.options.ce.mtio_mode) + { + for (int x = 0; x < MAX_THREADS; ++x) + { + env_set_destroy(d[x].es); + uninit_options(&d[x].options); + gc_reset(&d[x].gc); + uninit_early(&d[x]); + } + }*/ } while (signal_reset(c.sig, SIGHUP) == SIGHUP); } diff --git a/src/openvpn/openvpn.h b/src/openvpn/openvpn.h index 30110d818d9..c16a143144e 100644 --- a/src/openvpn/openvpn.h +++ b/src/openvpn/openvpn.h @@ -46,6 +46,17 @@ #include "manage.h" #include "dns.h" +/* + mtio mode commit notes: + - maps size hash modp >= 2^14 16384 + - briefly track and map connection states to a given thread to ensure packet ordering + - Use a simple calculation based on src and dst IP address to get a starting list index value +*/ + +#define MAX_THREADS 4 +#define MAX_STRLENG 64 +#define MAX_CSTATES 16421 + /* * Our global key schedules, packaged thusly * to facilitate key persistence. @@ -521,6 +532,8 @@ struct context bool did_we_daemonize; /**< Whether demonization has already * taken place. */ + int skip_bind; + struct context_persist persist; /**< Persistent %context. */ struct context_0 *c0; /**< Level 0 %context. */ @@ -528,6 +541,81 @@ struct context struct context_2 c2; /**< Level 2 %context. 
*/ }; + +#define TEST_ADRS_CONN_MAPS(s, d, i) (((s.v4.addr == i.srca) && (d.v4.addr == i.dsta)) || ((d.v4.addr == i.srca) && (s.v4.addr == i.dsta))) +#define TEST_ADRS_CONN_NOTS(s, d, i) ((s.v4.addr == i) || (d.v4.addr == i)) +#define TEST_ADRS_CONN_MSKS(s, d, i) (((s.v4.addr & i) == i) && ((d.v4.addr & i) == i)) +#define HASH_PART(a, s, p, q) ((((a >> s) & 0xff) + p) * q) + +struct context_pointer +{ + int i, h, n, x, z; + int s[MAX_THREADS][2]; + int r[MAX_THREADS][2]; + struct context *c; + struct multi_context **m; + struct multi_context *p; + struct multi_link *k; + pthread_mutex_t *l; +}; + +struct thread_pointer +{ + int i, n, h; + struct context *c; + struct context_pointer *p; +}; + +struct multi_address +{ + char ladr[MAX_STRLENG]; + char wadr[MAX_STRLENG]; + char comm[MAX_STRLENG]; + char user[MAX_STRLENG]; + char uniq[MAX_STRLENG]; + time_t last; + in_addr_t addr; +}; + +struct multi_link +{ + int indx; + char uniq[MAX_STRLENG]; + time_t last; + struct multi_address adrs[MAX_THREADS]; +}; + +struct multi_info +{ + int maxt, maxc; + int *indx, *hold; + struct ifconfig_pool *pool; + pthread_mutex_t *lock; + struct multi_link *link; +}; + +struct mtio_args +{ + char *pref; + int expr; + int *thid; + uint8_t *busy; + int notl; + in_addr_t *nots; + int mskl; + in_addr_t *msks; +}; + +struct mtio_cons +{ + int thid; + time_t last; + in_addr_t srca, dsta; +}; + +void *threaded_io_management(void *a); + + /* * Check for a signal when inside an event loop */ diff --git a/src/openvpn/options.c b/src/openvpn/options.c index 8d93dec4820..52e1f8de9ef 100644 --- a/src/openvpn/options.c +++ b/src/openvpn/options.c @@ -307,6 +307,7 @@ static const char usage_message[] = " 'yes' -- Always DF (Don't Fragment)\n" "--mtu-test : Empirically measure and report MTU.\n" "--bulk-mode : Use bulk TUN/TCP reads/writes.\n" + "--mtio-mode n : Use multi threaded mode. 
(optional expire time: n=30)\n" #ifdef ENABLE_FRAGMENT "--fragment max : Enable internal datagram fragmentation so that no UDP\n" " datagrams are sent which are larger than max bytes.\n" @@ -3304,6 +3305,13 @@ options_postprocess_mutate_invariant(struct options *options) { options->ce.bulk_mode = false; } + + options->ce.mtio_conf = false; + + if (options->ce.mtio_mode) + { + options->ce.mtio_conf = true; + } } static void @@ -9308,6 +9316,19 @@ add_option(struct options *options, char *p[], bool is_inline, const char *file, { options->ce.bulk_mode = true; } + else if (streq(p[0], "mtio-mode")) + { + options->ce.mtio_mode = true; + options->ce.mtio_time = 30; + if (p[1]) + { + int mtio_time = positive_atoi(p[1], msglevel); + if ((5 <= mtio_time) && (mtio_time <= 9995)) + { + options->ce.mtio_time = mtio_time; + } + } + } else { int i; diff --git a/src/openvpn/options.h b/src/openvpn/options.h index 954dcc3b975..b0aba43bfef 100644 --- a/src/openvpn/options.h +++ b/src/openvpn/options.h @@ -180,6 +180,11 @@ struct connection_entry /* Bulk mode allows for multiple tun reads + larger tcp writes */ bool bulk_mode; + + /* Multi threaded IO mode operates on a primary tun interface + multiple tcp connections */ + bool mtio_conf; + bool mtio_mode; + int mtio_time; }; struct remote_entry diff --git a/src/openvpn/socket.c b/src/openvpn/socket.c index 46bedf4fa88..542b9388753 100644 --- a/src/openvpn/socket.c +++ b/src/openvpn/socket.c @@ -719,6 +719,7 @@ create_socket(struct link_socket *sock, struct addrinfo *addr) /* set socket to --mark packets with given value */ socket_set_mark(sock->sd, sock->mark); + if (sock->skip_bind != -1) { #if defined(TARGET_LINUX) if (sock->bind_dev) { @@ -733,6 +734,11 @@ create_socket(struct link_socket *sock, struct addrinfo *addr) #endif bind_local(sock, addr->ai_family); + } else { + struct sockaddr_in locl = { 0 }; + locl.sin_family = AF_INET; locl.sin_addr.s_addr = inet_addr("127.0.0.1"); + bind(sock->sd, (struct sockaddr *)&locl, 
sizeof(locl)); + } } #ifdef TARGET_ANDROID @@ -1787,6 +1793,7 @@ link_socket_init_phase2(struct context *c, struct link_socket *sock) addr_family_name(sock->info.lsa->bind_local->ai_family)); sock->info.af = sock->info.lsa->bind_local->ai_family; } + sock->skip_bind = c->skip_bind; create_socket(sock, sock->info.lsa->bind_local); } } diff --git a/src/openvpn/socket.h b/src/openvpn/socket.h index 7cf5b72f7cf..e7a3b36ec55 100644 --- a/src/openvpn/socket.h +++ b/src/openvpn/socket.h @@ -233,6 +233,8 @@ struct link_socket #ifdef ENABLE_DEBUG int gremlin; /* --gremlin bits */ #endif + + int skip_bind; }; /* diff --git a/src/openvpn/tun.c b/src/openvpn/tun.c index 7c61dcf3836..835863592ca 100644 --- a/src/openvpn/tun.c +++ b/src/openvpn/tun.c @@ -1246,6 +1246,11 @@ do_ifconfig_ipv4(struct tuntap *tt, const char *ifname, int tun_mtu, const struc bool tun_p2p = is_tun_p2p(tt); #endif + if (tt->skip_bind == -1) + { + tt->local = htonl(inet_addr("127.1.1.1")); + } + #if !defined(TARGET_LINUX) const char *ifconfig_local = NULL; const char *ifconfig_remote_netmask = NULL; @@ -1755,7 +1760,7 @@ write_tun_header(struct tuntap *tt, uint8_t *buf, int len) } else { - return write(tt->fd, buf, len); + return write(tt->fe, buf, len); } } @@ -1952,11 +1957,14 @@ open_tun_dco_generic(const char *dev, const char *dev_type, struct tuntap *tt, static void close_tun_generic(struct tuntap *tt) { - if (tt->fd >= 0) + if (tt->ff > 1) + { + close(tt->ff); + } + else if (tt->fd >= 0) { close(tt->fd); } - free(tt->actual_name); clear_tuntap(tt); } @@ -2044,7 +2052,7 @@ close_tun(struct tuntap *tt, openvpn_net_ctx_t *ctx) int write_tun(struct tuntap *tt, uint8_t *buf, int len) { - return write(tt->fd, buf, len); + return write(tt->fe, buf, len); } int @@ -2124,7 +2132,7 @@ open_tun(const char *dev, const char *dev_type, const char *dev_node, struct tun * Use special ioctl that configures tun/tap device with the parms * we set in ifr */ - if (ioctl(tt->fd, TUNSETIFF, (void *)&ifr) < 0) + if 
(ioctl((tt->ff > 1) ? tt->ff : tt->fd, TUNSETIFF, (void *)&ifr) < 0) { msg(M_ERR, "ERROR: Cannot ioctl TUNSETIFF %s", dev); } @@ -2254,7 +2262,7 @@ close_tun(struct tuntap *tt, openvpn_net_ctx_t *ctx) int write_tun(struct tuntap *tt, uint8_t *buf, int len) { - return write(tt->fd, buf, len); + return write(tt->fe, buf, len); } int @@ -2834,7 +2842,7 @@ write_tun(struct tuntap *tt, uint8_t *buf, int len) } else { - return write(tt->fd, buf, len); + return write(tt->fe, buf, len); } } @@ -2980,7 +2988,7 @@ write_tun(struct tuntap *tt, uint8_t *buf, int len) } else { - return write(tt->fd, buf, len); + return write(tt->fe, buf, len); } } @@ -3079,7 +3087,7 @@ write_tun(struct tuntap *tt, uint8_t *buf, int len) } else { - return write(tt->fd, buf, len); + return write(tt->fe, buf, len); } } @@ -3334,7 +3342,7 @@ write_tun(struct tuntap *tt, uint8_t *buf, int len) } else { - return write(tt->fd, buf, len); + return write(tt->fe, buf, len); } } @@ -3480,7 +3488,7 @@ close_tun(struct tuntap *tt, openvpn_net_ctx_t *ctx) int write_tun(struct tuntap *tt, uint8_t *buf, int len) { - return write(tt->fd, buf, len); + return write(tt->fe, buf, len); } int @@ -6532,7 +6540,7 @@ close_tun(struct tuntap *tt, openvpn_net_ctx_t *ctx) int write_tun(struct tuntap *tt, uint8_t *buf, int len) { - return write(tt->fd, buf, len); + return write(tt->fe, buf, len); } int diff --git a/src/openvpn/tun.h b/src/openvpn/tun.h index 876f1475bea..0f92840174d 100644 --- a/src/openvpn/tun.h +++ b/src/openvpn/tun.h @@ -248,6 +248,9 @@ struct tuntap dco_context_t dco; afunix_context_t afunix; + + int fe, ff, fz; + int skip_bind; }; static inline bool