Merge pull request #358 from Waujito/codebase_update

TCP Conntrack matching + TCP ports filtering
This commit is contained in:
Vadim Vetrov
2026-01-31 21:23:16 +03:00
committed by GitHub
13 changed files with 977 additions and 749 deletions

2
Kbuild
View File

@@ -1,3 +1,3 @@
obj-m := kyoutubeUnblock.o
kyoutubeUnblock-objs := src/kytunblock.o src/mangle.o src/quic.o src/quic_crypto.o src/utils.o src/tls.o src/getopt.o src/inet_ntop.o src/args.o src/trie.o deps/cyclone/aes.o deps/cyclone/cpu_endian.o deps/cyclone/ecb.o deps/cyclone/gcm.o deps/cyclone/hkdf.o deps/cyclone/hmac.o deps/cyclone/sha256.o
kyoutubeUnblock-objs := src/kytunblock.o src/dpi.o src/mangle.o src/quic.o src/quic_crypto.o src/utils.o src/tls.o src/getopt.o src/inet_ntop.o src/args.o src/trie.o deps/cyclone/aes.o deps/cyclone/cpu_endian.o deps/cyclone/ecb.o deps/cyclone/gcm.o deps/cyclone/hkdf.o deps/cyclone/hmac.o deps/cyclone/sha256.o
ccflags-y := -std=gnu99 -DKERNEL_SPACE -Wno-error -Wno-declaration-after-statement -I$(src)/src -I$(src)/deps/cyclone/include

View File

@@ -1,7 +1,7 @@
USPACE_TARGETS := default all install uninstall dev run_dev
KMAKE_TARGETS := kmake kload kunload kreload xmod xtclean
PKG_VERSION := 1.2.0
PKG_VERSION := 1.3.0
PKG_RELEASE := 1
PKG_FULLVERSION := $(PKG_VERSION)-$(PKG_RELEASE)

View File

@@ -10,6 +10,7 @@
- [Check it](#check-it)
- [Flags](#flags)
- [UDP/QUIC](#udpquic)
- [Cloudflare](#cloudflare)
- [Troubleshooting](#troubleshooting)
- [TV](#tv)
- [Troubleshooting EPERMS (Operation not permitted)](#troubleshooting-eperms-operation-not-permitted)
@@ -243,6 +244,10 @@ Flags that do not scoped to a specific section, used over all the youtubeUnblock
- `--tls={enabled|disabled}` Set it if you want not to process TLS traffic in current section. May be used if you want to set only UDP-based section. (Here section is a unit between `--fbegin` and `--fend` flags).
- `--tcp-dport-filter=<5,6,200-500>` Filters TCP destination ports. Specify the ports you want youtubeUnblock to handle as a comma-separated list of ports and ranges. Defaults to an empty list, in which case youtubeUnblock handles only the TLS port 443. Destination-port filtering may be disabled entirely with `--no-dport-filter`.
- `--tcp-match-connpackets=<number>` Use this with `--use-conntrack` set. Instead of matching by TLS domains, youtubeUnblock will match packets by the OS conntrack packet counter, i.e. the number of packets sent so far within the connection (SYN is included in the counter but is skipped by youtubeUnblock anyway). Do not set this number too high; something like 4 or 5 is recommended. When a packet matches, youtubeUnblock sends a fake and fragments the packet according to the fragmentation and faking settings (see the illustrative example below).
- `--fake-sni={0|1}` This flag enables fake-sni which forces **youtubeUnblock** to send at least three packets instead of one with TLS *ClientHello*: Fake *ClientHello*, 1st part of original *ClientHello*, 2nd part of original *ClientHello*. This flag may be related to some Operation not permitted error messages, so before open an issue refer to [Troubleshooting for EPERMS](#troubleshooting-eperms-operation-not-permitted). Defaults to **1**.
- `--fake-sni-seq-len=<length>` This flag specifies **youtubeUnblock** to build a complicated construction of fake client hello packets. length determines how much fakes will be sent. Defaults to **1**.
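For illustration only (this example is not part of the commit), the two new flags could be combined like this; the port values are arbitrary and only flags documented in this README are used:
```sh
# Hypothetical section: handle a custom port range, matching connections by
# the conntrack packet counter in addition to the default TLS/SNI matching.
sudo ./build/youtubeUnblock --use-conntrack --tcp-dport-filter=443,8443-8453 --tcp-match-connpackets=4
```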
@@ -327,6 +332,29 @@ QUIC is enabled with `--udp-filter-quic` flag. The flag supports two modes: `all
For **other UDP protocols**, I recommend configuring UDP support in a separate section from TCP, like `--fbegin --udp-dport-filter=50000-50099 --tls=disabled`. **You should not pass `--quic-drop` here unless you are sure what you are doing**
## Cloudflare
In Russia, Cloudflare infrastructure gets special attention from RKN.
This was caused primarily by ECH, a technology that makes it easy to bypass the TSPU. RKN blocks ECH, but alongside it also blocks a lot of harmless network protocols. Currently, only TLS (and possibly HTTP) is allowed on the Cloudflare network. If the TSPU cannot determine the protocol and the Server Name (SNI for TLS), it drops the connection after 16 KB have been transferred. This affects not only ports 443 and 80, but every port on the Cloudflare network.
Because of this, ECH and many other protocols are unavailable, and various custom servers/utilities/games are down, since their custom protocols are blocked.
An example: the Hypixel Minecraft server relies on Cloudflare and uses the custom Minecraft protocol on port 25565. The TSPU cannot determine this protocol, so it blocks the connection after 16 KB, and the server stops working.
Note that faking is the key to bypassing the TSPU here.
An example of the solution:
```sh
sudo ./build/youtubeUnblock --use-conntrack --tls=disabled --tcp-match-connpackets=4 --tcp-dport-filter=25565 --frag-sni-pos=1 --fake-sni=1 --faking-strategy=tcp_check,timestamp
```
Also, do not forget to add an iptables rule for the custom port:
```sh
sudo iptables -t mangle -A YOUTUBEUNBLOCK -p tcp --dport 25565 -m connbytes --connbytes-dir original --connbytes-mode packets --connbytes 0:19 -j NFQUEUE --queue-num 537 --queue-bypass
```
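If IPv6 traffic also goes through youtubeUnblock, a mirrored ip6tables rule should presumably be added as well (assumed setup, not part of this commit; it presumes the same `YOUTUBEUNBLOCK` chain exists in the IPv6 mangle table):
```sh
sudo ip6tables -t mangle -A YOUTUBEUNBLOCK -p tcp --dport 25565 -m connbytes --connbytes-dir original --connbytes-mode packets --connbytes 0:19 -j NFQUEUE --queue-num 537 --queue-bypass
```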
## Troubleshooting
Check up [this issue](https://github.com/Waujito/youtubeUnblock/issues/148) for useful configs.

View File

@@ -215,7 +215,7 @@ static int parse_faking_strategy(char *optarg, int *faking_strategy) {
return 0;
}
static int parse_udp_dport_range(char *str, struct udp_dport_range **udpr, int *udpr_len) {
static int parse_dport_range(char *str, struct dport_range **udpr, int *udpr_len) {
int seclen = 1;
const char *p = str;
while (*p != '\0') {
@@ -225,14 +225,14 @@ static int parse_udp_dport_range(char *str, struct udp_dport_range **udpr, int *
}
#ifdef KERNEL_SPACE
struct udp_dport_range *udp_dport_ranges = kmalloc(
seclen * sizeof(struct udp_dport_range), GFP_KERNEL);
struct dport_range *dport_ranges = kmalloc(
seclen * sizeof(struct dport_range), GFP_KERNEL);
#else
struct udp_dport_range *udp_dport_ranges = malloc(
seclen * sizeof(struct udp_dport_range));
struct dport_range *dport_ranges = malloc(
seclen * sizeof(struct dport_range));
#endif
if (udp_dport_ranges == NULL) {
if (dport_ranges == NULL) {
return -ENOMEM;
}
@@ -279,7 +279,7 @@ static int parse_udp_dport_range(char *str, struct udp_dport_range **udpr, int *
)
goto erret;
udp_dport_ranges[i] = (struct udp_dport_range){
dport_ranges[i] = (struct dport_range){
.start = num1,
.end = num2
};
@@ -297,15 +297,15 @@ static int parse_udp_dport_range(char *str, struct udp_dport_range **udpr, int *
}
if (i == 0) {
free(udp_dport_ranges);
free(dport_ranges);
}
*udpr = udp_dport_ranges;
*udpr = dport_ranges;
*udpr_len = i;
return 0;
erret:
free(udp_dport_ranges);
free(dport_ranges);
return -1;
}
@@ -350,6 +350,7 @@ enum {
OPT_EXCLUDE_DOMAINS,
OPT_SNI_DOMAINS_FILE,
OPT_EXCLUDE_DOMAINS_FILE,
OPT_TCP_DPORT_FILTER,
OPT_FAKE_SNI,
OPT_FAKING_TTL,
OPT_FAKING_STRATEGY,
@@ -397,6 +398,7 @@ enum {
OPT_HELP,
OPT_VERSION,
OPT_CONNBYTES_LIMIT,
OPT_TCP_M_CONNPKTS,
};
static struct option long_opt[] = {
@@ -410,6 +412,7 @@ static struct option long_opt[] = {
{"synfake", 1, 0, OPT_SYNFAKE},
{"synfake-len", 1, 0, OPT_SYNFAKE_LEN},
{"tls", 1, 0, OPT_TLS_ENABLED},
{"tcp-dport-filter", 1, 0, OPT_TCP_DPORT_FILTER},
{"fake-sni-seq-len", 1, 0, OPT_FAKE_SNI_SEQ_LEN},
{"fake-sni-type", 1, 0, OPT_FAKE_SNI_TYPE},
{"fake-custom-payload", 1, 0, OPT_FAKE_CUSTOM_PAYLOAD},
@@ -435,6 +438,7 @@ static struct option long_opt[] = {
{"udp-stun-filter", 0, 0, OPT_UDP_STUN_FILTER},
{"udp-filter-quic", 1, 0, OPT_UDP_FILTER_QUIC},
{"no-dport-filter", 0, 0, OPT_NO_DPORT_FILTER},
{"tcp-match-connpackets", 1, 0, OPT_TCP_M_CONNPKTS},
{"threads", 1, 0, OPT_THREADS},
{"silent", 0, 0, OPT_SILENT},
{"trace", 0, 0, OPT_TRACE},
@@ -476,6 +480,7 @@ void print_usage(const char *argv0) {
printf("\t--sni-domains-file=<file contains comma or new-line separated list>\n");
printf("\t--exclude-domains-file=<file contains comma or new-line separated list>\n");
printf("\t--tls={enabled|disabled}\n");
printf("\t--tcp-dport-filter=<5,6,200-500>\n");
printf("\t--fake-sni={1|0}\n");
printf("\t--fake-sni-seq-len=<length>\n");
printf("\t--fake-sni-type={default|random|custom}\n");
@@ -507,6 +512,7 @@ void print_usage(const char *argv0) {
printf("\t--threads=<threads number>\n");
printf("\t--packet-mark=<mark>\n");
printf("\t--connbytes-limit=<pkts>\n");
printf("\t--tcp-match-connpackets=<n of packets in connection>\n");
printf("\t--silent\n");
printf("\t--trace\n");
printf("\t--instaflush\n");
@@ -788,6 +794,23 @@ int yparse_args(struct config_t *config, int argc, char *argv[]) {
sect_config->frag_sni_pos = num;
break;
case OPT_TCP_DPORT_FILTER:
{
SFREE(sect_config->tcp_dport_range);
if (parse_dport_range(optarg, &sect_config->tcp_dport_range, &sect_config->tcp_dport_range_len) < 0) {
goto invalid_opt;
}
break;
}
case OPT_TCP_M_CONNPKTS:
num = parse_numeric_option(optarg);
if (errno != 0 || num < 0) {
goto invalid_opt;
}
sect_config->tcp_match_connpkts = num;
break;
case OPT_FAKING_STRATEGY:
if (parse_faking_strategy(
optarg, &sect_config->faking_strategy) < 0) {
@@ -982,7 +1005,7 @@ int yparse_args(struct config_t *config, int argc, char *argv[]) {
case OPT_UDP_DPORT_FILTER:
{
SFREE(sect_config->udp_dport_range);
if (parse_udp_dport_range(optarg, &sect_config->udp_dport_range, &sect_config->udp_dport_range_len) < 0) {
if (parse_dport_range(optarg, &sect_config->udp_dport_range, &sect_config->udp_dport_range_len) < 0) {
goto invalid_opt;
}
break;
@@ -1055,8 +1078,26 @@ static size_t print_config_section(const struct section_config_t *section, char
size_t buf_sz = buffer_size;
size_t sz;
if (section->tcp_dport_range_len != 0) {
print_cnf_raw("--tcp-dport-filter=");
for (int i = 0; i < section->tcp_dport_range_len; i++) {
struct dport_range range = section->tcp_dport_range[i];
print_cnf_raw("%d-%d,", range.start, range.end);
}
print_cnf_raw(" ");
}
if (section->tcp_match_connpkts) {
print_cnf_buf("--tcp-match-connpackets=%d",
section->tcp_match_connpkts);
}
if (section->tls_enabled || section->tcp_dport_range_len != 0) {
if (section->tls_enabled) {
print_cnf_buf("--tls=enabled");
}
switch(section->fragmentation_strategy) {
case FRAG_STRAT_IP:
@@ -1205,7 +1246,7 @@ static size_t print_config_section(const struct section_config_t *section, char
print_cnf_raw("--udp-dport-filter=");
for (int i = 0; i < section->udp_dport_range_len; i++) {
struct udp_dport_range range = section->udp_dport_range[i];
struct dport_range range = section->udp_dport_range[i];
print_cnf_raw("%d-%d,", range.start, range.end);
}
print_cnf_raw(" ");
@@ -1364,6 +1405,10 @@ void free_config_section(struct section_config_t *section) {
SFREE(section->udp_dport_range);
}
if (section->tcp_dport_range_len != 0) {
SFREE(section->tcp_dport_range);
}
free_sni_domains(&section->sni_domains);
free_sni_domains(&section->exclude_sni_domains);

View File

@@ -48,7 +48,7 @@ struct logging_config_t {
};
extern struct logging_config_t logging_conf;
struct udp_dport_range {
struct dport_range {
uint16_t start;
uint16_t end;
};
@@ -64,6 +64,11 @@ struct section_config_t {
int tls_enabled;
struct dport_range *tcp_dport_range;
int tcp_dport_range_len;
int tcp_match_connpkts;
int fragmentation_strategy;
int frag_sni_reverse;
int frag_sni_faked;
@@ -106,7 +111,7 @@ struct section_config_t {
unsigned int udp_fake_len;
int udp_faking_strategy;
struct udp_dport_range *udp_dport_range;
struct dport_range *udp_dport_range;
int udp_dport_range_len;
int udp_stun_filter;
int udp_filter_quic;
@@ -238,6 +243,9 @@ enum {
.sni_domains = {0}, \
.exclude_sni_domains = {0}, \
.all_domains = 0, \
.tcp_dport_range = NULL, \
.tcp_dport_range_len = 0, \
.tcp_match_connpkts = 0, \
.tls_enabled = 1, \
.frag_sni_reverse = 1, \
.frag_sni_faked = 0, \

509
src/dpi.c Normal file
View File

@@ -0,0 +1,509 @@
/*
youtubeUnblock - https://github.com/Waujito/youtubeUnblock
Copyright (C) 2024-2025 Vadim Vetrov <vetrovvd@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
/**
* dpi.c - Inspects packets for blocked patterns.
* "If you want to bypass the DPI, you should became the DPI"
*/
#define _GNU_SOURCE
#include "types.h" // IWYU pragma: keep
#ifndef KERNEL_SPACE
#include <stdlib.h>
#else
#include "linux/inet.h"
#endif
#include "dpi.h"
#include "config.h"
#include "utils.h"
#include "quic.h"
#include "logging.h"
#include "tls.h"
#include "mangle.h"
void log_packet(const struct parsed_packet *pkt);
#define MAX_FRAGMENTATION_PTS 16
struct fragmentation_points {
size_t payload_points[MAX_FRAGMENTATION_PTS];
int used_points;
};
int process_packet(const struct config_t *config, const struct packet_data *pd) {
assert (config);
assert (pd);
struct parsed_packet pkt = {0};
int ret = 0;
pkt.yct = pd->yct;
lgtrace_start();
pkt.raw_payload = pd->payload;
pkt.raw_payload_len = pd->payload_len;
if (pkt.raw_payload_len > MAX_PACKET_SIZE) {
return PKT_ACCEPT;
}
pkt.ipver = netproto_version(pkt.raw_payload, pkt.raw_payload_len);
lgtrace_wr("IPv%d ", pkt.ipver);
pkt.transport_proto = -1;
if (pkt.ipver == IP4VERSION) {
ret = ip4_payload_split((uint8_t *)pkt.raw_payload, pkt.raw_payload_len,
(struct iphdr **)&pkt.iph, &pkt.iph_len,
(uint8_t **)&pkt.ip_payload, &pkt.ip_payload_len);
if (ret < 0)
goto accept;
pkt.transport_proto = pkt.iph->protocol;
}
#ifndef NO_IPV6
else if (pkt.ipver == IP6VERSION && config->use_ipv6) {
ret = ip6_payload_split((uint8_t *)pkt.raw_payload, pkt.raw_payload_len,
(struct ip6_hdr **)&pkt.ip6h, &pkt.iph_len,
(uint8_t **)&pkt.ip_payload, &pkt.ip_payload_len);
if (ret < 0)
goto accept;
pkt.transport_proto = pkt.ip6h->ip6_nxt;
}
#endif
if (pkt.transport_proto == IPPROTO_TCP) {
int ret = tcp_payload_split((uint8_t *)pkt.raw_payload, pkt.raw_payload_len,
NULL, NULL,
(struct tcphdr **)&pkt.tcph, &pkt.tcph_len,
(uint8_t **)&pkt.transport_payload, &pkt.transport_payload_len);
if (ret < 0)
goto accept;
} else if (pkt.transport_proto == IPPROTO_UDP) {
int ret = udp_payload_split((uint8_t *)pkt.raw_payload, pkt.raw_payload_len,
NULL, NULL,
(struct udphdr **)&pkt.udph,
(uint8_t **)&pkt.transport_payload, &pkt.transport_payload_len);
if (ret < 0)
goto accept;
}
if (LOG_LEVEL >= VERBOSE_TRACE) {
log_packet(&pkt);
}
int verdict = PKT_CONTINUE;
ITER_CONFIG_SECTIONS(config, section) {
lgtrace_wr("Section #%d: ", CONFIG_SECTION_NUMBER(section));
switch (pkt.transport_proto) {
case IPPROTO_TCP:
verdict = process_tcp_packet(section, &pkt);
break;
case IPPROTO_UDP:
verdict = process_udp_packet(section, &pkt);
break;
}
if (verdict == PKT_CONTINUE) {
lgtrace_wr("continue_flow");
lgtrace_write();
continue;
}
lgtrace_write();
goto ret_verdict;
}
accept:
verdict = PKT_ACCEPT;
ret_verdict:
switch (verdict) {
case PKT_ACCEPT:
lgtrace_wr("accept");
break;
case PKT_DROP:
lgtrace_wr("drop");
break;
default:
lgtrace_wr("unknown verdict: %d", verdict);
}
lgtrace_end();
return verdict;
}
enum tls_proc_verdict {
TLS_NOT_MATCHED,
TLS_ERROR,
TLS_MATCHED,
};
enum tls_proc_verdict process_tls_packet(const struct section_config_t *section,
const struct parsed_packet *pkt,
struct fragmentation_points *frag_pts);
int perform_attack(const struct section_config_t *section,
const struct parsed_packet *pkt, const struct fragmentation_points *frag_pts);
int process_tcp_packet(const struct section_config_t *section, const struct parsed_packet *pkt) {
assert (section);
assert (pkt);
assert (pkt->transport_proto == IPPROTO_TCP);
uint16_t dport = ntohs(pkt->tcph->dest);
if (section->tcp_dport_range_len) {
int is_dport_matched = 0;
for (int i = 0; i < section->tcp_dport_range_len; i++) {
struct dport_range crange = section->tcp_dport_range[i];
if (dport >= crange.start && dport <= crange.end) {
lgtrace_addp("matched to %d-%d", crange.start, crange.end);
is_dport_matched = 1;
}
}
if (!is_dport_matched) {
return PKT_CONTINUE;
}
} else if (section->dport_filter && dport != 443) {
return PKT_CONTINUE;
}
if (pkt->tcph->syn && section->synfake) {
return send_synfake(section, pkt);
}
if (pkt->tcph->syn)
return PKT_CONTINUE;
int is_matched = 0;
struct fragmentation_points frag_pts = {0};
if (!is_matched && section->tls_enabled) {
enum tls_proc_verdict vrd = process_tls_packet(section, pkt, &frag_pts);
if (vrd == TLS_ERROR) {
return PKT_ACCEPT;
}
if (vrd == TLS_MATCHED) {
is_matched = 1;
}
}
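// Conntrack-based fallback: if TLS analysis did not match, match by the
// conntrack per-connection packet counter (--tcp-match-connpackets).
// There is no SNI to split around here, so the fragmentation point is
// taken from --frag-sni-pos.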
if (!is_matched && section->tcp_match_connpkts && pkt->yct.orig_packets) {
if (pkt->yct.orig_packets <= section->tcp_match_connpkts) {
lgtrace_addp("connpackets match: %u <= %d",
(uint32_t) pkt->yct.orig_packets, section->tcp_match_connpkts);
is_matched = 1;
frag_pts.used_points = 0;
if (section->frag_sni_pos &&
pkt->transport_payload_len > section->frag_sni_pos) {
frag_pts.payload_points[frag_pts.used_points++] =
section->frag_sni_pos;
lgtrace_addp("frag set to %d", section->frag_sni_pos);
}
}
}
if (is_matched) {
return perform_attack(section, pkt, &frag_pts);
}
return PKT_CONTINUE;
}
static void bubblesort(size_t arr[], size_t n){
if (n < 2)
return;
for (size_t i = 0; i < n - 1; i++) {
for (size_t j = 0; j < n - 1 - i; j++) {
if (arr[j] > arr[j + 1]) {
size_t temp = arr[j];
arr[j] = arr[j + 1];
arr[j + 1] = temp;
}
}
}
}
enum tls_proc_verdict process_tls_packet(const struct section_config_t *section,
const struct parsed_packet *pkt,
struct fragmentation_points *frag_pts) {
assert (section);
assert (pkt);
struct tls_verdict vrd = analyze_tls_data(section,
pkt->transport_payload, pkt->transport_payload_len);
lgtrace_addp("TLS analyzed");
if (vrd.sni_len != 0) {
lgtrace_addp("SNI detected: %.*s", vrd.sni_len, vrd.sni_ptr);
}
if (vrd.target_sni) {
lgdebug("Target SNI detected: %.*s", vrd.sni_len, vrd.sni_ptr);
size_t target_sni_offset = vrd.target_sni_ptr - pkt->transport_payload;
size_t ipd_offset = target_sni_offset;
size_t mid_offset = ipd_offset + vrd.target_sni_len / 2;
// hardcode googlevideo.com split
// googlevideo domains are very long, so
// it is possible for the entire domain not to be
// split (the split point lands in the subdomain)
if (vrd.target_sni_len > 30) {
mid_offset = ipd_offset +
vrd.target_sni_len - 12;
}
frag_pts->used_points = 0;
if (section->frag_sni_pos && pkt->transport_payload_len > section->frag_sni_pos) {
frag_pts->payload_points[frag_pts->used_points++] = section->frag_sni_pos;
}
if (section->frag_middle_sni) {
frag_pts->payload_points[frag_pts->used_points++] = mid_offset;
}
bubblesort(frag_pts->payload_points, frag_pts->used_points);
return TLS_MATCHED;
}
return TLS_NOT_MATCHED;
}
int perform_attack(const struct section_config_t *section,
const struct parsed_packet *pkt, const struct fragmentation_points *frag_pts) {
assert (section);
assert (pkt);
assert (frag_pts);
int ret = 0;
size_t payload_len = pkt->raw_payload_len;
uint8_t *payload = malloc(pkt->raw_payload_len);
if (payload == NULL) {
lgerror(-ENOMEM, "Allocation error");
return PKT_ACCEPT;
}
memcpy(payload, pkt->raw_payload, pkt->raw_payload_len);
if (pkt->transport_payload_len > AVAILABLE_MTU) {
lgdebug("WARNING! Tartget packet is too big and may cause issues!");
}
if (section->fake_sni) {
void *iph;
size_t iph_len;
struct tcphdr *tcph;
size_t tcph_len;
uint8_t *data;
size_t dlen;
int ret = tcp_payload_split(payload, payload_len,
&iph, &iph_len, &tcph, &tcph_len,
&data, &dlen);
if (ret < 0) {
lgerror(ret, "tcp_payload_split in targ_sni");
goto accept_lc;
}
if (section->fk_winsize) {
tcph->window = htons(section->fk_winsize);
set_tcp_checksum(tcph, iph, iph_len);
}
struct fake_type f_type = args_default_fake_type(section);
post_fake_sni(f_type, iph, iph_len, tcph, tcph_len);
}
if (frag_pts->used_points > 0) {
if (section->fragmentation_strategy == FRAG_STRAT_TCP) {
ret = send_tcp_frags(section, payload, payload_len, frag_pts->payload_points,
frag_pts->used_points, 0);
if (ret < 0) {
lgerror(ret, "tcp4 send frags");
goto accept_lc;
}
goto drop_lc;
} else if (section->fragmentation_strategy == FRAG_STRAT_IP && pkt->ipver == IP4VERSION) {
ret = send_ip4_frags(section, payload, payload_len, frag_pts->payload_points,
frag_pts->used_points, 0);
if (ret < 0) {
lgerror(ret, "tcp4 send frags");
goto accept_lc;
}
goto drop_lc;
} else if (section->fragmentation_strategy == FRAG_STRAT_IP && pkt->ipver != IP4VERSION) {
lginfo("WARNING: IP fragmentation is supported only for IPv4");
goto accept_lc;
}
}
accept_lc:
free(payload);
return PKT_ACCEPT;
drop_lc:
free(payload);
return PKT_DROP;
}
int process_udp_packet(const struct section_config_t *section, const struct parsed_packet *pkt) {
assert (section);
assert (pkt);
assert (pkt->transport_proto == IPPROTO_UDP);
int ret = 0;
if (!detect_udp_filtered(section, pkt->raw_payload, pkt->raw_payload_len))
goto continue_flow;
if (section->udp_mode == UDP_MODE_DROP)
goto drop;
else if (section->udp_mode == UDP_MODE_FAKE) {
for (int i = 0; i < section->udp_fake_seq_len; i++) {
uint8_t *fake_udp = NULL;
size_t fake_udp_len = 0;
struct udp_fake_type fake_type = {
.fake_len = section->udp_fake_len,
.strategy = {
.strategy = section->udp_faking_strategy,
.faking_ttl = section->faking_ttl,
},
};
ret = gen_fake_udp(fake_type, pkt->iph, pkt->iph_len, pkt->udph,
&fake_udp, &fake_udp_len);
if (ret < 0) {
lgerror(ret, "gen_fake_udp");
goto erret;
}
lgtrace_addp("post fake udp #%d", i + 1);
ret = instance_config.send_raw_packet(fake_udp, fake_udp_len);
if (ret < 0) {
lgerror(ret, "send fake udp");
goto erret_lc;
}
free(fake_udp);
continue;
erret_lc:
free(fake_udp);
erret:
goto accept;
}
// requeue
ret = instance_config.send_raw_packet(pkt->raw_payload, pkt->raw_payload_len);
goto drop;
}
continue_flow:
return PKT_CONTINUE;
accept:
return PKT_ACCEPT;
drop:
return PKT_DROP;
}
void log_packet(const struct parsed_packet *pkt) {
int ret = 0;
const char *bpt = inet_ntop(
pkt->ipver == IP4VERSION ? AF_INET : AF_INET6,
pkt->ipver == IP4VERSION ? (void *)(&pkt->iph->saddr) :
(void *)(&pkt->ip6h->ip6_src),
ylgh_curptr, ylgh_leftbuf);
if (bpt != NULL) {
ret = strnlen(bpt, ylgh_leftbuf);
ylgh_leftbuf -= ret;
ylgh_curptr += ret;
}
lgtrace_wr(" => ");
bpt = inet_ntop(
pkt->ipver == IP4VERSION ? AF_INET : AF_INET6,
pkt->ipver == IP4VERSION ? (void *)(&pkt->iph->daddr) :
(void *)(&pkt->ip6h->ip6_dst),
ylgh_curptr, ylgh_leftbuf);
if (bpt != NULL) {
ret = strnlen(bpt, ylgh_leftbuf);
ylgh_leftbuf -= ret;
ylgh_curptr += ret;
}
lgtrace_wr(" ");
int sport = -1, dport = -1;
if (pkt->transport_proto == IPPROTO_TCP) {
lgtrace_wr("TCP ");
sport = ntohs(pkt->tcph->source);
dport = ntohs(pkt->tcph->dest);
} else if (pkt->transport_proto == IPPROTO_UDP) {
lgtrace_wr("UDP ");
sport = ntohs(pkt->udph->source);
dport = ntohs(pkt->udph->dest);
}
lgtrace_wr("%d => %d ", sport, dport);
lgtrace_write();
lgtrace_wr("Transport payload: [ ");
for (int i = 0; i < min((int)16, (int)pkt->transport_payload_len); i++) {
lgtrace_wr("%02x ", pkt->transport_payload[i]);
}
lgtrace_wr("]");
lgtrace_write();
}

87
src/dpi.h Normal file
View File

@@ -0,0 +1,87 @@
/*
youtubeUnblock - https://github.com/Waujito/youtubeUnblock
Copyright (C) 2024-2025 Vadim Vetrov <vetrovvd@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef YU_DPI_H
#define YU_DPI_H
#include "types.h"
#include "tls.h"
#include "config.h"
#define PKT_ACCEPT 0
#define PKT_DROP 1
// Used for section config
#define PKT_CONTINUE 2
struct parsed_packet {
const uint8_t *raw_payload;
uint32_t raw_payload_len;
int ipver;
union {
void *ipxh;
const struct iphdr *iph;
#ifndef NO_IPV6
const struct ip6_hdr *ip6h;
#endif
};
size_t iph_len;
const uint8_t *ip_payload;
size_t ip_payload_len;
int transport_proto;
union {
struct {
const struct tcphdr *tcph;
size_t tcph_len;
};
struct {
const struct udphdr *udph;
};
};
const uint8_t *transport_payload;
size_t transport_payload_len;
struct ytb_conntrack yct;
};
/**
* Processes the packet and returns verdict.
* This is the primary function that traverses the packet.
*/
int process_packet(const struct config_t *config, const struct packet_data *pd);
/**
* Processes the TCP packet.
* Returns verdict.
*/
int process_tcp_packet(const struct section_config_t *section, const struct parsed_packet *pkt);
/**
* Processes the UDP packet.
* Returns verdict.
*/
int process_udp_packet(const struct section_config_t *section, const struct parsed_packet *pkt);
#endif /* YU_DPI_H */

View File

@@ -25,6 +25,7 @@
#include "quic.h"
#include "logging.h"
#include "tls.h"
#include "dpi.h"
#ifndef KERNEL_SPACE
#include <stdlib.h>
@@ -32,198 +33,13 @@
#include "linux/inet.h"
#endif
int process_packet(const struct config_t *config, const struct packet_data *pd) {
const uint8_t *raw_payload = pd->payload;
uint32_t raw_payload_len = pd->payload_len;
int send_synfake(const struct section_config_t *section, const struct parsed_packet *pkt) {
assert (section);
assert (pkt);
if (raw_payload_len > MAX_PACKET_SIZE) {
return PKT_ACCEPT;
}
assert (pkt->transport_proto == IPPROTO_TCP);
assert (pkt->tcph->syn);
const struct iphdr *iph;
const struct ip6_hdr *ip6h;
size_t iph_len;
const uint8_t *ip_payload;
size_t ip_payload_len;
const char *bpt;
int transport_proto = -1;
int ipver = netproto_version(raw_payload, raw_payload_len);
int ret;
lgtrace_start();
lgtrace_wr("IPv%d ", ipver);
if (ipver == IP4VERSION) {
ret = ip4_payload_split((uint8_t *)raw_payload, raw_payload_len,
(struct iphdr **)&iph, &iph_len,
(uint8_t **)&ip_payload, &ip_payload_len);
if (ret < 0)
goto accept;
transport_proto = iph->protocol;
}
#ifndef NO_IPV6
else if (ipver == IP6VERSION && config->use_ipv6) {
ret = ip6_payload_split((uint8_t *)raw_payload, raw_payload_len,
(struct ip6_hdr **)&ip6h, &iph_len,
(uint8_t **)&ip_payload, &ip_payload_len);
if (ret < 0)
goto accept;
transport_proto = ip6h->ip6_nxt;
}
#endif
else {
lgtrace("Unknown layer 3 protocol version: %d", ipver);
goto accept;
}
if (LOG_LEVEL >= VERBOSE_TRACE) {
bpt = inet_ntop(
ipver == IP4VERSION ? AF_INET : AF_INET6,
ipver == IP4VERSION ? (void *)(&iph->saddr) :
(void *)(&ip6h->ip6_src),
ylgh_curptr, ylgh_leftbuf);
if (bpt != NULL) {
ret = strnlen(bpt, ylgh_leftbuf);
ylgh_leftbuf -= ret;
ylgh_curptr += ret;
}
lgtrace_wr(" => ");
bpt = inet_ntop(
ipver == IP4VERSION ? AF_INET : AF_INET6,
ipver == IP4VERSION ? (void *)(&iph->daddr) :
(void *)(&ip6h->ip6_dst),
ylgh_curptr, ylgh_leftbuf);
if (bpt != NULL) {
ret = strnlen(bpt, ylgh_leftbuf);
ylgh_leftbuf -= ret;
ylgh_curptr += ret;
}
lgtrace_wr(" ");
const uint8_t *transport_payload = NULL;
size_t transport_payload_len = 0;
int sport = -1, dport = -1;
if (transport_proto == IPPROTO_TCP) {
lgtrace_wr("TCP ");
const struct tcphdr *tcph;
ret = tcp_payload_split((uint8_t *)raw_payload, raw_payload_len,
NULL, NULL,
(struct tcphdr **)&tcph, NULL,
(uint8_t **)&transport_payload, &transport_payload_len);
if (ret == 0) {
sport = ntohs(tcph->source);
dport = ntohs(tcph->dest);
}
} else if (transport_proto == IPPROTO_UDP) {
lgtrace_wr("UDP ");
const struct udphdr *udph = ((const struct udphdr *)ip_payload);
ret = udp_payload_split((uint8_t *)raw_payload, raw_payload_len,
NULL, NULL,
(struct udphdr **)&udph,
(uint8_t **)&transport_payload, &transport_payload_len);
if (ret == 0) {
sport = ntohs(udph->source);
dport = ntohs(udph->dest);
}
}
lgtrace_wr("%d => %d ", sport, dport);
lgtrace_write();
lgtrace_wr("Transport payload: [ ");
for (int i = 0; i < min((int)16, (int)transport_payload_len); i++) {
lgtrace_wr("%02x ", transport_payload[i]);
}
lgtrace_wr("]");
lgtrace_write();
}
int verdict = PKT_CONTINUE;
ITER_CONFIG_SECTIONS(config, section) {
lgtrace_wr("Section #%d: ", CONFIG_SECTION_NUMBER(section));
switch (transport_proto) {
case IPPROTO_TCP:
verdict = process_tcp_packet(section, raw_payload, raw_payload_len);
break;
case IPPROTO_UDP:
verdict = process_udp_packet(section, raw_payload, raw_payload_len);
break;
}
if (verdict == PKT_CONTINUE) {
lgtrace_wr("continue_flow");
lgtrace_write();
continue;
}
lgtrace_write();
goto ret_verdict;
}
accept:
verdict = PKT_ACCEPT;
ret_verdict:
switch (verdict) {
case PKT_ACCEPT:
lgtrace_wr("accept");
break;
case PKT_DROP:
lgtrace_wr("drop");
break;
default:
lgtrace_wr("unknown verdict: %d", verdict);
}
lgtrace_end();
return verdict;
}
int process_tcp_packet(const struct section_config_t *section, const uint8_t *raw_payload, size_t raw_payload_len) {
const void *ipxh;
size_t iph_len;
const struct tcphdr *tcph;
size_t tcph_len;
const uint8_t *data;
size_t dlen;
int ipxv = netproto_version(raw_payload, raw_payload_len);
int ret = tcp_payload_split((uint8_t *)raw_payload, raw_payload_len,
(void *)&ipxh, &iph_len,
(struct tcphdr **)&tcph, &tcph_len,
(uint8_t **)&data, &dlen);
if (ret < 0) {
return PKT_ACCEPT;
}
// As defined by TLS standard.
if (section->dport_filter && ntohs(tcph->dest) != 443) {
return PKT_ACCEPT;
}
if (tcph->syn && section->synfake) {
lgtrace_addp("TCP syn alter");
size_t fake_len = section->fake_sni_pkt_sz;
@@ -231,32 +47,31 @@ int process_tcp_packet(const struct section_config_t *section, const uint8_t *ra
fake_len = min((int)section->synfake_len, (int)fake_len);
size_t payload_len = iph_len + tcph_len + fake_len;
size_t payload_len = pkt->iph_len + pkt->tcph_len + fake_len;
uint8_t *payload = malloc(payload_len);
if (payload == NULL) {
lgerror(-ENOMEM, "Allocation error");
return PKT_ACCEPT;
}
memcpy(payload, ipxh, iph_len);
memcpy(payload + iph_len, tcph, tcph_len);
memcpy(payload + iph_len + tcph_len, section->fake_sni_pkt, fake_len);
memcpy(payload, pkt->ipxh, pkt->iph_len);
memcpy(payload + pkt->iph_len, pkt->tcph, pkt->tcph_len);
memcpy(payload + pkt->iph_len + pkt->tcph_len, section->fake_sni_pkt, fake_len);
struct tcphdr *tcph = (struct tcphdr *)(payload + iph_len);
if (ipxv == IP4VERSION) {
struct tcphdr *tcph = (struct tcphdr *)(payload + pkt->iph_len);
if (pkt->ipver == IP4VERSION) {
struct iphdr *iph = (struct iphdr *)payload;
iph->tot_len = htons(iph_len + tcph_len + fake_len);
set_ip_checksum(payload, iph_len);
set_tcp_checksum(tcph, iph, iph_len);
} else if (ipxv == IP6VERSION) {
iph->tot_len = htons(pkt->iph_len + pkt->tcph_len + fake_len);
set_ip_checksum(payload, pkt->iph_len);
set_tcp_checksum(tcph, iph, pkt->iph_len);
} else if (pkt->ipver == IP6VERSION) {
struct ip6_hdr *ip6h = (struct ip6_hdr *)payload;
ip6h->ip6_plen = ntohs(tcph_len + fake_len);
set_ip_checksum(ip6h, iph_len);
set_tcp_checksum(tcph, ip6h, iph_len);
ip6h->ip6_plen = ntohs(pkt->tcph_len + fake_len);
set_ip_checksum(ip6h, pkt->iph_len);
set_tcp_checksum(tcph, ip6h, pkt->iph_len);
}
ret = instance_config.send_raw_packet(payload, payload_len);
int ret = instance_config.send_raw_packet(payload, payload_len);
if (ret < 0) {
lgerror(ret, "send_syn_altered");
@@ -266,260 +81,6 @@ int process_tcp_packet(const struct section_config_t *section, const uint8_t *ra
free(payload);
return PKT_DROP;
}
if (tcph->syn)
return PKT_CONTINUE;
if (!section->tls_enabled)
return PKT_CONTINUE;
struct tls_verdict vrd = analyze_tls_data(section, data, dlen);
lgtrace_addp("TLS analyzed");
if (vrd.sni_len != 0) {
lgtrace_addp("SNI detected: %.*s", vrd.sni_len, vrd.sni_ptr);
}
if (vrd.target_sni) {
lgdebug("Target SNI detected: %.*s", vrd.sni_len, vrd.sni_ptr);
size_t target_sni_offset = vrd.target_sni_ptr - data;
size_t payload_len = raw_payload_len;
uint8_t *payload = malloc(raw_payload_len);
if (payload == NULL) {
lgerror(-ENOMEM, "Allocation error");
return PKT_ACCEPT;
}
memcpy(payload, raw_payload, raw_payload_len);
void *iph;
size_t iph_len;
struct tcphdr *tcph;
size_t tcph_len;
uint8_t *data;
size_t dlen;
int ret = tcp_payload_split(payload, payload_len,
&iph, &iph_len, &tcph, &tcph_len,
&data, &dlen);
if (ret < 0) {
lgerror(ret, "tcp_payload_split in targ_sni");
goto accept_lc;
}
if (section->fk_winsize) {
tcph->window = htons(section->fk_winsize);
set_tcp_checksum(tcph, iph, iph_len);
}
if (0) {
int delta = 2;
ret = seqovl_packet(payload, &payload_len, delta);
int ret = tcp_payload_split(payload, payload_len,
&iph, &iph_len, &tcph, &tcph_len,
&data, &dlen);
if (ret < 0) {
lgerror(ret, "seqovl_packet delta %d", delta);
}
}
if (dlen > AVAILABLE_MTU) {
lgdebug("WARNING! Client Hello packet is too big and may cause issues!");
}
if (section->fake_sni) {
struct fake_type f_type = args_default_fake_type(section);
// f_type.strategy.strategy = FAKE_STRAT_RAND_SEQ;
// f_type.strategy.randseq_offset = f_type.fake_len - 1;
// f_type.fake_data = "\x16";
// f_type.fake_len = 1;
//
// post_fake_sni(f_type, iph, iph_len, tcph, tcph_len);
//
// f_type = args_default_fake_type(section);
// // f_type.strategy.strategy = FAKE_STRAT_RAND_SEQ;
// // f_type.strategy.randseq_offset = 0;
// f_type.fake_data += 1;
// f_type.fake_len -= 1;
post_fake_sni(f_type, iph, iph_len, tcph, tcph_len);
}
size_t ipd_offset;
size_t mid_offset;
switch (section->fragmentation_strategy) {
case FRAG_STRAT_TCP:
{
ipd_offset = target_sni_offset;
mid_offset = ipd_offset + vrd.target_sni_len / 2;
// hardcode googlevideo.com split
// googlevideo domains are very long, so
// it is possible for the entire domain to not be
// splitted (split goes for subdomain)
if (vrd.target_sni_len > 30) {
mid_offset = ipd_offset +
vrd.target_sni_len - 12;
}
size_t poses[2];
int cnt = 0;
if (section->frag_sni_pos && dlen > section->frag_sni_pos) {
poses[cnt++] = section->frag_sni_pos;
}
if (section->frag_middle_sni) {
poses[cnt++] = mid_offset;
}
if (cnt > 1 && poses[0] > poses[1]) {
size_t tmp = poses[0];
poses[0] = poses[1];
poses[1] = tmp;
}
ret = send_tcp_frags(section, payload, payload_len, poses, cnt, 0);
if (ret < 0) {
lgerror(ret, "tcp4 send frags");
goto accept_lc;
}
goto drop_lc;
}
break;
case FRAG_STRAT_IP:
if (ipxv == IP4VERSION) {
ipd_offset = ((char *)data - (char *)tcph) + target_sni_offset;
mid_offset = ipd_offset + vrd.target_sni_len / 2;
mid_offset += 8 - mid_offset % 8;
size_t poses[2];
int cnt = 0;
if (section->frag_sni_pos && dlen > section->frag_sni_pos) {
poses[cnt] = section->frag_sni_pos + ((char *)data - (char *)tcph);
poses[cnt] += 8 - poses[cnt] % 8;
cnt++;
}
if (section->frag_middle_sni) {
poses[cnt++] = mid_offset;
}
if (cnt > 1 && poses[0] > poses[1]) {
size_t tmp = poses[0];
poses[0] = poses[1];
poses[1] = tmp;
}
ret = send_ip4_frags(section, payload, payload_len, poses, cnt, 0);
if (ret < 0) {
lgerror(ret, "ip4 send frags");
goto accept_lc;
}
goto drop_lc;
} else {
lginfo("WARNING: IP fragmentation is supported only for IPv4");
goto default_send;
}
break;
}
default_send:
ret = instance_config.send_raw_packet(payload, payload_len);
if (ret < 0) {
lgerror(ret, "raw pack send");
goto accept_lc;
}
goto drop_lc;
accept_lc:
free(payload);
return PKT_ACCEPT;
drop_lc:
free(payload);
return PKT_DROP;
}
return PKT_CONTINUE;
}
int process_udp_packet(const struct section_config_t *section, const uint8_t *pkt, size_t pktlen) {
const void *iph;
size_t iph_len;
const struct udphdr *udph;
const uint8_t *data;
size_t dlen;
int ret = udp_payload_split((uint8_t *)pkt, pktlen,
(void **)&iph, &iph_len,
(struct udphdr **)&udph,
(uint8_t **)&data, &dlen);
if (ret < 0) {
lgtrace_addp("undefined");
goto accept;
}
if (!detect_udp_filtered(section, pkt, pktlen))
goto continue_flow;
if (section->udp_mode == UDP_MODE_DROP)
goto drop;
else if (section->udp_mode == UDP_MODE_FAKE) {
for (int i = 0; i < section->udp_fake_seq_len; i++) {
uint8_t *fake_udp;
size_t fake_udp_len;
struct udp_fake_type fake_type = {
.fake_len = section->udp_fake_len,
.strategy = {
.strategy = section->udp_faking_strategy,
.faking_ttl = section->faking_ttl,
},
};
ret = gen_fake_udp(fake_type, iph, iph_len, udph, &fake_udp, &fake_udp_len);
if (ret < 0) {
lgerror(ret, "gen_fake_udp");
goto erret;
}
lgtrace_addp("post fake udp #%d", i + 1);
ret = instance_config.send_raw_packet(fake_udp, fake_udp_len);
if (ret < 0) {
lgerror(ret, "send fake udp");
goto erret_lc;
}
free(fake_udp);
continue;
erret_lc:
free(fake_udp);
erret:
goto accept;
}
ret = instance_config.send_raw_packet(pkt, pktlen);
goto drop;
}
continue_flow:
return PKT_CONTINUE;
accept:
return PKT_ACCEPT;
drop:
return PKT_DROP;
}
int send_ip4_frags(const struct section_config_t *section, const uint8_t *packet, size_t pktlen, const size_t *poses, size_t poses_sz, size_t dvs) {

View File

@@ -23,32 +23,18 @@
#include "types.h"
#include "tls.h"
#include "config.h"
#include "dpi.h"
#define PKT_ACCEPT 0
#define PKT_DROP 1
// Used for section config
#define PKT_CONTINUE 2
/**
* Processes the packet and returns verdict.
* This is the primary function that traverses the packet.
*/
int process_packet(const struct config_t *config, const struct packet_data *pd);
/**
* Processes the TCP packet.
* Returns verdict.
* Sends synfake message
*/
int process_tcp_packet(const struct section_config_t *section, const uint8_t *raw_payload, size_t raw_payload_len);
/**
* Processes the UDP packet.
* Returns verdict.
*/
int process_udp_packet(const struct section_config_t *section, const uint8_t *pkt, size_t pktlen);
int send_synfake(const struct section_config_t *section, const struct parsed_packet *pkt);
/**

View File

@@ -579,7 +579,7 @@ int detect_udp_filtered(const struct section_config_t *section,
match_port:
for (int i = 0; i < section->udp_dport_range_len; i++) {
struct udp_dport_range crange = section->udp_dport_range[i];
struct dport_range crange = section->udp_dport_range[i];
if (udp_dport >= crange.start && udp_dport <= crange.end) {
lgtrace_addp("matched to %d-%d", crange.start, crange.end);
goto approve;

View File

@@ -22,6 +22,12 @@
#define TYPES_H
#include <asm/byteorder.h>
#ifndef KERNEL_SPACE
#include <assert.h>
#else
#define assert(...) ;
#endif
#ifdef KERNEL_SPACE
#include <linux/errno.h> // IWYU pragma: export
#include <linux/string.h> // IWYU pragma: export

View File

@@ -49,7 +49,7 @@
#include <signal.h>
#include "config.h"
#include "mangle.h"
#include "dpi.h"
#include "args.h"
#include "utils.h"
#include "logging.h"
@@ -63,8 +63,9 @@ int raw6socket = -2;
static struct config_t *cur_config = NULL;
static int open_socket(struct mnl_socket **_nl) {
struct mnl_socket *nl = NULL;
nl = mnl_socket_open(NETLINK_NETFILTER);
assert (_nl);
struct mnl_socket *nl = mnl_socket_open(NETLINK_NETFILTER);
if (nl == NULL) {
lgerror(-errno, "mnl_socket_open");
@@ -83,15 +84,15 @@ static int open_socket(struct mnl_socket **_nl) {
}
static int close_socket(struct mnl_socket **_nl) {
struct mnl_socket *nl = *_nl;
if (nl == NULL) return 1;
if (mnl_socket_close(nl) < 0) {
static int close_socket(struct mnl_socket **nl) {
assert (nl);
if (*nl && mnl_socket_close(*nl) < 0) {
lgerror(-errno, "mnl_socket_close");
return -1;
}
*_nl = NULL;
*nl = NULL;
return 0;
}
@@ -199,231 +200,6 @@ static int close_raw6_socket(void) {
return 0;
}
/*
* libnetfilter_conntrack
* (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This code has been sponsored by Vyatta Inc. <http://www.vyatta.com>
*/
enum ctattr_counters {
CTA_COUNTERS_UNSPEC,
CTA_COUNTERS_PACKETS, /* 64bit counters */
CTA_COUNTERS_BYTES, /* 64bit counters */
CTA_COUNTERS32_PACKETS, /* old 32bit counters, unused */
CTA_COUNTERS32_BYTES, /* old 32bit counters, unused */
CTA_COUNTERS_PAD,
__CTA_COUNTERS_MAX
};
#define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
enum ctattr_type {
CTA_UNSPEC,
CTA_TUPLE_ORIG,
CTA_TUPLE_REPLY,
CTA_STATUS,
CTA_PROTOINFO,
CTA_HELP,
CTA_NAT_SRC,
#define CTA_NAT CTA_NAT_SRC /* backwards compatibility */
CTA_TIMEOUT,
CTA_MARK,
CTA_COUNTERS_ORIG,
CTA_COUNTERS_REPLY,
CTA_USE,
CTA_ID,
CTA_NAT_DST,
CTA_TUPLE_MASTER,
CTA_SEQ_ADJ_ORIG,
CTA_NAT_SEQ_ADJ_ORIG = CTA_SEQ_ADJ_ORIG,
CTA_SEQ_ADJ_REPLY,
CTA_NAT_SEQ_ADJ_REPLY = CTA_SEQ_ADJ_REPLY,
CTA_SECMARK, /* obsolete */
CTA_ZONE,
CTA_SECCTX,
CTA_TIMESTAMP,
CTA_MARK_MASK,
CTA_LABELS,
CTA_LABELS_MASK,
CTA_SYNPROXY,
CTA_FILTER,
CTA_STATUS_MASK,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)
enum {
__DIR_ORIG,
__DIR_REPL
};
static int
yct_parse_counters_attr_cb(const struct nlattr *attr, void *data)
{
const struct nlattr **tb = data;
int type = mnl_attr_get_type(attr);
if (mnl_attr_type_valid(attr, CTA_COUNTERS_MAX) < 0)
return MNL_CB_OK;
switch(type) {
case CTA_COUNTERS_PACKETS:
case CTA_COUNTERS_BYTES:
if (mnl_attr_validate(attr, MNL_TYPE_U64) < 0)
return MNL_CB_ERROR;
break;
case CTA_COUNTERS32_PACKETS:
case CTA_COUNTERS32_BYTES:
if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0)
return MNL_CB_ERROR;
break;
}
tb[type] = attr;
return MNL_CB_OK;
}
static int
yct_parse_counters(const struct nlattr *attr, struct ytb_conntrack *yct,
int dir)
{
struct nlattr *tb[CTA_COUNTERS_MAX+1] = {0};
if (mnl_attr_parse_nested(attr, yct_parse_counters_attr_cb, tb) < 0)
return -1;
if (tb[CTA_COUNTERS_PACKETS] || tb[CTA_COUNTERS32_PACKETS]) {
uint64_t packets_counter;
if (tb[CTA_COUNTERS32_PACKETS]) {
packets_counter =
ntohl(mnl_attr_get_u32(tb[CTA_COUNTERS32_PACKETS]));
}
if (tb[CTA_COUNTERS_PACKETS]) {
packets_counter =
be64toh(mnl_attr_get_u64(tb[CTA_COUNTERS_PACKETS]));
}
switch(dir) {
case __DIR_ORIG:
yct->orig_packets = packets_counter;
yct_set_mask_attr(YCTATTR_ORIG_PACKETS, yct);
break;
case __DIR_REPL:
yct->repl_packets = packets_counter;
yct_set_mask_attr(YCTATTR_REPL_PACKETS, yct);
break;
}
}
if (tb[CTA_COUNTERS_BYTES] || tb[CTA_COUNTERS32_BYTES]) {
uint64_t bytes_counter;
if (tb[CTA_COUNTERS32_BYTES]) {
bytes_counter =
ntohl(mnl_attr_get_u32(tb[CTA_COUNTERS32_BYTES]));
}
if (tb[CTA_COUNTERS_BYTES]) {
bytes_counter =
be64toh(mnl_attr_get_u64(tb[CTA_COUNTERS_BYTES]));
}
switch(dir) {
case __DIR_ORIG:
yct->orig_bytes = bytes_counter;
yct_set_mask_attr(YCTATTR_ORIG_BYTES, yct);
break;
case __DIR_REPL:
yct->repl_bytes = bytes_counter;
yct_set_mask_attr(YCTATTR_REPL_BYTES, yct);
break;
}
}
return 0;
}
static int
yct_parse_conntrack_attr_cb(const struct nlattr *attr, void *data){
const struct nlattr **tb = data;
int type = mnl_attr_get_type(attr);
if (mnl_attr_type_valid(attr, CTA_MAX) < 0)
return MNL_CB_OK;
switch(type) {
case CTA_TUPLE_ORIG:
case CTA_TUPLE_REPLY:
case CTA_TUPLE_MASTER:
case CTA_NAT_SEQ_ADJ_ORIG:
case CTA_NAT_SEQ_ADJ_REPLY:
case CTA_PROTOINFO:
case CTA_COUNTERS_ORIG:
case CTA_COUNTERS_REPLY:
case CTA_HELP:
case CTA_SECCTX:
case CTA_TIMESTAMP:
if (mnl_attr_validate(attr, MNL_TYPE_NESTED) < 0)
return MNL_CB_ERROR;
break;
case CTA_STATUS:
case CTA_TIMEOUT:
case CTA_MARK:
case CTA_SECMARK:
case CTA_USE:
case CTA_ID:
if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0)
return MNL_CB_ERROR;
break;
case CTA_ZONE:
if (mnl_attr_validate(attr, MNL_TYPE_U16) < 0)
return MNL_CB_ERROR;
break;
case CTA_NAT_SRC:
case CTA_NAT_DST:
/* deprecated */
break;
}
tb[type] = attr;
return MNL_CB_OK;
}
static int
yct_payload_parse(const void *payload, size_t payload_len,
uint16_t l3num, struct ytb_conntrack *yct)
{
struct nlattr *tb[CTA_MAX+1] = {0};
if (mnl_attr_parse_payload(payload, payload_len,
yct_parse_conntrack_attr_cb, tb) < 0)
return -1;
if (tb[CTA_MARK]) {
yct->connmark = ntohl(mnl_attr_get_u32(tb[CTA_MARK]));
yct_set_mask_attr(YCTATTR_CONNMARK, yct);
}
if (tb[CTA_COUNTERS_ORIG]) {
if (yct_parse_counters(tb[CTA_COUNTERS_ORIG],
yct, __DIR_ORIG) < 0)
return -1;
}
if (tb[CTA_ID]) {
yct->id = ntohl(mnl_attr_get_u32(tb[CTA_ID]));
yct_set_mask_attr(YCTATTR_CONNID, yct);
}
if (tb[CTA_COUNTERS_REPLY]) {
if (yct_parse_counters(tb[CTA_COUNTERS_REPLY],
yct, __DIR_REPL) < 0)
return -1;
}
return 0;
}
static int send_raw_ipv4(const uint8_t *pkt, size_t pktlen) {
int ret;
if (pktlen > AVAILABLE_MTU) return -ENOMEM;
@@ -563,6 +339,227 @@ erret_lc:
return ret;
}
/*
* libnetfilter_conntrack
* (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This code has been sponsored by Vyatta Inc. <http://www.vyatta.com>
*/
enum ctattr_counters {
CTA_COUNTERS_UNSPEC,
CTA_COUNTERS_PACKETS, /* 64bit counters */
CTA_COUNTERS_BYTES, /* 64bit counters */
CTA_COUNTERS32_PACKETS, /* old 32bit counters, unused */
CTA_COUNTERS32_BYTES, /* old 32bit counters, unused */
CTA_COUNTERS_PAD,
__CTA_COUNTERS_MAX
};
#define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
enum ctattr_type {
CTA_UNSPEC,
CTA_TUPLE_ORIG,
CTA_TUPLE_REPLY,
CTA_STATUS,
CTA_PROTOINFO,
CTA_HELP,
CTA_NAT_SRC,
#define CTA_NAT CTA_NAT_SRC /* backwards compatibility */
CTA_TIMEOUT,
CTA_MARK,
CTA_COUNTERS_ORIG,
CTA_COUNTERS_REPLY,
CTA_USE,
CTA_ID,
CTA_NAT_DST,
CTA_TUPLE_MASTER,
CTA_SEQ_ADJ_ORIG,
CTA_NAT_SEQ_ADJ_ORIG = CTA_SEQ_ADJ_ORIG,
CTA_SEQ_ADJ_REPLY,
CTA_NAT_SEQ_ADJ_REPLY = CTA_SEQ_ADJ_REPLY,
CTA_SECMARK, /* obsolete */
CTA_ZONE,
CTA_SECCTX,
CTA_TIMESTAMP,
CTA_MARK_MASK,
CTA_LABELS,
CTA_LABELS_MASK,
CTA_SYNPROXY,
CTA_FILTER,
CTA_STATUS_MASK,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)
enum {
__DIR_ORIG,
__DIR_REPL
};
static int yct_parse_counters_attr_cb(const struct nlattr *attr,
void *data) {
const struct nlattr **tb = data;
int type = mnl_attr_get_type(attr);
if (mnl_attr_type_valid(attr, CTA_COUNTERS_MAX) < 0)
return MNL_CB_OK;
switch(type) {
case CTA_COUNTERS_PACKETS:
case CTA_COUNTERS_BYTES:
if (mnl_attr_validate(attr, MNL_TYPE_U64) < 0)
return MNL_CB_ERROR;
break;
case CTA_COUNTERS32_PACKETS:
case CTA_COUNTERS32_BYTES:
if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0)
return MNL_CB_ERROR;
break;
}
tb[type] = attr;
return MNL_CB_OK;
}
static int yct_parse_counters(const struct nlattr *attr,
struct ytb_conntrack *yct, int dir) {
struct nlattr *tb[CTA_COUNTERS_MAX+1] = {0};
if (mnl_attr_parse_nested(attr, yct_parse_counters_attr_cb, tb) < 0)
return -1;
if (tb[CTA_COUNTERS_PACKETS] || tb[CTA_COUNTERS32_PACKETS]) {
uint64_t packets_counter;
if (tb[CTA_COUNTERS32_PACKETS]) {
packets_counter =
ntohl(mnl_attr_get_u32(tb[CTA_COUNTERS32_PACKETS]));
}
if (tb[CTA_COUNTERS_PACKETS]) {
packets_counter =
be64toh(mnl_attr_get_u64(tb[CTA_COUNTERS_PACKETS]));
}
switch(dir) {
case __DIR_ORIG:
yct->orig_packets = packets_counter;
yct_set_mask_attr(YCTATTR_ORIG_PACKETS, yct);
break;
case __DIR_REPL:
yct->repl_packets = packets_counter;
yct_set_mask_attr(YCTATTR_REPL_PACKETS, yct);
break;
}
}
if (tb[CTA_COUNTERS_BYTES] || tb[CTA_COUNTERS32_BYTES]) {
uint64_t bytes_counter;
if (tb[CTA_COUNTERS32_BYTES]) {
bytes_counter =
ntohl(mnl_attr_get_u32(tb[CTA_COUNTERS32_BYTES]));
}
if (tb[CTA_COUNTERS_BYTES]) {
bytes_counter =
be64toh(mnl_attr_get_u64(tb[CTA_COUNTERS_BYTES]));
}
switch(dir) {
case __DIR_ORIG:
yct->orig_bytes = bytes_counter;
yct_set_mask_attr(YCTATTR_ORIG_BYTES, yct);
break;
case __DIR_REPL:
yct->repl_bytes = bytes_counter;
yct_set_mask_attr(YCTATTR_REPL_BYTES, yct);
break;
}
}
return 0;
}
static int yct_parse_conntrack_attr_cb(const struct nlattr *attr,
void *data) {
const struct nlattr **tb = data;
int type = mnl_attr_get_type(attr);
if (mnl_attr_type_valid(attr, CTA_MAX) < 0)
return MNL_CB_OK;
switch(type) {
case CTA_TUPLE_ORIG:
case CTA_TUPLE_REPLY:
case CTA_TUPLE_MASTER:
case CTA_NAT_SEQ_ADJ_ORIG:
case CTA_NAT_SEQ_ADJ_REPLY:
case CTA_PROTOINFO:
case CTA_COUNTERS_ORIG:
case CTA_COUNTERS_REPLY:
case CTA_HELP:
case CTA_SECCTX:
case CTA_TIMESTAMP:
if (mnl_attr_validate(attr, MNL_TYPE_NESTED) < 0)
return MNL_CB_ERROR;
break;
case CTA_STATUS:
case CTA_TIMEOUT:
case CTA_MARK:
case CTA_SECMARK:
case CTA_USE:
case CTA_ID:
if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0)
return MNL_CB_ERROR;
break;
case CTA_ZONE:
if (mnl_attr_validate(attr, MNL_TYPE_U16) < 0)
return MNL_CB_ERROR;
break;
case CTA_NAT_SRC:
case CTA_NAT_DST:
/* deprecated */
break;
}
tb[type] = attr;
return MNL_CB_OK;
}
static int yct_payload_parse(const void *payload,
size_t payload_len, uint16_t l3num,
struct ytb_conntrack *yct) {
struct nlattr *tb[CTA_MAX+1] = {0};
if (mnl_attr_parse_payload(payload, payload_len,
yct_parse_conntrack_attr_cb, tb) < 0)
return -1;
if (tb[CTA_MARK]) {
yct->connmark = ntohl(mnl_attr_get_u32(tb[CTA_MARK]));
yct_set_mask_attr(YCTATTR_CONNMARK, yct);
}
if (tb[CTA_COUNTERS_ORIG]) {
if (yct_parse_counters(tb[CTA_COUNTERS_ORIG],
yct, __DIR_ORIG) < 0)
return -1;
}
if (tb[CTA_ID]) {
yct->id = ntohl(mnl_attr_get_u32(tb[CTA_ID]));
yct_set_mask_attr(YCTATTR_CONNID, yct);
}
if (tb[CTA_COUNTERS_REPLY]) {
if (yct_parse_counters(tb[CTA_COUNTERS_REPLY],
yct, __DIR_REPL) < 0)
return -1;
}
return 0;
}
// Per-queue data. Passed to queue_cb.
struct queue_data {
@@ -684,15 +681,16 @@ static int queue_cb(const struct nlmsghdr *nlh, void *data) {
packet.payload_len = mnl_attr_get_payload_len(attr[NFQA_PAYLOAD]);
packet.payload = mnl_attr_get_payload(attr[NFQA_PAYLOAD]);
if (attr[NFQA_CAP_LEN] != NULL && ntohl(mnl_attr_get_u32(attr[NFQA_CAP_LEN])) != packet.payload_len) {
if (attr[NFQA_CAP_LEN] != NULL &&
ntohl(mnl_attr_get_u32(attr[NFQA_CAP_LEN])) != packet.payload_len) {
lgerr("The packet was truncated! Skip!");
return fallback_accept_packet(id, *qdata);
}
if (attr[NFQA_MARK] != NULL) {
// Skip packets sent by rawsocket to escape infinity loop.
if ((ntohl(mnl_attr_get_u32(attr[NFQA_MARK])) & cur_config->mark) ==
cur_config->mark) {
if (CHECK_BITFIELD(ntohl(mnl_attr_get_u32(attr[NFQA_MARK])),
cur_config->mark)) {
return fallback_accept_packet(id, *qdata);
}
}

View File

@@ -34,7 +34,7 @@ export CC CCLD LD CFLAGS LDFLAGS LIBNFNETLINK_CFLAGS LIBNFNETLINK_LIBS LIBMNL_CF
APP:=$(BUILD_DIR)/youtubeUnblock
TEST_APP:=$(BUILD_DIR)/testYoutubeUnblock
SRCS := mangle.c args.c utils.c quic.c tls.c getopt.c quic_crypto.c inet_ntop.c trie.c
SRCS := mangle.c args.c utils.c quic.c tls.c getopt.c quic_crypto.c inet_ntop.c trie.c dpi.c
OBJS := $(SRCS:%.c=$(BUILD_DIR)/%.o)
APP_EXEC := youtubeUnblock.c
APP_OBJ := $(APP_EXEC:%.c=$(BUILD_DIR)/%.o)