Use mallocs instead of NETBUF_ALLOC

malloc won't hurt while youtubeUnblock is processing a packet, it is
better suited for the kernel module, and it is far cleaner than the NETBUF_ALLOC macros.
This commit is contained in:
Vadim Vetrov
2025-01-09 19:44:30 +03:00
parent 9985fcea49
commit d87ab29199
8 changed files with 270 additions and 336 deletions

View File

@@ -211,32 +211,32 @@ int process_tcp_packet(const struct section_config_t *section, const uint8_t *ra
if (ret < 0) {
goto accept;
return PKT_ACCEPT;
}
// As defined by TLS standard.
if (section->dport_filter && ntohs(tcph->dest) != 443) {
goto accept;
return PKT_ACCEPT;
}
if (tcph->syn && section->synfake) {
lgtrace_addp("TCP syn alter");
NETBUF_ALLOC(payload, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(payload)) {
lgerror(-ENOMEM, "Allocation error");
goto accept;
}
memcpy(payload, ipxh, iph_len);
memcpy(payload + iph_len, tcph, tcph_len);
size_t fake_len = section->fake_sni_pkt_sz;
if (section->synfake_len)
fake_len = min(section->synfake_len, fake_len);
memcpy(payload + iph_len + tcph_len, section->fake_sni_pkt, fake_len);
size_t payload_len = iph_len + tcph_len + fake_len;
uint8_t *payload = malloc(payload_len);
if (payload == NULL) {
lgerror(-ENOMEM, "Allocation error");
return PKT_ACCEPT;
}
memcpy(payload, ipxh, iph_len);
memcpy(payload + iph_len, tcph, tcph_len);
memcpy(payload + iph_len + tcph_len, section->fake_sni_pkt, fake_len);
struct tcphdr *tcph = (struct tcphdr *)(payload + iph_len);
if (ipxv == IP4VERSION) {
@@ -252,22 +252,23 @@ int process_tcp_packet(const struct section_config_t *section, const uint8_t *ra
}
ret = instance_config.send_raw_packet(payload, iph_len + tcph_len + fake_len);
ret = instance_config.send_raw_packet(payload, payload_len);
if (ret < 0) {
lgerror(ret, "send_syn_altered");
NETBUF_FREE(payload);
goto accept;
free(payload);
return PKT_ACCEPT;
}
NETBUF_FREE(payload);
goto drop;
free(payload);
return PKT_DROP;
}
if (tcph->syn) goto continue_flow;
if (tcph->syn)
return PKT_CONTINUE;
if (!section->tls_enabled)
goto continue_flow;
return PKT_CONTINUE;
struct tls_verdict vrd = analyze_tls_data(section, data, dlen);
lgtrace_addp("TLS analyzed");
@@ -282,12 +283,11 @@ int process_tcp_packet(const struct section_config_t *section, const uint8_t *ra
size_t target_sni_offset = vrd.target_sni_ptr - data;
size_t payload_len = raw_payload_len;
NETBUF_ALLOC(payload, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(payload)) {
uint8_t *payload = malloc(raw_payload_len);
if (payload == NULL) {
lgerror(-ENOMEM, "Allocation error");
goto accept;
return PKT_ACCEPT;
}
memcpy(payload, raw_payload, raw_payload_len);
void *iph;
@@ -311,6 +311,7 @@ int process_tcp_packet(const struct section_config_t *section, const uint8_t *ra
set_tcp_checksum(tcph, iph, iph_len);
}
/*
if (0) {
int delta = 2;
ret = seqovl_packet(payload, &payload_len, delta);
@@ -321,116 +322,108 @@ int process_tcp_packet(const struct section_config_t *section, const uint8_t *ra
lgerror(ret, "seqovl_packet delta %d", delta);
}
}
*/
if (dlen > 1480 && config.verbose) {
if (dlen > AVAILABLE_MTU) {
lgdebug("WARNING! Client Hello packet is too big and may cause issues!");
}
if (section->fake_sni) {
post_fake_sni(args_default_fake_type(section), iph, iph_len, tcph, tcph_len);
post_fake_sni(args_default_fake_type(section), iph, iph_len, tcph, tcph_len);
}
size_t ipd_offset;
size_t mid_offset;
switch (section->fragmentation_strategy) {
case FRAG_STRAT_TCP: {
ipd_offset = target_sni_offset;
mid_offset = ipd_offset + vrd.target_sni_len / 2;
case FRAG_STRAT_TCP:
{
ipd_offset = target_sni_offset;
mid_offset = ipd_offset + vrd.target_sni_len / 2;
size_t poses[2];
int cnt = 0;
size_t poses[2];
int cnt = 0;
if (section->frag_sni_pos && dlen > section->frag_sni_pos) {
poses[cnt++] = section->frag_sni_pos;
}
if (section->frag_middle_sni) {
poses[cnt++] = mid_offset;
}
if (cnt > 1 && poses[0] > poses[1]) {
size_t tmp = poses[0];
poses[0] = poses[1];
poses[1] = tmp;
}
ret = send_tcp_frags(section, payload, payload_len, poses, cnt, 0);
if (ret < 0) {
lgerror(ret, "tcp4 send frags");
goto accept_lc;
}
goto drop_lc;
if (section->frag_sni_pos && dlen > section->frag_sni_pos) {
poses[cnt++] = section->frag_sni_pos;
}
break;
case FRAG_STRAT_IP:
if (ipxv == IP4VERSION) {
ipd_offset = ((char *)data - (char *)tcph) + target_sni_offset;
mid_offset = ipd_offset + vrd.target_sni_len / 2;
mid_offset += 8 - mid_offset % 8;
size_t poses[2];
int cnt = 0;
if (section->frag_sni_pos && dlen > section->frag_sni_pos) {
poses[cnt] = section->frag_sni_pos + ((char *)data - (char *)tcph);
poses[cnt] += 8 - poses[cnt] % 8;
cnt++;
}
if (section->frag_middle_sni) {
poses[cnt++] = mid_offset;
}
if (cnt > 1 && poses[0] > poses[1]) {
size_t tmp = poses[0];
poses[0] = poses[1];
poses[1] = tmp;
}
ret = send_ip4_frags(section, payload, payload_len, poses, cnt, 0);
if (ret < 0) {
lgerror(ret, "ip4 send frags");
goto accept_lc;
}
goto drop_lc;
} else {
lginfo("WARNING: IP fragmentation is supported only for IPv4");
goto default_send;
if (section->frag_middle_sni) {
poses[cnt++] = mid_offset;
}
default:
default_send:
ret = instance_config.send_raw_packet(payload, payload_len);
if (ret < 0) {
lgerror(ret, "raw pack send");
goto accept_lc;
}
goto drop_lc;
if (cnt > 1 && poses[0] > poses[1]) {
size_t tmp = poses[0];
poses[0] = poses[1];
poses[1] = tmp;
}
ret = send_tcp_frags(section, payload, payload_len, poses, cnt, 0);
if (ret < 0) {
lgerror(ret, "tcp4 send frags");
goto accept_lc;
}
goto drop_lc;
}
break;
case FRAG_STRAT_IP:
if (ipxv == IP4VERSION) {
ipd_offset = ((char *)data - (char *)tcph) + target_sni_offset;
mid_offset = ipd_offset + vrd.target_sni_len / 2;
mid_offset += 8 - mid_offset % 8;
size_t poses[2];
int cnt = 0;
if (section->frag_sni_pos && dlen > section->frag_sni_pos) {
poses[cnt] = section->frag_sni_pos + ((char *)data - (char *)tcph);
poses[cnt] += 8 - poses[cnt] % 8;
cnt++;
}
if (section->frag_middle_sni) {
poses[cnt++] = mid_offset;
}
if (cnt > 1 && poses[0] > poses[1]) {
size_t tmp = poses[0];
poses[0] = poses[1];
poses[1] = tmp;
}
ret = send_ip4_frags(section, payload, payload_len, poses, cnt, 0);
if (ret < 0) {
lgerror(ret, "ip4 send frags");
goto accept_lc;
}
goto drop_lc;
} else {
lginfo("WARNING: IP fragmentation is supported only for IPv4");
goto default_send;
}
break;
}
default_send:
ret = instance_config.send_raw_packet(payload, payload_len);
if (ret < 0) {
lgerror(ret, "raw pack send");
goto accept_lc;
}
goto drop_lc;
accept_lc:
NETBUF_FREE(payload);
goto accept;
free(payload);
return PKT_ACCEPT;
drop_lc:
NETBUF_FREE(payload);
goto drop;
free(payload);
return PKT_DROP;
}
continue_flow:
return PKT_CONTINUE;
accept:
return PKT_ACCEPT;
drop:
return PKT_DROP;
}
int process_udp_packet(const struct section_config_t *section, const uint8_t *pkt, size_t pktlen) {
@@ -458,12 +451,8 @@ int process_udp_packet(const struct section_config_t *section, const uint8_t *pk
goto drop;
else if (section->udp_mode == UDP_MODE_FAKE) {
for (int i = 0; i < section->udp_fake_seq_len; i++) {
NETBUF_ALLOC(fake_udp, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(fake_udp)) {
lgerror(-ENOMEM, "Allocation error");
return -ENOMEM;
}
size_t fsn_len = MAX_PACKET_SIZE;
uint8_t *fake_udp;
size_t fake_udp_len;
struct udp_fake_type fake_type = {
.fake_len = section->udp_fake_len,
@@ -472,24 +461,25 @@ int process_udp_packet(const struct section_config_t *section, const uint8_t *pk
.faking_ttl = section->faking_ttl,
},
};
ret = gen_fake_udp(fake_type, iph, iph_len, udph, fake_udp, &fsn_len);
ret = gen_fake_udp(fake_type, iph, iph_len, udph, &fake_udp, &fake_udp_len);
if (ret < 0) {
lgerror(ret, "gen_fake_udp");
goto erret_lc;
goto erret;
}
lgtrace_addp("post fake udp #%d", i + 1);
ret = instance_config.send_raw_packet(fake_udp, fsn_len);
ret = instance_config.send_raw_packet(fake_udp, fake_udp_len);
if (ret < 0) {
lgerror(ret, "send fake udp");
goto erret_lc;
}
NETBUF_FREE(fake_udp);
free(fake_udp);
continue;
erret_lc:
NETBUF_FREE(fake_udp);
free(fake_udp);
erret:
goto accept;
}
@@ -524,33 +514,21 @@ int send_ip4_frags(const struct section_config_t *section, const uint8_t *packet
packet, pktlen);
}
} else {
NETBUF_ALLOC(frag1, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(frag1)) {
size_t f1len = pktlen;
uint8_t *frag1 = malloc(f1len);
if (frag1 == NULL) {
lgerror(-ENOMEM, "Allocation error");
return -ENOMEM;
}
NETBUF_ALLOC(frag2, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(frag2)) {
size_t f2len = pktlen;
uint8_t *frag2 = malloc(f2len);
if (frag2 == NULL) {
lgerror(-ENOMEM, "Allocation error");
NETBUF_FREE(frag1);
free(frag1);
return -ENOMEM;
}
/*
NETBUF_ALLOC(fake_pad, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(fake_pad)) {
lgerror(-ENOMEM, "Allocation error");
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
return -ENOMEM;
}
*/
size_t f1len = MAX_PACKET_SIZE;
size_t f2len = MAX_PACKET_SIZE;
// size_t fake_pad_len = MAX_PACKET_SIZE;
int ret;
if (dvs > poses[0]) {
@@ -570,8 +548,6 @@ int send_ip4_frags(const struct section_config_t *section, const uint8_t *packet
goto erret_lc;
}
dvs += frag_pos;
if (section->frag_sni_reverse)
goto send_frag2;
send_frag1:
@@ -583,54 +559,22 @@ send_frag1:
if (section->frag_sni_reverse)
goto out_lc;
send_fake:
/*
if (section->frag_sni_faked) {
ITER_FAKE_STRAT(section->faking_strategy, strategy) {
size_t iphfl;
fake_pad_len = f2len;
ret = ip4_payload_split(frag2, f2len, NULL, &iphfl, NULL, NULL);
if (ret < 0) {
lgerror("Invalid frag2", ret);
goto erret_lc;
}
memcpy(fake_pad, frag2, iphfl + sizeof(struct udphdr));
memset(fake_pad + iphfl + sizeof(struct udphdr), 0, f2len - iphfl - sizeof(struct udphdr));
((struct iphdr *)fake_pad)->tot_len = htons(fake_pad_len);
((struct iphdr *)fake_pad)->id = 1;
((struct iphdr *)fake_pad)->ttl = 8;
((struct iphdr *)fake_pad)->frag_off = 0;
ip4_set_checksum((struct iphdr*)fake_pad);
// *(struct udphdr *)(fake_pad + iphfl) = *(struct udphdr *)(frag2 + iphfl);
ret = send_ip4_frags(fake_pad, fake_pad_len, NULL, 0, 0);
if (ret < 0) {
goto erret_lc;
}
}
}
*/
if (section->frag_sni_reverse)
goto send_frag1;
send_frag2:
ret = send_ip4_frags(section, frag2, f2len, poses + 1, poses_sz - 1, dvs);
ret = send_ip4_frags(section, frag2, f2len, poses + 1, poses_sz - 1, poses[0]);
if (ret < 0) {
goto erret_lc;
}
if (section->frag_sni_reverse)
goto send_fake;
goto send_frag1;
out_lc:
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
// NETBUF_FREE(fake_pad);
free(frag1);
free(frag2);
goto out;
erret_lc:
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
// NETBUF_FREE(fake_pad);
free(frag1);
free(frag2);
return ret;
}
@@ -656,22 +600,21 @@ int send_tcp_frags(const struct section_config_t *section, const uint8_t *packet
packet, pktlen);
}
} else {
NETBUF_ALLOC(frag1, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(frag1)) {
size_t f1len = pktlen;
uint8_t *frag1 = malloc(f1len);
if (frag1 == NULL) {
lgerror(-ENOMEM, "Allocation error");
return -ENOMEM;
}
NETBUF_ALLOC(frag2, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(frag2)) {
size_t f2len = pktlen;
uint8_t *frag2 = malloc(f2len);
if (frag2 == NULL) {
lgerror(-ENOMEM, "Allocation error");
NETBUF_FREE(frag1);
free(frag1);
return -ENOMEM;
}
size_t f1len = MAX_PACKET_SIZE;
size_t f2len = MAX_PACKET_SIZE;
int ret;
if (dvs > poses[0]) {
@@ -680,7 +623,6 @@ int send_tcp_frags(const struct section_config_t *section, const uint8_t *packet
goto erret_lc;
}
ret = tcp_frag(packet, pktlen, poses[0] - dvs,
frag1, &f1len, frag2, &f2len);
@@ -697,16 +639,14 @@ int send_tcp_frags(const struct section_config_t *section, const uint8_t *packet
goto send_frag2;
send_frag1:
{
ret = send_tcp_frags(section, frag1, f1len, NULL, 0, 0);
if (ret < 0) {
goto erret_lc;
}
if (section->frag_sni_reverse)
goto out_lc;
ret = send_tcp_frags(section, frag1, f1len, NULL, 0, 0);
if (ret < 0) {
goto erret_lc;
}
if (section->frag_sni_reverse)
goto out_lc;
send_fake:
if (section->frag_sni_faked) {
size_t iphfl, tcphfl;
@@ -729,22 +669,20 @@ send_fake:
goto send_frag1;
send_frag2:
{
ret = send_tcp_frags(section, frag2, f2len, poses + 1, poses_sz - 1, poses[0]);
if (ret < 0) {
goto erret_lc;
}
if (section->frag_sni_reverse)
goto send_fake;
ret = send_tcp_frags(section, frag2, f2len, poses + 1, poses_sz - 1, poses[0]);
if (ret < 0) {
goto erret_lc;
}
if (section->frag_sni_reverse)
goto send_fake;
out_lc:
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
free(frag1);
free(frag2);
goto out;
erret_lc:
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
free(frag1);
free(frag2);
return ret;
}
out:
@@ -773,28 +711,24 @@ int post_fake_sni(struct fake_type f_type,
// one goes for default fake
for (int i = 0; i < fake_seq_type.sequence_len; i++) {
NETBUF_ALLOC(fake_sni, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(fake_sni)) {
lgerror(-ENOMEM, "Allocation error");
return -ENOMEM;
}
size_t fsn_len = MAX_PACKET_SIZE;
uint8_t *fake_sni;
size_t fake_sni_len;
ret = gen_fake_sni(
fake_seq_type,
fsiph, iph_len, fstcph, tcph_len,
fake_sni, &fsn_len);
&fake_sni, &fake_sni_len);
if (ret < 0) {
lgerror(ret, "gen_fake_sni");
goto erret_lc;
return ret;
}
lgtrace_addp("post fake sni #%d", i + 1);
if (f_type.seg2delay) {
ret = instance_config.send_delayed_packet(fake_sni, fsn_len, f_type.seg2delay);
ret = instance_config.send_delayed_packet(fake_sni, fake_sni_len, f_type.seg2delay);
} else {
ret = instance_config.send_raw_packet(fake_sni, fsn_len);
ret = instance_config.send_raw_packet(fake_sni, fake_sni_len);
}
if (ret < 0) {
lgerror(ret, "send fake sni");
@@ -804,7 +738,7 @@ int post_fake_sni(struct fake_type f_type,
size_t tcph_len;
size_t plen;
ret = tcp_payload_split(
fake_sni, fsn_len,
fake_sni, fake_sni_len,
&fsiph, &iph_len,
&fstcph, &tcph_len,
NULL, &plen);
@@ -830,10 +764,10 @@ int post_fake_sni(struct fake_type f_type,
fsiph = (void *)rfsiph;
fstcph = (void *)rfstcph;
NETBUF_FREE(fake_sni);
free(fake_sni);
continue;
erret_lc:
NETBUF_FREE(fake_sni);
free(fake_sni);
return ret;
}
}