65 Commits

Author SHA1 Message Date
Vadim Vetrov
2d1b58bc6d Delete workflow for kmod 2024-09-21 18:16:09 +03:00
Vadim Vetrov
cdb26833ba Merge pull request #98 from Waujito/kmod
Kernel module
2024-09-21 08:11:01 -07:00
Vadim Vetrov
c786a44dd5 Fix warnings 2024-09-20 23:06:59 +03:00
Vadim Vetrov
2fd3107401 Fix memcpy 2024-09-19 22:48:25 +03:00
Vadim Vetrov
5415bc37ec Compatibility for kernel version 3 2024-09-19 21:55:15 +03:00
Vadim Vetrov
3187b3ca61 Delete mutexes from rawsocket
The program works synchronously with the kernel
2024-09-19 16:47:18 +03:00
Vadim Vetrov
1cacac7adc Add notices about kernel module and openwrt sdk 2024-09-17 19:28:13 +03:00
Vadim Vetrov
6eaa0a67c8 Merge branch 'main' into kmod 2024-09-17 19:10:18 +03:00
Vadim Vetrov
edbfe120c5 Update README.md 2024-09-17 19:04:33 +03:00
Vadim Vetrov
7e71c5e9b8 Update README.md 2024-09-15 21:40:14 -07:00
Vadim Vetrov
1ade21aa22 Update README for kernel module 2024-09-16 00:28:10 +03:00
Vadim Vetrov
49a48c33cf Update kernel module arguments 2024-09-15 22:21:24 +03:00
Vadim Vetrov
5e28fe83c2 Allow allocation in user space with malloc 2024-09-14 20:37:19 +03:00
Vadim Vetrov
d93763ac44 Merge branch 'main' into kmod 2024-09-14 20:17:25 +03:00
Vadim Vetrov
85d3843273 Update README.md 2024-09-14 04:23:33 -07:00
Vadim Vetrov
f0826606e3 Delete development branches 2024-09-14 12:27:37 +03:00
Vadim Vetrov
c2158a7450 Merge pull request #113 from Waujito/luci_upd
Openwrt LuCI support Part 2
2024-09-14 02:26:31 -07:00
Vadim Vetrov
740df8979f Add sections about restart after settings apply 2024-09-14 12:24:20 +03:00
Vadim Vetrov
7e73fa2613 Add documentation for LuCI and UCI 2024-09-14 11:39:10 +03:00
Vadim Vetrov
8c405b81df Log ip version for udp 2024-09-14 11:18:45 +03:00
Vadim Vetrov
2ff83c6030 Update firewall rules 2024-09-14 11:08:38 +03:00
Vadim Vetrov
9f5f194a37 Support for luci in actions 2024-09-13 21:14:01 +03:00
Vadim Vetrov
e38e0e7bd9 Add md5sum faking strategy 2024-09-13 19:11:17 +03:00
Vadim Vetrov
aef2b5b469 Merge pull request #108 from spvkgn/musl-cross
CI: Add MIPS
2024-09-10 07:06:25 -07:00
spvkgn
bbd9f29a67 CI: add armv7 static build 2024-09-10 14:23:35 +05:00
spvkgn
ec9f5bb20c CI: add job for MIPS arches 2024-09-10 14:23:35 +05:00
Vadim Vetrov
7919f82f4b Delete kmod from workflow
Systems and kernels vary so much that we can't offer modules for
all of them. It is just easier to simply delete it.
2024-09-07 16:25:11 +03:00
Vadim Vetrov
ed6979cbcd Merge branch 'main' into kmod 2024-09-07 15:52:31 +03:00
Vadim Vetrov
b3668f07ba Add flags for kernel module 2024-09-07 15:51:25 +03:00
Vadim Vetrov
31aa309198 Fix #104 Infinite loop with default parameters
Fixes an infinite loop with default parameters for youtubeUnblock when
trying to connect to an unhandled website. The commit is also related to
issue #100, where support for the --exclude-domains flag was added
2024-09-07 09:24:35 +03:00
Vadim Vetrov
7d01d0974d Update README.md 2024-09-03 21:46:47 +03:00
Vadim Vetrov
5f2e423dfa Add --exclude-domains flag. Fix #100 2024-09-03 21:23:29 +03:00
Vadim Vetrov
f96ac2252b CI devices 2024-09-02 14:29:02 +03:00
Vadim Vetrov
1e6a9496f6 Fix compilation errors for some kernels 2024-09-02 13:41:25 +03:00
Vadim Vetrov
c0dc5d2652 CI for kernel module 2024-09-02 02:06:35 +03:00
Vadim Vetrov
8bb5368b85 Fix setsockopt (deleted it) 2024-09-02 01:48:23 +03:00
Vadim Vetrov
b249903ead CI for kmod 2024-09-02 01:29:09 +03:00
Vadim Vetrov
5870df44df Linearize instead of kmalloc 2024-09-02 00:11:58 +03:00
Vadim Vetrov
b20f15086e Kernel versions 2024-09-01 23:52:37 +03:00
Vadim Vetrov
5eeff9bc0d Use netfilter hook instead of iptables target
KISS principle is in action. No need to specify rules, just insmod
youtubeUnblock and it works!
2024-09-01 20:58:50 +03:00
Vadim Vetrov
5e327497bb Split raw socket logic from iptables kernel module, add udp over ipv6
support
2024-09-01 19:56:38 +03:00
Vadim Vetrov
731da0dd50 Delete redundant checksum setter 2024-09-01 16:53:11 +03:00
Vadim Vetrov
9c839a5094 IPv6 for kernel module 2024-09-01 16:52:41 +03:00
Vadim Vetrov
27629ba0cc Kernel module basic ipv4 with debug settings 2024-09-01 16:07:47 +03:00
Vadim Vetrov
3d50c00e4f Fix internet :)
Related to #96
2024-08-29 19:28:26 +03:00
Vadim Vetrov
0a679ea41c Update version grabber 2024-08-29 18:13:51 +03:00
Vadim Vetrov
cad262f201 Update docs for entware 2024-08-29 17:45:11 +03:00
Vadim Vetrov
8b23ab762d Fix issue with synfake and two youtubeUnblock instances one after another 2024-08-29 15:55:05 +03:00
Vadim Vetrov
3d9481d72d Allow to select synfake length 2024-08-29 15:49:01 +03:00
Vadim Vetrov
0f71d5f3c4 Add synfake option 2024-08-29 14:45:27 +03:00
Vadim Vetrov
33b0ca421b Update default value of frag-sni-pos
Related to #43 and probably other issues with some ISPs. Some providers
throw RST on a 2-byte TCP segment, but not on 1 byte
2024-08-29 12:21:34 +03:00
Vadim Vetrov
bc398cbd02 Merge branch 'main' of github.com:Waujito/youtubeUnblock 2024-08-29 09:10:51 +03:00
Vadim Vetrov
491d485260 Allow to change default mark
Related to #96
2024-08-29 09:09:57 +03:00
Vadim Vetrov
f273d9cc7a Update README.md 2024-08-28 15:54:37 +03:00
Vadim Vetrov
c101adcd07 entware for entware 2024-08-28 14:00:29 +03:00
Vadim Vetrov
725dc1a6d2 Allow tuning randseq offsets.
May be useful for #94
2024-08-27 23:23:54 +03:00
Vadim Vetrov
3b5276c834 Merge pull request #93 from Waujito/ipv6
Support for ipv6
2024-08-27 13:03:38 -07:00
Vadim Vetrov
d16805871f Trace logs update 2024-08-27 21:21:33 +03:00
Vadim Vetrov
5a30ac427b Add option to disable ipv6, document ipv6 2024-08-27 20:01:34 +03:00
Vadim Vetrov
a3a497bc82 Merge branch 'main' into ipv6 2024-08-27 19:42:20 +03:00
Vadim Vetrov
d530dd26d1 Support for ipv6 2024-08-27 19:27:27 +03:00
Vadim Vetrov
564820ce38 Related to #86 2024-08-26 21:21:42 +03:00
Denis Strizhkin
de9b42ae46 add options of choosing to use system libs 2024-08-21 18:40:23 +03:00
Vadim Vetrov
c10393983a Fix bug with pastseq and frag-sni-faked 2024-08-21 12:25:13 +03:00
Vadim Vetrov
e62d76e1d6 pastseq by default
Pastseq is way more stable than randseq, since some providers just drop packets with an invalid conntrack state.
2024-08-21 11:53:10 +03:00
23 changed files with 2138 additions and 689 deletions

.github/workflows/build-ci.yml

@@ -18,6 +18,11 @@ jobs:
version: ${{ steps.gh.outputs.version }}
sha: ${{ steps.gh.outputs.sha }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
ref: 'openwrt'
- name: GH
id: gh
env:
@@ -25,7 +30,7 @@ jobs:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
shell: bash
run: |
echo "version=$(gh api repos/$REPO/releases/latest --jq '.tag_name' | sed 's/v//')" >> $GITHUB_OUTPUT
echo "version=$(cat youtubeUnblock/Makefile | grep PKG_VERSION | sed 's/PKG_VERSION:=//')" >> $GITHUB_OUTPUT
if [[ "${{ github.event_name }}" != "pull_request" ]]; then
echo "sha=$(echo ${GITHUB_SHA::7})" >> $GITHUB_OUTPUT
else
@@ -39,7 +44,7 @@ jobs:
strategy:
matrix:
# arch: [x86_64, x86, aarch64, armhf, armv7, ppc64le, s390x]
arch: [x86_64, x86, aarch64, armhf]
arch: [x86_64, x86, aarch64, armhf, armv7]
branch: [latest-stable]
steps:
- name: Checkout
@@ -72,7 +77,7 @@ jobs:
shell: alpine.sh {0}
run: |
case $ARCH in
x86_64) PLATFORM=x86_64 ;;
x86_64) PLATFORM=x86-64 ;;
x86) PLATFORM=x86 ;;
aarch64) PLATFORM=arm64 ;;
armhf) PLATFORM=arm ;;
@@ -80,14 +85,70 @@ jobs:
esac
make -j$(nproc) CC="ccache gcc -static-libgcc -static" || exit 1
strip -s build/youtubeUnblock
rm -rf youtubeUnblock || true
mkdir youtubeUnblock
cp build/youtubeUnblock youtubeUnblock
cp youtubeUnblock.service youtubeUnblock
cp README.md youtubeUnblock
tar -czvf youtubeUnblock-$VERSION-$SHA-$PLATFORM-static.tar.gz youtubeUnblock
cp -va build/youtubeUnblock .
tar -czvf youtubeUnblock-$VERSION-$SHA-$PLATFORM-static.tar.gz youtubeUnblock youtubeUnblock.service README.md
ccache --show-stats
- name: Upload artifacts
if: steps.build.outcome == 'success'
uses: actions/upload-artifact@v4
with:
name: youtubeUnblock-static-${{ matrix.arch }}
path: ./**/youtubeUnblock*.tar.gz
build-static-cross:
needs: prepare
name: build ${{ matrix.arch }}
runs-on: ubuntu-latest
strategy:
matrix:
include:
- arch: mips64el
tool: mips64el-unknown-linux-musl
- arch: mips64
tool: mips64-unknown-linux-musl
- arch: mipsel
tool: mipsel-unknown-linux-musl
- arch: mipselsf
tool: mipsel-unknown-linux-muslsf
- arch: mips
tool: mips-unknown-linux-musl
- arch: mipssf
tool: mips-unknown-linux-muslsf
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up build tools
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
REPO: 'musl-cross/musl-cross'
TOOL: ${{ matrix.tool }}
run: |
mkdir -p $HOME/tools
gh api repos/$REPO/releases/latest --jq '.tag_name' |\
xargs -I{} wget -qO- https://github.com/$REPO/releases/download/{}/$TOOL.tgz | tar -C $HOME/tools -xz || exit 1
[ -d "$HOME/tools/$TOOL/bin" ] && echo "$HOME/tools/$TOOL/bin" >> $GITHUB_PATH
- name: Build
id: build
env:
ARCH: ${{ matrix.arch }}
TOOL: ${{ matrix.tool }}
VERSION: ${{ needs.prepare.outputs.version }}
SHA: ${{ needs.prepare.outputs.sha }}
run: |
make -j$(nproc) \
CC="$TOOL-gcc -static-libgcc -static" \
LD=$TOOL-ld \
AR=$TOOL-ar \
NM=$TOOL-nm \
STRIP=$TOOL-strip \
CROSS_COMPILE_PLATFORM=$TOOL || exit 1
$TOOL-strip -s build/youtubeUnblock
cp -va build/youtubeUnblock .
tar -czvf youtubeUnblock-$VERSION-$SHA-$ARCH-static.tar.gz youtubeUnblock youtubeUnblock.service README.md
- name: Upload artifacts
if: steps.build.outcome == 'success'
uses: actions/upload-artifact@v4
@@ -164,7 +225,7 @@ jobs:
if: steps.build.outcome == 'success'
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.branch }}-${{ matrix.arch }}
name: youtubeUnblock-${{ matrix.branch }}-${{ matrix.arch }}
path: /builder/youtubeUnblock*.ipk
if-no-files-found: error
@@ -220,7 +281,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
with:
ref: 'openwrt'
ref: 'entware'
- name: Prepare build
env:
@@ -256,13 +317,13 @@ jobs:
if: steps.build.outcome == 'success'
uses: actions/upload-artifact@v4
with:
name: entware-${{ matrix.arch }}
name: youtubeUnblock-entware-${{ matrix.arch }}
path: ./**/youtubeUnblock*-entware.tar.gz
if-no-files-found: error
pre-release:
if: github.event_name != 'pull_request' && github.ref_name == 'main'
needs: [build-static, build-openwrt, build-entware]
needs: [build-static, build-static-cross, build-openwrt, build-entware]
permissions:
contents: write
runs-on: ubuntu-latest
@@ -276,7 +337,9 @@ jobs:
repo_token: ${{ secrets.GITHUB_TOKEN }}
automatic_release_tag: 'continuous'
prerelease: true
draft: true
title: 'Development build'
files: |
./**/youtubeUnblock*.ipk
./**/kmod-youtubeUnblock*.ipk
./**/youtubeUnblock*.tar.gz

Kbuild (6 changed lines)

@@ -1,3 +1,3 @@
obj-m := ipt_YTUNBLOCK.o
ipt_YTUNBLOCK-objs := iptk_YTUNBLOCK.o mangle.o
ccflags-y := -std=gnu11 -Wno-unused-variable -DKERNEL_SPACE -DDEBUG
obj-m := kyoutubeUnblock.o
kyoutubeUnblock-objs := kytunblock.o mangle.o quic.o utils.o kmod_utils.o kargs.o
ccflags-y := -std=gnu99 -DKERNEL_SPACE -Wno-error -Wno-declaration-after-statement

README.md (183 changed lines)

@@ -6,30 +6,49 @@
- [Firewall configuration](#firewall-configuration)
- [nftables rules](#nftables-rules)
- [Iptables rules](#iptables-rules)
- [IPv6](#ipv6)
- [Check it](#check-it)
- [Flags](#flags)
- [Troubleshooting](#troubleshooting)
- [TV](#tv)
- [Troubleshooting EPERMS (Operation not permitted)](#troubleshooting-eperms-operation-not-permitted)
- [How it works:](#how-it-works)
- [How it processes packets](#how-it-processes-packets)
- [Compilation](#compilation)
- [OpenWRT case](#openwrt-case)
- [Building OpenWRT .ipk package](#building-openwrt-ipk-package)
- [Building with toolchain](#building-with-toolchain)
- [Kernel module](#kernel-module)
- [Building kernel module](#building-kernel-module)
- [Building on host system](#building-on-host-system)
- [Building on any kernel](#building-on-any-kernel)
- [Building with openwrt SDK](#building-with-openwrt-sdk)
# youtubeUnblock
Bypasses Deep Packet Inspection (DPI) systems that rely on SNI. The package is for Linux only. It is also fully compatible with routers running [OpenWRT](https://github.com/openwrt).
The program was primarily developed to bypass the YouTube outage in Russia, but it works well with other websites blocked by SNI. Adjust the list of websites via the `--sni-domains` flag.
The program was primarily developed to bypass YouTube Outage in Russia.
```
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
```
The program is distributed in two versions:
- A userspace application that works on top of the nfnetlink queue, which requires nfnetlink modules in the kernel and firewall rules. This approach is the default and should normally be used, but it has some limitations on embedded devices, which may have no nfnetlink support. This solution may also reduce internet speed and increase CPU load on your device because of jumps between userspace and kernelspace for each packet (this behavior may be mitigated with connbytes, but that also requires the conntrack kernel module).
- A kernel module which integrates deeply into the netfilter stack and does not interact with the userspace firewall. The module requires only netfilter kernel support, which is definitely present on every device connected to the Internet. The only difficulty is how to build it: I cannot provide modules within Github Actions for every single kernel, even if we talk only about OpenWRT versions. If you want to learn more about the module, jump to [its section in the README](#kernel-module).
The program is compatible with routers based on OpenWRT, Entware (Keenetic/ASUS) and with host machines. Binaries are offered via Github Actions. The binaries of the main branch are published in the [development pre-release](https://github.com/Waujito/youtubeUnblock/releases/tag/continuous). Check out [Github Actions](https://github.com/Waujito/youtubeUnblock/actions/workflows/build-ci.yml) if you want to see every binary ever compiled. You should know the architecture of your hardware to use the binaries. On OpenWRT you can check it with the command `grep ARCH /etc/openwrt_release`.
On both OpenWRT and Entware, install the program with opkg. If you get a read-only filesystem error, you may unpack the binary manually or specify an opkg destination path with `opkg -o <destdir>`.
For Windows use [GoodbyeDPI from ValdikSS](https://github.com/ValdikSS/GoodbyeDPI) (you can find how to use it for YouTube [here](https://github.com/ValdikSS/GoodbyeDPI/issues/378)) The same behavior is also implemented in [zapret package for linux](https://github.com/bol-van/zapret).
For Windows use [GoodbyeDPI by ValdikSS](https://github.com/ValdikSS/GoodbyeDPI) (you can find how to use it for YouTube [here](https://github.com/ValdikSS/GoodbyeDPI/issues/378)). The same behavior is also implemented in the [zapret package for Linux](https://github.com/bol-van/zapret).
## Configuration
@@ -64,22 +83,29 @@ Next step is to add required firewall rules.
For nftables on OpenWRT the rules come out-of-the-box and are stored under `/usr/share/nftables.d/ruleset-post/537-youtubeUnblock.nft`. All you need is to install the requirements and do `/etc/init.d/firewall reload`. If not, go to [Firewall configuration](#firewall-configuration).
Now we are ready to daemonize the application.
Now we move on to configuration. For OpenWRT, configuration is available via [UCI](https://openwrt.org/docs/guide-user/base-system/uci) and [LuCI](https://openwrt.org/docs/guide-user/luci/start) (CLI and GUI respectively).
If you installed the package from Github Actions or built it yourself with the OpenWRT SDK, rc scripts are preinstalled. All you need to do is `/etc/init.d/youtubeUnblock start`.
Otherwise, copy `owrt/youtubeUnblock.owrt` to `/etc/init.d/youtubeUnblock` and put the program's binary into /usr/bin/ (don't forget to `chmod +x` both). Then run `/etc/init.d/youtubeUnblock start`, for example as sketched below.
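A minimal manual-install sketch (assuming you are already on the router and the binary was built into `build/youtubeUnblock`):
```sh
# install the binary and the init script, then start the service
cp build/youtubeUnblock /usr/bin/youtubeUnblock
cp owrt/youtubeUnblock.owrt /etc/init.d/youtubeUnblock
chmod +x /usr/bin/youtubeUnblock /etc/init.d/youtubeUnblock
/etc/init.d/youtubeUnblock start
```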
LuCI is a configuration interface for your router (the one you reach when you enter 192.168.1.1 in the browser). The LuCI configuration lives in the **Services->youtubeUnblock** section. It is self-descriptive, with a description for each flag. Note that after you push the `Save & Apply` button, the configuration is applied automatically and the service is restarted.
You can also run `/etc/init.d/youtubeUnblock enable` to make OpenWRT autostart it on boot, but I don't recommend this: if the package has bugs, you may lose access to the router (you should still be able to recover it with the settings-reset tricks documented for your router).
UCI configuration is available in the /etc/config/youtubeUnblock file, in the section `youtubeUnblock.youtubeUnblock`. The configuration is done with [flags](#flags). Note that the flag names are not the same: replace `-` with `_` and drop the leading `--`. Toggle flags (the ones without parameters) are enabled with `1`.
For example, to enable trace logs you should do
```sh
uci set youtubeUnblock.youtubeUnblock.trace=1
```
You can check the logs from the CLI with the `logread -l 200 | grep youtubeUnblock` command.
For UCI, to save the configs you should do `uci commit` and then `reload_config` to restart youtubeUnblock, as in the sketch below.
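A complete UCI round-trip might look like this (a sketch; the `trace` option follows the flag-to-UCI naming rule above):
```sh
uci set youtubeUnblock.youtubeUnblock.trace=1  # enable trace logging
uci commit                                     # persist the change
reload_config                                  # restart youtubeUnblock with the new config
logread -l 200 | grep youtubeUnblock           # verify in the logs
```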
From the CLI you use youtubeUnblock as a normal init.d service:
for example, you can enable it with `/etc/init.d/youtubeUnblock enable`.
### Entware
For Entware on Keenetic here is an [installation guide (russian)](https://help.keenetic.com/hc/ru/articles/360021214160-%D0%A3%D1%81%D1%82%D0%B0%D0%BD%D0%BE%D0%B2%D0%BA%D0%B0-%D1%81%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D1%8B-%D0%BF%D0%B0%D0%BA%D0%B5%D1%82%D0%BE%D0%B2-%D1%80%D0%B5%D0%BF%D0%BE%D0%B7%D0%B8%D1%82%D0%BE%D1%80%D0%B8%D1%8F-Entware-%D0%BD%D0%B0-USB-%D0%BD%D0%B0%D0%BA%D0%BE%D0%BF%D0%B8%D1%82%D0%B5%D0%BB%D1%8C). Note that if your Entware router is missing netfilter queue kernel modules, here is no way to deal with it since Entware does not offer kernel modules. You should probably try to install OpenWRT if the problem persist. You can check required modules with command `find /lib/modules/$(uname -r) -type f -name 'nfnetlink_queue.ko*'`. If that command return not null string, everything alright. All you need is to load the modules.
For Entware on Keenetic there is an [installation guide (russian)](https://help.keenetic.com/hc/ru/articles/360021214160-%D0%A3%D1%81%D1%82%D0%B0%D0%BD%D0%BE%D0%B2%D0%BA%D0%B0-%D1%81%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D1%8B-%D0%BF%D0%B0%D0%BA%D0%B5%D1%82%D0%BE%D0%B2-%D1%80%D0%B5%D0%BF%D0%BE%D0%B7%D0%B8%D1%82%D0%BE%D1%80%D0%B8%D1%8F-Entware-%D0%BD%D0%B0-USB-%D0%BD%D0%B0%D0%BA%D0%BE%D0%BF%D0%B8%D1%82%D0%B5%D0%BB%D1%8C). Note that if your Entware router is missing the netfilter queue kernel modules, there is no way to deal with it, since Entware does not offer kernel modules.
To check whether the modules are loaded, do `lsmod | grep nfnetlink_queue`. If the command returns nothing, you should load them manually:
```sh
insmod /lib/modules/3.3.8/kernel/net/netfilter/nfnetlink_queue.ko
insmod /lib/modules/3.3.8/kernel/net/netfilter/xt_NFQUEUE.ko
```
Install the binary with `opkg install youtubeUnblock-*.ipk`. After installation, the binary will be available in /opt/bin and the init script in /opt/etc/init.d/S51youtubeUnblock. To run youtubeUnblock, simply run `/opt/etc/init.d/S51youtubeUnblock start`.
### PC configuration
On a local host, make sure to change the **FORWARD** chain to **OUTPUT** in the following firewall rulesets.
@@ -92,18 +118,33 @@ Copy `youtubeUnblock.service` to `/usr/lib/systemd/system` (you should change th
For nftables you should add the following rules:
```sh
nft add rule inet fw4 mangle_forward tcp dport 443 ct original "packets < 20" counter queue num 537 bypass
nft insert rule inet fw4 output mark and 0x8000 == 0x8000 counter accept
nft add chain inet fw4 youtubeUnblock '{ type filter hook postrouting priority mangle - 1; policy accept; }'
nft add rule inet fw4 youtubeUnblock 'meta l4proto { tcp, udp } th dport 443 ct original packets < 20 counter queue num 537 bypass'
nft insert rule inet fw4 output 'mark and 0x8000 == 0x8000 counter accept'
```
#### Iptables rules
For iptables you should add the following rules:
```sh
iptables -t mangle -A FORWARD -p tcp --dport 443 -m connbytes --connbytes-dir original --connbytes-mode packets --connbytes 0:19 -j NFQUEUE --queue-num 537 --queue-bypass
iptables -t mangle -N YOUTUBEUNBLOCK
iptables -t mangle -A YOUTUBEUNBLOCK -p tcp --dport 443 -m connbytes --connbytes-dir original --connbytes-mode packets --connbytes 0:19 -j NFQUEUE --queue-num 537 --queue-bypass
iptables -t mangle -A YOUTUBEUNBLOCK -p udp --dport 443 -m connbytes --connbytes-dir original --connbytes-mode packets --connbytes 0:19 -j NFQUEUE --queue-num 537 --queue-bypass
iptables -t mangle -A POSTROUTING -j YOUTUBEUNBLOCK
iptables -I OUTPUT -m mark --mark 32768/32768 -j ACCEPT
```
#### IPv6
For IPv6 on iptables you need to duplicate rules above for ip6tables:
```sh
ip6tables -t mangle -N YOUTUBEUNBLOCK
ip6tables -t mangle -A YOUTUBEUNBLOCK -p tcp --dport 443 -m connbytes --connbytes-dir original --connbytes-mode packets --connbytes 0:19 -j NFQUEUE --queue-num 537 --queue-bypass
ip6tables -t mangle -A YOUTUBEUNBLOCK -p udp --dport 443 -m connbytes --connbytes-dir original --connbytes-mode packets --connbytes 0:19 -j NFQUEUE --queue-num 537 --queue-bypass
ip6tables -t mangle -A POSTROUTING -j YOUTUBEUNBLOCK
ip6tables -I OUTPUT -m mark --mark 32768/32768 -j ACCEPT
```
Note that the above rules use *conntrack* to route only the first 20 packets of each connection to **youtubeUnblock**.
If you have trouble with it, for example **youtubeUnblock** doesn't detect YouTube, try removing *connbytes* from the rules. But this is unlikely, and you should probably check your ruleset first.
@@ -126,9 +167,13 @@ curl -o/dev/null -k --connect-to ::google.com -k -L -H Host:\ mirror.gcr.io http
## Flags
Pass flags to the **BINARY**, not the init script. If you are on OpenWRT you should put the flags inside the script: open `/etc/init.d/youtubeUnblock` with any text editor, like vi or nano, and put your flags after the `procd_set_param command /usr/bin/youtubeUnblock` line (see the sketch below).
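For example, the relevant line of the init script might end up looking like this (a sketch; the specific flags are only illustrative):
```sh
# inside /etc/init.d/youtubeUnblock, in start_service()
procd_set_param command /usr/bin/youtubeUnblock --faking-strategy=pastseq --frag-sni-faked=1
```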
Available flags:
- `--sni-domains=<comma separated domain list>|all` List of domains you want to be handled by SNI. Use this string if you want to change default domain list. Defaults is `googlevideo.com,ggpht.com,ytimg.com,youtube.com,play.google.com,youtu.be,googleapis.com,googleusercontent.com,gstatic.com,l.google.com`. You can pass **all** if you want for every *ClientHello* to be handled.
- `--sni-domains=<comma separated domain list>|all` List of domains you want to be handled by SNI. Use this flag if you want to change the default domain list. Defaults to `googlevideo.com,ggpht.com,ytimg.com,youtube.com,play.google.com,youtu.be,googleapis.com,googleusercontent.com,gstatic.com,l.google.com`. You can pass **all** if you want every *ClientHello* to be handled. You can exclude some domains with the `--exclude-domains` flag.
- `--exclude-domains=<comma separated domain list>` List of domains to be excluded from targeting.
- `--queue-num=<number of netfilter queue>` The number of netfilter queue **youtubeUnblock** will be linked to. Defaults to **537**.
@@ -136,14 +181,17 @@ Available flags:
- `--fake-sni-seq-len=<length>` This flag tells **youtubeUnblock** to build a complicated construction of fake ClientHello packets. length determines how many fakes will be sent. Defaults to **1**.
- `--faking-strategy={randseq|ttl|tcp_check|pastseq}` This flag determines the strategy of fake packets invalidation. Defaults to `randseq`
- `--faking-strategy={randseq|ttl|tcp_check|pastseq|md5sum}` This flag determines the strategy of fake packets invalidation. Defaults to `randseq`
- `randseq` specifies that a random sequence/acknowledgement number will be set. This option may be handled by providers that use *conntrack* with a firewall rule that drops packets in an invalid *conntrack* state.
- `ttl` specifies that the packet will be invalidated after `--faking-ttl=n` hops. `ttl` is better but may cause issues if not configured properly.
- `pastseq` is like `randseq`, but the sequence number is not random; it references a packet sent in the past (before the current one).
- `tcp_check` will invalidate the fake packet with an invalid checksum. May be detected and dropped by some providers/TSPUs.
- `md5sum` will invalidate the fake packet with an invalid TCP md5sum. md5sum is a TCP option which is handled by the destination server but may be skipped by the TSPU.
- `--faking-ttl=<ttl>` Tunes the time to live (TTL) of fake SNI messages. The TTL should be chosen so that the packet goes through the DPI system and is captured by it, but does not reach the destination server. Defaults to **8**.
- `--fake-seq-offset` Tunes the offset from the original sequence number for fake packets. Used by the randseq faking strategy. Defaults to 10000. If 0, a random sequence number will be set.
- `--frag={tcp,ip,none}` Specifies the fragmentation strategy for the packet. tcp is used by default. IP fragmentation may be blocked by the DPI system. none means no fragmentation; on its own this probably won't work, but it may work with some fake SNI strategies.
- `--frag-sni-reverse={0|1}` Specifies **youtubeUnblock** to send *ClientHello* fragments in the reverse order. Defaults to **1**.
@@ -152,12 +200,16 @@ Available flags:
- `--frag-middle-sni={0|1}` With this option **youtubeUnblock** will split the packet in the middle of the SNI data. Defaults to 1.
- `--frag-sni-pos=<pos>` With this option **youtubeUnblock** will split the packet at the position pos. Defaults to 2.
- `--frag-sni-pos=<pos>` With this option **youtubeUnblock** will split the packet at the position pos. Defaults to 1.
- `--quic-drop` Drop all QUIC packets that go to youtubeUnblock. Won't affect any other UDP packets. Suitable for some TVs. Note that for this option to work you should also proxy UDP to youtubeUnblock in the firewall. `connbytes` may also be used with UDP.
- `--fk-winsize=<winsize>` Specifies the window size for the fragmented TCP packet. Applicable if you want the response to be fragmented as well. May slow down connection initialization.
- `--synfake={1|0}` If 1, a SYN payload will be sent before each request. The idea is taken from syndata in the zapret project. The SYN payload will normally be discarded by the endpoint but may be processed by the TSPU. This option sends a normal fake in that payload. Please note that this option works for all sites, so --sni-domains won't change anything.
- `--synfake-len=<len>` The fake packet sent in synfake may be too large. If you experience issues, lower synfake-len, where len stands for how many bytes should be sent as syndata. Pass 0 if you want to send the entire fake packet. Defaults to 0.
- `--sni-detection={parse|brute}` Specifies how to detect the SNI. parse will normally detect it by parsing the ClientHello message. brute will go through the entire message and check for possible SNI occurrences. Please note that when the `--sni-domains` option is not **all**, brute will be O(nm) in time complexity, where n stands for the length of the message and m is the number of domains. Defaults to parse.
- `--seg2delay=<delay>` This flag forces **youtubeUnblock** to wait a little bit before sending the second part of the split packet.
@@ -168,8 +220,12 @@ Available flags:
- `--no-gso` Disables support for Google Chrome fat packets, which use GSO. This feature is well tested now, so this flag probably won't fix anything.
- `--no-ipv6` Disables support for IPv6. May be useful if you don't want an IPv6 socket to be opened.
- `--threads=<threads number>` Specifies the number of threads the program should run with. This defaults to **1** and shouldn't be changed for normal use. If you have performance issues, consult the [performance chapter](https://github.com/Waujito/youtubeUnblock?tab=readme-ov-file#performance).
- `--packet-mark=<mark>` Use this option if youtubeUnblock conflicts with other systems that rely on the packet mark. Note that you may want to change the firewall accept rule to follow the new mark (see the example after this list).
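For example, if you move youtubeUnblock to a different mark, the accept rule from the [firewall configuration](#firewall-configuration) has to follow it (a sketch; the mark value 16384 = 0x4000 is only illustrative):
```sh
# run youtubeUnblock with a custom mark and accept marked packets before they hit the queue rules again
youtubeUnblock --packet-mark=16384 &
iptables -I OUTPUT -m mark --mark 16384/16384 -j ACCEPT
# nftables equivalent:
# nft insert rule inet fw4 output mark and 0x4000 == 0x4000 counter accept
```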
## Troubleshooting
If you have trouble with some sites and you are sure they are blocked by SNI (YouTube, for example), you may play around with the [flags](#flags) and their combinations. At first it is recommended to try the `--faking-strategy` flag and `--frag-sni-faked=1`.
@@ -179,13 +235,13 @@ If you are on Chromium you may have to disable *kyber* (the feature that makes t
If your browser is using QUIC, it may not work properly. Disable it in Chrome in `chrome://flags` and in Firefox via the `network.http.http{2,3}.enable(d)` options in `about:config`.
It seems that some TSPUs have started to block wrongseq packets, so you should play around with the faking strategies. I personally recommend starting with the `md5sum` faking strategy.
### TV
Televisions are the biggest headache.
In [this issue](https://github.com/Waujito/youtubeUnblock/issues/59) the problem has been resolved.
If you have troubles with televisions try `--faking-strategy=ttl` flag and play around with `--faking-ttl=n`. See [#flags](#flags) for more details. Also you might be have to disable QUIC. To do it you may use `--quic-drop` [flag](#flags) with proper firewall configuration (check description of the flag). Note, that this flag won't disable gQUIC and some TVs may relay on it. To disable gQUIC you will need to block the entire 443 port for udp in firewall configuration:
In [this issue](https://github.com/Waujito/youtubeUnblock/issues/59) the problem has been resolved, and youtubeUnblock should now work with the default flags. If not, play around with the faking strategies and other flags. You might also have to disable QUIC. To do that, use the `--quic-drop` [flag](#flags) with the proper firewall configuration (check the description of the flag). Note that this flag won't disable gQUIC, and some TVs may rely on it. To disable gQUIC you will need to block UDP port 443 entirely in the firewall configuration:
For **nftables** do
```
@@ -210,28 +266,6 @@ Where you have to replace 192.168.. with ip of your television.
* send fake sni EPERM: The fake SNI is an out-of-state thing and will likely corrupt the connection (this behavior is expected). conntrack considers it an invalid packet, and by default OpenWRT is set up to drop outgoing packets like this one. You may delete the nftables/iptables rule that drops packets with an invalid conntrack state, but I don't recommend doing this; step 3 is a better solution.
* Step 3, the ultimate solution. Use mark (not to be confused with connmark). youtubeUnblock uses mark internally to avoid infinite packet loops (when a packet sent by youtubeUnblock is handled by itself on the next pass). Currently it uses mark (1 << 15) = 32768. You should put an iptables/nftables rule that unconditionally accepts such marks at the very start of the filter OUTPUT chain: `iptables -I OUTPUT -m mark --mark 32768/32768 -j ACCEPT` or `nft insert rule inet fw4 output mark and 0x8000 == 0x8000 counter accept`.
## How it works:
Let's look from the DPI system's point of view: all it has is IP and TCP information; higher-level data is encrypted. So from the IP header only the IP address might help it limit user traffic, and in TCP there is basically nothing. So DPI systems may handle IP addresses and process those.
What's wrong with that? Google servers get in the way: it is very hard to handle all that infrastructure. One server may host multiple websites, and it is very bad if they block, for example, Google Search while trying to block YouTube (googlevideo). But even if YouTube servers had their own IPs used only for googlevideo, there is still the problem of how large Google's infrastructure is and how many servers it has. The DPI systems can't even enumerate all the servers, because each video may live on its own cache server [GGC](https://support.google.com/interconnect/answer/9058809?hl=en).
So what else is there? Let's take a look at the TLS level. All information here is encrypted. All... except *ClientHello* messages! They are used to initiate the handshake and hold tons of helpful information. TLS version 1.3 is optimized to transfer as little information as possible unencrypted, but there is still one thing that points to which domain the user wants to connect to: the SNI extension. It transfers the domain name unencrypted, in plain text. Exactly what we need! And DPI systems may use this text to detect YouTube connections and slow them down or reject them (in fact they corrupt the TCP connection with bad packets).
So we aim to somehow hide the SNI from them. How?
- We can alter the SNI name in the TLS packet to something else. But what's wrong with this? The server also uses the SNI name for certificates (CN=), and if we change it, the server will return an invalid certificate that the browser can't normally process, which would look like a MITM problem.
- We can encrypt it. There is a lot of research on encrypted SNI, but the server has to support this technique, and ISPs may block [encrypted SNI](https://en.wikipedia.org/wiki/Server_Name_Indication).
- So what else can we do with the SNI info? If we can't hide it, let's rely on the DPI systems' weak spots. DPI is an extremely highly loaded infrastructure that analyzes every single packet sent to the Internet, so any performance-heavy feature has to be avoided. One of these features is IP packet fragmentation. We can split the packet in the middle of the SNI message and send it. For DPI, fragmentation involves too much overhead: it would have to store a very big mapping table keyed by IP id, source IP and destination IP, and since some packets may be lost, it would also have to auto-clean that table. Just imagine how much memory and CPU time this would cost the DPI. But fragments are fine for clients and hosts. And that's the base idea behind this package. I have to mention that the idea isn't mine; I got here after some research in this area. There already was a solution for Windows, GoodbyeDPI; I just made an alternative for Linux.
You may read further in an [yt-dlp issue page](https://github.com/yt-dlp/yt-dlp/issues/10443) and in [ntc party forum](https://ntc.party/t/%D0%BE%D0%B1%D1%81%D1%83%D0%B6%D0%B4%D0%B5%D0%BD%D0%B8%D0%B5-%D0%B7%D0%B0%D0%BC%D0%B5%D0%B4%D0%BB%D0%B5%D0%BD%D0%B8%D0%B5-youtube-%D0%B2-%D1%80%D0%BE%D1%81%D1%81%D0%B8%D0%B8/8074).
## How it processes packets
When a packet joins the queue, the application checks whether the SNI payload belongs to YouTube (googlevideo) (just as the DPI systems do), segments/fragments it (both TCP and IP fragmentation techniques are supported) and sends the packets. Note that it is impossible to send two fragmented packets from one netfilter queue verdict. Instead, the application drops the original packet and uses a separate Linux raw socket to send the packets to the network. To escape infinite loops, the socket marks outgoing packets and the application automatically accepts marked ones.
## Compilation
Before compilation make sure `gcc`, `make`, `autoconf`, `automake`, `pkg-config` and `libtool` are installed. For Fedora `glibc-static` should be installed as well.
@@ -276,5 +310,64 @@ Take a look at `CROSS_COMPILE_PLATFORM` It is required by autotools but I think
When compilation is done, the binary file will be in the build directory. Copy it to your router. Note that ssh access is likely to be required to proceed. *sshfs* doesn't work on my model, so I injected the application onto the router via the *Software Upload Package* page. It gave me an error, but also produced a `/tmp/upload.ipk` file, which I copied to the root directory, `chmod +x`-ed and ran.
## Kernel module
This section describes the kernel module version of youtubeUnblock. The kernel module operates as a normal module inside the kernel and integrates within the netfilter stack to statelessly mangle the packets sent over the Internet.
You can configure the module with its flags in insmod:
```
insmod kyoutubeUnblock.ko fake_sni=1 exclude_domains=.ru quic_drop=1
```
Note that the flag names are different from the ones used for the regular youtubeUnblock (just like in the UCI configuration for OpenWRT): replace `-` with `_` and drop the leading `--`. Also, to configure togglers you should set them to `1` (`silent=1 quic_drop=1`).
All parameters except the packet mark can also be changed on the fly, without reloading the module. You can set and check the parameters within the module's directory inside sysfs: `/sys/module/kyoutubeUnblock/parameters/`. For example, to set quic_drop to true you may use the following command:
```sh
echo 1 | sudo tee /sys/module/kyoutubeUnblock/parameters/quic_drop
```
and
```sh
cat /sys/module/kyoutubeUnblock/parameters/quic_drop
```
to check the parameter.
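To see all runtime parameters at once, you can dump the whole sysfs directory (a small convenience sketch; run as root if the files are not world-readable):
```sh
grep . /sys/module/kyoutubeUnblock/parameters/*
```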
### Building kernel module
#### Building on host system
To build the kernel module on your host system you should install `linux-headers`, which provides the kernel build essentials, and the `gcc` compiler suite. On the host system you may build the module with
```sh
make kmake
```
#### Building on any kernel
To build the module for an external kernel you should build that kernel locally and point make to it. Use the `KERNEL_BUILDER_MAKEDIR=~/linux` flag for make, for example:
```
make kmake KERNEL_BUILDER_MAKEDIR=~/linux
```
Note that the kernel should already be configured and built. See Linux kernel building manuals for more information about your specific case.
#### Building with openwrt SDK
Building with the OpenWRT SDK is not that hard. The only thing you need to do is obtain the SDK. Look it up by your architecture and the version of OpenWRT you currently use. You should use exactly your version of OpenWRT, since kernels there change often. You can obtain the SDK in two ways: by downloading it from their site or by using the OpenWRT SDK docker container (recommended).
If you decide to download the tar archive, follow these steps:
For me the archive lives at https://downloads.openwrt.org/releases/23.05.3/targets/ramips/mt76x8/ and is called `openwrt-sdk-23.05.3-ramips-mt76x8_gcc-12.3.0_musl.Linux-x86_64`. You will need to [install the SDK requirements on your system](https://openwrt.org/docs/guide-developer/toolchain/install-buildsystem). If you have any problems, use the docker ubuntu:24.04 image. Make sure to work as a non-root user, since parts of the build system fail under root. Next, untar the SDK and cd into it.
Or you can obtain the docker image with the SDK built in: [https://hub.docker.com/u/openwrt/sdk](https://hub.docker.com/u/openwrt/sdk). In my case the image has the tag `ramips-mt76x8-23.05.3`. A good thing here is that you don't need to install any dependencies inside the docker container. Also, docker hub has a decent tag search if you aren't sure which one corresponds to your device.
Once you have unpacked/installed the SDK, you are ready to start building the kernel module.
Do
```sh
echo "src-git youtubeUnblock https://github.com/Waujito/youtubeUnblock.git;openwrt" >> feeds.conf
./scripts/feeds update youtubeUnblock
./scripts/feeds install -a -p youtubeUnblock
make defconfig
make package/kyoutubeUnblock/compile V=s
```
When the commands finish, the module is ready. Find it with `find bin -name "kmod-youtubeUnblock*.ipk"`, copy it to your host and install it on the router via the GUI software interface. The module should start immediately. If not, do `modprobe kyoutubeUnblock`.
>If you have any questions/suggestions/problems feel free to open an [issue](https://github.com/Waujito/youtubeUnblock/issues).

args.c (89 changed lines)

@@ -19,7 +19,12 @@ struct config_t config = {
.fake_sni = 1,
.fake_sni_seq_len = 1,
.frag_middle_sni = 1,
.frag_sni_pos = 2,
.frag_sni_pos = 1,
.use_ipv6 = 1,
.fakeseq_offset = 10000,
.mark = DEFAULT_RAWSOCKET_MARK,
.synfake = 0,
.synfake_len = 0,
.sni_detection = SNI_DETECTION_PARSE,
@@ -44,12 +49,16 @@ struct config_t config = {
.domains_str = defaul_snistr,
.domains_strlen = sizeof(defaul_snistr),
.exclude_domains_str = "",
.exclude_domains_strlen = 0,
.queue_start_num = DEFAULT_QUEUE_NUM,
.fake_sni_pkt = fake_sni_old,
.fake_sni_pkt_sz = sizeof(fake_sni_old) - 1, // - 1 for null-terminator
};
#define OPT_SNI_DOMAINS 1
#define OPT_EXCLUDE_DOMAINS 25
#define OPT_FAKE_SNI 2
#define OPT_FAKING_TTL 3
#define OPT_FAKING_STRATEGY 10
@@ -63,21 +72,30 @@ struct config_t config = {
#define OPT_TRACE 15
#define OPT_QUIC_DROP 16
#define OPT_SNI_DETECTION 17
#define OPT_NO_IPV6 20
#define OPT_FAKE_SEQ_OFFSET 21
#define OPT_PACKET_MARK 22
#define OPT_SYNFAKE 23
#define OPT_SYNFAKE_LEN 24
#define OPT_SEG2DELAY 5
#define OPT_THREADS 6
#define OPT_SILENT 7
#define OPT_NO_GSO 8
#define OPT_QUEUE_NUM 9
#define OPT_MAX OPT_FRAG_SNI_POS
#define OPT_MAX OPT_SNI_DOMAINS
static struct option long_opt[] = {
{"help", 0, 0, 'h'},
{"version", 0, 0, 'v'},
{"sni-domains", 1, 0, OPT_SNI_DOMAINS},
{"exclude-domains", 1, 0, OPT_EXCLUDE_DOMAINS},
{"fake-sni", 1, 0, OPT_FAKE_SNI},
{"synfake", 1, 0, OPT_SYNFAKE},
{"synfake-len", 1, 0, OPT_SYNFAKE_LEN},
{"fake-sni-seq-len", 1, 0, OPT_FAKE_SNI_SEQ_LEN},
{"faking-strategy", 1, 0, OPT_FAKING_STRATEGY},
{"fake-seq-offset", 1, 0, OPT_FAKE_SEQ_OFFSET},
{"faking-ttl", 1, 0, OPT_FAKING_TTL},
{"frag", 1, 0, OPT_FRAG},
{"frag-sni-reverse", 1, 0, OPT_FRAG_SNI_REVERSE},
@@ -92,7 +110,9 @@ static struct option long_opt[] = {
{"silent", 0, 0, OPT_SILENT},
{"trace", 0, 0, OPT_TRACE},
{"no-gso", 0, 0, OPT_NO_GSO},
{"no-ipv6", 0, 0, OPT_NO_IPV6},
{"queue-num", 1, 0, OPT_QUEUE_NUM},
{"packet-mark", 1, 0, OPT_PACKET_MARK},
{0,0,0,0}
};
@@ -127,10 +147,14 @@ void print_usage(const char *argv0) {
printf("Options:\n");
printf("\t--queue-num=<number of netfilter queue>\n");
printf("\t--sni-domains=<comma separated domain list>|all\n");
printf("\t--exclude-domains=<comma separated domain list>\n");
printf("\t--fake-sni={1|0}\n");
printf("\t--fake-sni-seq-len=<length>\n");
printf("\t--fake-seq-offset=<offset>\n");
printf("\t--faking-ttl=<ttl>\n");
printf("\t--faking-strategy={randseq|ttl|tcp_check|pastseq}\n");
printf("\t--faking-strategy={randseq|ttl|tcp_check|pastseq|md5sum}\n");
printf("\t--synfake={1|0}\n");
printf("\t--synfake-len=<len>\n");
printf("\t--frag={tcp,ip,none}\n");
printf("\t--frag-sni-reverse={0|1}\n");
printf("\t--frag-sni-faked={0|1}\n");
@@ -141,9 +165,11 @@ void print_usage(const char *argv0) {
printf("\t--sni-detection={parse|brute}\n");
printf("\t--seg2delay=<delay>\n");
printf("\t--threads=<threads number>\n");
printf("\t--packet-mark=<mark>\n");
printf("\t--silent\n");
printf("\t--trace\n");
printf("\t--no-gso\n");
printf("\t--no-ipv6\n");
printf("\n");
}
@@ -169,6 +195,9 @@ int parse_args(int argc, char *argv[]) {
case OPT_NO_GSO:
config.use_gso = 0;
break;
case OPT_NO_IPV6:
config.use_ipv6 = 0;
break;
case OPT_QUIC_DROP:
config.quic_drop = 1;
break;
@@ -180,6 +209,10 @@ int parse_args(int argc, char *argv[]) {
config.domains_str = optarg;
config.domains_strlen = strlen(config.domains_str);
break;
case OPT_EXCLUDE_DOMAINS:
config.exclude_domains_str = optarg;
config.exclude_domains_strlen = strlen(config.exclude_domains_str);
break;
case OPT_SNI_DETECTION:
if (strcmp(optarg, "parse") == 0) {
config.sni_detection = SNI_DETECTION_PARSE;
@@ -249,6 +282,8 @@ int parse_args(int argc, char *argv[]) {
config.faking_strategy = FAKE_STRAT_TCP_CHECK;
} else if (strcmp(optarg, "pastseq") == 0) {
config.faking_strategy = FAKE_STRAT_PAST_SEQ;
} else if (strcmp(optarg, "md5sum") == 0) {
config.faking_strategy = FAKE_STRAT_TCP_MD5SUM;
} else {
goto invalid_opt;
}
@@ -262,7 +297,14 @@ int parse_args(int argc, char *argv[]) {
config.faking_ttl = num;
break;
case OPT_FAKE_SEQ_OFFSET:
num = parse_numeric_option(optarg);
if (errno != 0 || num < 0) {
goto invalid_opt;
}
config.fakeseq_offset = num;
break;
case OPT_FAKE_SNI:
if (strcmp(optarg, "1") == 0) {
config.fake_sni = 1;
@@ -289,6 +331,24 @@ int parse_args(int argc, char *argv[]) {
config.fk_winsize = num;
break;
case OPT_SYNFAKE:
if (strcmp(optarg, "1") == 0) {
config.synfake = 1;
} else if (strcmp(optarg, "0") == 0) {
config.synfake = 0;
} else {
goto invalid_opt;
}
break;
case OPT_SYNFAKE_LEN:
num = parse_numeric_option(optarg);
if (errno != 0 || num < 0) {
goto invalid_opt;
}
config.synfake_len = num;
break;
case OPT_SEG2DELAY:
num = parse_numeric_option(optarg);
if (errno != 0 || num < 0) {
@@ -313,6 +373,15 @@ int parse_args(int argc, char *argv[]) {
config.queue_start_num = num;
break;
case OPT_PACKET_MARK:
num = parse_numeric_option(optarg);
if (errno != 0 || num < 0) {
goto invalid_opt;
}
config.mark = num;
break;
default:
goto error;
}
@@ -375,6 +444,7 @@ void print_welcome() {
break;
case FAKE_STRAT_RAND_SEQ:
printf("Random seq faking strategy will be used\n");
printf("Fake seq offset set to %u\n", config.fakeseq_offset);
break;
case FAKE_STRAT_TCP_CHECK:
printf("TCP checksum faking strategy will be used\n");
@@ -382,17 +452,30 @@ void print_welcome() {
case FAKE_STRAT_PAST_SEQ:
printf("Past seq faking strategy will be used\n");
break;
case FAKE_STRAT_TCP_MD5SUM:
printf("md5sum faking strategy will be used\n");
break;
}
if (config.fk_winsize) {
printf("Response TCP window will be set to %d with the appropriate scale\n", config.fk_winsize);
}
if (config.synfake) {
printf("Fake SYN payload will be sent with each TCP request SYN packet\n");
}
if (config.use_gso) {
printf("GSO is enabled\n");
}
if (config.use_ipv6) {
printf("IPv6 is enabled\n");
} else {
printf("IPv6 is disabled\n");
}
if (config.quic_drop) {
printf("All QUIC packets will be dropped\n");
}

config.h

@@ -1,6 +1,10 @@
#ifndef YTB_CONFIG_H
#define YTB_CONFIG_H
#ifndef KERNEL_SPACE
#define USER_SPACE
#endif
typedef int (*raw_send_t)(const unsigned char *data, unsigned int data_len);
/**
* Sends the packet after delay_ms. The function should schedule send and return immediately
@@ -18,6 +22,7 @@ struct config_t {
unsigned int queue_start_num;
int threads;
int use_gso;
int use_ipv6;
int fragmentation_strategy;
int frag_sni_reverse;
int frag_sni_faked;
@@ -39,10 +44,16 @@ struct config_t {
unsigned int seg2_delay;
const char *domains_str;
unsigned int domains_strlen;
const char *exclude_domains_str;
unsigned int exclude_domains_strlen;
unsigned int all_domains;
const char *fake_sni_pkt;
unsigned int fake_sni_pkt_sz;
unsigned int fk_winsize;
unsigned int fakeseq_offset;
unsigned int mark;
int synfake;
unsigned int synfake_len;
};
extern struct config_t config;
@@ -69,7 +80,7 @@ extern struct config_t config;
#define FRAGMENTATION_STRATEGY FRAG_STRAT_TCP
#endif
#define RAWSOCKET_MARK (1 << 15)
#define DEFAULT_RAWSOCKET_MARK (1 << 15)
#ifdef USE_SEG2_DELAY
#define SEG2_DELAY 100
@@ -84,10 +95,11 @@ extern struct config_t config;
#define FAKE_STRAT_TTL 2
#define FAKE_STRAT_PAST_SEQ 3
#define FAKE_STRAT_TCP_CHECK 4
#define FAKE_STRAT_TCP_MD5SUM 5
#ifndef FAKING_STRATEGY
#define FAKING_STRATEGY FAKE_STRAT_RAND_SEQ
#define FAKING_STRATEGY FAKE_STRAT_PAST_SEQ
#endif
#if !defined(SILENT) && !defined(KERNEL_SPACE)
@@ -102,6 +114,8 @@ extern struct config_t config;
#define MAX_PACKET_SIZE 8192
static const char defaul_snistr[] = "googlevideo.com,ggpht.com,ytimg.com,youtube.com,play.google.com,youtu.be,googleapis.com,googleusercontent.com,gstatic.com,l.google.com";
#define DEFAULT_SNISTR "googlevideo.com,ggpht.com,ytimg.com,youtube.com,play.google.com,youtu.be,googleapis.com,googleusercontent.com,gstatic.com,l.google.com"
static const char defaul_snistr[] = DEFAULT_SNISTR;
#endif /* YTB_CONFIG_H */

ipt_YTUNBLOCK.h (deleted)

@@ -1,6 +0,0 @@
#ifndef IPT_YTUNBLOCK_H
#define IPT_YTUNBLOCK_H
struct xt_ytunblock_tginfo {};
#endif /* IPT_YTUNBLOCK_H */

iptk_YTUNBLOCK.c (deleted)

@@ -1,341 +0,0 @@
#define _GNU_SOURCE
// Kernel module for youtubeUnblock.
// Make with make kmake && sudo iptables -t mangle -D OUTPUT 1 && sudo make kreload && sudo iptables -t mangle -I OUTPUT -p tcp -j YTUNBLOCK
#include <linux/module.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/mutex.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netfilter/x_tables.h>
#include "ipt_YTUNBLOCK.h"
#include "mangle.h"
#include "config.h"
#include "raw_replacements.h"
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
MODULE_AUTHOR("Vadim Vetrov <vetrovvd@gmail.com>");
MODULE_DESCRIPTION("Linux kernel module for youtube unblock");
static int rsfd;
static struct socket *rawsocket;
DEFINE_MUTEX(rslock);
static int open_raw_socket(void) {
int ret = 0;
ret = sock_create(AF_INET, SOCK_RAW, IPPROTO_RAW, &rawsocket);
if (ret < 0) {
pr_alert("Unable to create raw socket\n");
goto err;
}
sockptr_t optval = {
.kernel = NULL,
.is_kernel = 1
};
int mark = RAWSOCKET_MARK;
optval.kernel = &mark;
ret = sock_setsockopt(rawsocket, SOL_SOCKET, SO_MARK, optval, sizeof(mark));
if (ret < 0)
{
pr_alert("setsockopt(SO_MARK, %d) failed\n", mark);
goto sr_err;
}
int one = 1;
optval.kernel = &one;
return 0;
sr_err:
sock_release(rawsocket);
err:
return ret;
}
static void close_raw_socket(void) {
sock_release(rawsocket);
}
#define AVAILABLE_MTU 1384
static int send_raw_socket(const uint8_t *pkt, uint32_t pktlen) {
if (pktlen > AVAILABLE_MTU) {
pr_warn("The packet is too big and may cause issues!");
__u32 buff1_size = pktlen;
__u32 buff2_size = pktlen;
__u8 *buff1 = kmalloc(pktlen, GFP_ATOMIC);
if (buff1 == NULL) return -1;
__u8 *buff2 = kmalloc(pktlen, GFP_ATOMIC);
if (buff2 == NULL) {
kfree(buff1);
return -1;
}
int ret;
#if defined(USE_TCP_SEGMENTATION) || defined(RAWSOCK_TCP_FSTRAT)
if ((ret = tcp4_frag(pkt, pktlen, AVAILABLE_MTU-128,
buff1, &buff1_size, buff2, &buff2_size)) < 0)
return ret;
#elif defined(USE_IP_FRAGMENTATION) || defined(RAWSOCK_IP_FSTRAT)
if ((ret = ip4_frag(pkt, pktlen, AVAILABLE_MTU-128,
buff1, &buff1_size, buff2, &buff2_size)) < 0)
return ret;
#else
pr_warn("send_raw_socket: Packet is too big but fragmentation is disabled! "
"Pass -DRAWSOCK_TCP_FSTRAT or -DRAWSOCK_IP_FSTRAT as CFLAGS "
"To enable it only for raw socket\n");
return -EINVAL;
#endif
int sent = 0;
ret = send_raw_socket(buff1, buff1_size);
if (ret >= 0) sent += ret;
else {
kfree(buff1);
kfree(buff2);
return ret;
}
kfree(buff1);
ret = send_raw_socket(buff2, buff2_size);
if (ret >= 0) sent += ret;
else {
kfree(buff2);
return ret;
}
kfree(buff2);
return sent;
}
struct iphdr *iph;
int ret;
if ((ret = ip4_payload_split(
(uint8_t *)pkt, pktlen, &iph, NULL, NULL, NULL)) < 0) {
return ret;
}
struct sockaddr_in daddr = {
.sin_family = AF_INET,
.sin_port = 0,
.sin_addr = {
.s_addr = iph->daddr
}
};
struct msghdr msg;
struct kvec iov;
iov.iov_base = (__u8 *)pkt;
iov.iov_len = pktlen;
iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, 1);
msg.msg_flags = 0;
msg.msg_name = &daddr;
msg.msg_namelen = sizeof(struct sockaddr_in);
msg.msg_control = NULL;
msg.msg_controllen = 0;
mutex_lock(&rslock);
ret = kernel_sendmsg(rawsocket, &msg, &iov, 1, pktlen);
mutex_unlock(&rslock);
return ret;
}
static unsigned int ykb_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
if ((skb->mark & RAWSOCKET_MARK) == RAWSOCKET_MARK)
return XT_CONTINUE;
if (skb->head == NULL) return XT_CONTINUE;
// TODO: Mallocs are bad!
uint32_t buflen = skb->len;
__u8 *buf = kmalloc(skb->len, GFP_ATOMIC);
if (buf == NULL) {
pr_err("Cannot alloc enough buffer space");
goto accept;
}
if (skb_copy_bits(skb, 0, buf, skb->len) < 0) {
pr_err("Unable copy bits\n");
goto ac_fkb;
}
struct iphdr *iph;
uint32_t iph_len;
struct tcphdr *tcph;
uint32_t tcph_len;
__u8 *payload;
uint32_t plen;
int ret = tcp4_payload_split(buf, buflen, &iph, &iph_len,
&tcph, &tcph_len, &payload, &plen);
if (ret < 0)
goto ac_fkb;
struct verdict vrd = analyze_tls_data(payload, plen);
if (vrd.gvideo_hello) {
int ret;
pr_info("Googlevideo detected\n");
ip4_set_checksum(iph);
tcp4_set_checksum(tcph, iph);
uint32_t f1len = skb->len;
uint32_t f2len = skb->len;
__u8 *frag1 = kmalloc(f1len, GFP_ATOMIC);
if (!frag1) {
pr_err("Cannot alloc enough gv frag1 buffer space");
goto ac_fkb;
}
__u8 *frag2 = kmalloc(f2len, GFP_ATOMIC);
if (!frag2) {
pr_err("Cannot alloc enough gv frag1 buffer space");
kfree(frag1);
goto ac_fkb;
}
#ifdef FAKE_SNI
uint32_t fksn_len = FAKE_SNI_MAXLEN;
__u8 *fksn_buf = kmalloc(fksn_len, GFP_ATOMIC);
if (!fksn_buf) {
pr_err("Cannot alloc enough gksn buffer space");
goto fallback;
}
ret = gen_fake_sni(iph, tcph, fksn_buf, &fksn_len);
if (ret < 0) {
pr_err("Cannot alloc enough gksn buffer space");
goto fksn_fb;
}
#endif
#if defined(USE_TCP_SEGMENTATION)
size_t ipd_offset = vrd.sni_offset;
size_t mid_offset = ipd_offset + vrd.sni_len / 2;
if ((ret = tcp4_frag(buf, skb->len,
mid_offset, frag1, &f1len, frag2, &f2len)) < 0) {
pr_err("tcp4_frag: %d", ret);
goto fksn_fb;
}
#elif defined(USE_IP_FRAGMENTATION)
size_t ipd_offset = tcph_len + vrd.sni_offset;
size_t mid_offset = ipd_offset + vrd.sni_len / 2;
mid_offset += 8 - mid_offset % 8;
if ((ret = ip4_frag(buf, skb->len,
mid_offset, frag1, &f1len, frag2, &f2len)) < 0) {
pr_err("ip4_frag: %d", ret);
goto fksn_fb;
}
#endif
#ifdef FAKE_SNI
ret = send_raw_socket(fksn_buf, fksn_len);
if (ret < 0) {
pr_err("fksn_send: %d", ret);
goto fksn_fb;
}
#endif
#if defined(USE_NO_FRAGMENTATION)
#ifdef SEG2_DELAY
#error "SEG2_DELAY is incompatible with NO FRAGMENTATION"
#endif
ret = send_raw_socket(buf, buflen);
if (ret < 0) {
pr_err("nofrag_send: %d", ret);
}
goto fksn_fb;
#endif
ret = send_raw_socket(frag2, f2len);
if (ret < 0) {
pr_err("raw frag2 send: %d", ret);
goto fksn_fb;
}
#ifdef SEG2_DELAY
#error "Seg2 delay is unsupported yet for kmod"
#else
ret = send_raw_socket(frag1, f1len);
if (ret < 0) {
pr_err("raw frag1 send: %d", ret);
goto fksn_fb;
}
#endif
fksn_fb:
#ifdef FAKE_SNI
kfree(fksn_buf);
#endif
fallback:
#ifndef SEG2_DELAY
kfree(frag1);
#endif
kfree(frag2);
kfree(buf);
kfree_skb(skb);
return NF_STOLEN;
}
ac_fkb:
kfree(buf);
accept:
return XT_CONTINUE;
}
static int ykb_chk(const struct xt_tgchk_param *par) {
return 0;
}
static struct xt_target ykb_tg_reg __read_mostly = {
.name = "YTUNBLOCK",
.target = ykb_tg,
.table = "mangle",
.hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD),
.targetsize = sizeof(struct xt_ytunblock_tginfo),
.proto = IPPROTO_TCP,
.family = NFPROTO_IPV4,
.checkentry = ykb_chk,
.me = THIS_MODULE,
};
static int __init ykb_init(void) {
int ret = 0;
ret = open_raw_socket();
if (ret < 0) goto err;
ret = xt_register_target(&ykb_tg_reg);
if (ret < 0) goto close_rawsocket;
pr_info("youtubeUnblock kernel module started.\n");
return 0;
close_rawsocket:
close_raw_socket();
err:
return ret;
}
static void __exit ykb_destroy(void) {
xt_unregister_target(&ykb_tg_reg);
close_raw_socket();
pr_info("youtubeUnblock kernel module destroyed.\n");
}
module_init(ykb_init);
module_exit(ykb_destroy);

kargs.c (new file, 374 lines)

@@ -0,0 +1,374 @@
#include "config.h"
#include "raw_replacements.h"
#include "types.h"
#include <linux/moduleparam.h>
#define STR_MAXLEN 2048
struct config_t config = {
.frag_sni_reverse = 1,
.frag_sni_faked = 0,
.fragmentation_strategy = FRAGMENTATION_STRATEGY,
.faking_strategy = FAKING_STRATEGY,
.faking_ttl = FAKE_TTL,
.fake_sni = 1,
.fake_sni_seq_len = 1,
.frag_middle_sni = 1,
.frag_sni_pos = 1,
.use_ipv6 = 1,
.fakeseq_offset = 10000,
.mark = DEFAULT_RAWSOCKET_MARK,
.synfake = 0,
.synfake_len = 0,
.sni_detection = SNI_DETECTION_PARSE,
#ifdef SEG2_DELAY
.seg2_delay = SEG2_DELAY,
#else
.seg2_delay = 0,
#endif
#ifdef USE_GSO
.use_gso = 1,
#else
.use_gso = false,
#endif
#ifdef DEBUG
.verbose = 2,
#else
.verbose = 1,
#endif
.domains_str = defaul_snistr,
.domains_strlen = sizeof(defaul_snistr),
.queue_start_num = DEFAULT_QUEUE_NUM,
.fake_sni_pkt = fake_sni_old,
.fake_sni_pkt_sz = sizeof(fake_sni_old) - 1, // - 1 for null-terminator
};
static int unumeric_set(const char *val, const struct kernel_param *kp) {
int n = 0, ret;
ret = kstrtoint(val, 10, &n);
if (ret != 0 || n < 0)
return -EINVAL;
return param_set_int(val, kp);
}
static int boolean_set(const char *val, const struct kernel_param *kp) {
int n = 0, ret;
ret = kstrtoint(val, 10, &n);
if (ret != 0 || (n != 0 && n != 1))
return -EINVAL;
return param_set_int(val, kp);
}
static int inverse_boolean_set(const char *val, const struct kernel_param *kp) {
int n = 0, ret;
ret = kstrtoint(val, 10, &n);
if (ret != 0 || (n != 0 && n != 1))
return -EINVAL;
n = !n;
if (kp->arg == NULL)
return -EINVAL;
*(int *)kp->arg = n;
return 0;
}
static int inverse_boolean_get(char *buffer, const struct kernel_param *kp) {
if (*(int *)kp->arg == 0) {
buffer[0] = '1';
} else {
buffer[0] = '0';
}
buffer[1] = '\0';
return strlen(buffer);
}
static const struct kernel_param_ops unumeric_parameter_ops = {
.set = unumeric_set,
.get = param_get_int
};
static const struct kernel_param_ops boolean_parameter_ops = {
.set = boolean_set,
.get = param_get_int
};
static const struct kernel_param_ops inverse_boolean_ops = {
.set = inverse_boolean_set,
.get = inverse_boolean_get,
};
module_param_cb(fake_sni, &boolean_parameter_ops, &config.fake_sni, 0664);
module_param_cb(fake_sni_seq_len, &unumeric_parameter_ops, &config.fake_sni_seq_len, 0664);
module_param_cb(faking_ttl, &unumeric_parameter_ops, &config.faking_ttl, 0664);
module_param_cb(fake_seq_offset, &unumeric_parameter_ops, &config.fakeseq_offset, 0664);
module_param_cb(frag_sni_reverse, &unumeric_parameter_ops, &config.frag_sni_reverse, 0664);
module_param_cb(frag_sni_faked, &boolean_parameter_ops, &config.frag_sni_faked, 0664);
module_param_cb(frag_middle_sni, &boolean_parameter_ops, &config.frag_middle_sni, 0664);
module_param_cb(frag_sni_pos, &unumeric_parameter_ops, &config.frag_sni_pos, 0664);
module_param_cb(fk_winsize, &unumeric_parameter_ops, &config.fk_winsize, 0664);
module_param_cb(synfake, &boolean_parameter_ops, &config.synfake, 0664);
module_param_cb(synfake_len, &unumeric_parameter_ops, &config.synfake_len, 0664);
module_param_cb(packet_mark, &unumeric_parameter_ops, &config.mark, 0664);
static int sni_domains_set(const char *val, const struct kernel_param *kp) {
size_t len;
int ret;
len = strnlen(val, STR_MAXLEN + 1);
if (len == STR_MAXLEN + 1) {
pr_err("%s: string parameter too long\n", kp->name);
return -ENOSPC;
}
if (len >= 1 && val[len - 1] == '\n') {
len--;
}
ret = param_set_charp(val, kp);
if (ret < 0) {
config.domains_strlen = 0;
} else {
config.domains_strlen = len;
if (len == 3 && !strncmp(val, "all", len)) {
config.all_domains = 1;
} else {
config.all_domains = 0;
}
}
return ret;
}
static const struct kernel_param_ops sni_domains_ops = {
.set = sni_domains_set,
.get = param_get_charp,
};
module_param_cb(sni_domains, &sni_domains_ops, &config.domains_str, 0664);
static int exclude_domains_set(const char *val, const struct kernel_param *kp) {
size_t len;
int ret;
len = strnlen(val, STR_MAXLEN + 1);
if (len == STR_MAXLEN + 1) {
pr_err("%s: string parameter too long\n", kp->name);
return -ENOSPC;
}
ret = param_set_charp(val, kp);
if (ret < 0) {
config.exclude_domains_strlen = 0;
} else {
config.exclude_domains_strlen = len;
}
return ret;
}
static const struct kernel_param_ops exclude_domains_ops = {
.set = exclude_domains_set,
.get = param_get_charp,
};
module_param_cb(exclude_domains, &exclude_domains_ops, &config.exclude_domains_str, 0664);
module_param_cb(no_ipv6, &inverse_boolean_ops, &config.use_ipv6, 0664);
module_param_cb(silent, &inverse_boolean_ops, &config.verbose, 0664);
module_param_cb(quic_drop, &boolean_parameter_ops, &config.quic_drop, 0664);
static int verbose_trace_set(const char *val, const struct kernel_param *kp) {
int n = 0, ret;
ret = kstrtoint(val, 10, &n);
if (ret != 0 || (n != 0 && n != 1))
return -EINVAL;
if (n) {
n = VERBOSE_TRACE;
} else {
n = VERBOSE_DEBUG;
}
if (kp->arg == NULL)
return -EINVAL;
*(int *)kp->arg = n;
return 0;
}
static const struct kernel_param_ops verbose_trace_ops = {
.set = verbose_trace_set,
.get = param_get_int,
};
module_param_cb(trace, &verbose_trace_ops, &config.verbose, 0664);
static int frag_strat_set(const char *val, const struct kernel_param *kp) {
size_t len;
len = strnlen(val, STR_MAXLEN + 1);
if (len == STR_MAXLEN + 1) {
pr_err("%s: string parameter too long\n", kp->name);
return -ENOSPC;
}
if (len >= 1 && val[len - 1] == '\n') {
len--;
}
if (strncmp(val, "tcp", len) == 0) {
*(int *)kp->arg = FRAG_STRAT_TCP;
} else if (strncmp(val, "ip", len) == 0) {
*(int *)kp->arg = FRAG_STRAT_IP;
} else if (strncmp(val, "none", len) == 0) {
*(int *)kp->arg = FRAG_STRAT_NONE;
} else {
return -EINVAL;
}
return 0;
}
static int frag_strat_get(char *buffer, const struct kernel_param *kp) {
switch (*(int *)kp->arg) {
case FRAG_STRAT_TCP:
strcpy(buffer, "tcp\n");
break;
case FRAG_STRAT_IP:
strcpy(buffer, "ip\n");
break;
case FRAG_STRAT_NONE:
strcpy(buffer, "none\n");
break;
default:
strcpy(buffer, "unknown\n");
}
return strlen(buffer);
}
static const struct kernel_param_ops frag_strat_ops = {
.set = frag_strat_set,
.get = frag_strat_get,
};
module_param_cb(fragmentation_strategy, &frag_strat_ops, &config.fragmentation_strategy, 0664);
static int fake_strat_set(const char *val, const struct kernel_param *kp) {
size_t len;
len = strnlen(val, STR_MAXLEN + 1);
if (len == STR_MAXLEN + 1) {
pr_err("%s: string parameter too long\n", kp->name);
return -ENOSPC;
}
if (len >= 1 && val[len - 1] == '\n') {
len--;
}
if (strncmp(val, "randseq", len) == 0) {
*(int *)kp->arg = FAKE_STRAT_RAND_SEQ;
} else if (strncmp(val, "ttl", len) == 0) {
*(int *)kp->arg = FAKE_STRAT_TTL;
} else if (strncmp(val, "tcp_check", len) == 0) {
*(int *)kp->arg = FAKE_STRAT_TCP_CHECK;
} else if (strncmp(val, "pastseq", len) == 0) {
*(int *)kp->arg = FAKE_STRAT_PAST_SEQ;
} else if (strncmp(val, "md5sum", len) == 0) {
*(int *)kp->arg = FAKE_STRAT_TCP_MD5SUM;
} else {
return -EINVAL;
}
return 0;
}
static int fake_strat_get(char *buffer, const struct kernel_param *kp) {
switch (*(int *)kp->arg) {
case FAKE_STRAT_RAND_SEQ:
strcpy(buffer, "randseq\n");
break;
case FAKE_STRAT_TTL:
strcpy(buffer, "ttl\n");
break;
case FAKE_STRAT_TCP_CHECK:
strcpy(buffer, "tcp_check\n");
break;
case FAKE_STRAT_PAST_SEQ:
strcpy(buffer, "pastseq\n");
break;
case FAKE_STRAT_TCP_MD5SUM:
strcpy(buffer, "md5sum\n");
break;
default:
strcpy(buffer, "unknown\n");
}
return strlen(buffer);
}
static const struct kernel_param_ops fake_strat_ops = {
.set = fake_strat_set,
.get = fake_strat_get,
};
module_param_cb(faking_strategy, &fake_strat_ops, &config.faking_strategy, 0664);
static int sni_detection_set(const char *val, const struct kernel_param *kp) {
size_t len;
len = strnlen(val, STR_MAXLEN + 1);
if (len == STR_MAXLEN + 1) {
pr_err("%s: string parameter too long\n", kp->name);
return -ENOSPC;
}
if (len >= 1 && val[len - 1] == '\n') {
len--;
}
if (strncmp(val, "parse", len) == 0) {
*(int *)kp->arg = SNI_DETECTION_PARSE;
} else if (strncmp(val, "brute", len) == 0) {
*(int *)kp->arg = SNI_DETECTION_BRUTE;
} else {
return -EINVAL;
}
return 0;
}
static int sni_detection_get(char *buffer, const struct kernel_param *kp) {
switch (*(int *)kp->arg) {
case SNI_DETECTION_PARSE:
strcpy(buffer, "parse\n");
break;
case SNI_DETECTION_BRUTE:
strcpy(buffer, "brute\n");
break;
default:
strcpy(buffer, "unknown\n");
}
return strlen(buffer);
}
static const struct kernel_param_ops sni_detection_ops = {
.set = sni_detection_set,
.get = sni_detection_get,
};
module_param_cb(sni_detection, &sni_detection_ops, &config.sni_detection, 0664);

View File

@@ -7,36 +7,23 @@ LD := ld
CFLAGS :=
LDFLAGS :=
IPT_CFLAGS := -Wall -Wpedantic -O2
KERNEL_BUILDER_MAKEDIR:=/lib/modules/$(shell uname -r)/build
.PHONY: kmake kload kunload kreload kclean kmclean xclean
kmake: kmod xmod
kmake: kmod
kmod:
$(MAKE) -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
xmod: libipt_YTUNBLOCK.so
libipt_YTUNBLOCK.so: libipt_YTUNBLOCK.o
$(CCLD) -shared -fPIC ${IPT_CFLAGS} -o $@ $^;
libipt_YTUNBLOCK.o: libipt_YTUNBLOCK.c
$(CC) ${IPT_CFLAGS} -D_INIT=lib$*_init -fPIC -c -o $@ $<;
$(MAKE) -C $(KERNEL_BUILDER_MAKEDIR) M=$(PWD) modules
kload:
insmod ipt_YTUNBLOCK.ko
cp ./libipt_YTUNBLOCK.so /usr/lib/xtables/
insmod kyoutubeUnblock.ko
kunload:
-rmmod ipt_YTUNBLOCK
-/bin/rm /usr/lib/xtables/libipt_YTUNBLOCK.so
-rmmod kyoutubeUnblock
kreload: kunload kload
kclean: xtclean kmclean
kclean: kmclean
kmclean:
-$(MAKE) -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
xtclean:
-/bin/rm -f libipt_YTUNBLOCK.so libipt_YTUNBLOCK.o
-$(MAKE) -C $(KERNEL_BUILDER_MAKEDIR) M=$(PWD) clean

226
kmod_utils.c Normal file
View File

@@ -0,0 +1,226 @@
#ifndef KERNEL_SPACE
#error "You are trying to compile the kernel module not in the kernel space"
#endif
#include "kmod_utils.h"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/socket.h>
#include <linux/net.h>
#include "config.h"
#include "utils.h"
#include "logging.h"
static struct socket *rawsocket;
static struct socket *raw6socket;
int open_raw_socket(void) {
int ret = 0;
ret = sock_create(AF_INET, SOCK_RAW, IPPROTO_RAW, &rawsocket);
if (ret < 0) {
pr_alert("Unable to create raw socket\n");
goto err;
}
// That's funny, but this is how it is done in the kernel
// https://elixir.bootlin.com/linux/v3.17.7/source/net/core/sock.c#L916
rawsocket->sk->sk_mark=config.mark;
return 0;
err:
return ret;
}
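/*
 * For context, a minimal userspace sketch (illustration only, not part of the
 * module): writing sk->sk_mark directly, as open_raw_socket() does above, is the
 * in-kernel equivalent of setting SO_MARK on a socket fd. The fd handling below
 * is assumed.
 */
#if 0
#include <sys/socket.h>
static int set_socket_mark(int fd, unsigned int mark)
{
	/* needs CAP_NET_ADMIN; returns 0 on success, -1 with errno on failure */
	return setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
}
#endif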
void close_raw_socket(void) {
sock_release(rawsocket);
}
static int send_raw_ipv4(const uint8_t *pkt, uint32_t pktlen) {
int ret = 0;
if (pktlen > AVAILABLE_MTU) return -ENOMEM;
struct iphdr *iph;
if ((ret = ip4_payload_split(
(uint8_t *)pkt, pktlen, &iph, NULL, NULL, NULL)) < 0) {
return ret;
}
struct sockaddr_in daddr = {
.sin_family = AF_INET,
.sin_port = 0,
.sin_addr = {
.s_addr = iph->daddr
}
};
struct msghdr msg;
struct kvec iov;
memset(&msg, 0, sizeof(msg));
iov.iov_base = (__u8 *)pkt;
iov.iov_len = pktlen;
msg.msg_flags = 0;
msg.msg_name = &daddr;
msg.msg_namelen = sizeof(struct sockaddr_in);
msg.msg_control = NULL;
msg.msg_controllen = 0;
ret = kernel_sendmsg(rawsocket, &msg, &iov, 1, pktlen);
return ret;
}
int open_raw6_socket(void) {
int ret = 0;
ret = sock_create(AF_INET6, SOCK_RAW, IPPROTO_RAW, &raw6socket);
if (ret < 0) {
pr_alert("Unable to create raw socket\n");
goto err;
}
// That's funny, but this is how it is done in the kernel
// https://elixir.bootlin.com/linux/v3.17.7/source/net/core/sock.c#L916
raw6socket->sk->sk_mark=config.mark;
return 0;
err:
return ret;
}
void close_raw6_socket(void) {
sock_release(raw6socket);
}
int send_raw_ipv6(const uint8_t *pkt, uint32_t pktlen) {
int ret = 0;
if (pktlen > AVAILABLE_MTU) return -ENOMEM;
struct ip6_hdr *iph;
if ((ret = ip6_payload_split(
(uint8_t *)pkt, pktlen, &iph, NULL, NULL, NULL)) < 0) {
return ret;
}
struct sockaddr_in6 daddr = {
.sin6_family = AF_INET6,
/* Always 0 for raw socket */
.sin6_port = 0,
.sin6_addr = iph->ip6_dst
};
struct kvec iov;
struct msghdr msg;
memset(&msg, 0, sizeof(msg));
iov.iov_base = (__u8 *)pkt;
iov.iov_len = pktlen;
msg.msg_flags = 0;
msg.msg_name = &daddr;
msg.msg_namelen = sizeof(struct sockaddr_in6);
msg.msg_control = NULL;
msg.msg_controllen = 0;
ret = kernel_sendmsg(raw6socket, &msg, &iov, 1, pktlen);
return ret;
}
int send_raw_socket(const uint8_t *pkt, uint32_t pktlen) {
int ret;
if (pktlen > AVAILABLE_MTU) {
lgdebug("The packet is too big and may cause issues!");
NETBUF_ALLOC(buff1, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(buff1)) {
lgerror("Allocation error", -ENOMEM);
return -ENOMEM;
}
NETBUF_ALLOC(buff2, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(buff2)) {
lgerror("Allocation error", -ENOMEM);
NETBUF_FREE(buff1);
return -ENOMEM;
}
uint32_t buff1_size = MAX_PACKET_SIZE;
uint32_t buff2_size = MAX_PACKET_SIZE;
switch (config.fragmentation_strategy) {
case FRAG_STRAT_TCP:
if ((ret = tcp_frag(pkt, pktlen, AVAILABLE_MTU-128,
buff1, &buff1_size, buff2, &buff2_size)) < 0) {
goto erret_lc;
}
break;
case FRAG_STRAT_IP:
if ((ret = ip4_frag(pkt, pktlen, AVAILABLE_MTU-128,
buff1, &buff1_size, buff2, &buff2_size)) < 0) {
goto erret_lc;
}
break;
default:
pr_info("send_raw_socket: Packet is too big but fragmentation is disabled!");
ret = -EINVAL;
goto erret_lc;
}
int sent = 0;
ret = send_raw_socket(buff1, buff1_size);
if (ret >= 0) sent += ret;
else {
goto erret_lc;
}
ret = send_raw_socket(buff2, buff2_size);
if (ret >= 0) sent += ret;
else {
goto erret_lc;
}
NETBUF_FREE(buff1);
NETBUF_FREE(buff2);
return sent;
erret_lc:
NETBUF_FREE(buff1);
NETBUF_FREE(buff2);
return ret;
}
int ipvx = netproto_version(pkt, pktlen);
if (ipvx == IP4VERSION)
return send_raw_ipv4(pkt, pktlen);
else if (ipvx == IP6VERSION)
return send_raw_ipv6(pkt, pktlen);
printf("proto version %d is unsupported\n", ipvx);
return -EINVAL;
}
void delay_packet_send(const unsigned char *data, unsigned int data_len, unsigned int delay_ms) {
pr_info("delay_packet_send won't work on current youtubeUnblock version");
send_raw_socket(data, data_len);
}
struct instance_config_t instance_config = {
.send_raw_packet = send_raw_socket,
.send_delayed_packet = delay_packet_send,
};

14
kmod_utils.h Normal file
View File

@@ -0,0 +1,14 @@
#include "types.h"
#ifndef KMOD_UTILS_H
#define KMOD_UTILS_H
int open_raw_socket(void);
void close_raw_socket(void);
int open_raw6_socket(void);
void close_raw6_socket(void);
int send_raw_ipv6(const uint8_t *pkt, uint32_t pktlen);
int send_raw_socket(const uint8_t *pkt, uint32_t pktlen);
void delay_packet_send(const unsigned char *data, unsigned int data_len, unsigned int delay_ms);
#endif /* KMOD_UTILS_H */

147
kytunblock.c Normal file
View File

@@ -0,0 +1,147 @@
#include "nf_wrapper.h"
#ifndef KERNEL_SPACE
#error "You are trying to compile the kernel module not in the kernel space"
#endif
// Kernel module for youtubeUnblock.
// Make with make kmake && sudo iptables -t mangle -D OUTPUT 1 && sudo make kreload && sudo iptables -t mangle -I OUTPUT -p tcp -j YTUNBLOCK
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include "mangle.h"
#include "config.h"
#include "utils.h"
#include "logging.h"
#include "kmod_utils.h"
MODULE_LICENSE("GPL");
MODULE_VERSION("0.3.2");
MODULE_AUTHOR("Vadim Vetrov <vetrovvd@gmail.com>");
MODULE_DESCRIPTION("Linux kernel module for youtube unblock");
static NF_CALLBACK(ykb_nf_hook, skb) {
int ret;
if ((skb->mark & config.mark) == config.mark)
goto accept;
if (skb->head == NULL)
goto accept;
if (skb->len > MAX_PACKET_SIZE)
goto accept;
ret = skb_linearize(skb);
if (ret < 0) {
lgerror("Cannot linearize", ret);
goto accept;
}
int vrd = process_packet(skb->data, skb->len);
switch(vrd) {
case PKT_ACCEPT:
goto accept;
case PKT_DROP:
goto drop;
}
accept:
return NF_ACCEPT;
drop:
kfree_skb(skb);
return NF_STOLEN;
}
static struct nf_hook_ops ykb_nf_reg __read_mostly = {
.hook = ykb_nf_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_MANGLE,
};
static struct nf_hook_ops ykb6_nf_reg __read_mostly = {
.hook = ykb_nf_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_MANGLE,
};
static int __init ykb_init(void) {
int ret = 0;
ret = open_raw_socket();
if (ret < 0) goto err;
if (config.use_ipv6) {
ret = open_raw6_socket();
if (ret < 0) goto close_rawsocket;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)
struct net *n;
for_each_net(n) {
ret = nf_register_net_hook(n, &ykb6_nf_reg);
if (ret < 0)
lgerror("bad rat",ret);
}
#else
nf_register_hook(&ykb6_nf_reg);
#endif
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)
struct net *n;
for_each_net(n) {
ret = nf_register_net_hook(n, &ykb_nf_reg);
if (ret < 0)
lgerror("bad rat",ret);
}
#else
nf_register_hook(&ykb_nf_reg);
#endif
pr_info("youtubeUnblock kernel module started.\n");
return 0;
close_rawsocket:
close_raw_socket();
err:
return ret;
}
static void __exit ykb_destroy(void) {
if (config.use_ipv6) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)
struct net *n;
for_each_net(n)
nf_unregister_net_hook(n, &ykb6_nf_reg);
#else
nf_unregister_hook(&ykb6_nf_reg);
#endif
close_raw6_socket();
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)
struct net *n;
for_each_net(n)
nf_unregister_net_hook(n, &ykb_nf_reg);
#else
nf_unregister_hook(&ykb_nf_reg);
#endif
close_raw_socket();
pr_info("youtubeUnblock kernel module destroyed.\n");
}
module_init(ykb_init);
module_exit(ykb_destroy);

View File

@@ -1,26 +0,0 @@
// Used to register target in iptables
#include <stdio.h>
#include <xtables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include "ipt_YTUNBLOCK.h"
#define _init __attribute__((constructor)) _INIT
#define __maybe_unused __attribute__((__unused__))
static void YTKB_help(void) {
printf("Youtube Unblock - bypass youtube slowdown DPI in Russia\n");
}
static struct xtables_target ykb_tg_reg = {
.name = "YTUNBLOCK",
.version = XTABLES_VERSION,
.family = NFPROTO_IPV4,
.size = XT_ALIGN(sizeof(struct xt_ytunblock_tginfo)),
.userspacesize = XT_ALIGN(sizeof(struct xt_ytunblock_tginfo)),
.help = YTKB_help,
};
void _init(void) {
xtables_register_target(&ykb_tg_reg);
}

View File

@@ -5,11 +5,12 @@
#define LOG_LEVEL (config.verbose)
#ifdef KERNEL_SPACE
#include <linux/printk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#define printf pr_info
#define perror pr_err
#define lgerror(msg, ret, ...) __extension__ ({ \
printf(msg ": %d\n", ##__VA_ARGS__, ret); \
pr_err(msg ": %d\n", ##__VA_ARGS__, ret); \
})
#else
#include <stdio.h> // IWYU pragma: export
@@ -20,12 +21,16 @@
})
#endif /* PROGRAM_SPACE */
#define lgdebug(msg, ...) \
(LOG_LEVEL >= VERBOSE_DEBUG ? printf(msg, ##__VA_ARGS__) : 0)
#define lgdebugmsg(msg, ...) \
(LOG_LEVEL >= VERBOSE_DEBUG ? printf(msg "\n", ##__VA_ARGS__) : 0)
#define lgdebugmsg(msg, ...) lgdebug(msg "\n", ##__VA_ARGS__)
#define lgtracemsg(msg, ...) \
(LOG_LEVEL >= VERBOSE_TRACE ? printf(msg "\n", ##__VA_ARGS__) : 0)
#define lgtrace(msg, ...) \
(LOG_LEVEL >= VERBOSE_TRACE ? printf(msg, ##__VA_ARGS__) : 0)
#define lgtracemsg(msg, ...) lgtrace(msg "\n", __VA_ARGS__)
#define lgtrace_start(msg, ...) \
(LOG_LEVEL >= VERBOSE_TRACE ? printf("[TRACE] " msg " ( ", ##__VA_ARGS__) : 0)

561
mangle.c
View File

@@ -6,7 +6,7 @@
#include "quic.h"
#include "logging.h"
#ifndef KERNEL_SCOPE
#ifndef KERNEL_SPACE
#include <stdlib.h>
#endif
@@ -16,78 +16,164 @@ int process_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
}
const struct iphdr *iph;
const struct ip6_hdr *ip6h;
uint32_t iph_len;
const uint8_t *ip_payload;
uint32_t ip_payload_len;
int transport_proto = -1;
int ipver = netproto_version(raw_payload, raw_payload_len);
int ret;
ret = ip4_payload_split((uint8_t *)raw_payload, raw_payload_len,
if (ipver == IP4VERSION) {
ret = ip4_payload_split((uint8_t *)raw_payload, raw_payload_len,
(struct iphdr **)&iph, &iph_len,
(uint8_t **)&ip_payload, &ip_payload_len);
if (ret < 0)
goto accept;
if (ret < 0)
transport_proto = iph->protocol;
} else if (ipver == IP6VERSION && config.use_ipv6) {
ret = ip6_payload_split((uint8_t *)raw_payload, raw_payload_len,
(struct ip6_hdr **)&ip6h, &iph_len,
(uint8_t **)&ip_payload, &ip_payload_len);
if (ret < 0)
goto accept;
transport_proto = ip6h->ip6_nxt;
} else {
lgtracemsg("Unknown layer 3 protocol version: %d", ipver);
goto accept;
}
switch (iph->protocol) {
switch (transport_proto) {
case IPPROTO_TCP:
return process_tcp4_packet(raw_payload, raw_payload_len);
return process_tcp_packet(raw_payload, raw_payload_len);
case IPPROTO_UDP:
return process_udp4_packet(raw_payload, raw_payload_len);
return process_udp_packet(raw_payload, raw_payload_len);
default:
goto accept;
}
accept:
return PKT_ACCEPT;
drop:
return PKT_DROP;
}
int process_tcp4_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
const struct iphdr *iph;
int process_tcp_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
const void *ipxh;
uint32_t iph_len;
const struct tcphdr *tcph;
uint32_t tcph_len;
const uint8_t *data;
uint32_t dlen;
int ret = tcp4_payload_split((uint8_t *)raw_payload, raw_payload_len,
(struct iphdr **)&iph, &iph_len, (struct tcphdr **)&tcph, &tcph_len,
int ipxv = netproto_version(raw_payload, raw_payload_len);
lgtrace_start("TCP");
lgtrace_addp("IPv%d", ipxv);
int ret = tcp_payload_split((uint8_t *)raw_payload, raw_payload_len,
(void *)&ipxh, &iph_len,
(struct tcphdr **)&tcph, &tcph_len,
(uint8_t **)&data, &dlen);
if (ret < 0) {
goto accept;
}
if (tcph->syn && config.synfake) {
lgtrace_addp("TCP syn alter");
NETBUF_ALLOC(payload, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(payload)) {
lgerror("Allocation error", -ENOMEM);
goto accept;
}
memcpy(payload, ipxh, iph_len);
memcpy(payload + iph_len, tcph, tcph_len);
uint32_t fake_len = config.fake_sni_pkt_sz;
if (config.synfake_len)
fake_len = min(config.synfake_len, fake_len);
memcpy(payload + iph_len + tcph_len, config.fake_sni_pkt, fake_len);
struct tcphdr *tcph = (struct tcphdr *)(payload + iph_len);
if (ipxv == IP4VERSION) {
struct iphdr *iph = (struct iphdr *)payload;
iph->tot_len = htons(iph_len + tcph_len + fake_len);
set_ip_checksum(payload, iph_len);
set_tcp_checksum(tcph, iph, iph_len);
} else if (ipxv == IP6VERSION) {
struct ip6_hdr *ip6h = (struct ip6_hdr *)payload;
ip6h->ip6_plen = htons(tcph_len + fake_len);
set_ip_checksum(ip6h, iph_len);
set_tcp_checksum(tcph, ip6h, iph_len);
}
ret = instance_config.send_raw_packet(payload, iph_len + tcph_len + fake_len);
if (ret < 0) {
lgerror("send_syn_altered", ret);
NETBUF_FREE(payload);
goto accept;
}
lgtrace_addp("rawsocket sent %d", ret);
NETBUF_FREE(payload);
goto drop;
}
if (tcph->syn) goto accept;
struct tls_verdict vrd = analyze_tls_data(data, dlen);
lgtrace_addp("Analyzed, %d", vrd.target_sni);
if (vrd.target_sni) {
lgdebugmsg("Target SNI detected: %.*s", vrd.sni_len, data + vrd.sni_offset);
uint8_t payload[MAX_PACKET_SIZE];
uint32_t payload_len = raw_payload_len;
NETBUF_ALLOC(payload, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(payload)) {
lgerror("Allocation error", -ENOMEM);
goto accept;
}
memcpy(payload, raw_payload, raw_payload_len);
struct iphdr *iph;
void *iph;
uint32_t iph_len;
struct tcphdr *tcph;
uint32_t tcph_len;
uint8_t *data;
uint32_t dlen;
int ret = tcp4_payload_split(payload, payload_len,
int ret = tcp_payload_split(payload, payload_len,
&iph, &iph_len, &tcph, &tcph_len,
&data, &dlen);
if (ret < 0) {
lgerror("tcp_payload_split in targ_sni", ret);
goto accept_lc;
}
if (config.fk_winsize) {
tcph->window = htons(config.fk_winsize);
}
ip4_set_checksum(iph);
tcp4_set_checksum(tcph, iph);
set_ip_checksum(iph, iph_len);
set_tcp_checksum(tcph, iph, iph_len);
if (dlen > 1480 && config.verbose) {
lgdebugmsg("WARNING! Client Hello packet is too big and may cause issues!");
@@ -123,16 +209,17 @@ int process_tcp4_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
poses[1] = tmp;
}
ret = send_tcp4_frags(payload, payload_len, poses, cnt, 0);
ret = send_tcp_frags(payload, payload_len, poses, cnt, 0);
if (ret < 0) {
lgerror("tcp4 send frags", ret);
goto accept;
goto accept_lc;
}
goto drop;
goto drop_lc;
}
break;
case FRAG_STRAT_IP: {
case FRAG_STRAT_IP:
if (ipxv == IP4VERSION) {
ipd_offset = ((char *)data - (char *)tcph) + vrd.sni_offset;
mid_offset = ipd_offset + vrd.sni_len / 2;
mid_offset += 8 - mid_offset % 8;
@@ -159,47 +246,66 @@ int process_tcp4_packet(const uint8_t *raw_payload, uint32_t raw_payload_len) {
ret = send_ip4_frags(payload, payload_len, poses, cnt, 0);
if (ret < 0) {
lgerror("ip4 send frags", ret);
goto accept;
goto accept_lc;
}
goto drop;
goto drop_lc;
} else {
printf("WARNING: IP fragmentation is supported only for IPv4\n");
goto default_send;
}
break;
default:
default_send:
ret = instance_config.send_raw_packet(payload, payload_len);
if (ret < 0) {
lgerror("raw pack send", ret);
goto accept;
goto accept_lc;
}
goto drop;
goto drop_lc;
}
goto drop_lc;
accept_lc:
NETBUF_FREE(payload);
goto accept;
drop_lc:
NETBUF_FREE(payload);
goto drop;
}
accept:
lgtrace_addp("accept");
lgtrace_end();
return PKT_ACCEPT;
drop:
lgtrace_addp("drop");
lgtrace_end();
return PKT_DROP;
}
int process_udp4_packet(const uint8_t *pkt, uint32_t pktlen) {
const struct iphdr *iph;
int process_udp_packet(const uint8_t *pkt, uint32_t pktlen) {
const void *iph;
uint32_t iph_len;
const struct udphdr *udph;
const uint8_t *data;
uint32_t dlen;
int ipver = netproto_version(pkt, pktlen);
lgtrace_start("Got udp packet");
lgtrace_addp("IPv%d", ipver);
int ret = udp4_payload_split((uint8_t *)pkt, pktlen,
(struct iphdr **)&iph, &iph_len,
int ret = udp_payload_split((uint8_t *)pkt, pktlen,
(void **)&iph, &iph_len,
(struct udphdr **)&udph,
(uint8_t **)&data, &dlen);
lgtrace_start("Got udp packet");
if (ret < 0) {
lgtrace_addp("undefined");
goto accept;
@@ -275,8 +381,19 @@ int send_ip4_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses
packet, pktlen);
}
} else {
uint8_t frag1[MAX_PACKET_SIZE];
uint8_t frag2[MAX_PACKET_SIZE];
NETBUF_ALLOC(frag1, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(frag1)) {
lgerror("Allocation error", -ENOMEM);
return -ENOMEM;
}
NETBUF_ALLOC(frag2, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(frag2)) {
lgerror("Allocation error", -ENOMEM);
NETBUF_FREE(frag1);
return -ENOMEM;
}
uint32_t f1len = MAX_PACKET_SIZE;
uint32_t f2len = MAX_PACKET_SIZE;
@@ -284,7 +401,8 @@ int send_ip4_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses
if (dvs > poses[0]) {
lgerror("send_frags: Recursive dvs(%d) is more than poses0(%d)", -EINVAL, dvs, poses[0]);
return -EINVAL;
ret = -EINVAL;
goto erret_lc;
}
ret = ip4_frag(packet, pktlen, poses[0] - dvs,
@@ -292,7 +410,7 @@ int send_ip4_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses
if (ret < 0) {
lgerror("send_frags: frag: with context packet with size %d, position: %d, recursive dvs: %d", ret, pktlen, poses[0], dvs);
return ret;
goto erret_lc;
}
if (config.frag_sni_reverse)
@@ -300,28 +418,37 @@ int send_ip4_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses
send_frag1:
ret = send_ip4_frags(frag1, f1len, NULL, 0, 0);
if (ret < 0) {
return ret;
goto erret_lc;
}
if (config.frag_sni_reverse)
goto out;
goto out_lc;
send_frag2:
dvs += poses[0];
ret = send_ip4_frags(frag2, f2len, poses + 1, poses_sz - 1, dvs);
if (ret < 0) {
return ret;
goto erret_lc;
}
if (config.frag_sni_reverse)
goto send_frag1;
out_lc:
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
goto out;
erret_lc:
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
return ret;
}
out:
return 0;
}
int send_tcp4_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses, uint32_t poses_sz, uint32_t dvs) {
int send_tcp_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *poses, uint32_t poses_sz, uint32_t dvs) {
if (poses_sz == 0) {
if (config.seg2_delay && ((dvs > 0) ^ config.frag_sni_reverse)) {
if (!instance_config.send_delayed_packet) {
@@ -333,63 +460,96 @@ int send_tcp4_frags(const uint8_t *packet, uint32_t pktlen, const uint32_t *pose
return 0;
} else {
lgtrace_addp("raw send packet of %d bytes with %d dvs", pktlen, dvs);
return instance_config.send_raw_packet(
packet, pktlen);
}
} else {
uint8_t frag1[MAX_PACKET_SIZE];
uint8_t frag2[MAX_PACKET_SIZE];
uint8_t fake_pad[MAX_PACKET_SIZE];
NETBUF_ALLOC(frag1, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(frag1)) {
lgerror("Allocation error", -ENOMEM);
return -ENOMEM;
}
NETBUF_ALLOC(frag2, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(frag2)) {
lgerror("Allocation error", -ENOMEM);
NETBUF_FREE(frag1);
return -ENOMEM;
}
NETBUF_ALLOC(fake_pad, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(fake_pad)) {
lgerror("Allocation error", -ENOMEM);
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
return -ENOMEM;
}
uint32_t f1len = MAX_PACKET_SIZE;
uint32_t f2len = MAX_PACKET_SIZE;
uint32_t fake_pad_len = MAX_PACKET_SIZE;
int ret;
if (dvs > poses[0]) {
lgerror("send_frags: Recursive dvs(%d) is more than poses0(%d)", -EINVAL, dvs, poses[0]);
return -EINVAL;
ret = -EINVAL;
goto erret_lc;
}
ret = tcp4_frag(packet, pktlen, poses[0] - dvs,
ret = tcp_frag(packet, pktlen, poses[0] - dvs,
frag1, &f1len, frag2, &f2len);
lgtrace_addp("Packet split in %d bytes position of payload start, dvs: %d to two packets of %d and %d lengths", poses[0], dvs, f1len, f2len);
if (ret < 0) {
lgerror("send_frags: frag: with context packet with size %d, position: %d, recursive dvs: %d", ret, pktlen, poses[0], dvs);
return ret;
lgerror("send_frags: tcp_frag: with context packet with size %d, position: %d, recursive dvs: %d", ret, pktlen, poses[0], dvs);
goto erret_lc;
}
if (config.frag_sni_reverse)
goto send_frag2;
send_frag1:
{
ret = send_tcp4_frags(frag1, f1len, NULL, 0, 0);
ret = send_tcp_frags(frag1, f1len, NULL, 0, 0);
if (ret < 0) {
return ret;
goto erret_lc;
}
if (config.frag_sni_reverse)
goto out;
goto out_lc;
}
send_fake:
if (config.frag_sni_faked) {
uint32_t iphfl, tcphfl;
ret = tcp4_payload_split(frag2, f2len, NULL, &iphfl, NULL, &tcphfl, NULL, NULL);
fake_pad_len = f2len;
ret = tcp_payload_split(frag2, f2len, NULL, &iphfl, NULL, &tcphfl, NULL, NULL);
if (ret < 0) {
lgerror("Invalid frag2", ret);
return ret;
goto erret_lc;
}
memcpy(fake_pad, frag2, iphfl + tcphfl);
memset(fake_pad + iphfl + tcphfl, 0, f2len - iphfl - tcphfl);
ret = fail4_packet(fake_pad, f2len);
struct tcphdr *fakethdr = (void *)(fake_pad + iphfl);
if (config.faking_strategy == FAKE_STRAT_PAST_SEQ) {
lgtrace("frag fake sent with %u -> ", ntohl(fakethdr->seq));
fakethdr->seq = htonl(ntohl(fakethdr->seq) - dvs);
lgtrace_addp("%u, ", ntohl(fakethdr->seq));
}
ret = fail_packet(fake_pad, &fake_pad_len, MAX_PACKET_SIZE);
if (ret < 0) {
lgerror("Failed to fail packet", ret);
return ret;
goto erret_lc;
}
ret = send_tcp4_frags(fake_pad, f2len, NULL, 0, 0);
ret = send_tcp_frags(fake_pad, fake_pad_len, NULL, 0, 0);
if (ret < 0) {
return ret;
goto erret_lc;
}
}
@@ -400,53 +560,71 @@ send_fake:
send_frag2:
{
dvs += poses[0];
ret = send_tcp4_frags(frag2, f2len, poses + 1, poses_sz - 1, dvs);
ret = send_tcp_frags(frag2, f2len, poses + 1, poses_sz - 1, dvs);
if (ret < 0) {
return ret;
goto erret_lc;
}
if (config.frag_sni_reverse)
goto send_fake;
}
out_lc:
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
NETBUF_FREE(fake_pad);
goto out;
erret_lc:
NETBUF_FREE(frag1);
NETBUF_FREE(frag2);
NETBUF_FREE(fake_pad);
return ret;
}
out:
return 0;
}
int post_fake_sni(const struct iphdr *iph, unsigned int iph_len,
int post_fake_sni(const void *iph, unsigned int iph_len,
const struct tcphdr *tcph, unsigned int tcph_len,
unsigned char sequence_len) {
uint8_t rfsiph[60];
uint8_t rfsiph[128];
uint8_t rfstcph[60];
int ret;
memcpy(rfsiph, iph, iph_len);
memcpy(rfstcph, tcph, tcph_len);
struct iphdr *fsiph = (void *)rfsiph;
void *fsiph = (void *)rfsiph;
struct tcphdr *fstcph = (void *)rfstcph;
for (int i = 0; i < sequence_len; i++) {
uint8_t fake_sni[MAX_PACKET_SIZE];
NETBUF_ALLOC(fake_sni, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(fake_sni)) {
lgerror("Allocation error", -ENOMEM);
return -ENOMEM;
}
uint32_t fsn_len = MAX_PACKET_SIZE;
ret = gen_fake_sni(fsiph, fstcph, fake_sni, &fsn_len);
ret = gen_fake_sni(fsiph, iph_len, fstcph, tcph_len,
fake_sni, &fsn_len);
if (ret < 0) {
lgerror("gen_fake_sni", ret);
return ret;
goto erret_lc;
}
lgtrace_addp("post fake sni #%d", i + 1);
lgtrace_addp("post with %d", fsn_len);
ret = instance_config.send_raw_packet(fake_sni, fsn_len);
if (ret < 0) {
lgerror("send fake sni", ret);
return ret;
goto erret_lc;
}
uint32_t iph_len;
uint32_t tcph_len;
uint32_t plen;
tcp4_payload_split(
tcp_payload_split(
fake_sni, fsn_len,
&fsiph, &iph_len, &fstcph, &tcph_len,
&fsiph, &iph_len,
&fstcph, &tcph_len,
NULL, &plen);
@@ -456,40 +634,21 @@ int post_fake_sni(const struct iphdr *iph, unsigned int iph_len,
fsiph = (void *)rfsiph;
fstcph = (void *)rfstcph;
NETBUF_FREE(fake_sni);
continue;
erret_lc:
NETBUF_FREE(fake_sni);
return ret;
}
return 0;
}
void z_function(const char *str, int *zbuf, size_t len) {
zbuf[0] = len;
ssize_t lh = 0, rh = 1;
for (ssize_t i = 1; i < len; i++) {
zbuf[i] = 0;
if (i < rh) {
zbuf[i] = min(zbuf[i - lh], rh - i);
}
while (i + zbuf[i] < len && str[zbuf[i]] == str[i + zbuf[i]])
zbuf[i]++;
if (i + zbuf[i] > rh) {
lh = i;
rh = i + zbuf[i];
}
}
}
#define TLS_CONTENT_TYPE_HANDSHAKE 0x16
#define TLS_HANDSHAKE_TYPE_CLIENT_HELLO 0x01
#define TLS_EXTENSION_SNI 0x0000
#define TLS_EXTENSION_CLIENT_HELLO_ENCRYPTED 0xfe0d
typedef uint8_t uint8_t;
typedef uint32_t uint32_t;
typedef uint16_t uint16_t;
/**
* Processes tls payload of the tcp request.
*
@@ -510,9 +669,7 @@ struct tls_verdict analyze_tls_data(
uint8_t tls_content_type = *msgData;
uint8_t tls_vmajor = *(msgData + 1);
uint8_t tls_vminor = *(msgData + 2);
uint16_t message_length = ntohs(*(uint16_t *)(msgData + 3));
const uint8_t *message_length_ptr = msgData + 3;
if (tls_vmajor != 0x03) goto nextMessage;
@@ -536,7 +693,6 @@ struct tls_verdict analyze_tls_data(
const uint8_t *msgPtr = handshakeProto;
msgPtr += 1;
const uint8_t *handshakeProto_length_ptr = msgPtr + 1;
msgPtr += 3 + 2 + 32;
if (msgPtr + 1 >= data_end) break;
@@ -556,7 +712,6 @@ struct tls_verdict analyze_tls_data(
if (msgPtr + 2 >= data_end) break;
uint16_t extensionsLen = ntohs(*(uint16_t *)msgPtr);
const uint8_t *extensionsLen_ptr = msgPtr;
msgPtr += 2;
const uint8_t *extensionsPtr = msgPtr;
@@ -573,7 +728,6 @@ struct tls_verdict analyze_tls_data(
uint16_t extensionLen =
ntohs(*(uint16_t *)extensionPtr);
const uint8_t *extensionLen_ptr = extensionPtr;
extensionPtr += 2;
@@ -588,14 +742,13 @@ struct tls_verdict analyze_tls_data(
if (sni_ext_ptr + 2 >= extensions_end) break;
uint16_t sni_ext_dlen = ntohs(*(uint16_t *)sni_ext_ptr);
const uint8_t *sni_ext_dlen_ptr = sni_ext_ptr;
sni_ext_ptr += 2;
const uint8_t *sni_ext_end = sni_ext_ptr + sni_ext_dlen;
if (sni_ext_end >= extensions_end) break;
if (sni_ext_ptr + 3 >= sni_ext_end) break;
uint8_t sni_type = *sni_ext_ptr++;
sni_ext_ptr++;
uint16_t sni_len = ntohs(*(uint16_t *)sni_ext_ptr);
sni_ext_ptr += 2;
@@ -608,7 +761,7 @@ struct tls_verdict analyze_tls_data(
if (config.all_domains) {
vrd.target_sni = 1;
goto out;
goto check_domain;
}
@@ -630,12 +783,46 @@ struct tls_verdict analyze_tls_data(
domain_startp,
domain_len)) {
vrd.target_sni = 1;
goto check_domain;
}
j = i + 1;
}
}
check_domain:
if (vrd.target_sni == 1 && config.exclude_domains_strlen != 0) {
unsigned int j = 0;
for (unsigned int i = 0; i <= config.exclude_domains_strlen; i++) {
if ( i > j &&
(i == config.exclude_domains_strlen ||
config.exclude_domains_str[i] == '\0' ||
config.exclude_domains_str[i] == ',' ||
config.exclude_domains_str[i] == '\n' )) {
unsigned int domain_len = (i - j);
const char *sni_startp = sni_name + sni_len - domain_len;
const char *domain_startp = config.exclude_domains_str + j;
if (sni_len >= domain_len &&
sni_len < 128 &&
!strncmp(sni_startp,
domain_startp,
domain_len)) {
vrd.target_sni = 0;
lgdebugmsg("Excluded SNI: %.*s",
vrd.sni_len, data + vrd.sni_offset);
goto out;
}
j = i + 1;
}
}
}
goto out;
nextExtension:
extensionsPtr += 2 + 2 + extensionLen;
}
@@ -646,6 +833,7 @@ nextMessage:
out:
return vrd;
brute:
if (config.all_domains) {
vrd.target_sni = 1;
@@ -662,12 +850,26 @@ brute:
config.domains_str[i] == ',' ||
config.domains_str[i] == '\n' )) {
uint8_t buf[MAX_PACKET_SIZE];
int zbuf[MAX_PACKET_SIZE];
unsigned int domain_len = (i - j);
const char *domain_startp = config.domains_str + j;
if (domain_len + dlen + 1> MAX_PACKET_SIZE) continue;
if (domain_len + dlen + 1> MAX_PACKET_SIZE) {
continue;
}
NETBUF_ALLOC(buf, MAX_PACKET_SIZE);
if (!NETBUF_CHECK(buf)) {
lgerror("Allocation error", -ENOMEM);
goto out;
}
NETBUF_ALLOC(nzbuf, MAX_PACKET_SIZE * sizeof(int));
if (!NETBUF_CHECK(nzbuf)) {
lgerror("Allocation error", -ENOMEM);
NETBUF_FREE(buf);
goto out;
}
int *zbuf = (void *)nzbuf;
memcpy(buf, domain_startp, domain_len);
memcpy(buf + domain_len, "#", 1);
@@ -680,54 +882,90 @@ brute:
vrd.target_sni = 1;
vrd.sni_len = domain_len;
vrd.sni_offset = (k - domain_len - 1);
NETBUF_FREE(buf);
NETBUF_FREE(nzbuf);
goto out;
}
}
j = i + 1;
NETBUF_FREE(buf);
NETBUF_FREE(nzbuf);
}
}
goto out;
}
int gen_fake_sni(const struct iphdr *iph, const struct tcphdr *tcph,
int gen_fake_sni(const void *ipxh, uint32_t iph_len,
const struct tcphdr *tcph, uint32_t tcph_len,
uint8_t *buf, uint32_t *buflen) {
if (!iph || !tcph || !buf || !buflen)
if (!ipxh || !tcph || !buf || !buflen)
return -EINVAL;
int ip_len = iph->ihl * 4;
int tcph_len = tcph->doff * 4;
int ipxv = netproto_version(ipxh, iph_len);
if (ipxv == IP4VERSION) {
const struct iphdr *iph = ipxh;
memcpy(buf, iph, iph_len);
struct iphdr *niph = (struct iphdr *)buf;
niph->protocol = IPPROTO_TCP;
} else if (ipxv == IP6VERSION) {
const struct ip6_hdr *iph = ipxh;
iph_len = sizeof(struct ip6_hdr);
memcpy(buf, iph, iph_len);
struct ip6_hdr *niph = (struct ip6_hdr *)buf;
niph->ip6_nxt = IPPROTO_TCP;
} else {
return -EINVAL;
}
const char *data = config.fake_sni_pkt;
size_t data_len = config.fake_sni_pkt_sz;
size_t dlen = ip_len + tcph_len + data_len;
uint32_t dlen = iph_len + tcph_len + data_len;
if (*buflen < dlen)
return -ENOMEM;
memcpy(buf, iph, ip_len);
memcpy(buf + ip_len, tcph, tcph_len);
memcpy(buf + ip_len + tcph_len, data, data_len);
memcpy(buf + iph_len, tcph, tcph_len);
memcpy(buf + iph_len + tcph_len, data, data_len);
struct iphdr *niph = (struct iphdr *)buf;
struct tcphdr *ntcph = (struct tcphdr *)(buf + ip_len);
niph->protocol = IPPROTO_TCP;
niph->tot_len = htons(dlen);
fail4_packet(buf, *buflen);
if (ipxv == IP4VERSION) {
struct iphdr *niph = (struct iphdr *)buf;
niph->tot_len = htons(dlen);
} else if (ipxv == IP6VERSION) {
struct ip6_hdr *niph = (struct ip6_hdr *)buf;
niph->ip6_plen = htons(dlen - iph_len);
}
fail_packet(buf, &dlen, *buflen);
*buflen = dlen;
return 0;
}
int fail4_packet(uint8_t *payload, uint32_t plen) {
struct iphdr *iph;
#define TCP_MD5SIG_LEN 16
#define TCP_MD5SIG_KIND 19
struct tcp_md5sig_opt {
uint8_t kind;
uint8_t len;
uint8_t sig[TCP_MD5SIG_LEN];
};
#define TCP_MD5SIG_OPT_LEN (sizeof(struct tcp_md5sig_opt))
// Real length of the option, with NOOP fillers
#define TCP_MD5SIG_OPT_RLEN 20
int fail_packet(uint8_t *payload, uint32_t *plen, uint32_t avail_buflen) {
void *iph;
uint32_t iph_len;
struct tcphdr *tcph;
uint32_t tcph_len;
@@ -735,32 +973,97 @@ int fail4_packet(uint8_t *payload, uint32_t plen) {
uint32_t dlen;
int ret;
ret = tcp4_payload_split(payload, plen,
ret = tcp_payload_split(payload, *plen,
&iph, &iph_len, &tcph, &tcph_len,
&data, &dlen);
uint32_t ipxv = netproto_version(payload, *plen);
if (ret < 0) {
return ret;
}
if (config.faking_strategy == FAKE_STRAT_RAND_SEQ) {
#ifdef KERNEL_SCOPE
tcph->seq = 124;
tcph->ack_seq = 124;
lgtrace("fake seq: %u -> ", ntohl(tcph->seq));
if (config.fakeseq_offset) {
tcph->seq = htonl(ntohl(tcph->seq) - config.fakeseq_offset);
} else {
#ifdef KERNEL_SPACE
tcph->seq = 124;
#else
tcph->seq = random();
tcph->ack_seq = random();
tcph->seq = random();
#endif
}
lgtrace_addp("%u", ntohl(tcph->seq));
} else if (config.faking_strategy == FAKE_STRAT_PAST_SEQ) {
lgtrace("fake seq: %u -> ", ntohl(tcph->seq));
tcph->seq = htonl(ntohl(tcph->seq) - dlen);
lgtrace_addp("%u", ntohl(tcph->seq));
} else if (config.faking_strategy == FAKE_STRAT_TTL) {
iph->ttl = config.faking_ttl;
lgtrace_addp("set fake ttl to %d", config.faking_ttl);
if (ipxv == IP4VERSION) {
((struct iphdr *)iph)->ttl = config.faking_ttl;
} else if (ipxv == IP6VERSION) {
((struct ip6_hdr *)iph)->ip6_hops = config.faking_ttl;
} else {
lgerror("fail_packet: IP version is unsupported", -EINVAL);
return -EINVAL;
}
} else if (config.faking_strategy == FAKE_STRAT_TCP_MD5SUM) {
int optp_len = tcph_len - sizeof(struct tcphdr);
int delta = TCP_MD5SIG_OPT_RLEN - optp_len;
lgtrace_addp("Incr delta %d: %d -> %d", delta, optp_len, optp_len + delta);
if (delta > 0) {
if (avail_buflen - *plen < delta) {
return -1;
}
uint8_t *ndata = data + delta;
uint8_t *ndptr = ndata + dlen;
uint8_t *dptr = data + dlen;
for (size_t i = dlen + 1; i > 0; i--) {
*ndptr = *dptr;
--ndptr, --dptr;
}
data = ndata;
tcph_len = tcph_len + delta;
tcph->doff = tcph_len >> 2;
if (ipxv == IP4VERSION) {
((struct iphdr *)iph)->tot_len = htons(ntohs(((struct iphdr *)iph)->tot_len) + delta);
} else if (ipxv == IP6VERSION) {
((struct ip6_hdr *)iph)->ip6_plen = htons(ntohs(((struct ip6_hdr *)iph)->ip6_plen) + delta);
} else {
lgerror("fail_packet: IP version is unsupported", -EINVAL);
return -EINVAL;
}
optp_len += delta;
*plen += delta;
}
uint8_t *optplace = (uint8_t *)tcph + sizeof(struct tcphdr);
struct tcp_md5sig_opt *mdopt = (void *)optplace;
mdopt->kind = TCP_MD5SIG_KIND;
mdopt->len = TCP_MD5SIG_OPT_LEN;
optplace += sizeof(struct tcp_md5sig_opt);
optp_len -= sizeof(struct tcp_md5sig_opt);
while (optp_len-- > 0) {
*optplace++ = 0x01;
}
}
ip4_set_checksum(iph);
tcp4_set_checksum(tcph, iph);
set_ip_checksum(iph, iph_len);
set_tcp_checksum(tcph, iph, iph_len);
if (config.faking_strategy == FAKE_STRAT_TCP_CHECK) {
lgtrace_addp("break fake tcp checksum");
tcph->check += 1;
}

View File

@@ -22,14 +22,15 @@ struct tls_verdict analyze_tls_data(const uint8_t *data, uint32_t dlen);
/**
* Generates fake client hello message
*/
int gen_fake_sni(const struct iphdr *iph, const struct tcphdr *tcph,
int gen_fake_sni(const void *iph, uint32_t iph_len,
const struct tcphdr *tcph, uint32_t tcph_len,
uint8_t *buf, uint32_t *buflen);
/**
* Invalidates the raw packet. The function aims to invalidate the packet
* in such a way that it will be accepted by the DPI but dropped by the target server
*/
int fail4_packet(uint8_t *payload, uint32_t plen);
int fail_packet(uint8_t *payload, uint32_t *plen, uint32_t avail_buflen);
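/*
 * A rough usage sketch (illustration only; iph, iph_len, tcph and tcph_len are
 * assumed to come from tcp_payload_split()): gen_fake_sni() copies the original
 * headers in front of the fake Client Hello and already calls fail_packet()
 * internally, so the result can be sent as-is; see post_fake_sni() in mangle.c
 * for the real call sequence.
 */
#if 0
uint8_t fake[MAX_PACKET_SIZE];
uint32_t fake_len = MAX_PACKET_SIZE;
if (gen_fake_sni(iph, iph_len, tcph, tcph_len, fake, &fake_len) == 0)
	instance_config.send_raw_packet(fake, fake_len);
#endif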
#define PKT_ACCEPT 0
#define PKT_DROP 1
@@ -45,19 +46,19 @@ int process_packet(const uint8_t *packet, uint32_t packet_len);
* Processes the TCP packet.
* Returns verdict.
*/
int process_tcp4_packet(const uint8_t *raw_payload, uint32_t raw_payload_len);
int process_tcp_packet(const uint8_t *raw_payload, uint32_t raw_payload_len);
/**
* Processes the UDP packet.
* Returns verdict.
*/
int process_udp4_packet(const uint8_t *pkt, uint32_t pktlen);
int process_udp_packet(const uint8_t *pkt, uint32_t pktlen);
/**
* Sends fake client hello.
*/
int post_fake_sni(const struct iphdr *iph, unsigned int iph_len,
int post_fake_sni(const void *iph, unsigned int iph_len,
const struct tcphdr *tcph, unsigned int tcph_len,
unsigned char sequence_len);
@@ -66,7 +67,7 @@ int post_fake_sni(const struct iphdr *iph, unsigned int iph_len,
* Poses are relative to start of TCP payload.
* dvs is used internally and should be zero.
*/
int send_tcp4_frags(
int send_tcp_frags(
const uint8_t *packet, uint32_t pktlen,
const uint32_t *poses, uint32_t poses_len, uint32_t dvs);

84
nf_wrapper.h Normal file
View File

@@ -0,0 +1,84 @@
/**
* Thanks to https://github.com/NICMx/Jool/blob/5f60dcda5944b01cc43c3be342aad26af8161bcb/include/nat64/mod/common/nf_wrapper.h for the kernel version mapping
*/
#ifndef _JOOL_MOD_NF_WRAPPER_H
#define _JOOL_MOD_NF_WRAPPER_H
/**
* @file
* The kernel API is far from static. In particular, the Netfilter packet entry
* function keeps changing. nf_hook.c, the file where we declare our packet
* entry function, has been quite difficult to read for a while now. It's pretty
* amusing, because we don't even use any of the noisy arguments.
*
* This file declares a usable function header that abstracts away all those
* useless arguments.
*/
#include <linux/version.h>
/* If this is a Red Hat-based kernel (Red Hat, CentOS, Fedora, etc)... */
#ifdef RHEL_RELEASE_CODE
#if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)
#define NF_CALLBACK(name, skb) unsigned int name( \
const struct nf_hook_ops *ops, \
struct sk_buff *skb, \
const struct net_device *in, \
const struct net_device *out, \
const struct nf_hook_state *state)
#elif RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0)
#define NF_CALLBACK(name, skb) unsigned int name( \
const struct nf_hook_ops *ops, \
struct sk_buff *skb, \
const struct net_device *in, \
const struct net_device *out, \
int (*okfn)(struct sk_buff *))
#else
#error "Sorry; this version of RHEL is not supported because it's kind of old."
#endif /* RHEL_RELEASE_CODE >= x */
/* If this NOT a RedHat-based kernel (Ubuntu, Debian, SuSE, etc)... */
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
#define NF_CALLBACK(name, skb) unsigned int name( \
void *priv, \
struct sk_buff *skb, \
const struct nf_hook_state *state)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
#define NF_CALLBACK(name, skb) unsigned int name( \
const struct nf_hook_ops *ops, \
struct sk_buff *skb, \
const struct nf_hook_state *state)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
#define NF_CALLBACK(name, skb) unsigned int name( \
const struct nf_hook_ops *ops, \
struct sk_buff *skb, \
const struct net_device *in, \
const struct net_device *out, \
int (*okfn)(struct sk_buff *))
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
#define NF_CALLBACK(name, skb) unsigned int name( \
unsigned int hooknum, \
struct sk_buff *skb, \
const struct net_device *in, \
const struct net_device *out, \
int (*okfn)(struct sk_buff *))
#else
#error "Linux < 3.0 isn't supported at all."
#endif /* LINUX_VERSION_CODE > n */
#endif /* RHEL or not RHEL */
#endif /* _JOOL_MOD_NF_WRAPPER_H */
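/*
 * A minimal usage sketch (illustration only): NF_CALLBACK expands to the hook
 * signature expected by the running kernel, so the hook body itself stays
 * version-independent. Registration via nf_register_net_hook() is the >= 4.13
 * path used in kytunblock.c; older kernels use nf_register_hook().
 */
#if 0
static NF_CALLBACK(example_hook, skb)
{
	return NF_ACCEPT; /* never steals packets in this sketch */
}
static struct nf_hook_ops example_ops = {
	.hook     = example_hook,
	.pf       = NFPROTO_IPV4,
	.hooknum  = NF_INET_POST_ROUTING,
	.priority = NF_IP_PRI_MANGLE,
};
#endif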

2
quic.c
View File

@@ -48,7 +48,7 @@ int quic_parse_data(uint8_t *raw_payload, uint32_t raw_payload_len,
}
uint8_t found = 0;
for (uint8_t i = 0; i < sizeof(supported_versions); i++) {
for (uint8_t i = 0; i < 2; i++) {
if (ntohl(nqch->version) == supported_versions[i]) {
found = 1;
}

64
types.h
View File

@@ -3,26 +3,18 @@
#define TYPES_H
#include <asm/byteorder.h>
#ifdef KERNEL_SCOPE
#ifdef KERNEL_SPACE
#include <linux/errno.h> // IWYU pragma: export
#include <linux/string.h> // IWYU pragma: export
#include <linux/types.h>
typedef __u8 uint8_t;
typedef __u16 uint16_t;
typedef __u32 uint32_t;
typedef __u64 uint64_t;
typedef __i8 int8_t;
typedef __i16 int16_t;
typedef __i32 int32_t;
typedef __i64 int64_t;
#else /* USERSPACE_SCOPE */
#else /* USER_SPACE */
#include <errno.h> // IWYU pragma: export
#include <stdint.h> // IWYU pragma: export
#include <string.h> // IWYU pragma: export
#endif /* SCOPES */
#endif /* SPACES */
// Network specific structures
#ifdef KERNEL_SPACE
@@ -30,21 +22,45 @@ typedef __i64 int64_t;
#include <linux/net.h> // IWYU pragma: export
#include <linux/in.h> // IWYU pragma: export
#include <linux/ip.h> // IWYU pragma: export
#include <linux/ipv6.h> // IWYU pragma: export
#include <linux/tcp.h> // IWYU pragma: export
#include <linux/version.h>
#define ip6_hdr ipv6hdr
/* from <netinet/ip.h> */
#define IP_RF 0x8000 /* reserved fragment flag */
#define IP_DF 0x4000 /* dont fragment flag */
#define IP_MF 0x2000 /* more fragments flag */
#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */
#ifdef __LITTLE_ENDIAN
#define __BIG_ENDIAN 4321
#define __BYTE_ORDER __LITTLE_ENDIAN
#elif defined(__BIG_ENDIAN)
#define __LITTLE_ENDIAN 1234
#define __BYTE_ORDER __BIG_ENDIAN
#else
#define USER_SPACE
#error "Unsupported endian"
#endif
#define ip6_plen payload_len
#define ip6_nxt nexthdr
#define ip6_hops hop_limit
#define ip6_hlim hop_limit
#define ip6_src saddr
#define ip6_dst daddr
#else /* USER_SPACE */
#include <arpa/inet.h> // IWYU pragma: export
#include <netinet/ip.h> // IWYU pragma: export
#include <netinet/ip6.h> // IWYU pragma: export
#include <netinet/tcp.h> // IWYU pragma: export
#include <netinet/udp.h> // IWYU pragma: export
#endif
#ifndef KERNEL_SPACE
#define max(a,b)__extension__\
({ \
__typeof__ (a) _a = (a); \
@@ -59,4 +75,28 @@ typedef __i64 int64_t;
_a < _b ? _a : _b; \
})
#endif /* not a KERNEL_SPACE */
/**
* Use NETBUF_ALLOC and NETBUF_FREE as an abstraction over memory allocation.
* Do not use them within expressions; treat these defines as separate statements.
*
* Use NETBUF_CHECK to check that buffer was properly allocated.
*/
#ifdef KERNEL_SPACE
#include <linux/gfp.h>
#define NETBUF_ALLOC(buf, buf_len) __u8* buf = kmalloc(buf_len, GFP_KERNEL);
#define NETBUF_CHECK(buf) ((buf) != NULL)
#define NETBUF_FREE(buf) kfree(buf);
#elif defined(ALLOC_MALLOC)
#include <stdlib.h>
#define NETBUF_ALLOC(buf, buf_len) __u8* buf = malloc(buf_len);
#define NETBUF_CHECK(buf) ((buf) != NULL)
#define NETBUF_FREE(buf) free(buf);
#else
#define NETBUF_ALLOC(buf, buf_len) __u8 buf[buf_len];
#define NETBUF_CHECK(buf) (1)
#define NETBUF_FREE(buf) ;
#endif
#endif /* TYPES_H */
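/*
 * A minimal usage sketch of the NETBUF_* helpers (illustration only;
 * MAX_PACKET_SIZE is defined elsewhere in the project): allocation and free are
 * separate statements, and NETBUF_CHECK() must be tested before use because the
 * kmalloc/malloc variants can return NULL.
 */
#if 0
static int copy_packet(const uint8_t *pkt, uint32_t len)
{
	NETBUF_ALLOC(buf, MAX_PACKET_SIZE);
	if (!NETBUF_CHECK(buf))
		return -ENOMEM;
	if (len > MAX_PACKET_SIZE) {
		NETBUF_FREE(buf);
		return -EINVAL;
	}
	memcpy(buf, pkt, len);
	NETBUF_FREE(buf);
	return 0;
}
#endif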

View File

@@ -1,3 +1,6 @@
#Check for using system libs
USE_SYS_LIBS := no
#Userspace app makes here
BUILD_DIR := $(CURDIR)/build
DEPSDIR := $(BUILD_DIR)/deps
@@ -5,8 +8,14 @@ DEPSDIR := $(BUILD_DIR)/deps
CC:=gcc
CCLD:=$(CC)
LD:=ld
override CFLAGS += -Wall -Wpedantic -Wno-unused-variable -I$(DEPSDIR)/include -std=gnu11
override LDFLAGS += -L$(DEPSDIR)/lib
ifeq ($(USE_SYS_LIBS), no)
override CFLAGS += -Wall -Wpedantic -Wno-unused-variable -I$(DEPSDIR)/include -std=gnu11
override LDFLAGS += -L$(DEPSDIR)/lib
REQ = $(LIBNETFILTER_QUEUE) $(LIBMNL) $(LIBCRYPTO)
else
override CFLAGS += -Wall -Wpedantic -Wno-unused-variable -std=gnu11
endif
LIBNFNETLINK_CFLAGS := -I$(DEPSDIR)/include
LIBNFNETLINK_LIBS := -L$(DEPSDIR)/lib
@@ -67,11 +76,11 @@ $(LIBNETFILTER_QUEUE): $(LIBNFNETLINK) $(LIBMNL)
$(MAKE) -C deps/libnetfilter_queue
$(MAKE) install -C deps/libnetfilter_queue
$(APP): $(OBJS) $(LIBNETFILTER_QUEUE) $(LIBMNL) $(LIBCRYPTO)
$(APP): $(OBJS) $(REQ)
@echo 'CCLD $(APP)'
$(CCLD) $(OBJS) -o $(APP) $(LDFLAGS) -lmnl -lnetfilter_queue -lpthread
$(BUILD_DIR)/%.o: %.c $(LIBNETFILTER_QUEUE) $(LIBMNL) $(LIBCRYPTO) config.h
$(BUILD_DIR)/%.o: %.c $(REQ) config.h
@echo 'CC $@'
$(CC) -c $(CFLAGS) $(LDFLAGS) $< -o $@
@@ -93,8 +102,9 @@ clean:
distclean: clean
rm -rf $(BUILD_DIR)
ifeq ($(USE_SYS_LIBS), no)
$(MAKE) distclean -C deps/libnetfilter_queue || true
$(MAKE) distclean -C deps/libmnl || true
$(MAKE) distclean -C deps/libnfnetlink || true
#$(MAKE) distclean -C deps/openssl || true
endif

271
utils.c
View File

@@ -1,12 +1,19 @@
#include "utils.h"
#include "logging.h"
#include "types.h"
#ifdef KERNEL_SPACE
#include <linux/ip.h>
#else
#ifndef KERNEL_SPACE
#include <stdlib.h>
#include <libnetfilter_queue/libnetfilter_queue_ipv4.h>
#include <libnetfilter_queue/libnetfilter_queue_ipv6.h>
#include <libnetfilter_queue/libnetfilter_queue_tcp.h>
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
#include <net/ip6_checksum.h>
#include <net/checksum.h>
#else
#include <net/checksum.h>
#endif
#endif
@@ -34,6 +41,41 @@ void ip4_set_checksum(struct iphdr *iph)
#endif
}
void tcp6_set_checksum(struct tcphdr *tcph, struct ip6_hdr *iph) {
#ifdef KERNEL_SPACE
tcph->check = 0;
tcph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr,
ntohs(iph->ip6_plen), IPPROTO_TCP,
csum_partial(tcph, ntohs(iph->ip6_plen), 0));
#else
nfq_tcp_compute_checksum_ipv6(tcph, iph);
#endif
}
int set_ip_checksum(void *iph, uint32_t iphb_len) {
int ipvx = netproto_version(iph, iphb_len);
if (ipvx == IP4VERSION) {
ip4_set_checksum(iph);
} else if (ipvx == IP6VERSION) { // IP6 has no checksums
} else
return -1;
return 0;
}
int set_tcp_checksum(struct tcphdr *tcph, void *iph, uint32_t iphb_len) {
int ipvx = netproto_version(iph, iphb_len);
if (ipvx == IP4VERSION) {
tcp4_set_checksum(tcph, iph);
} else if (ipvx == IP6VERSION) {
tcp6_set_checksum(tcph, iph);
} else
return -1;
return 0;
}
int ip4_payload_split(uint8_t *pkt, uint32_t buflen,
struct iphdr **iph, uint32_t *iph_len,
@@ -44,7 +86,7 @@ int ip4_payload_split(uint8_t *pkt, uint32_t buflen,
}
struct iphdr *hdr = (struct iphdr *)pkt;
if (hdr->version != IPVERSION) {
if (netproto_version(pkt, buflen) != IP4VERSION) {
lgerror("ip4_payload_split: ipversion", -EINVAL);
return -EINVAL;
}
@@ -110,6 +152,97 @@ int tcp4_payload_split(uint8_t *pkt, uint32_t buflen,
return 0;
}
int ip6_payload_split(uint8_t *pkt, uint32_t buflen,
struct ip6_hdr **iph, uint32_t *iph_len,
uint8_t **payload, uint32_t *plen) {
if (pkt == NULL || buflen < sizeof(struct ip6_hdr)) {
lgerror("ip6_payload_split: pkt|buflen", -EINVAL);
return -EINVAL;
}
struct ip6_hdr *hdr = (struct ip6_hdr *)pkt;
if (netproto_version(pkt, buflen) != 6) {
lgerror("ip6_payload_split: ip6version", -EINVAL);
return -EINVAL;
}
uint32_t hdr_len = sizeof(struct ip6_hdr);
uint32_t pktlen = ntohs(hdr->ip6_plen);
if (buflen < pktlen) {
lgerror("ip6_payload_split: buflen cmp pktlen: %d %d", -EINVAL, buflen, pktlen);
return -EINVAL;
}
if (iph)
*iph = hdr;
if (iph_len)
*iph_len = hdr_len;
if (payload)
*payload = pkt + hdr_len;
if (plen)
*plen = pktlen;
return 0;
}
int tcp6_payload_split(uint8_t *pkt, uint32_t buflen,
struct ip6_hdr **iph, uint32_t *iph_len,
struct tcphdr **tcph, uint32_t *tcph_len,
uint8_t **payload, uint32_t *plen) {
struct ip6_hdr *hdr;
uint32_t hdr_len;
struct tcphdr *thdr;
uint32_t thdr_len;
uint8_t *tcph_pl;
uint32_t tcph_plen;
if (ip6_payload_split(pkt, buflen, &hdr, &hdr_len,
&tcph_pl, &tcph_plen)){
return -EINVAL;
}
if (
hdr->ip6_nxt != IPPROTO_TCP ||
tcph_plen < sizeof(struct tcphdr)) {
return -EINVAL;
}
thdr = (struct tcphdr *)(tcph_pl);
thdr_len = thdr->doff * 4;
if (thdr_len > tcph_plen) {
return -EINVAL;
}
if (iph) *iph = hdr;
if (iph_len) *iph_len = hdr_len;
if (tcph) *tcph = thdr;
if (tcph_len) *tcph_len = thdr_len;
if (payload) *payload = tcph_pl + thdr_len;
if (plen) *plen = tcph_plen - thdr_len;
return 0;
}
int tcp_payload_split(uint8_t *pkt, uint32_t buflen,
void **iph, uint32_t *iph_len,
struct tcphdr **tcph, uint32_t *tcph_len,
uint8_t **payload, uint32_t *plen) {
int netvers = netproto_version(pkt, buflen);
if (netvers == IP4VERSION) {
return tcp4_payload_split(pkt, buflen, (struct iphdr **)iph, iph_len, tcph, tcph_len, payload, plen);
} else if (netvers == IP6VERSION) {
return tcp6_payload_split(pkt, buflen, (struct ip6_hdr **)iph, iph_len, tcph, tcph_len, payload, plen);
} else {
lgerror("Internet Protocol version is unsupported", -EINVAL);
return -EINVAL;
}
}
int udp4_payload_split(uint8_t *pkt, uint32_t buflen,
struct iphdr **iph, uint32_t *iph_len,
struct udphdr **udph,
@@ -117,7 +250,6 @@ int udp4_payload_split(uint8_t *pkt, uint32_t buflen,
struct iphdr *hdr;
uint32_t hdr_len;
struct udphdr *uhdr;
uint32_t uhdr_len;
uint8_t *ip_ph;
uint32_t ip_phlen;
@@ -149,6 +281,59 @@ int udp4_payload_split(uint8_t *pkt, uint32_t buflen,
return 0;
}
int udp6_payload_split(uint8_t *pkt, uint32_t buflen,
struct ip6_hdr **iph, uint32_t *iph_len,
struct udphdr **udph,
uint8_t **payload, uint32_t *plen) {
struct ip6_hdr *hdr;
uint32_t hdr_len;
struct udphdr *uhdr;
uint8_t *ip_ph;
uint32_t ip_phlen;
if (ip6_payload_split(pkt, buflen, &hdr, &hdr_len,
&ip_ph, &ip_phlen)){
return -EINVAL;
}
if (
hdr->ip6_nxt != IPPROTO_UDP ||
ip_phlen < sizeof(struct udphdr)) {
return -EINVAL;
}
uhdr = (struct udphdr *)(ip_ph);
if (uhdr->len != 0 && ntohs(uhdr->len) != ip_phlen) {
return -EINVAL;
}
if (iph) *iph = hdr;
if (iph_len) *iph_len = hdr_len;
if (udph) *udph = uhdr;
if (payload) *payload = ip_ph + sizeof(struct udphdr);
if (plen) *plen = ip_phlen - sizeof(struct udphdr);
return 0;
}
int udp_payload_split(uint8_t *pkt, uint32_t buflen,
void **iph, uint32_t *iph_len,
struct udphdr **udph,
uint8_t **payload, uint32_t *plen) {
int netvers = netproto_version(pkt, buflen);
if (netvers == IP4VERSION) {
return udp4_payload_split(pkt, buflen, (struct iphdr **)iph, iph_len, udph, payload, plen);
} else if (netvers == IP6VERSION) {
return udp6_payload_split(pkt, buflen, (struct ip6_hdr **)iph, iph_len, udph, payload, plen);
} else {
lgerror("Internet Protocol version is unsupported", -EINVAL);
return -EINVAL;
}
}
// split packet to two ipv4 fragments.
int ip4_frag(const uint8_t *pkt, uint32_t buflen, uint32_t payload_offset,
uint8_t *frag1, uint32_t *f1len,
@@ -222,9 +407,6 @@ int ip4_frag(const uint8_t *pkt, uint32_t buflen, uint32_t payload_offset,
f2_hdr->frag_off = htons(f2_frag_off);
f2_hdr->tot_len = htons(f2_dlen);
lgdebugmsg("Packet split in portion %u %u", f1_plen, f2_plen);
ip4_set_checksum(f1_hdr);
ip4_set_checksum(f2_hdr);
@@ -232,11 +414,12 @@ int ip4_frag(const uint8_t *pkt, uint32_t buflen, uint32_t payload_offset,
}
// split packet to two tcp-on-ipv4 segments.
int tcp4_frag(const uint8_t *pkt, uint32_t buflen, uint32_t payload_offset,
int tcp_frag(const uint8_t *pkt, uint32_t buflen, uint32_t payload_offset,
uint8_t *seg1, uint32_t *s1len,
uint8_t *seg2, uint32_t *s2len) {
struct iphdr *hdr;
// struct ip6_hdr *hdr6;
void *hdr;
uint32_t hdr_len;
struct tcphdr *tcph;
uint32_t tcph_len;
@@ -247,23 +430,28 @@ int tcp4_frag(const uint8_t *pkt, uint32_t buflen, uint32_t payload_offset,
if (!seg1 || !s1len || !seg2 || !s2len)
return -EINVAL;
if ((ret = tcp4_payload_split((uint8_t *)pkt, buflen,
if ((ret = tcp_payload_split((uint8_t *)pkt, buflen,
&hdr, &hdr_len,
&tcph, &tcph_len,
(uint8_t **)&payload, &plen)) < 0) {
lgerror("tcp4_frag: tcp4_payload_split", ret);
lgerror("tcp_frag: tcp_payload_split", ret);
return -EINVAL;
}
int ipvx = netproto_version(pkt, buflen);
if (
ntohs(hdr->frag_off) & IP_MF ||
ntohs(hdr->frag_off) & IP_OFFMASK) {
lgdebugmsg("tcp4_frag: frag value: %d",
ntohs(hdr->frag_off));
lgerror("tcp4_frag: ip fragmentation is set", -EINVAL);
return -EINVAL;
if (ipvx == IP4VERSION) {
struct iphdr *iphdr = hdr;
if (
ntohs(iphdr->frag_off) & IP_MF ||
ntohs(iphdr->frag_off) & IP_OFFMASK) {
lgdebugmsg("tcp_frag: ip4: frag value: %d",
ntohs(iphdr->frag_off));
lgerror("tcp_frag: ip4: ip fragmentation is set", -EINVAL);
return -EINVAL;
}
}
@@ -292,21 +480,46 @@ int tcp4_frag(const uint8_t *pkt, uint32_t buflen, uint32_t payload_offset,
memcpy(seg1 + hdr_len + tcph_len, payload, s1_plen);
memcpy(seg2 + hdr_len + tcph_len, payload + payload_offset, s2_plen);
struct iphdr *s1_hdr = (void *)seg1;
struct iphdr *s2_hdr = (void *)seg2;
if (ipvx == IP4VERSION) {
struct iphdr *s1_hdr = (void *)seg1;
struct iphdr *s2_hdr = (void *)seg2;
s1_hdr->tot_len = htons(s1_dlen);
s2_hdr->tot_len = htons(s2_dlen);
} else {
struct ip6_hdr *s1_hdr = (void *)seg1;
struct ip6_hdr *s2_hdr = (void *)seg2;
s1_hdr->ip6_plen = htons(s1_dlen - hdr_len);
s2_hdr->ip6_plen = htons(s2_dlen - hdr_len);
}
struct tcphdr *s1_tcph = (void *)(seg1 + hdr_len);
struct tcphdr *s2_tcph = (void *)(seg2 + hdr_len);
s1_hdr->tot_len = htons(s1_dlen);
s2_hdr->tot_len = htons(s2_dlen);
s2_tcph->seq = htonl(ntohl(s2_tcph->seq) + payload_offset);
lgdebugmsg("Packet split in portion %u %u", s1_plen, s2_plen);
tcp4_set_checksum(s1_tcph, s1_hdr);
tcp4_set_checksum(s2_tcph, s2_hdr);
set_tcp_checksum(s1_tcph, seg1, hdr_len);
set_tcp_checksum(s2_tcph, seg2, hdr_len);
return 0;
}
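The renamed tcp_frag keeps the old calling convention: the caller provides two output buffers and receives two complete packets (network header, TCP header, and a slice of the payload each), with the second segment's sequence number and the TCP checksums already recomputed. A hedged sketch of a caller that splits a packet in the middle of its TCP payload (the wrapper is hypothetical, not code from this repository):

#include <errno.h>
#include <stdint.h>
#include <netinet/tcp.h>
#include "utils.h"

/* Hypothetical wrapper: split pkt in the middle of its TCP payload. */
static int split_in_half(const uint8_t *pkt, uint32_t pktlen,
		uint8_t *seg1, uint32_t *s1len,
		uint8_t *seg2, uint32_t *s2len)
{
	void *iph;
	uint32_t iph_len, tcph_len, plen;
	struct tcphdr *tcph;
	uint8_t *payload;

	/* Peek at the payload first so we know where the middle is. */
	int ret = tcp_payload_split((uint8_t *)pkt, pktlen, &iph, &iph_len,
			&tcph, &tcph_len, &payload, &plen);
	if (ret < 0 || plen < 2)
		return -EINVAL;

	return tcp_frag(pkt, pktlen, plen / 2, seg1, s1len, seg2, s2len);
}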
void z_function(const char *str, int *zbuf, size_t len) {
zbuf[0] = len;
int lh = 0, rh = 1;
for (int i = 1; i < (int)len; i++) {
zbuf[i] = 0;
if (i < rh) {
zbuf[i] = min(zbuf[i - lh], rh - i);
}
while (i + zbuf[i] < len && str[zbuf[i]] == str[i + zbuf[i]])
zbuf[i]++;
if (i + zbuf[i] > rh) {
lh = i;
rh = i + zbuf[i];
}
}
}
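z_function is the classic linear-time Z-algorithm: after the call, zbuf[i] holds the length of the longest common prefix of str and the suffix of str starting at i, which makes it a handy primitive for substring search (for example, locating a domain inside a TLS ClientHello). A self-contained demo that uses it that way; the needle#haystack construction is the textbook trick, not code from this repository:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

/* Same algorithm as in utils.c, duplicated so the demo stands alone. */
static void z_function(const char *str, int *zbuf, size_t len)
{
	zbuf[0] = len;
	int lh = 0, rh = 1;
	for (int i = 1; i < (int)len; i++) {
		zbuf[i] = 0;
		if (i < rh)
			zbuf[i] = min(zbuf[i - lh], rh - i);
		while (i + zbuf[i] < (int)len && str[zbuf[i]] == str[i + zbuf[i]])
			zbuf[i]++;
		if (i + zbuf[i] > rh) {
			lh = i;
			rh = i + zbuf[i];
		}
	}
}

int main(void)
{
	const char *needle = "googlevideo";
	const char *haystack = "rr3---sn-4g5edne6.googlevideo.com";
	size_t nlen = strlen(needle), hlen = strlen(haystack);

	/* Textbook trick: search by computing Z over "needle#haystack". */
	size_t len = nlen + 1 + hlen;
	char *buf = malloc(len + 1);
	int *zbuf = malloc(len * sizeof(int));
	if (!buf || !zbuf)
		return 1;
	sprintf(buf, "%s#%s", needle, haystack);

	z_function(buf, zbuf, len);
	for (size_t i = nlen + 1; i < len; i++)
		if (zbuf[i] == (int)nlen)
			printf("\"%s\" found at offset %zu\n", needle, i - nlen - 1);

	free(buf);
	free(zbuf);
	return 0;
}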

utils.h

@@ -3,6 +3,9 @@
#include "types.h"
#define IP4VERSION 4
#define IP6VERSION 6
/**
* Splits the packet into two IP fragments at position payload_offset.
* payload_offset is relative to the start of the IP payload.
@@ -17,11 +20,16 @@ int ip4_frag(const uint8_t *pkt, uint32_t pktlen,
* Splits the packet into two TCP segments at position payload_offset.
* payload_offset is relative to the start of the TCP payload.
*/
int tcp4_frag(const uint8_t *pkt, uint32_t pktlen,
uint32_t payload_offset,
// int tcp4_frag(const uint8_t *pkt, uint32_t pktlen,
// uint32_t payload_offset,
// uint8_t *seg1, uint32_t *s1len,
// uint8_t *seg2, uint32_t *s2len);
int tcp_frag(const uint8_t *pkt, uint32_t pktlen,
uint32_t payload_offset,
uint8_t *seg1, uint32_t *s1len,
uint8_t *seg2, uint32_t *s2len);
/**
* Splits the raw packet into the IP header and the IP payload.
*/
@@ -29,6 +37,14 @@ int ip4_payload_split(uint8_t *pkt, uint32_t buflen,
struct iphdr **iph, uint32_t *iph_len,
uint8_t **payload, uint32_t *plen);
static inline int netproto_version(const uint8_t *pkt, uint32_t buflen) {
if (pkt == NULL || buflen == 0)
return -1;
return (*pkt) >> 4;
}
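netproto_version only reads the 4-bit version field that both IPv4 and IPv6 carry in the first byte of the packet, so it is safe to call before any header parsing. A tiny illustration (the buffers are made up; it assumes this utils.h is on the include path):

#include <assert.h>
#include <stdint.h>
#include "utils.h"

/* First byte of an IPv4 header: version 4, IHL 5 -> 0x45. */
static const uint8_t v4_pkt[20] = { 0x45 };
/* First byte of an IPv6 header: version 6, traffic class 0 -> 0x60. */
static const uint8_t v6_pkt[40] = { 0x60 };

int main(void)
{
	assert(netproto_version(v4_pkt, sizeof(v4_pkt)) == IP4VERSION);
	assert(netproto_version(v6_pkt, sizeof(v6_pkt)) == IP6VERSION);
	assert(netproto_version(NULL, 0) == -1);
	return 0;
}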
/**
* Splits the raw packet into the IP header, TCP header and TCP payload.
*/
@@ -37,6 +53,26 @@ int tcp4_payload_split(uint8_t *pkt, uint32_t buflen,
struct tcphdr **tcph, uint32_t *tcph_len,
uint8_t **payload, uint32_t *plen);
/**
* Splits the raw packet into the IP header and the IP payload.
*/
int ip6_payload_split(uint8_t *pkt, uint32_t buflen,
struct ip6_hdr **iph, uint32_t *iph_len,
uint8_t **payload, uint32_t *plen);
/**
* Splits the raw packet into the IP header, TCP header and TCP payload.
*/
int tcp6_payload_split(uint8_t *pkt, uint32_t buflen,
struct ip6_hdr **iph, uint32_t *iph_len,
struct tcphdr **tcph, uint32_t *tcph_len,
uint8_t **payload, uint32_t *plen);
int tcp_payload_split(uint8_t *pkt, uint32_t buflen,
void **iph, uint32_t *iph_len,
struct tcphdr **tcph, uint32_t *tcph_len,
uint8_t **payload, uint32_t *plen);
/**
* Splits the raw packet into the IP header, UDP header and UDP payload.
*/
@@ -45,7 +81,24 @@ int udp4_payload_split(uint8_t *pkt, uint32_t buflen,
struct udphdr **udph,
uint8_t **payload, uint32_t *plen);
int udp6_payload_split(uint8_t *pkt, uint32_t buflen,
struct ip6_hdr **iph, uint32_t *iph_len,
struct udphdr **udph,
uint8_t **payload, uint32_t *plen);
int udp_payload_split(uint8_t *pkt, uint32_t buflen,
void **iph, uint32_t *iph_len,
struct udphdr **udph,
uint8_t **payload, uint32_t *plen);
void tcp4_set_checksum(struct tcphdr *tcph, struct iphdr *iph);
void ip4_set_checksum(struct iphdr *iph);
void ip6_set_checksum(struct ip6_hdr *iph);
void tcp6_set_checksum(struct tcphdr *tcph, struct ip6_hdr *iph);
int set_ip_checksum(void *iph, uint32_t iphb_len);
int set_tcp_checksum(struct tcphdr *tcph, void *iph, uint32_t iphb_len);
void z_function(const char *str, int *zbuf, size_t len);
#endif /* UTILS_H */
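The per-family checksum setters are superseded by set_ip_checksum and set_tcp_checksum, which accept the network header as a void pointer plus its length. Their bodies are not part of this hunk; the sketch below shows one plausible shape, dispatching on the version nibble, purely for illustration and not necessarily what utils.c actually does:

#include <errno.h>
#include <stdint.h>
#include <netinet/tcp.h>
#include "utils.h"

/* Illustrative only: route a generic checksum request to the per-family
 * helpers declared above. The real set_tcp_checksum may differ. */
static int set_tcp_checksum_sketch(struct tcphdr *tcph, void *iph, uint32_t iphb_len)
{
	switch (netproto_version(iph, iphb_len)) {
	case IP4VERSION:
		tcp4_set_checksum(tcph, iph);
		return 0;
	case IP6VERSION:
		tcp6_set_checksum(tcph, iph);
		return 0;
	default:
		return -EINVAL;
	}
}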


@@ -37,6 +37,9 @@
pthread_mutex_t rawsocket_lock;
int rawsocket = -2;
pthread_mutex_t raw6socket_lock;
int raw6socket = -2;
static int open_socket(struct mnl_socket **_nl) {
struct mnl_socket *nl = NULL;
nl = mnl_socket_open(NETLINK_NETFILTER);
@@ -84,7 +87,7 @@ static int open_raw_socket(void) {
return -1;
}
int mark = RAWSOCKET_MARK;
int mark = config.mark;
if (setsockopt(rawsocket, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)) < 0)
{
fprintf(stderr, "setsockopt(SO_MARK, %d) failed\n", mark);
@@ -123,6 +126,131 @@ static int close_raw_socket(void) {
return 0;
}
static int open_raw6_socket(void) {
if (raw6socket != -2) {
errno = EALREADY;
perror("Raw socket is already opened");
return -1;
}
raw6socket = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);
if (raw6socket == -1) {
perror("Unable to create raw IPv6 socket");
return -1;
}
int mark = config.mark;
if (setsockopt(raw6socket, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)) < 0)
{
fprintf(stderr, "setsockopt(SO_MARK, %d) failed\n", mark);
return -1;
}
int mst = pthread_mutex_init(&raw6socket_lock, NULL);
if (mst) {
fprintf(stderr, "Mutex err: %d\n", mst);
close(raw6socket);
errno = mst;
return -1;
}
return raw6socket;
}
static int close_raw6_socket(void) {
if (raw6socket < 0) {
errno = EALREADY;
perror("Raw socket is not set");
return -1;
}
if (close(raw6socket)) {
perror("Unable to close raw socket");
pthread_mutex_destroy(&rawsocket_lock);
return -1;
}
pthread_mutex_destroy(&raw6socket_lock);
raw6socket = -2;
return 0;
}
static int send_raw_ipv4(const uint8_t *pkt, uint32_t pktlen) {
int ret;
if (pktlen > AVAILABLE_MTU) return -ENOMEM;
struct iphdr *iph;
if ((ret = ip4_payload_split(
(uint8_t *)pkt, pktlen, &iph, NULL, NULL, NULL)) < 0) {
errno = -ret;
return ret;
}
struct sockaddr_in daddr = {
.sin_family = AF_INET,
/* Always 0 for raw socket */
.sin_port = 0,
.sin_addr = {
.s_addr = iph->daddr
}
};
if (config.threads != 1)
pthread_mutex_lock(&rawsocket_lock);
int sent = sendto(rawsocket,
pkt, pktlen, 0,
(struct sockaddr *)&daddr, sizeof(daddr));
if (config.threads != 1)
pthread_mutex_unlock(&rawsocket_lock);
/* On error, return -errno; sendto also leaves errno set */
if (sent < 0) sent = -errno;
return sent;
}
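send_raw_ipv4 leans on a property of IPPROTO_RAW sockets: the kernel treats them as if IP_HDRINCL were enabled, so the buffer handed to sendto must already contain a complete IPv4 header, and the sockaddr_in is only used to pick a route, with the port left at 0. A stripped-down standalone sender using the same pattern; root privileges, a pre-built packet, and the mark value are all assumptions of the sketch:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/ip.h>

/* Send a pre-built IPv4 packet (header included) through a raw socket. */
static int send_prebuilt_ipv4(const uint8_t *pkt, size_t pktlen)
{
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
	if (fd < 0) {
		perror("socket");
		return -1;
	}

	int mark = 32768; /* illustrative; youtubeUnblock takes it from config.mark */
	if (setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)) < 0)
		perror("setsockopt(SO_MARK)");

	const struct iphdr *iph = (const struct iphdr *)pkt;
	struct sockaddr_in daddr = {
		.sin_family = AF_INET,
		.sin_port = 0, /* left at 0 for raw sockets */
		.sin_addr = { .s_addr = iph->daddr },
	};

	ssize_t sent = sendto(fd, pkt, pktlen, 0,
			(struct sockaddr *)&daddr, sizeof(daddr));
	if (sent < 0)
		perror("sendto");

	close(fd);
	return (int)sent;
}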
static int send_raw_ipv6(const uint8_t *pkt, uint32_t pktlen) {
int ret;
if (pktlen > AVAILABLE_MTU) return -ENOMEM;
struct ip6_hdr *iph;
if ((ret = ip6_payload_split(
(uint8_t *)pkt, pktlen, &iph, NULL, NULL, NULL)) < 0) {
errno = -ret;
return ret;
}
struct sockaddr_in6 daddr = {
.sin6_family = AF_INET6,
/* Always 0 for raw socket */
.sin6_port = 0,
.sin6_addr = iph->ip6_dst
};
if (config.threads != 1)
pthread_mutex_lock(&raw6socket_lock);
int sent = sendto(raw6socket,
pkt, pktlen, 0,
(struct sockaddr *)&daddr, sizeof(daddr));
lgtrace_addp("rawsocket sent %d", sent);
if (config.threads != 1)
pthread_mutex_unlock(&raw6socket_lock);
/* On error, return -errno; sendto also leaves errno set */
if (sent < 0) sent = -errno;
return sent;
}
static int send_raw_socket(const uint8_t *pkt, uint32_t pktlen) {
int ret;
@@ -138,7 +266,7 @@ static int send_raw_socket(const uint8_t *pkt, uint32_t pktlen) {
switch (config.fragmentation_strategy) {
case FRAG_STRAT_TCP:
if ((ret = tcp4_frag(pkt, pktlen, AVAILABLE_MTU-128,
if ((ret = tcp_frag(pkt, pktlen, AVAILABLE_MTU-128,
buff1, &buff1_size, buff2, &buff2_size)) < 0) {
errno = -ret;
@@ -175,39 +303,16 @@ static int send_raw_socket(const uint8_t *pkt, uint32_t pktlen) {
return sent;
}
int ipvx = netproto_version(pkt, pktlen);
if (ipvx == IP4VERSION)
return send_raw_ipv4(pkt, pktlen);
else if (ipvx == IP6VERSION)
return send_raw_ipv6(pkt, pktlen);
struct iphdr *iph;
if ((ret = ip4_payload_split(
(uint8_t *)pkt, pktlen, &iph, NULL, NULL, NULL)) < 0) {
errno = -ret;
return ret;
}
struct sockaddr_in daddr = {
.sin_family = AF_INET,
/* Always 0 for raw socket */
.sin_port = 0,
.sin_addr = {
.s_addr = iph->daddr
}
};
if (config.threads != 1)
pthread_mutex_lock(&rawsocket_lock);
int sent = sendto(rawsocket,
pkt, pktlen, 0,
(struct sockaddr *)&daddr, sizeof(daddr));
if (config.threads != 1)
pthread_mutex_unlock(&rawsocket_lock);
/* The function will return -errno on error as well as errno value set itself */
if (sent < 0) sent = -errno;
return sent;
printf("proto version %d is unsupported\n", ipvx);
return -EINVAL;
}
@@ -316,8 +421,8 @@ static int queue_cb(const struct nlmsghdr *nlh, void *data) {
if (attr[NFQA_MARK] != NULL) {
// Skip packets sent by the raw socket to avoid an infinite loop.
if ((ntohl(mnl_attr_get_u32(attr[NFQA_MARK])) & RAWSOCKET_MARK) ==
RAWSOCKET_MARK) {
if ((ntohl(mnl_attr_get_u32(attr[NFQA_MARK])) & config.mark) ==
config.mark) {
return fallback_accept_packet(packet.id, *qdata);
}
}
@@ -471,6 +576,14 @@ int main(int argc, char *argv[]) {
exit(EXIT_FAILURE);
}
if (config.use_ipv6) {
if (open_raw6_socket() < 0) {
perror("Unable to open raw socket for ipv6");
close_raw_socket();
exit(EXIT_FAILURE);
}
}
struct queue_res *qres = &defqres;
if (config.threads == 1) {
@@ -503,10 +616,9 @@ int main(int argc, char *argv[]) {
}
}
if (close_raw_socket() < 0) {
perror("Unable to close raw socket");
exit(EXIT_FAILURE);
}
close_raw_socket();
if (config.use_ipv6)
close_raw6_socket();
return -qres->status;
}