Mirror of https://github.com/MetaCubeX/Clash.Meta.git (synced 2025-04-19 16:50:56 +00:00)

Merge branch 'Alpha' into Meta

Commit a034421a58 (71 changed files with 1101 additions and 364 deletions)
32  .github/genReleaseNote.sh  (vendored, executable file)
@@ -0,0 +1,32 @@
#!/bin/bash

while getopts "v:" opt; do
  case $opt in
    v)
      version_range=$OPTARG
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      exit 1
      ;;
  esac
done

if [ -z "$version_range" ]; then
  echo "Please provide the version range using -v option. Example: ./genReleashNote.sh -v v1.14.1...v1.14.2"
  exit 1
fi

echo "## What's Changed" > release.md
git log --pretty=format:"* %s by @%an" --grep="^feat" -i $version_range | sort -f | uniq >> release.md
echo "" >> release.md

echo "## BUG & Fix" >> release.md
git log --pretty=format:"* %s by @%an" --grep="^fix" -i $version_range | sort -f | uniq >> release.md
echo "" >> release.md

echo "## Maintenance" >> release.md
git log --pretty=format:"* %s by @%an" --grep="^chore\|^docs\|^refactor" -i $version_range | sort -f | uniq >> release.md
echo "" >> release.md

echo "**Full Changelog**: https://github.com/MetaCubeX/Clash.Meta/compare/$version_range" >> release.md
19  .github/workflows/build.yml  (vendored)

@@ -268,6 +268,7 @@ jobs:
            Synchronize ${{ github.ref_name }} branch code updates, keeping only the latest version
            <br>
            [我应该下载哪个文件? / Which file should I download?](https://github.com/MetaCubeX/mihomo/wiki/FAQ)
            [二进制文件筛选 / Binary file selector] (https://metacubex.github.io/Meta-Docs/startup/#_1)
            [查看文档 / Docs](https://metacubex.github.io/Meta-Docs/)
            EOF

@@ -288,6 +289,23 @@
    needs: [Build]
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Get tags
        run: |
          echo "CURRENTVERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
          git fetch --tags
          echo "PREVERSION=$(git describe --tags --abbrev=0 HEAD^)" >> $GITHUB_ENV

      - name: Generate release notes
        run: |
          cp ./.github/genReleaseNote.sh ./
          bash ./genReleaseNote.sh -v ${PREVERSION}...${CURRENTVERSION}
          rm ./genReleaseNote.sh

      - uses: actions/download-artifact@v3
        with:
          name: artifact

@@ -304,6 +322,7 @@ jobs:
          tag_name: ${{ github.ref_name }}
          files: bin/*
          generate_release_notes: true
          body_path: release.md

  Docker:
    if: ${{ !startsWith(github.event_name, 'pull_request') }}
6  .github/workflows/trigger-cmfa-update.yml  (vendored)

@@ -15,7 +15,7 @@ on:
      - Alpha

jobs:
  # Send "core-updated" to MetaCubeX/MihomoForAndroid to trigger update-dependencies
  # Send "core-updated" to MetaCubeX/ClashMetaForAndroid to trigger update-dependencies
  trigger-CMFA-update:
    runs-on: ubuntu-latest
    steps:

@@ -27,7 +27,7 @@ jobs:
      - name: Trigger update-dependencies
        run: |
          curl -X POST https://api.github.com/repos/MetaCubeX/MihomoForAndroid/dispatches \
          curl -X POST https://api.github.com/repos/MetaCubeX/ClashMetaForAndroid/dispatches \
          -H "Accept: application/vnd.github.everest-preview+json" \
          -H "Authorization: token ${{ steps.generate-token.outputs.token }}" \
          -d '{"event_type": "core-updated"}'
          -d '{"event_type": "core-updated"}'
@@ -9,7 +9,7 @@
  <a href="https://goreportcard.com/report/github.com/MetaCubeX/mihomo">
    <img src="https://goreportcard.com/badge/github.com/MetaCubeX/mihomo?style=flat-square">
  </a>
  <img src="https://img.shields.io/github/go-mod/go-version/MetaCubeX/mihomo?style=flat-square">
  <img src="https://img.shields.io/github/go-mod/go-version/MetaCubeX/mihomo/Alpha?style=flat-square">
  <a href="https://github.com/MetaCubeX/mihomo/releases">
    <img src="https://img.shields.io/github/release/MetaCubeX/mihomo/all.svg?style=flat-square">
  </a>
@@ -3,7 +3,6 @@ package adapter
import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "net"
    "net/http"

@@ -163,6 +162,8 @@ func (p *Proxy) MarshalJSON() ([]byte, error) {
// URLTest get the delay for the specified URL
// implements C.Proxy
func (p *Proxy) URLTest(ctx context.Context, url string, expectedStatus utils.IntRanges[uint16]) (t uint16, err error) {
    var satisfied bool

    defer func() {
        alive := err == nil
        record := C.DelayHistory{Time: time.Now()}

@@ -185,6 +186,11 @@ func (p *Proxy) URLTest(ctx context.Context, url string, expectedStatus utils.In
            p.extra.Store(url, state)
        }

        if !satisfied {
            record.Delay = 0
            alive = false
        }

        state.alive.Store(alive)
        state.history.Put(record)
        if state.history.Len() > defaultHistoriesNum {

@@ -253,15 +259,10 @@ func (p *Proxy) URLTest(ctx context.Context, url string, expectedStatus utils.In
        }
    }

    if expectedStatus != nil && !expectedStatus.Check(uint16(resp.StatusCode)) {
        // maybe another value should be returned for differentiation
        err = errors.New("response status is inconsistent with the expected status")
    }

    satisfied = resp != nil && (expectedStatus == nil || expectedStatus.Check(uint16(resp.StatusCode)))
    t = uint16(time.Since(start) / time.Millisecond)
    return
}

func NewProxy(adapter C.ProxyAdapter) *Proxy {
    return &Proxy{
        ProxyAdapter: adapter,
@@ -63,3 +63,9 @@ func WithInAddr(addr net.Addr) Addition {
        }
    }
}

func WithDSCP(dscp uint8) Addition {
    return func(metadata *C.Metadata) {
        metadata.DSCP = dscp
    }
}
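The hunk above stamps a DSCP value onto inbound metadata through the same functional-option pattern the other With* helpers use. Below is a small, self-contained sketch of that pattern; the local types and names (metadata, addition, withDSCP) are illustrative stand-ins, not the repository's own identifiers.

package main

import "fmt"

// metadata stands in for C.Metadata; addition mirrors the Addition func type.
type metadata struct{ DSCP uint8 }
type addition func(*metadata)

// withDSCP mirrors WithDSCP from the hunk above.
func withDSCP(dscp uint8) addition {
    return func(m *metadata) { m.DSCP = dscp }
}

func main() {
    m := &metadata{}
    for _, apply := range []addition{withDSCP(46)} { // apply options in order
        apply(m)
    }
    fmt.Println(m.DSCP) // 46
}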
@@ -50,6 +50,7 @@ type Hysteria2Option struct {
    CustomCA       string `proxy:"ca,omitempty"`
    CustomCAString string `proxy:"ca-str,omitempty"`
    CWND           int    `proxy:"cwnd,omitempty"`
    UdpMTU         int    `proxy:"udp-mtu,omitempty"`
}

func (h *Hysteria2) DialContext(ctx context.Context, metadata *C.Metadata, opts ...dialer.Option) (_ C.Conn, err error) {

@@ -117,6 +118,12 @@ func NewHysteria2(option Hysteria2Option) (*Hysteria2, error) {
        tlsConfig.NextProtos = option.ALPN
    }

    if option.UdpMTU == 0 {
        // "1200" from quic-go's MaxDatagramSize
        // "-3" from quic-go's DatagramFrame.MaxDataLen
        option.UdpMTU = 1200 - 3
    }

    singDialer := proxydialer.NewByNameSingDialer(option.DialerProxy, dialer.NewDialer())

    clientOptions := hysteria2.ClientOptions{

@@ -130,6 +137,7 @@ func NewHysteria2(option Hysteria2Option) (*Hysteria2, error) {
        TLSConfig:   tlsConfig,
        UDPDisabled: false,
        CWND:        option.CWND,
        UdpMTU:      option.UdpMTU,
    }

    client, err := hysteria2.NewClient(clientOptions)
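For reference, a minimal runnable sketch of the udp-mtu defaulting added in NewHysteria2 above; effectiveUDPMTU is a hypothetical helper used only for illustration, not a repository function.

package main

import "fmt"

// Sketch only: a configured value of 0 falls back to quic-go's datagram limit.
func effectiveUDPMTU(configured int) int {
    if configured == 0 {
        // "1200" from quic-go's MaxDatagramSize, "-3" from DatagramFrame.MaxDataLen.
        return 1200 - 3
    }
    return configured
}

func main() {
    fmt.Println(effectiveUDPMTU(0))    // 1197
    fmt.Println(effectiveUDPMTU(1350)) // 1350
}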
@@ -17,6 +17,7 @@ import (
    "github.com/metacubex/mihomo/component/dialer"
    "github.com/metacubex/mihomo/component/proxydialer"
    "github.com/metacubex/mihomo/component/resolver"
    "github.com/metacubex/mihomo/component/slowdown"
    C "github.com/metacubex/mihomo/constant"
    "github.com/metacubex/mihomo/dns"
    "github.com/metacubex/mihomo/log"

@@ -136,7 +137,7 @@ func NewWireGuard(option WireGuardOption) (*WireGuard, error) {
            rmark:  option.RoutingMark,
            prefer: C.NewDNSPrefer(option.IPVersion),
        },
        dialer: proxydialer.NewByNameSingDialer(option.DialerProxy, dialer.NewDialer()),
        dialer: proxydialer.NewSlowDownSingDialer(proxydialer.NewByNameSingDialer(option.DialerProxy, dialer.NewDialer()), slowdown.New()),
    }
    runtime.SetFinalizer(outbound, closeWireGuard)
@@ -21,6 +21,8 @@ type Fallback struct {
    testUrl        string
    selected       string
    expectedStatus string
    Hidden         bool
    Icon           string
}

func (f *Fallback) Now() string {

@@ -90,6 +92,8 @@ func (f *Fallback) MarshalJSON() ([]byte, error) {
        "testUrl":        f.testUrl,
        "expectedStatus": f.expectedStatus,
        "fixed":          f.selected,
        "hidden":         f.Hidden,
        "icon":           f.Icon,
    })
}

@@ -137,7 +141,7 @@ func (f *Fallback) Set(name string) error {
        if !p.AliveForTestUrl(f.testUrl) {
            ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*time.Duration(5000))
            defer cancel()
            expectedStatus, _ := utils.NewIntRanges[uint16](f.expectedStatus)
            expectedStatus, _ := utils.NewUnsignedRanges[uint16](f.expectedStatus)
            _, _ = p.URLTest(ctx, f.testUrl, expectedStatus)
        }

@@ -165,5 +169,7 @@ func NewFallback(option *GroupCommonOption, providers []provider.ProxyProvider)
        disableUDP:     option.DisableUDP,
        testUrl:        option.URL,
        expectedStatus: option.ExpectedStatus,
        Hidden:         option.Hidden,
        Icon:           option.Icon,
    }
}
|
@ -29,6 +29,8 @@ type LoadBalance struct {
|
|||
strategyFn strategyFn
|
||||
testUrl string
|
||||
expectedStatus string
|
||||
Hidden bool
|
||||
Icon string
|
||||
}
|
||||
|
||||
var errStrategy = errors.New("unsupported strategy")
|
||||
|
@ -236,6 +238,8 @@ func (lb *LoadBalance) MarshalJSON() ([]byte, error) {
|
|||
"all": all,
|
||||
"testUrl": lb.testUrl,
|
||||
"expectedStatus": lb.expectedStatus,
|
||||
"hidden": lb.Hidden,
|
||||
"icon": lb.Icon,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -268,5 +272,7 @@ func NewLoadBalance(option *GroupCommonOption, providers []provider.ProxyProvide
|
|||
disableUDP: option.DisableUDP,
|
||||
testUrl: option.URL,
|
||||
expectedStatus: option.ExpectedStatus,
|
||||
Hidden: option.Hidden,
|
||||
Icon: option.Icon,
|
||||
}, nil
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ type GroupCommonOption struct {
|
|||
Use []string `group:"use,omitempty"`
|
||||
URL string `group:"url,omitempty"`
|
||||
Interval int `group:"interval,omitempty"`
|
||||
TestTimeout int `group:"timeout,omitempty"`
|
||||
Lazy bool `group:"lazy,omitempty"`
|
||||
DisableUDP bool `group:"disable-udp,omitempty"`
|
||||
Filter string `group:"filter,omitempty"`
|
||||
|
@ -37,6 +38,8 @@ type GroupCommonOption struct {
|
|||
IncludeAll bool `group:"include-all,omitempty"`
|
||||
IncludeAllProxies bool `group:"include-all-proxies,omitempty"`
|
||||
IncludeAllProviders bool `group:"include-all-providers,omitempty"`
|
||||
Hidden bool `group:"hidden,omitempty"`
|
||||
Icon string `group:"icon,omitempty"`
|
||||
}
|
||||
|
||||
func ParseProxyGroup(config map[string]any, proxyMap map[string]C.Proxy, providersMap map[string]types.ProxyProvider, AllProxies []string, AllProviders []string) (C.ProxyAdapter, error) {
|
||||
|
@ -78,7 +81,7 @@ func ParseProxyGroup(config map[string]any, proxyMap map[string]C.Proxy, provide
|
|||
return nil, fmt.Errorf("%s: %w", groupName, errMissProxy)
|
||||
}
|
||||
|
||||
expectedStatus, err := utils.NewIntRanges[uint16](groupOption.ExpectedStatus)
|
||||
expectedStatus, err := utils.NewUnsignedRanges[uint16](groupOption.ExpectedStatus)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: %w", groupName, err)
|
||||
}
|
||||
|
@ -90,9 +93,11 @@ func ParseProxyGroup(config map[string]any, proxyMap map[string]C.Proxy, provide
|
|||
groupOption.ExpectedStatus = status
|
||||
testUrl := groupOption.URL
|
||||
|
||||
if groupOption.URL == "" {
|
||||
groupOption.URL = C.DefaultTestURL
|
||||
testUrl = groupOption.URL
|
||||
if groupOption.Type != "select" && groupOption.Type != "relay" {
|
||||
if groupOption.URL == "" {
|
||||
groupOption.URL = C.DefaultTestURL
|
||||
testUrl = groupOption.URL
|
||||
}
|
||||
}
|
||||
|
||||
if len(GroupProxies) != 0 {
|
||||
|
@ -112,7 +117,7 @@ func ParseProxyGroup(config map[string]any, proxyMap map[string]C.Proxy, provide
|
|||
}
|
||||
}
|
||||
|
||||
hc := provider.NewHealthCheck(ps, testUrl, uint(groupOption.Interval), groupOption.Lazy, expectedStatus)
|
||||
hc := provider.NewHealthCheck(ps, testUrl, uint(groupOption.TestTimeout), uint(groupOption.Interval), groupOption.Lazy, expectedStatus)
|
||||
|
||||
pd, err := provider.NewCompatibleProvider(groupName, ps, hc)
|
||||
if err != nil {
|
||||
|
|
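The parser hunks above stop assigning an implicit health-check URL to select and relay groups. A self-contained sketch of that decision; defaultGroupTestURL is a hypothetical helper written only to illustrate the behaviour, not a repository function.

package main

import "fmt"

// Sketch only: mirrors the new default-URL behaviour in ParseProxyGroup.
func defaultGroupTestURL(groupType, configuredURL, defaultURL string) string {
    // select and relay groups no longer get an implicit health-check URL;
    // other group types still fall back to the default when none is configured.
    if configuredURL == "" && groupType != "select" && groupType != "relay" {
        return defaultURL
    }
    return configuredURL
}

func main() {
    fmt.Println(defaultGroupTestURL("select", "", "https://www.gstatic.com/generate_204"))   // "" (no implicit URL)
    fmt.Println(defaultGroupTestURL("url-test", "", "https://www.gstatic.com/generate_204")) // default URL
}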
|
@ -3,6 +3,7 @@ package outboundgroup
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/metacubex/mihomo/adapter/outbound"
|
||||
"github.com/metacubex/mihomo/component/dialer"
|
||||
"github.com/metacubex/mihomo/component/proxydialer"
|
||||
|
@ -12,6 +13,8 @@ import (
|
|||
|
||||
type Relay struct {
|
||||
*GroupBase
|
||||
Hidden bool
|
||||
Icon string
|
||||
}
|
||||
|
||||
// DialContext implements C.ProxyAdapter
|
||||
|
@ -106,8 +109,10 @@ func (r *Relay) MarshalJSON() ([]byte, error) {
|
|||
all = append(all, proxy.Name())
|
||||
}
|
||||
return json.Marshal(map[string]any{
|
||||
"type": r.Type().String(),
|
||||
"all": all,
|
||||
"type": r.Type().String(),
|
||||
"all": all,
|
||||
"hidden": r.Hidden,
|
||||
"icon": r.Icon,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -157,5 +162,7 @@ func NewRelay(option *GroupCommonOption, providers []provider.ProxyProvider) *Re
|
|||
"",
|
||||
providers,
|
||||
}),
|
||||
Hidden: option.Hidden,
|
||||
Icon: option.Icon,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -15,6 +15,8 @@ type Selector struct {
|
|||
*GroupBase
|
||||
disableUDP bool
|
||||
selected string
|
||||
Hidden bool
|
||||
Icon string
|
||||
}
|
||||
|
||||
// DialContext implements C.ProxyAdapter
|
||||
|
@ -57,9 +59,11 @@ func (s *Selector) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
|
||||
return json.Marshal(map[string]any{
|
||||
"type": s.Type().String(),
|
||||
"now": s.Now(),
|
||||
"all": all,
|
||||
"type": s.Type().String(),
|
||||
"now": s.Now(),
|
||||
"all": all,
|
||||
"hidden": s.Hidden,
|
||||
"icon": s.Icon,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -114,5 +118,7 @@ func NewSelector(option *GroupCommonOption, providers []provider.ProxyProvider)
|
|||
}),
|
||||
selected: "COMPATIBLE",
|
||||
disableUDP: option.DisableUDP,
|
||||
Hidden: option.Hidden,
|
||||
Icon: option.Icon,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -33,6 +33,8 @@ type URLTest struct {
|
|||
expectedStatus string
|
||||
tolerance uint16
|
||||
disableUDP bool
|
||||
Hidden bool
|
||||
Icon string
|
||||
fastNode C.Proxy
|
||||
fastSingle *singledo.Single[C.Proxy]
|
||||
}
|
||||
|
@ -174,6 +176,8 @@ func (u *URLTest) MarshalJSON() ([]byte, error) {
|
|||
"testUrl": u.testUrl,
|
||||
"expectedStatus": u.expectedStatus,
|
||||
"fixed": u.selected,
|
||||
"hidden": u.Hidden,
|
||||
"icon": u.Icon,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -237,6 +241,8 @@ func NewURLTest(option *GroupCommonOption, providers []provider.ProxyProvider, o
|
|||
disableUDP: option.DisableUDP,
|
||||
testUrl: option.URL,
|
||||
expectedStatus: option.ExpectedStatus,
|
||||
Hidden: option.Hidden,
|
||||
Icon: option.Icon,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
|
|
|
@@ -16,10 +16,6 @@ import (
    "github.com/dlclark/regexp2"
)

const (
    defaultURLTestTimeout = time.Second * 5
)

type HealthCheckOption struct {
    URL      string
    Interval uint

@@ -42,6 +38,7 @@ type HealthCheck struct {
    lastTouch atomic.TypedValue[time.Time]
    done      chan struct{}
    singleDo  *singledo.Single[struct{}]
    timeout   time.Duration
}

func (hc *HealthCheck) process() {

@@ -198,7 +195,7 @@ func (hc *HealthCheck) execute(b *batch.Batch[bool], url, uid string, option *ex

        p := proxy
        b.Go(p.Name(), func() (bool, error) {
            ctx, cancel := context.WithTimeout(context.Background(), defaultURLTestTimeout)
            ctx, cancel := context.WithTimeout(context.Background(), hc.timeout)
            defer cancel()
            log.Debugln("Health Checking, proxy: %s, url: %s, id: {%s}", p.Name(), url, uid)
            _, _ = p.URLTest(ctx, url, expectedStatus)

@@ -212,15 +209,19 @@ func (hc *HealthCheck) close() {
    hc.done <- struct{}{}
}

func NewHealthCheck(proxies []C.Proxy, url string, interval uint, lazy bool, expectedStatus utils.IntRanges[uint16]) *HealthCheck {
func NewHealthCheck(proxies []C.Proxy, url string, timeout uint, interval uint, lazy bool, expectedStatus utils.IntRanges[uint16]) *HealthCheck {
    if url == "" {
        // expectedStatus = nil
        url = C.DefaultTestURL
    }
    if timeout == 0 {
        timeout = 5000
    }

    return &HealthCheck{
        proxies:  proxies,
        url:      url,
        timeout:  time.Duration(timeout) * time.Millisecond,
        extra:    map[string]*extraOption{},
        interval: time.Duration(interval) * time.Second,
        lazy:     lazy,
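A usage sketch for the new timeout argument of NewHealthCheck (milliseconds; 0 falls back to 5000 ms as shown above). Import paths are assumed from the repository layout and the caller values are illustrative only.

package main

import (
    "github.com/metacubex/mihomo/adapter/provider" // assumed path
    "github.com/metacubex/mihomo/common/utils"     // assumed path
    C "github.com/metacubex/mihomo/constant"
)

// Sketch: building a HealthCheck with the new per-check timeout.
func newExampleHealthCheck(proxies []C.Proxy) *provider.HealthCheck {
    expectedStatus, _ := utils.NewUnsignedRanges[uint16]("204")
    // args: proxies, url, timeout (ms), interval (s), lazy, expected status codes
    return provider.NewHealthCheck(proxies, C.DefaultTestURL, 2000, 300, true, expectedStatus)
}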
@@ -22,6 +22,7 @@ type healthCheckSchema struct {
    Enable         bool   `provider:"enable"`
    URL            string `provider:"url"`
    Interval       int    `provider:"interval"`
    TestTimeout    int    `provider:"timeout,omitempty"`
    Lazy           bool   `provider:"lazy,omitempty"`
    ExpectedStatus string `provider:"expected-status,omitempty"`
}

@@ -63,7 +64,7 @@ func ParseProxyProvider(name string, mapping map[string]any) (types.ProxyProvide
        return nil, err
    }

    expectedStatus, err := utils.NewIntRanges[uint16](schema.HealthCheck.ExpectedStatus)
    expectedStatus, err := utils.NewUnsignedRanges[uint16](schema.HealthCheck.ExpectedStatus)
    if err != nil {
        return nil, err
    }

@@ -75,7 +76,7 @@ func ParseProxyProvider(name string, mapping map[string]any) (types.ProxyProvide
        }
        hcInterval = uint(schema.HealthCheck.Interval)
    }
    hc := NewHealthCheck([]C.Proxy{}, schema.HealthCheck.URL, hcInterval, schema.HealthCheck.Lazy, expectedStatus)
    hc := NewHealthCheck([]C.Proxy{}, schema.HealthCheck.URL, uint(schema.HealthCheck.TestTimeout), hcInterval, schema.HealthCheck.Lazy, expectedStatus)

    var vehicle types.Vehicle
    switch schema.Type {
|
@ -68,7 +68,8 @@ func ConvertsV2Ray(buf []byte) ([]map[string]any, error) {
|
|||
hysteria["skip-cert-verify"], _ = strconv.ParseBool(query.Get("insecure"))
|
||||
|
||||
proxies = append(proxies, hysteria)
|
||||
case "hysteria2":
|
||||
|
||||
case "hysteria2", "hy2":
|
||||
urlHysteria2, err := url.Parse(line)
|
||||
if err != nil {
|
||||
continue
|
||||
|
@ -79,7 +80,7 @@ func ConvertsV2Ray(buf []byte) ([]map[string]any, error) {
|
|||
hysteria2 := make(map[string]any, 20)
|
||||
|
||||
hysteria2["name"] = name
|
||||
hysteria2["type"] = scheme
|
||||
hysteria2["type"] = "hysteria2"
|
||||
hysteria2["server"] = urlHysteria2.Hostname()
|
||||
if port := urlHysteria2.Port(); port != "" {
|
||||
hysteria2["port"] = port
|
||||
|
@ -101,6 +102,7 @@ func ConvertsV2Ray(buf []byte) ([]map[string]any, error) {
|
|||
hysteria2["up"] = query.Get("up")
|
||||
|
||||
proxies = append(proxies, hysteria2)
|
||||
|
||||
case "tuic":
|
||||
// A temporary unofficial TUIC share link standard
|
||||
// Modified from https://github.com/daeuniverse/dae/discussions/182
|
||||
|
@ -143,7 +145,7 @@ func ConvertsV2Ray(buf []byte) ([]map[string]any, error) {
|
|||
}
|
||||
|
||||
proxies = append(proxies, tuic)
|
||||
|
||||
|
||||
case "trojan":
|
||||
urlTrojan, err := url.Parse(line)
|
||||
if err != nil {
|
||||
|
@ -405,14 +407,27 @@ func ConvertsV2Ray(buf []byte) ([]map[string]any, error) {
|
|||
if query.Get("udp-over-tcp") == "true" || query.Get("uot") == "1" {
|
||||
ss["udp-over-tcp"] = true
|
||||
}
|
||||
if strings.Contains(query.Get("plugin"), "obfs") {
|
||||
obfsParams := strings.Split(query.Get("plugin"), ";")
|
||||
ss["plugin"] = "obfs"
|
||||
ss["plugin-opts"] = map[string]any{
|
||||
"host": obfsParams[2][10:],
|
||||
"mode": obfsParams[1][5:],
|
||||
plugin := query.Get("plugin")
|
||||
if strings.Contains(plugin, ";") {
|
||||
pluginInfo, _ := url.ParseQuery("pluginName=" + strings.ReplaceAll(plugin, ";", "&"))
|
||||
pluginName := pluginInfo.Get("pluginName")
|
||||
if strings.Contains(pluginName, "obfs") {
|
||||
ss["plugin"] = "obfs"
|
||||
ss["plugin-opts"] = map[string]any{
|
||||
"mode": pluginInfo.Get("obfs"),
|
||||
"host": pluginInfo.Get("obfs-host"),
|
||||
}
|
||||
} else if strings.Contains(pluginName, "v2ray-plugin") {
|
||||
ss["plugin"] = "v2ray-plugin"
|
||||
ss["plugin-opts"] = map[string]any{
|
||||
"mode": pluginInfo.Get("mode"),
|
||||
"host": pluginInfo.Get("host"),
|
||||
"path": pluginInfo.Get("path"),
|
||||
"tls": strings.Contains(plugin, "tls"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
proxies = append(proxies, ss)
|
||||
|
||||
case "ssr":
|
||||
|
|
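The converter hunk above replaces manual string slicing of the ss plugin parameter with url.ParseQuery. A self-contained sketch of the same trick (replace ";" with "&" so plugin options parse as a query string); the sample plugin string is made up for illustration.

package main

import (
    "fmt"
    "net/url"
    "strings"
)

func main() {
    // Hypothetical plugin parameter as found in an ss:// share link query.
    plugin := "obfs-local;obfs=http;obfs-host=example.com"

    // Turning ";" into "&" lets the standard query parser split the options.
    pluginInfo, _ := url.ParseQuery("pluginName=" + strings.ReplaceAll(plugin, ";", "&"))
    fmt.Println(pluginInfo.Get("pluginName")) // obfs-local
    fmt.Println(pluginInfo.Get("obfs"))       // http
    fmt.Println(pluginInfo.Get("obfs-host"))  // example.com
}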
217
common/net/deadline/pipe_sing.go
Normal file
217
common/net/deadline/pipe_sing.go
Normal file
|
@ -0,0 +1,217 @@
|
|||
package deadline
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/sagernet/sing/common/buf"
|
||||
N "github.com/sagernet/sing/common/network"
|
||||
)
|
||||
|
||||
type pipeAddr struct{}
|
||||
|
||||
func (pipeAddr) Network() string { return "pipe" }
|
||||
func (pipeAddr) String() string { return "pipe" }
|
||||
|
||||
type pipe struct {
|
||||
wrMu sync.Mutex // Serialize Write operations
|
||||
|
||||
// Used by local Read to interact with remote Write.
|
||||
// Successful receive on rdRx is always followed by send on rdTx.
|
||||
rdRx <-chan []byte
|
||||
rdTx chan<- int
|
||||
|
||||
// Used by local Write to interact with remote Read.
|
||||
// Successful send on wrTx is always followed by receive on wrRx.
|
||||
wrTx chan<- []byte
|
||||
wrRx <-chan int
|
||||
|
||||
once sync.Once // Protects closing localDone
|
||||
localDone chan struct{}
|
||||
remoteDone <-chan struct{}
|
||||
|
||||
readDeadline pipeDeadline
|
||||
writeDeadline pipeDeadline
|
||||
|
||||
readWaitOptions N.ReadWaitOptions
|
||||
}
|
||||
|
||||
// Pipe creates a synchronous, in-memory, full duplex
|
||||
// network connection; both ends implement the Conn interface.
|
||||
// Reads on one end are matched with writes on the other,
|
||||
// copying data directly between the two; there is no internal
|
||||
// buffering.
|
||||
func Pipe() (net.Conn, net.Conn) {
|
||||
cb1 := make(chan []byte)
|
||||
cb2 := make(chan []byte)
|
||||
cn1 := make(chan int)
|
||||
cn2 := make(chan int)
|
||||
done1 := make(chan struct{})
|
||||
done2 := make(chan struct{})
|
||||
|
||||
p1 := &pipe{
|
||||
rdRx: cb1, rdTx: cn1,
|
||||
wrTx: cb2, wrRx: cn2,
|
||||
localDone: done1, remoteDone: done2,
|
||||
readDeadline: makePipeDeadline(),
|
||||
writeDeadline: makePipeDeadline(),
|
||||
}
|
||||
p2 := &pipe{
|
||||
rdRx: cb2, rdTx: cn2,
|
||||
wrTx: cb1, wrRx: cn1,
|
||||
localDone: done2, remoteDone: done1,
|
||||
readDeadline: makePipeDeadline(),
|
||||
writeDeadline: makePipeDeadline(),
|
||||
}
|
||||
return p1, p2
|
||||
}
|
||||
|
||||
func (*pipe) LocalAddr() net.Addr { return pipeAddr{} }
|
||||
func (*pipe) RemoteAddr() net.Addr { return pipeAddr{} }
|
||||
|
||||
func (p *pipe) Read(b []byte) (int, error) {
|
||||
n, err := p.read(b)
|
||||
if err != nil && err != io.EOF && err != io.ErrClosedPipe {
|
||||
err = &net.OpError{Op: "read", Net: "pipe", Err: err}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (p *pipe) read(b []byte) (n int, err error) {
|
||||
switch {
|
||||
case isClosedChan(p.localDone):
|
||||
return 0, io.ErrClosedPipe
|
||||
case isClosedChan(p.remoteDone):
|
||||
return 0, io.EOF
|
||||
case isClosedChan(p.readDeadline.wait()):
|
||||
return 0, os.ErrDeadlineExceeded
|
||||
}
|
||||
|
||||
select {
|
||||
case bw := <-p.rdRx:
|
||||
nr := copy(b, bw)
|
||||
p.rdTx <- nr
|
||||
return nr, nil
|
||||
case <-p.localDone:
|
||||
return 0, io.ErrClosedPipe
|
||||
case <-p.remoteDone:
|
||||
return 0, io.EOF
|
||||
case <-p.readDeadline.wait():
|
||||
return 0, os.ErrDeadlineExceeded
|
||||
}
|
||||
}
|
||||
|
||||
func (p *pipe) Write(b []byte) (int, error) {
|
||||
n, err := p.write(b)
|
||||
if err != nil && err != io.ErrClosedPipe {
|
||||
err = &net.OpError{Op: "write", Net: "pipe", Err: err}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (p *pipe) write(b []byte) (n int, err error) {
|
||||
switch {
|
||||
case isClosedChan(p.localDone):
|
||||
return 0, io.ErrClosedPipe
|
||||
case isClosedChan(p.remoteDone):
|
||||
return 0, io.ErrClosedPipe
|
||||
case isClosedChan(p.writeDeadline.wait()):
|
||||
return 0, os.ErrDeadlineExceeded
|
||||
}
|
||||
|
||||
p.wrMu.Lock() // Ensure entirety of b is written together
|
||||
defer p.wrMu.Unlock()
|
||||
for once := true; once || len(b) > 0; once = false {
|
||||
select {
|
||||
case p.wrTx <- b:
|
||||
nw := <-p.wrRx
|
||||
b = b[nw:]
|
||||
n += nw
|
||||
case <-p.localDone:
|
||||
return n, io.ErrClosedPipe
|
||||
case <-p.remoteDone:
|
||||
return n, io.ErrClosedPipe
|
||||
case <-p.writeDeadline.wait():
|
||||
return n, os.ErrDeadlineExceeded
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (p *pipe) SetDeadline(t time.Time) error {
|
||||
if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) {
|
||||
return io.ErrClosedPipe
|
||||
}
|
||||
p.readDeadline.set(t)
|
||||
p.writeDeadline.set(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *pipe) SetReadDeadline(t time.Time) error {
|
||||
if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) {
|
||||
return io.ErrClosedPipe
|
||||
}
|
||||
p.readDeadline.set(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *pipe) SetWriteDeadline(t time.Time) error {
|
||||
if isClosedChan(p.localDone) || isClosedChan(p.remoteDone) {
|
||||
return io.ErrClosedPipe
|
||||
}
|
||||
p.writeDeadline.set(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *pipe) Close() error {
|
||||
p.once.Do(func() { close(p.localDone) })
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ N.ReadWaiter = (*pipe)(nil)
|
||||
|
||||
func (p *pipe) InitializeReadWaiter(options N.ReadWaitOptions) (needCopy bool) {
|
||||
p.readWaitOptions = options
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *pipe) WaitReadBuffer() (buffer *buf.Buffer, err error) {
|
||||
buffer, err = p.waitReadBuffer()
|
||||
if err != nil && err != io.EOF && err != io.ErrClosedPipe {
|
||||
err = &net.OpError{Op: "read", Net: "pipe", Err: err}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *pipe) waitReadBuffer() (buffer *buf.Buffer, err error) {
|
||||
switch {
|
||||
case isClosedChan(p.localDone):
|
||||
return nil, io.ErrClosedPipe
|
||||
case isClosedChan(p.remoteDone):
|
||||
return nil, io.EOF
|
||||
case isClosedChan(p.readDeadline.wait()):
|
||||
return nil, os.ErrDeadlineExceeded
|
||||
}
|
||||
select {
|
||||
case bw := <-p.rdRx:
|
||||
buffer = p.readWaitOptions.NewBuffer()
|
||||
var nr int
|
||||
nr, err = buffer.Write(bw)
|
||||
if err != nil {
|
||||
buffer.Release()
|
||||
return
|
||||
}
|
||||
p.readWaitOptions.PostReturn(buffer)
|
||||
p.rdTx <- nr
|
||||
return
|
||||
case <-p.localDone:
|
||||
return nil, io.ErrClosedPipe
|
||||
case <-p.remoteDone:
|
||||
return nil, io.EOF
|
||||
case <-p.readDeadline.wait():
|
||||
return nil, os.ErrDeadlineExceeded
|
||||
}
|
||||
}
|
|
@@ -35,6 +35,8 @@ func NeedHandshake(conn any) bool {

type CountFunc = network.CountFunc

var Pipe = deadline.Pipe

// Relay copies between left and right bidirectionally.
func Relay(leftConn, rightConn net.Conn) {
    defer runtime.KeepAlive(leftConn)
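Pipe (now backed by the deadline-aware pipe added in common/net/deadline above) behaves like net.Pipe while honouring Set*Deadline. A usage sketch; the import path of the helper package is assumed from the repository layout.

package main

import (
    "fmt"
    "time"

    N "github.com/metacubex/mihomo/common/net" // assumed path; exposes Pipe = deadline.Pipe
)

func main() {
    c1, c2 := N.Pipe()
    defer c1.Close()
    defer c2.Close()

    // With a read deadline set, a blocked Read returns os.ErrDeadlineExceeded
    // (wrapped in a *net.OpError) instead of hanging forever.
    _ = c1.SetReadDeadline(time.Now().Add(50 * time.Millisecond))
    buf := make([]byte, 1)
    _, err := c1.Read(buf)
    fmt.Println(err)
}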
@@ -13,7 +13,7 @@ type IntRanges[T constraints.Integer] []Range[T]

var errIntRanges = errors.New("intRanges error")

func NewIntRanges[T constraints.Integer](expected string) (IntRanges[T], error) {
func newIntRanges[T constraints.Integer](expected string, parseFn func(string) (T, error)) (IntRanges[T], error) {
    // example: 200 or 200/302 or 200-400 or 200/204/401-429/501-503
    expected = strings.TrimSpace(expected)
    if len(expected) == 0 || expected == "*" {

@@ -25,10 +25,10 @@ func NewIntRanges[T constraints.Integer](expected string) (IntRanges[T], error)
        return nil, fmt.Errorf("%w, too many ranges to use, maximum support 28 ranges", errIntRanges)
    }

    return NewIntRangesFromList[T](list)
    return newIntRangesFromList[T](list, parseFn)
}

func NewIntRangesFromList[T constraints.Integer](list []string) (IntRanges[T], error) {
func newIntRangesFromList[T constraints.Integer](list []string, parseFn func(string) (T, error)) (IntRanges[T], error) {
    var ranges IntRanges[T]
    for _, s := range list {
        if s == "" {

@@ -41,7 +41,7 @@ func NewIntRangesFromList[T constraints.Integer](list []string) (IntRanges[T], e
            return nil, errIntRanges
        }

        start, err := strconv.ParseInt(strings.Trim(status[0], "[ ]"), 10, 64)
        start, err := parseFn(strings.Trim(status[0], "[ ]"))
        if err != nil {
            return nil, errIntRanges
        }

@@ -50,7 +50,7 @@ func NewIntRangesFromList[T constraints.Integer](list []string) (IntRanges[T], e
        case 1:
            ranges = append(ranges, NewRange(T(start), T(start)))
        case 2:
            end, err := strconv.ParseUint(strings.Trim(status[1], "[ ]"), 10, 64)
            end, err := parseFn(strings.Trim(status[1], "[ ]"))
            if err != nil {
                return nil, errIntRanges
            }

@@ -62,6 +62,38 @@ func NewIntRangesFromList[T constraints.Integer](list []string) (IntRanges[T], e
    return ranges, nil
}

func parseUnsigned[T constraints.Unsigned](s string) (T, error) {
    if val, err := strconv.ParseUint(s, 10, 64); err == nil {
        return T(val), nil
    } else {
        return 0, err
    }
}

func NewUnsignedRanges[T constraints.Unsigned](expected string) (IntRanges[T], error) {
    return newIntRanges(expected, parseUnsigned[T])
}

func NewUnsignedRangesFromList[T constraints.Unsigned](list []string) (IntRanges[T], error) {
    return newIntRangesFromList(list, parseUnsigned[T])
}

func parseSigned[T constraints.Signed](s string) (T, error) {
    if val, err := strconv.ParseInt(s, 10, 64); err == nil {
        return T(val), nil
    } else {
        return 0, err
    }
}

func NewSignedRanges[T constraints.Signed](expected string) (IntRanges[T], error) {
    return newIntRanges(expected, parseSigned[T])
}

func NewSignedRangesFromList[T constraints.Signed](list []string) (IntRanges[T], error) {
    return newIntRangesFromList(list, parseSigned[T])
}

func (ranges IntRanges[T]) Check(status T) bool {
    if len(ranges) == 0 {
        return true
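A usage sketch for the new constructors above: NewUnsignedRanges keeps the old NewIntRanges parsing behaviour for unsigned element types such as HTTP status codes or ports. The utils import path is assumed from the repository layout.

package main

import (
    "fmt"

    "github.com/metacubex/mihomo/common/utils" // assumed path
)

func main() {
    // "" or "*" matches everything; otherwise single values and ranges are allowed.
    expected, err := utils.NewUnsignedRanges[uint16]("200/204/401-429")
    if err != nil {
        panic(err)
    }
    fmt.Println(expected.Check(204)) // true
    fmt.Println(expected.Check(500)) // false
}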
66
component/cidr/ipcidr_set.go
Normal file
66
component/cidr/ipcidr_set.go
Normal file
|
@ -0,0 +1,66 @@
|
|||
package cidr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"unsafe"
|
||||
|
||||
"go4.org/netipx"
|
||||
)
|
||||
|
||||
type IpCidrSet struct {
|
||||
// must same with netipx.IPSet
|
||||
rr []netipx.IPRange
|
||||
}
|
||||
|
||||
func NewIpCidrSet() *IpCidrSet {
|
||||
return &IpCidrSet{}
|
||||
}
|
||||
|
||||
func (set *IpCidrSet) AddIpCidrForString(ipCidr string) error {
|
||||
prefix, err := netip.ParsePrefix(ipCidr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return set.AddIpCidr(prefix)
|
||||
}
|
||||
|
||||
func (set *IpCidrSet) AddIpCidr(ipCidr netip.Prefix) (err error) {
|
||||
if r := netipx.RangeOfPrefix(ipCidr); r.IsValid() {
|
||||
set.rr = append(set.rr, r)
|
||||
} else {
|
||||
err = fmt.Errorf("not valid ipcidr range: %s", ipCidr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (set *IpCidrSet) IsContainForString(ipString string) bool {
|
||||
ip, err := netip.ParseAddr(ipString)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return set.IsContain(ip)
|
||||
}
|
||||
|
||||
func (set *IpCidrSet) IsContain(ip netip.Addr) bool {
|
||||
return set.toIPSet().Contains(ip.WithZone(""))
|
||||
}
|
||||
|
||||
func (set *IpCidrSet) Merge() error {
|
||||
var b netipx.IPSetBuilder
|
||||
b.AddSet(set.toIPSet())
|
||||
i, err := b.IPSet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
set.fromIPSet(i)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (set *IpCidrSet) toIPSet() *netipx.IPSet {
|
||||
return (*netipx.IPSet)(unsafe.Pointer(set))
|
||||
}
|
||||
|
||||
func (set *IpCidrSet) fromIPSet(i *netipx.IPSet) {
|
||||
*set = *(*IpCidrSet)(unsafe.Pointer(i))
|
||||
}
|
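A short usage sketch for the new IpCidrSet added above; Merge collapses overlapping ranges, which the accompanying test file also exercises. The import path is taken from the router import later in this diff.

package main

import (
    "fmt"

    "github.com/metacubex/mihomo/component/cidr"
)

func main() {
    set := cidr.NewIpCidrSet()
    _ = set.AddIpCidrForString("149.154.160.0/20")
    _ = set.AddIpCidrForString("2409:8000::/20")
    _ = set.Merge() // merge/deduplicate overlapping ranges

    fmt.Println(set.IsContainForString("149.154.160.1"))         // true
    fmt.Println(set.IsContainForString("2409:8087:1e03:21::27")) // true
    fmt.Println(set.IsContainForString("10.0.0.1"))              // false
}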
107
component/cidr/ipcidr_set_test.go
Normal file
107
component/cidr/ipcidr_set_test.go
Normal file
|
@ -0,0 +1,107 @@
|
|||
package cidr
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIpv4(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ipCidr string
|
||||
ip string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "Test Case 1",
|
||||
ipCidr: "149.154.160.0/20",
|
||||
ip: "149.154.160.0",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Test Case 2",
|
||||
ipCidr: "192.168.0.0/16",
|
||||
ip: "10.0.0.1",
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
set := &IpCidrSet{}
|
||||
set.AddIpCidrForString(test.ipCidr)
|
||||
|
||||
result := set.IsContainForString(test.ip)
|
||||
if result != test.expected {
|
||||
t.Errorf("Expected result: %v, got: %v", test.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIpv6(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ipCidr string
|
||||
ip string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "Test Case 1",
|
||||
ipCidr: "2409:8000::/20",
|
||||
ip: "2409:8087:1e03:21::27",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Test Case 2",
|
||||
ipCidr: "240e::/16",
|
||||
ip: "240e:964:ea02:100:1800::71",
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
// Add more test cases as needed
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
set := &IpCidrSet{}
|
||||
set.AddIpCidrForString(test.ipCidr)
|
||||
|
||||
result := set.IsContainForString(test.ip)
|
||||
if result != test.expected {
|
||||
t.Errorf("Expected result: %v, got: %v", test.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ipCidr1 string
|
||||
ipCidr2 string
|
||||
ipCidr3 string
|
||||
expectedLen int
|
||||
}{
|
||||
{
|
||||
name: "Test Case 1",
|
||||
ipCidr1: "2409:8000::/20",
|
||||
ipCidr2: "2409:8000::/21",
|
||||
ipCidr3: "2409:8000::/48",
|
||||
expectedLen: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
set := &IpCidrSet{}
|
||||
set.AddIpCidrForString(test.ipCidr1)
|
||||
set.AddIpCidrForString(test.ipCidr2)
|
||||
set.Merge()
|
||||
|
||||
rangesLen := len(set.rr)
|
||||
|
||||
if rangesLen != test.expectedLen {
|
||||
t.Errorf("Expected len: %v, got: %v", test.expectedLen, rangesLen)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@@ -15,6 +15,11 @@ import (
    "github.com/metacubex/mihomo/constant/features"
)

const (
    DefaultTCPTimeout = 5 * time.Second
    DefaultUDPTimeout = DefaultTCPTimeout
)

type dialFunc func(ctx context.Context, network string, ips []netip.Addr, port string, opt *option) (net.Conn, error)

var (
@@ -2,10 +2,11 @@ package dialer

import (
    "context"
    "github.com/sagernet/tfo-go"
    "io"
    "net"
    "time"

    "github.com/sagernet/tfo-go"
)

type tfoConn struct {

@@ -66,14 +67,14 @@ func (c *tfoConn) Close() error {

func (c *tfoConn) LocalAddr() net.Addr {
    if c.Conn == nil {
        return nil
        return &net.TCPAddr{}
    }
    return c.Conn.LocalAddr()
}

func (c *tfoConn) RemoteAddr() net.Addr {
    if c.Conn == nil {
        return nil
        return &net.TCPAddr{}
    }
    return c.Conn.RemoteAddr()
}

@@ -123,7 +124,7 @@ func (c *tfoConn) WriterReplaceable() bool {
}

func dialTFO(ctx context.Context, netDialer net.Dialer, network, address string) (net.Conn, error) {
    ctx, cancel := context.WithCancel(ctx)
    ctx, cancel := context.WithTimeout(context.Background(), DefaultTCPTimeout)
    dialer := tfo.Dialer{Dialer: netDialer, DisableTFO: false}
    return &tfoConn{
        dialed: make(chan bool, 1),
@ -1,12 +1,11 @@
|
|||
package router
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"net/netip"
|
||||
"strings"
|
||||
|
||||
"github.com/metacubex/mihomo/component/cidr"
|
||||
"github.com/metacubex/mihomo/component/geodata/strmatcher"
|
||||
"github.com/metacubex/mihomo/component/trie"
|
||||
)
|
||||
|
@ -121,208 +120,37 @@ func (m *v2rayDomainMatcher) ApplyDomain(domain string) bool {
|
|||
return isMatched
|
||||
}
|
||||
|
||||
// CIDRList is an alias of []*CIDR to provide sort.Interface.
|
||||
type CIDRList []*CIDR
|
||||
|
||||
// Len implements sort.Interface.
|
||||
func (l *CIDRList) Len() int {
|
||||
return len(*l)
|
||||
}
|
||||
|
||||
// Less implements sort.Interface.
|
||||
func (l *CIDRList) Less(i int, j int) bool {
|
||||
ci := (*l)[i]
|
||||
cj := (*l)[j]
|
||||
|
||||
if len(ci.Ip) < len(cj.Ip) {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(ci.Ip) > len(cj.Ip) {
|
||||
return false
|
||||
}
|
||||
|
||||
for k := 0; k < len(ci.Ip); k++ {
|
||||
if ci.Ip[k] < cj.Ip[k] {
|
||||
return true
|
||||
}
|
||||
|
||||
if ci.Ip[k] > cj.Ip[k] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return ci.Prefix < cj.Prefix
|
||||
}
|
||||
|
||||
// Swap implements sort.Interface.
|
||||
func (l *CIDRList) Swap(i int, j int) {
|
||||
(*l)[i], (*l)[j] = (*l)[j], (*l)[i]
|
||||
}
|
||||
|
||||
type ipv6 struct {
|
||||
a uint64
|
||||
b uint64
|
||||
}
|
||||
|
||||
type GeoIPMatcher struct {
|
||||
countryCode string
|
||||
reverseMatch bool
|
||||
ip4 []uint32
|
||||
prefix4 []uint8
|
||||
ip6 []ipv6
|
||||
prefix6 []uint8
|
||||
}
|
||||
|
||||
func normalize4(ip uint32, prefix uint8) uint32 {
|
||||
return (ip >> (32 - prefix)) << (32 - prefix)
|
||||
}
|
||||
|
||||
func normalize6(ip ipv6, prefix uint8) ipv6 {
|
||||
if prefix <= 64 {
|
||||
ip.a = (ip.a >> (64 - prefix)) << (64 - prefix)
|
||||
ip.b = 0
|
||||
} else {
|
||||
ip.b = (ip.b >> (128 - prefix)) << (128 - prefix)
|
||||
}
|
||||
return ip
|
||||
cidrSet *cidr.IpCidrSet
|
||||
}
|
||||
|
||||
func (m *GeoIPMatcher) Init(cidrs []*CIDR) error {
|
||||
ip4Count := 0
|
||||
ip6Count := 0
|
||||
|
||||
for _, cidr := range cidrs {
|
||||
ip := cidr.Ip
|
||||
switch len(ip) {
|
||||
case 4:
|
||||
ip4Count++
|
||||
case 16:
|
||||
ip6Count++
|
||||
default:
|
||||
return fmt.Errorf("unexpect ip length: %d", len(ip))
|
||||
addr, ok := netip.AddrFromSlice(cidr.Ip)
|
||||
if !ok {
|
||||
return fmt.Errorf("error when loading GeoIP: invalid IP: %s", cidr.Ip)
|
||||
}
|
||||
err := m.cidrSet.AddIpCidr(netip.PrefixFrom(addr, int(cidr.Prefix)))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error when loading GeoIP: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
cidrList := CIDRList(cidrs)
|
||||
sort.Sort(&cidrList)
|
||||
|
||||
m.ip4 = make([]uint32, 0, ip4Count)
|
||||
m.prefix4 = make([]uint8, 0, ip4Count)
|
||||
m.ip6 = make([]ipv6, 0, ip6Count)
|
||||
m.prefix6 = make([]uint8, 0, ip6Count)
|
||||
|
||||
for _, cidr := range cidrs {
|
||||
ip := cidr.Ip
|
||||
prefix := uint8(cidr.Prefix)
|
||||
switch len(ip) {
|
||||
case 4:
|
||||
m.ip4 = append(m.ip4, normalize4(binary.BigEndian.Uint32(ip), prefix))
|
||||
m.prefix4 = append(m.prefix4, prefix)
|
||||
case 16:
|
||||
ip6 := ipv6{
|
||||
a: binary.BigEndian.Uint64(ip[0:8]),
|
||||
b: binary.BigEndian.Uint64(ip[8:16]),
|
||||
}
|
||||
ip6 = normalize6(ip6, prefix)
|
||||
|
||||
m.ip6 = append(m.ip6, ip6)
|
||||
m.prefix6 = append(m.prefix6, prefix)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return m.cidrSet.Merge()
|
||||
}
|
||||
|
||||
func (m *GeoIPMatcher) SetReverseMatch(isReverseMatch bool) {
|
||||
m.reverseMatch = isReverseMatch
|
||||
}
|
||||
|
||||
func (m *GeoIPMatcher) match4(ip uint32) bool {
|
||||
if len(m.ip4) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if ip < m.ip4[0] {
|
||||
return false
|
||||
}
|
||||
|
||||
size := uint32(len(m.ip4))
|
||||
l := uint32(0)
|
||||
r := size
|
||||
for l < r {
|
||||
x := ((l + r) >> 1)
|
||||
if ip < m.ip4[x] {
|
||||
r = x
|
||||
continue
|
||||
}
|
||||
|
||||
nip := normalize4(ip, m.prefix4[x])
|
||||
if nip == m.ip4[x] {
|
||||
return true
|
||||
}
|
||||
|
||||
l = x + 1
|
||||
}
|
||||
|
||||
return l > 0 && normalize4(ip, m.prefix4[l-1]) == m.ip4[l-1]
|
||||
}
|
||||
|
||||
func less6(a ipv6, b ipv6) bool {
|
||||
return a.a < b.a || (a.a == b.a && a.b < b.b)
|
||||
}
|
||||
|
||||
func (m *GeoIPMatcher) match6(ip ipv6) bool {
|
||||
if len(m.ip6) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if less6(ip, m.ip6[0]) {
|
||||
return false
|
||||
}
|
||||
|
||||
size := uint32(len(m.ip6))
|
||||
l := uint32(0)
|
||||
r := size
|
||||
for l < r {
|
||||
x := (l + r) / 2
|
||||
if less6(ip, m.ip6[x]) {
|
||||
r = x
|
||||
continue
|
||||
}
|
||||
|
||||
if normalize6(ip, m.prefix6[x]) == m.ip6[x] {
|
||||
return true
|
||||
}
|
||||
|
||||
l = x + 1
|
||||
}
|
||||
|
||||
return l > 0 && normalize6(ip, m.prefix6[l-1]) == m.ip6[l-1]
|
||||
}
|
||||
|
||||
// Match returns true if the given ip is included by the GeoIP.
|
||||
func (m *GeoIPMatcher) Match(ip net.IP) bool {
|
||||
switch len(ip) {
|
||||
case 4:
|
||||
if m.reverseMatch {
|
||||
return !m.match4(binary.BigEndian.Uint32(ip))
|
||||
}
|
||||
return m.match4(binary.BigEndian.Uint32(ip))
|
||||
case 16:
|
||||
if m.reverseMatch {
|
||||
return !m.match6(ipv6{
|
||||
a: binary.BigEndian.Uint64(ip[0:8]),
|
||||
b: binary.BigEndian.Uint64(ip[8:16]),
|
||||
})
|
||||
}
|
||||
return m.match6(ipv6{
|
||||
a: binary.BigEndian.Uint64(ip[0:8]),
|
||||
b: binary.BigEndian.Uint64(ip[8:16]),
|
||||
})
|
||||
default:
|
||||
return false
|
||||
func (m *GeoIPMatcher) Match(ip netip.Addr) bool {
|
||||
match := m.cidrSet.IsContain(ip)
|
||||
if m.reverseMatch {
|
||||
return !match
|
||||
}
|
||||
return match
|
||||
}
|
||||
|
||||
// GeoIPMatcherContainer is a container for GeoIPMatchers. It keeps unique copies of GeoIPMatcher by country code.
|
||||
|
@ -344,6 +172,7 @@ func (c *GeoIPMatcherContainer) Add(geoip *GeoIP) (*GeoIPMatcher, error) {
|
|||
m := &GeoIPMatcher{
|
||||
countryCode: geoip.CountryCode,
|
||||
reverseMatch: geoip.ReverseMatch,
|
||||
cidrSet: cidr.NewIpCidrSet(),
|
||||
}
|
||||
if err := m.Init(geoip.Cidr); err != nil {
|
||||
return nil, err
|
||||
|
@ -369,8 +198,7 @@ func NewGeoIPMatcher(geoip *GeoIP) (*GeoIPMatcher, error) {
|
|||
return matcher, nil
|
||||
}
|
||||
|
||||
func (m *MultiGeoIPMatcher) ApplyIp(ip net.IP) bool {
|
||||
|
||||
func (m *MultiGeoIPMatcher) ApplyIp(ip netip.Addr) bool {
|
||||
for _, matcher := range m.matchers {
|
||||
if matcher.Match(ip) {
|
||||
return true
|
||||
|
|
34
component/proxydialer/slowdown.go
Normal file
34
component/proxydialer/slowdown.go
Normal file
|
@ -0,0 +1,34 @@
|
|||
package proxydialer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/netip"
|
||||
|
||||
"github.com/metacubex/mihomo/component/slowdown"
|
||||
C "github.com/metacubex/mihomo/constant"
|
||||
)
|
||||
|
||||
type SlowDownDialer struct {
|
||||
C.Dialer
|
||||
Slowdown *slowdown.SlowDown
|
||||
}
|
||||
|
||||
func (d SlowDownDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
return slowdown.Do(d.Slowdown, ctx, func() (net.Conn, error) {
|
||||
return d.Dialer.DialContext(ctx, network, address)
|
||||
})
|
||||
}
|
||||
|
||||
func (d SlowDownDialer) ListenPacket(ctx context.Context, network, address string, rAddrPort netip.AddrPort) (net.PacketConn, error) {
|
||||
return slowdown.Do(d.Slowdown, ctx, func() (net.PacketConn, error) {
|
||||
return d.Dialer.ListenPacket(ctx, network, address, rAddrPort)
|
||||
})
|
||||
}
|
||||
|
||||
func NewSlowDownDialer(d C.Dialer, sd *slowdown.SlowDown) SlowDownDialer {
|
||||
return SlowDownDialer{
|
||||
Dialer: d,
|
||||
Slowdown: sd,
|
||||
}
|
||||
}
|
33
component/proxydialer/slowdown_sing.go
Normal file
33
component/proxydialer/slowdown_sing.go
Normal file
|
@ -0,0 +1,33 @@
|
|||
package proxydialer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"github.com/metacubex/mihomo/component/slowdown"
|
||||
M "github.com/sagernet/sing/common/metadata"
|
||||
)
|
||||
|
||||
type SlowDownSingDialer struct {
|
||||
SingDialer
|
||||
Slowdown *slowdown.SlowDown
|
||||
}
|
||||
|
||||
func (d SlowDownSingDialer) DialContext(ctx context.Context, network string, destination M.Socksaddr) (net.Conn, error) {
|
||||
return slowdown.Do(d.Slowdown, ctx, func() (net.Conn, error) {
|
||||
return d.SingDialer.DialContext(ctx, network, destination)
|
||||
})
|
||||
}
|
||||
|
||||
func (d SlowDownSingDialer) ListenPacket(ctx context.Context, destination M.Socksaddr) (net.PacketConn, error) {
|
||||
return slowdown.Do(d.Slowdown, ctx, func() (net.PacketConn, error) {
|
||||
return d.SingDialer.ListenPacket(ctx, destination)
|
||||
})
|
||||
}
|
||||
|
||||
func NewSlowDownSingDialer(d SingDialer, sd *slowdown.SlowDown) SlowDownSingDialer {
|
||||
return SlowDownSingDialer{
|
||||
SingDialer: d,
|
||||
Slowdown: sd,
|
||||
}
|
||||
}
|
|
@@ -13,10 +13,6 @@ import (
    "github.com/samber/lo"
)

const (
    minInterval = time.Minute * 5
)

var (
    fileMode os.FileMode = 0o666
    dirMode  os.FileMode = 0o755

@@ -164,8 +160,8 @@ func (f *Fetcher[V]) Destroy() error {

func (f *Fetcher[V]) pullLoop() {
    initialInterval := f.interval - time.Since(f.UpdatedAt)
    if initialInterval < minInterval {
        initialInterval = minInterval
    if initialInterval > f.interval {
        initialInterval = f.interval
    }

    timer := time.NewTimer(initialInterval)
101
component/slowdown/backoff.go
Normal file
101
component/slowdown/backoff.go
Normal file
|
@ -0,0 +1,101 @@
|
|||
// modify from https://github.com/jpillora/backoff/blob/v1.0.0/backoff.go
|
||||
|
||||
package slowdown
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Backoff is a time.Duration counter, starting at Min. After every call to
|
||||
// the Duration method the current timing is multiplied by Factor, but it
|
||||
// never exceeds Max.
|
||||
//
|
||||
// Backoff is not generally concurrent-safe, but the ForAttempt method can
|
||||
// be used concurrently.
|
||||
type Backoff struct {
|
||||
attempt atomic.Uint64
|
||||
// Factor is the multiplying factor for each increment step
|
||||
Factor float64
|
||||
// Jitter eases contention by randomizing backoff steps
|
||||
Jitter bool
|
||||
// Min and Max are the minimum and maximum values of the counter
|
||||
Min, Max time.Duration
|
||||
}
|
||||
|
||||
// Duration returns the duration for the current attempt before incrementing
|
||||
// the attempt counter. See ForAttempt.
|
||||
func (b *Backoff) Duration() time.Duration {
|
||||
d := b.ForAttempt(float64(b.attempt.Add(1) - 1))
|
||||
return d
|
||||
}
|
||||
|
||||
const maxInt64 = float64(math.MaxInt64 - 512)
|
||||
|
||||
// ForAttempt returns the duration for a specific attempt. This is useful if
|
||||
// you have a large number of independent Backoffs, but don't want use
|
||||
// unnecessary memory storing the Backoff parameters per Backoff. The first
|
||||
// attempt should be 0.
|
||||
//
|
||||
// ForAttempt is concurrent-safe.
|
||||
func (b *Backoff) ForAttempt(attempt float64) time.Duration {
|
||||
// Zero-values are nonsensical, so we use
|
||||
// them to apply defaults
|
||||
min := b.Min
|
||||
if min <= 0 {
|
||||
min = 100 * time.Millisecond
|
||||
}
|
||||
max := b.Max
|
||||
if max <= 0 {
|
||||
max = 10 * time.Second
|
||||
}
|
||||
if min >= max {
|
||||
// short-circuit
|
||||
return max
|
||||
}
|
||||
factor := b.Factor
|
||||
if factor <= 0 {
|
||||
factor = 2
|
||||
}
|
||||
//calculate this duration
|
||||
minf := float64(min)
|
||||
durf := minf * math.Pow(factor, attempt)
|
||||
if b.Jitter {
|
||||
durf = rand.Float64()*(durf-minf) + minf
|
||||
}
|
||||
//ensure float64 wont overflow int64
|
||||
if durf > maxInt64 {
|
||||
return max
|
||||
}
|
||||
dur := time.Duration(durf)
|
||||
//keep within bounds
|
||||
if dur < min {
|
||||
return min
|
||||
}
|
||||
if dur > max {
|
||||
return max
|
||||
}
|
||||
return dur
|
||||
}
|
||||
|
||||
// Reset restarts the current attempt counter at zero.
|
||||
func (b *Backoff) Reset() {
|
||||
b.attempt.Store(0)
|
||||
}
|
||||
|
||||
// Attempt returns the current attempt counter value.
|
||||
func (b *Backoff) Attempt() float64 {
|
||||
return float64(b.attempt.Load())
|
||||
}
|
||||
|
||||
// Copy returns a backoff with equals constraints as the original
|
||||
func (b *Backoff) Copy() *Backoff {
|
||||
return &Backoff{
|
||||
Factor: b.Factor,
|
||||
Jitter: b.Jitter,
|
||||
Min: b.Min,
|
||||
Max: b.Max,
|
||||
}
|
||||
}
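Usage sketch for the vendored Backoff type above (adapted from jpillora/backoff): each Duration call grows the delay by Factor, from Min toward Max. The import path is assumed from the new file's location.

package main

import (
    "fmt"
    "time"

    "github.com/metacubex/mihomo/component/slowdown" // assumed path for the vendored copy
)

func main() {
    b := &slowdown.Backoff{Min: 100 * time.Millisecond, Max: 10 * time.Second, Factor: 2}
    fmt.Println(b.Duration()) // 100ms
    fmt.Println(b.Duration()) // 200ms
    fmt.Println(b.Duration()) // 400ms
    b.Reset()
    fmt.Println(b.Duration()) // 100ms again
}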
|
49
component/slowdown/slowdown.go
Normal file
49
component/slowdown/slowdown.go
Normal file
|
@ -0,0 +1,49 @@
|
|||
package slowdown
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
type SlowDown struct {
|
||||
errTimes atomic.Int64
|
||||
backoff Backoff
|
||||
}
|
||||
|
||||
func (s *SlowDown) Wait(ctx context.Context) (err error) {
|
||||
select {
|
||||
case <-time.After(s.backoff.Duration()):
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func New() *SlowDown {
|
||||
return &SlowDown{
|
||||
backoff: Backoff{
|
||||
Min: 10 * time.Millisecond,
|
||||
Max: 1 * time.Second,
|
||||
Factor: 2,
|
||||
Jitter: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func Do[T any](s *SlowDown, ctx context.Context, fn func() (T, error)) (t T, err error) {
|
||||
if s.errTimes.Load() > 10 {
|
||||
err = s.Wait(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
t, err = fn()
|
||||
if err != nil {
|
||||
s.errTimes.Add(1)
|
||||
return
|
||||
}
|
||||
s.errTimes.Store(0)
|
||||
s.backoff.Reset()
|
||||
return
|
||||
}
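Do only starts delaying once more than ten consecutive errors have accumulated, then waits on the backoff between attempts. A sketch of wrapping a flaky operation with it; the always-failing function is illustrative and the import path is assumed from the new file's location.

package main

import (
    "context"
    "errors"
    "fmt"

    "github.com/metacubex/mihomo/component/slowdown" // assumed path
)

func main() {
    sd := slowdown.New()
    ctx := context.Background()

    // After more than 10 consecutive failures, Do waits on the backoff before calling fn again.
    for i := 0; i < 15; i++ {
        _, err := slowdown.Do(sd, ctx, func() (struct{}, error) {
            return struct{}{}, errors.New("dial failed") // always fails in this sketch
        })
        fmt.Println(i, err)
    }
}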
|
|
@ -185,6 +185,10 @@ func addIpv6Cidr(trie *IpCidrTrie, ip net.IP, groupSize int) {
|
|||
}
|
||||
|
||||
for i := 2; i < groupSize; i += 2 {
|
||||
if ip[i] == 0 && ip[i+1] == 0 {
|
||||
node.Mark = true
|
||||
}
|
||||
|
||||
if node.Mark {
|
||||
return
|
||||
}
|
||||
|
|
|
@ -74,6 +74,14 @@ func TestIpv6AddFail(t *testing.T) {
|
|||
assert.IsType(t, new(net.ParseError), err)
|
||||
}
|
||||
|
||||
func TestIpv6SearchSub(t *testing.T) {
|
||||
trie := NewIpCidrTrie()
|
||||
assert.NoError(t, trie.AddIpCidrForString("240e::/18"))
|
||||
|
||||
assert.Equal(t, true, trie.IsContainForString("240e:964:ea02:100:1800::71"))
|
||||
|
||||
}
|
||||
|
||||
func TestIpv6Search(t *testing.T) {
|
||||
trie := NewIpCidrTrie()
|
||||
|
||||
|
|
|
@@ -775,7 +775,7 @@ func parseProxies(cfg *RawConfig) (proxies map[string]C.Proxy, providersMap map[
        }
        ps = append(ps, proxies[v])
    }
    hc := provider.NewHealthCheck(ps, "", 0, true, nil)
    hc := provider.NewHealthCheck(ps, "", 5000, 0, true, nil)
    pd, _ := provider.NewCompatibleProvider(provider.ReservedName, ps, hc)
    providersMap[provider.ReservedName] = pd

@@ -1473,7 +1473,7 @@ func parseSniffer(snifferRaw RawSniffer) (*Sniffer, error) {
    if len(snifferRaw.Sniff) != 0 {
        for sniffType, sniffConfig := range snifferRaw.Sniff {
            find := false
            ports, err := utils.NewIntRangesFromList[uint16](sniffConfig.Ports)
            ports, err := utils.NewUnsignedRangesFromList[uint16](sniffConfig.Ports)
            if err != nil {
                return nil, err
            }

@@ -1500,7 +1500,7 @@ func parseSniffer(snifferRaw RawSniffer) (*Sniffer, error) {
        // Deprecated: Use Sniff instead
        log.Warnln("Deprecated: Use Sniff instead")
    }
    globalPorts, err := utils.NewIntRangesFromList[uint16](snifferRaw.Ports)
    globalPorts, err := utils.NewUnsignedRangesFromList[uint16](snifferRaw.Ports)
    if err != nil {
        return nil, err
    }
@@ -43,9 +43,9 @@ const (
)

const (
    DefaultTCPTimeout = 5 * time.Second
    DefaultTCPTimeout = dialer.DefaultTCPTimeout
    DefaultUDPTimeout = dialer.DefaultUDPTimeout
    DefaultDropTime   = 12 * DefaultTCPTimeout
    DefaultUDPTimeout = DefaultTCPTimeout
    DefaultTLSTimeout = DefaultTCPTimeout
    DefaultTestURL    = "https://www.gstatic.com/generate_204"
)
@@ -147,6 +147,7 @@ type Metadata struct {
    SpecialProxy string `json:"specialProxy"`
    SpecialRules string `json:"specialRules"`
    RemoteDst    string `json:"remoteDestination"`
    DSCP         uint8  `json:"dscp"`

    RawSrcAddr net.Addr `json:"-"`
    RawDstAddr net.Addr `json:"-"`
@@ -14,6 +14,7 @@ const (
    SrcPort
    DstPort
    InPort
    DSCP
    InUser
    InName
    InType

@@ -73,6 +74,8 @@ func (rt RuleType) String() string {
        return "RuleSet"
    case Network:
        return "Network"
    case DSCP:
        return "DSCP"
    case Uid:
        return "Uid"
    case SubRules:
@@ -41,7 +41,7 @@ func (gf *geoipFilter) Match(ip netip.Addr) bool {
            return false
        }
    }
    return !geoIPMatcher.Match(ip.AsSlice())
    return !geoIPMatcher.Match(ip)
}

type ipnetFilter struct {
@@ -49,7 +49,7 @@ func ServeDNSWithDefaultServer(msg *D.Msg) (*D.Msg, error) {

func FlushCacheWithDefaultResolver() {
    if r := resolver.DefaultResolver; r != nil {
        r.(*Resolver).lruCache = lru.New[string, *D.Msg](lru.WithSize[string, *D.Msg](4096), lru.WithStale[string, *D.Msg](true))
        r.(*Resolver).cache = lru.New(lru.WithSize[string, *D.Msg](4096), lru.WithStale[string, *D.Msg](true))
    }
}
@@ -289,8 +289,6 @@ func listenPacket(ctx context.Context, proxyAdapter C.ProxyAdapter, proxyName st
    return proxyAdapter.ListenPacketContext(ctx, metadata, opts...)
}

var errIPNotFound = errors.New("couldn't find ip")

func batchExchange(ctx context.Context, clients []dnsClient, m *D.Msg) (msg *D.Msg, cache bool, err error) {
    cache = true
    fast, ctx := picker.WithTimeout[*D.Msg](ctx, resolver.DefaultDNSTimeout)

@@ -320,12 +318,12 @@ func batchExchange(ctx context.Context, clients []dnsClient, m *D.Msg) (msg *D.M
            case D.TypeAAAA:
                if len(ips) == 0 {
                    noIpMsg = m
                    return nil, errIPNotFound
                    return nil, resolver.ErrIPNotFound
                }
            case D.TypeA:
                if len(ips) == 0 {
                    noIpMsg = m
                    return nil, errIPNotFound
                    return nil, resolver.ErrIPNotFound
                }
            }
        }
@@ -788,6 +788,7 @@ proxy-groups:
      - vmess1
    # tolerance: 150
    # lazy: true
    # expected-status: 204 # if the health check returns a status code that does not match the expected value, the node is considered unavailable
    url: "https://cp.cloudflare.com/generate_204"
    interval: 300

@@ -851,6 +852,7 @@ proxy-providers:
    interval: 600
    # lazy: true
    url: https://cp.cloudflare.com/generate_204
    # expected-status: 204 # if the health check returns a status code that does not match the expected value, the node is considered unavailable
    override: # override some settings applied when the provider's nodes are loaded
      skip-cert-verify: true
      udp: true

@@ -1068,4 +1070,4 @@ listeners:
    # authentication-timeout: 1000
    # alpn:
    #   - h3
    # max-udp-relay-packet-size: 1500
    # max-udp-relay-packet-size: 1500
35
go.mod
35
go.mod
|
@ -9,22 +9,21 @@ require (
|
|||
github.com/cilium/ebpf v0.12.3
|
||||
github.com/coreos/go-iptables v0.7.0
|
||||
github.com/dlclark/regexp2 v1.10.0
|
||||
github.com/go-chi/chi/v5 v5.0.10
|
||||
github.com/go-chi/chi/v5 v5.0.11
|
||||
github.com/go-chi/cors v1.2.1
|
||||
github.com/go-chi/render v1.0.3
|
||||
github.com/gobwas/ws v1.3.1
|
||||
github.com/gobwas/ws v1.3.2
|
||||
github.com/gofrs/uuid/v5 v5.0.0
|
||||
github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2
|
||||
github.com/jpillora/backoff v1.0.0
|
||||
github.com/klauspost/cpuid/v2 v2.2.6
|
||||
github.com/lunixbochs/struc v0.0.0-20200707160740-784aaebc1d40
|
||||
github.com/mdlayher/netlink v1.7.2
|
||||
github.com/metacubex/gopacket v1.1.20-0.20230608035415-7e2f98a3e759
|
||||
github.com/metacubex/quic-go v0.40.1-0.20231130135418-0c1b47cf9394
|
||||
github.com/metacubex/sing-quic v0.0.0-20231220152840-85620b446796
|
||||
github.com/metacubex/quic-go v0.41.1-0.20240120014142-a02f4a533d4a
|
||||
github.com/metacubex/sing-quic v0.0.0-20240130040922-cbe613c88f20
|
||||
github.com/metacubex/sing-shadowsocks v0.2.6
|
||||
github.com/metacubex/sing-shadowsocks2 v0.1.6-beta.1
|
||||
github.com/metacubex/sing-tun v0.2.0-beta.4
|
||||
github.com/metacubex/sing-shadowsocks2 v0.2.0
|
||||
github.com/metacubex/sing-tun v0.2.1-0.20240130042529-1f983547e9d4
|
||||
github.com/metacubex/sing-vmess v0.1.9-0.20231207122118-72303677451f
|
||||
github.com/metacubex/sing-wireguard v0.0.0-20231209125515-0594297f7232
|
||||
github.com/miekg/dns v1.1.57
|
||||
|
@ -34,25 +33,26 @@ require (
|
|||
github.com/puzpuzpuz/xsync/v3 v3.0.2
|
||||
github.com/sagernet/bbolt v0.0.0-20231014093535-ea5cb2fe9f0a
|
||||
github.com/sagernet/netlink v0.0.0-20220905062125-8043b4a9aa97
|
||||
github.com/sagernet/sing v0.3.0-rc.3
|
||||
github.com/sagernet/sing-mux v0.1.6-beta.1
|
||||
github.com/sagernet/sing v0.3.0
|
||||
github.com/sagernet/sing-mux v0.2.1-0.20240124034317-9bfb33698bb6
|
||||
github.com/sagernet/sing-shadowtls v0.1.4
|
||||
github.com/sagernet/tfo-go v0.0.0-20231209031829-7b5343ac1dc6
|
||||
github.com/sagernet/utls v1.5.4
|
||||
github.com/sagernet/wireguard-go v0.0.0-20231209092712-9a439356a62e
|
||||
github.com/samber/lo v1.39.0
|
||||
github.com/shirou/gopsutil/v3 v3.23.11
|
||||
github.com/shirou/gopsutil/v3 v3.23.12
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8
|
||||
github.com/zhangyunhao116/fastrand v0.3.0
|
||||
go.uber.org/automaxprocs v1.5.3
|
||||
golang.org/x/crypto v0.16.0
|
||||
golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb
|
||||
golang.org/x/net v0.19.0
|
||||
golang.org/x/sync v0.5.0
|
||||
golang.org/x/sys v0.15.0
|
||||
google.golang.org/protobuf v1.31.0
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
|
||||
golang.org/x/crypto v0.18.0
|
||||
golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e
|
||||
golang.org/x/net v0.20.0
|
||||
golang.org/x/sync v0.6.0
|
||||
golang.org/x/sys v0.16.0
|
||||
google.golang.org/protobuf v1.32.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
lukechampine.com/blake3 v1.2.1
|
||||
)
|
||||
|
@@ -105,11 +105,10 @@ require (
github.com/yusufpapurcu/wmi v1.2.3 // indirect
gitlab.com/yawning/bsaes.git v0.0.0-20190805113838-0a714cd429ec // indirect
go.uber.org/mock v0.3.0 // indirect
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.16.0 // indirect
)

replace github.com/sagernet/sing => github.com/metacubex/sing v0.0.0-20231221131356-d73c21c7ea3f
replace github.com/sagernet/sing => github.com/metacubex/sing v0.0.0-20240111014253-f1818b6a82b2
|
||||
|
|
64 go.sum
|
@ -44,8 +44,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
|
|||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/gaukas/godicttls v0.0.4 h1:NlRaXb3J6hAnTmWdsEKb9bcSBD6BvcIjdGdeb0zfXbk=
|
||||
github.com/gaukas/godicttls v0.0.4/go.mod h1:l6EenT4TLWgTdwslVb4sEMOCf7Bv0JAK67deKr9/NCI=
|
||||
github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk=
|
||||
github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||
github.com/go-chi/chi/v5 v5.0.11 h1:BnpYbFZ3T3S1WMpD79r7R5ThWX40TaFB7L31Y8xqSwA=
|
||||
github.com/go-chi/chi/v5 v5.0.11/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||
github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4=
|
||||
github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58=
|
||||
github.com/go-chi/render v1.0.3 h1:AsXqd2a1/INaIfUSKq3G5uA8weYx20FOsM7uSoCyyt4=
|
||||
|
@ -60,16 +60,14 @@ github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU
|
|||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.3.1 h1:Qi34dfLMWJbiKaNbDVzM9x27nZBjmkaW6i4+Ku+pGVU=
|
||||
github.com/gobwas/ws v1.3.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/gobwas/ws v1.3.2 h1:zlnbNHxumkRvfPWgfXu8RBwyNR1x8wh9cf5PTOCqs9Q=
|
||||
github.com/gobwas/ws v1.3.2/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M=
|
||||
github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
|
||||
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
|
@ -86,8 +84,6 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF
|
|||
github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
|
||||
github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
|
||||
github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
|
||||
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
|
||||
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
|
||||
|
@ -108,18 +104,18 @@ github.com/metacubex/gopacket v1.1.20-0.20230608035415-7e2f98a3e759 h1:cjd4biTvO
|
|||
github.com/metacubex/gopacket v1.1.20-0.20230608035415-7e2f98a3e759/go.mod h1:UHOv2xu+RIgLwpXca7TLrXleEd4oR3sPatW6IF8wU88=
|
||||
github.com/metacubex/gvisor v0.0.0-20231209122014-3e43224c7bbc h1:+yTZ6q2EeQCAJNpKNEu5j32Pm23ShD38ElIa635wTrk=
|
||||
github.com/metacubex/gvisor v0.0.0-20231209122014-3e43224c7bbc/go.mod h1:rhBU9tD5ktoGPBtXUquhWuGJ4u+8ZZzBMi2cAdv9q8Y=
|
||||
github.com/metacubex/quic-go v0.40.1-0.20231130135418-0c1b47cf9394 h1:dIT+KB2hknBCrwVAXPeY9tpzzkOZP5m40yqUteRT6/Y=
|
||||
github.com/metacubex/quic-go v0.40.1-0.20231130135418-0c1b47cf9394/go.mod h1:F/t8VnA47xoia8ABlNA4InkZjssvFJ5p6E6jKdbkgAs=
|
||||
github.com/metacubex/sing v0.0.0-20231221131356-d73c21c7ea3f h1:T2PuaAiXMSC3mjRRUmIomuiu3jhi7EWSbzXtVIrVUC4=
|
||||
github.com/metacubex/sing v0.0.0-20231221131356-d73c21c7ea3f/go.mod h1:9pfuAH6mZfgnz/YjP6xu5sxx882rfyjpcrTdUpd6w3g=
|
||||
github.com/metacubex/sing-quic v0.0.0-20231220152840-85620b446796 h1:xiCPttMGAaIh4Ad6t85VxUoUv+Sg88eXzzUvYN8gT5w=
|
||||
github.com/metacubex/sing-quic v0.0.0-20231220152840-85620b446796/go.mod h1:E1e1Uu6YaJddD+c0DtJlSOkfMI0NLdOVhM60KAlcssY=
|
||||
github.com/metacubex/quic-go v0.41.1-0.20240120014142-a02f4a533d4a h1:IMr75VdMnDUhkANZemUWqmOPLfwnemiIaCHRnGCdAsY=
|
||||
github.com/metacubex/quic-go v0.41.1-0.20240120014142-a02f4a533d4a/go.mod h1:F/t8VnA47xoia8ABlNA4InkZjssvFJ5p6E6jKdbkgAs=
|
||||
github.com/metacubex/sing v0.0.0-20240111014253-f1818b6a82b2 h1:upEO8dt9WDBavhgcgkXB3hRcwVNbkTbnd+xyzy6ZQZo=
|
||||
github.com/metacubex/sing v0.0.0-20240111014253-f1818b6a82b2/go.mod h1:9pfuAH6mZfgnz/YjP6xu5sxx882rfyjpcrTdUpd6w3g=
|
||||
github.com/metacubex/sing-quic v0.0.0-20240130040922-cbe613c88f20 h1:wt7ydRxm9Pvw+un6KD97tjLJHMrkzp83HyiGkoz6e7k=
|
||||
github.com/metacubex/sing-quic v0.0.0-20240130040922-cbe613c88f20/go.mod h1:bdHqEysJclB9BzIa5jcKKSZ1qua+YEPjR8fOzzE3vZU=
|
||||
github.com/metacubex/sing-shadowsocks v0.2.6 h1:6oEB3QcsFYnNiFeoevcXrCwJ3sAablwVSgtE9R3QeFQ=
|
||||
github.com/metacubex/sing-shadowsocks v0.2.6/go.mod h1:zIkMeSnb8Mbf4hdqhw0pjzkn1d99YJ3JQm/VBg5WMTg=
|
||||
github.com/metacubex/sing-shadowsocks2 v0.1.6-beta.1 h1:ftbpVCK1+n3jxIP7+NMkRYOFEQtGPodV42MizsPey0w=
|
||||
github.com/metacubex/sing-shadowsocks2 v0.1.6-beta.1/go.mod h1:kUVj2X+2wUh6Z5pAk9WrjDRehPyXolC6nJyFl7ln4V4=
|
||||
github.com/metacubex/sing-tun v0.2.0-beta.4 h1:42F+uF9zKsaWsiUXNKzZD8aRkyPN9m5SdpF2yZEZar8=
|
||||
github.com/metacubex/sing-tun v0.2.0-beta.4/go.mod h1:O8wFThUDfiwb6y56I714dQuyaqT8DW9VCD/wvGesyLM=
|
||||
github.com/metacubex/sing-shadowsocks2 v0.2.0 h1:hqwT/AfI5d5UdPefIzR6onGHJfDXs5zgOM5QSgaM/9A=
|
||||
github.com/metacubex/sing-shadowsocks2 v0.2.0/go.mod h1:LCKF6j1P94zN8ZS+LXRK1gmYTVGB3squivBSXAFnOg8=
|
||||
github.com/metacubex/sing-tun v0.2.1-0.20240130042529-1f983547e9d4 h1:qz256cI4oGBtLT0H3wQYgazLGYLQUEZqMkf0i8sGH5A=
|
||||
github.com/metacubex/sing-tun v0.2.1-0.20240130042529-1f983547e9d4/go.mod h1:P+TjrGTG5AdQRaskP6NiI9gZmgnwR3o5ze9CkIQE+/s=
|
||||
github.com/metacubex/sing-vmess v0.1.9-0.20231207122118-72303677451f h1:QjXrHKbTMBip/C+R79bvbfr42xH1gZl3uFb0RELdZiQ=
|
||||
github.com/metacubex/sing-vmess v0.1.9-0.20231207122118-72303677451f/go.mod h1:olVkD4FChQ5gKMHG4ZzuD7+fMkJY1G8vwOKpRehjrmY=
|
||||
github.com/metacubex/sing-wireguard v0.0.0-20231209125515-0594297f7232 h1:loWjR+k9dxqBSgruGyT5hE8UCRMmCEjxqZbryfY9no4=
|
||||
|
@ -159,8 +155,8 @@ github.com/sagernet/bbolt v0.0.0-20231014093535-ea5cb2fe9f0a h1:+NkI2670SQpQWvkk
|
|||
github.com/sagernet/bbolt v0.0.0-20231014093535-ea5cb2fe9f0a/go.mod h1:63s7jpZqcDAIpj8oI/1v4Izok+npJOHACFCU6+huCkM=
|
||||
github.com/sagernet/netlink v0.0.0-20220905062125-8043b4a9aa97 h1:iL5gZI3uFp0X6EslacyapiRz7LLSJyr4RajF/BhMVyE=
|
||||
github.com/sagernet/netlink v0.0.0-20220905062125-8043b4a9aa97/go.mod h1:xLnfdiJbSp8rNqYEdIW/6eDO4mVoogml14Bh2hSiFpM=
|
||||
github.com/sagernet/sing-mux v0.1.6-beta.1 h1:ADs1TgiMfA628Y2qfv21tEvePDZjBRRYddwtNFZiwe8=
|
||||
github.com/sagernet/sing-mux v0.1.6-beta.1/go.mod h1:WWtRmrwCDgb+g+7Da6o62I9WiMNB0a3w6BJhEpNQlNA=
|
||||
github.com/sagernet/sing-mux v0.2.1-0.20240124034317-9bfb33698bb6 h1:5bCAkvDDzSMITiHFjolBwpdqYsvycdTu71FsMEFXQ14=
|
||||
github.com/sagernet/sing-mux v0.2.1-0.20240124034317-9bfb33698bb6/go.mod h1:khzr9AOPocLa+g53dBplwNDz4gdsyx/YM3swtAhlkHQ=
|
||||
github.com/sagernet/sing-shadowtls v0.1.4 h1:aTgBSJEgnumzFenPvc+kbD9/W0PywzWevnVpEx6Tw3k=
|
||||
github.com/sagernet/sing-shadowtls v0.1.4/go.mod h1:F8NBgsY5YN2beQavdgdm1DPlhaKQlaL6lpDdcBglGK4=
|
||||
github.com/sagernet/smux v0.0.0-20231208180855-7041f6ea79e7 h1:DImB4lELfQhplLTxeq2z31Fpv8CQqqrUwTbrIRumZqQ=
|
||||
|
@ -175,8 +171,8 @@ github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
|
|||
github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
|
||||
github.com/scjalliance/comshim v0.0.0-20230315213746-5e51f40bd3b9 h1:rc/CcqLH3lh8n+csdOuDfP+NuykE0U6AeYSJJHKDgSg=
|
||||
github.com/scjalliance/comshim v0.0.0-20230315213746-5e51f40bd3b9/go.mod h1:a/83NAfUXvEuLpmxDssAXxgUgrEy12MId3Wd7OTs76s=
|
||||
github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ=
|
||||
github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
|
||||
github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
|
||||
github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
|
||||
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
|
||||
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
|
||||
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
|
||||
|
@ -226,21 +222,21 @@ go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBs
|
|||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
|
||||
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||
golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8=
|
||||
golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
|
||||
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e h1:723BNChdd0c2Wk6WOE320qGBiPtYx0F0Bbm1kriShfE=
|
||||
golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
|
||||
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
|
||||
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
|
||||
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -256,8 +252,9 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
|
@ -268,9 +265,8 @@ golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
|
|||
golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
|
||||
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
|
|
@@ -78,7 +78,7 @@ func getGroupDelay(w http.ResponseWriter, r *http.Request) {
return
}

expectedStatus, err := utils.NewIntRanges[uint16](query.Get("expected"))
expectedStatus, err := utils.NewUnsignedRanges[uint16](query.Get("expected"))
if err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, ErrBadRequest)
|
||||
|
|
|
@@ -113,7 +113,7 @@ func getProxyDelay(w http.ResponseWriter, r *http.Request) {
return
}

expectedStatus, err := utils.NewIntRanges[uint16](query.Get("expected"))
expectedStatus, err := utils.NewUnsignedRanges[uint16](query.Get("expected"))
if err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, ErrBadRequest)
|
||||
|
|
|
@@ -21,6 +21,7 @@ type Hysteria2Server struct {
IgnoreClientBandwidth bool `yaml:"ignore-client-bandwidth" json:"ignore-client-bandwidth,omitempty"`
Masquerade string `yaml:"masquerade" json:"masquerade,omitempty"`
CWND int `yaml:"cwnd" json:"cwnd,omitempty"`
UdpMTU int `yaml:"udp-mtu" json:"udp-mtu,omitempty"`
MuxOption sing.MuxOption `yaml:"mux-option" json:"mux-option,omitempty"`
}
|
||||
|
||||
|
|
|
@@ -8,6 +8,7 @@ import (
"time"

"github.com/metacubex/mihomo/adapter/inbound"
N "github.com/metacubex/mihomo/common/net"
C "github.com/metacubex/mihomo/constant"
"github.com/metacubex/mihomo/transport/socks5"
)

@@ -30,7 +31,7 @@ func newClient(srcConn net.Conn, tunnel C.Tunnel, additions ...inbound.Addition)
return nil, socks5.ErrAddressNotSupported
}

left, right := net.Pipe()
left, right := N.Pipe()

go tunnel.HandleTCPConn(inbound.NewHTTP(dstAddr, srcConn, right, additions...))
|
||||
|
||||
|
|
|
@@ -41,7 +41,7 @@ func handleUpgrade(conn net.Conn, request *http.Request, tunnel C.Tunnel, additi
return
}

left, right := net.Pipe()
left, right := N.Pipe()

go tunnel.HandleTCPConn(inbound.NewHTTP(dstAddr, conn, right, additions...))
|
||||
|
||||
|
|
|
@@ -21,6 +21,7 @@ type Hysteria2Option struct {
IgnoreClientBandwidth bool `inbound:"ignore-client-bandwidth,omitempty"`
Masquerade string `inbound:"masquerade,omitempty"`
CWND int `inbound:"cwnd,omitempty"`
UdpMTU int `inbound:"udp-mtu,omitempty"`
MuxOption MuxOption `inbound:"mux-option,omitempty"`
}

@@ -58,6 +59,7 @@ func NewHysteria2(options *Hysteria2Option) (*Hysteria2, error) {
IgnoreClientBandwidth: options.IgnoreClientBandwidth,
Masquerade: options.Masquerade,
CWND: options.CWND,
UdpMTU: options.UdpMTU,
MuxOption: options.MuxOption.Build(),
},
}, nil
|
||||
|
|
|
@@ -6,6 +6,7 @@ import (
"net/netip"
"strconv"

N "github.com/metacubex/mihomo/common/net"
C "github.com/metacubex/mihomo/constant"
)

@@ -20,7 +21,7 @@ func HandleTcp(address string) (conn net.Conn, err error) {
return nil, errors.New("tcp uninitialized")
}
// executor Parsed
conn1, conn2 := net.Pipe()
conn1, conn2 := N.Pipe()

metadata := &C.Metadata{}
metadata.NetWork = C.TCP
|
||||
|
|
|
@@ -104,6 +104,12 @@ func New(config LC.Hysteria2Server, tunnel C.Tunnel, additions ...inbound.Additi
}
}

if config.UdpMTU == 0 {
// "1200" from quic-go's MaxDatagramSize
// "-3" from quic-go's DatagramFrame.MaxDataLen
config.UdpMTU = 1200 - 3
}

service, err := hysteria2.NewService[string](hysteria2.ServiceOptions{
Context: context.Background(),
Logger: log.SingLogger,

@@ -115,6 +121,7 @@ func New(config LC.Hysteria2Server, tunnel C.Tunnel, additions ...inbound.Additi
Handler: h,
MasqueradeHandler: masqueradeHandler,
CWND: config.CWND,
UdpMTU: config.UdpMTU,
})
if err != nil {
return nil, err
|
||||
|
|
|
@@ -34,6 +34,14 @@ func setsockopt(rc syscall.RawConn, addr string) error {
if err == nil && isIPv6 {
err = syscall.SetsockoptInt(int(fd), syscall.SOL_IPV6, IPV6_RECVORIGDSTADDR, 1)
}

if err == nil {
err = syscall.SetsockoptInt(int(fd), syscall.SOL_IP, syscall.IP_RECVTOS, 1)
}

if err == nil {
err = syscall.SetsockoptInt(int(fd), syscall.SOL_IPV6, syscall.IPV6_RECVTCLASS, 1)
}
})

return err
|
||||
|
|
|
@@ -79,6 +79,9 @@ func NewUDP(addr string, tunnel C.Tunnel, additions ...inbound.Addition) (*UDPLi
continue
}

dscp, _ := getDSCP(oob[:oobn])
additions = append(additions, inbound.WithDSCP(dscp))

if rAddr.Addr().Is4() {
// try to unmap 4in6 address
lAddr = netip.AddrPortFrom(lAddr.Addr().Unmap(), lAddr.Port())
|
||||
|
|
|
@@ -104,7 +104,7 @@ func getOrigDst(oob []byte) (netip.AddrPort, error) {
}

// retrieve the destination address from the SCM.
sa, err := unix.ParseOrigDstAddr(&scms[0])
sa, err := unix.ParseOrigDstAddr(&scms[1])
if err != nil {
return netip.AddrPort{}, fmt.Errorf("retrieve destination: %w", err)
}
|
||||
|
@@ -122,3 +122,30 @@ func getOrigDst(oob []byte) (netip.AddrPort, error) {

return rAddr, nil
}

func getDSCP (oob []byte) (uint8, error) {
scms, err := unix.ParseSocketControlMessage(oob)
if err != nil {
return 0, fmt.Errorf("parse control message: %w", err)
}
dscp, err := parseDSCP(&scms[0])
if err != nil {
return 0, fmt.Errorf("retrieve DSCP: %w", err)
}
return dscp, nil
}

func parseDSCP(m *unix.SocketControlMessage) (uint8, error) {
switch {
case m.Header.Level == unix.SOL_IP && m.Header.Type == unix.IP_TOS:
dscp := uint8(m.Data[0] >> 2)
return dscp, nil

case m.Header.Level == unix.SOL_IPV6 && m.Header.Type == unix.IPV6_TCLASS:
dscp := uint8(m.Data[0] >> 2)
return dscp, nil

default:
return 0, nil
}
}
|
||||
|
|
|
@@ -12,6 +12,10 @@ func getOrigDst(oob []byte) (netip.AddrPort, error) {
return netip.AddrPort{}, errors.New("UDP redir not supported on current platform")
}

func getDSCP(oob []byte) (uint8, error) {
return 0, errors.New("UDP redir not supported on current platform")
}

func dialUDP(network string, lAddr, rAddr netip.AddrPort) (*net.UDPConn, error) {
return nil, errors.New("UDP redir not supported on current platform")
}
|
||||
|
|
3 main.go
|
@@ -72,11 +72,10 @@ func main() {
currentDir, _ := os.Getwd()
configFile = filepath.Join(currentDir, configFile)
}
C.SetConfig(configFile)
} else {
configFile = filepath.Join(C.Path.HomeDir(), C.Path.Config())
C.SetConfig(configFile)
}
C.SetConfig(configFile)

if geodataMode {
C.GeodataMode = true
|
||||
|
|
|
@@ -47,8 +47,12 @@ func (srv *Service) Start() {
srv.mu.Lock()
defer srv.mu.Unlock()
log.Infoln("NTP service start, sync system time is %t", srv.syncSystemTime)
err := srv.update()
if err != nil {
log.Errorln("Initialize NTP time failed: %s", err)
return
}
service.running = true
srv.update()
go srv.loopUpdate()
}
|
||||
|
||||
|
@@ -71,20 +75,16 @@ func (srv *Service) Running() bool {
return srv.running
}

func (srv *Service) update() {
func (srv *Service) update() error {
var response *ntp.Response
var err error
for i := 0; i < 3; i++ {
response, err = ntp.Exchange(context.Background(), srv.dialer, srv.server)
if err != nil {
if i == 2 {
log.Errorln("Initialize NTP time failed: %s", err)
return
}
time.Sleep(time.Second * 2) // wait for 2 seconds before the next try
continue
if response, err = ntp.Exchange(context.Background(), srv.dialer, srv.server); err == nil {
break
}
if i == 2 {
return err
}
break
}
offset = response.ClockOffset
if offset > time.Duration(0) {
|
||||
|
@ -94,14 +94,15 @@ func (srv *Service) update() {
|
|||
}
|
||||
if srv.syncSystemTime {
|
||||
timeNow := response.Time
|
||||
err = setSystemTime(timeNow)
|
||||
if err == nil {
|
||||
syncErr := setSystemTime(timeNow)
|
||||
if syncErr == nil {
|
||||
log.Infoln("Sync system time success: %s", timeNow.Local().Format(ntp.TimeLayout))
|
||||
} else {
|
||||
log.Errorln("Write time to system: %s", err)
|
||||
log.Errorln("Write time to system: %s", syncErr)
|
||||
srv.syncSystemTime = false
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (srv *Service) loopUpdate() {
|
||||
|
@ -111,7 +112,10 @@ func (srv *Service) loopUpdate() {
|
|||
return
|
||||
case <-srv.ticker.C:
|
||||
}
|
||||
srv.update()
|
||||
err := srv.update()
|
||||
if err != nil {
|
||||
log.Warnln("Sync time failed: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
49 rules/common/dscp.go Normal file
|
@@ -0,0 +1,49 @@
package common

import (
"fmt"

"github.com/metacubex/mihomo/common/utils"
C "github.com/metacubex/mihomo/constant"
)

type DSCP struct {
*Base
ranges utils.IntRanges[uint8]
payload string
adapter string
}

func (d *DSCP) RuleType() C.RuleType {
return C.DSCP
}

func (d *DSCP) Match(metadata *C.Metadata) (bool, string) {
return d.ranges.Check(metadata.DSCP), d.adapter
}

func (d *DSCP) Adapter() string {
return d.adapter
}

func (d *DSCP) Payload() string {
return d.payload
}

func NewDSCP(dscp string, adapter string) (*DSCP, error) {
ranges, err := utils.NewUnsignedRanges[uint8](dscp)
if err != nil {
return nil, fmt.Errorf("parse DSCP rule fail: %w", err)
}
for _, r := range ranges {
if r.End() > 63 {
return nil, fmt.Errorf("DSCP couldn't be negative or exceed 63")
}
}
return &DSCP{
Base: &Base{},
payload: dscp,
ranges: ranges,
adapter: adapter,
}, nil
}
|
|
@@ -48,7 +48,7 @@ func (g *GEOIP) Match(metadata *C.Metadata) (bool, string) {
}
return false, g.adapter
}
return g.geoIPMatcher.Match(ip.AsSlice()), g.adapter
return g.geoIPMatcher.Match(ip), g.adapter
}

func (g *GEOIP) Adapter() string {
|
||||
|
|
|
@@ -39,7 +39,7 @@ func (p *Port) Payload() string {
}

func NewPort(port string, adapter string, ruleType C.RuleType) (*Port, error) {
portRanges, err := utils.NewIntRanges[uint16](port)
portRanges, err := utils.NewUnsignedRanges[uint16](port)
if err != nil {
return nil, fmt.Errorf("%w, %w", errPayload, err)
}
|
||||
|
|
|
@@ -21,7 +21,7 @@ func NewUid(oUid, adapter string) (*Uid, error) {
return nil, fmt.Errorf("uid rule not support this platform")
}

uidRange, err := utils.NewIntRanges[uint32](oUid)
uidRange, err := utils.NewUnsignedRanges[uint32](oUid)
if err != nil {
return nil, fmt.Errorf("%w, %w", errPayload, err)
}
|
||||
|
|
|
@@ -217,6 +217,13 @@ func (logic *Logic) parsePayload(payload string, parseRule ParseRuleFunc) error
return err
}

if rule.ShouldResolveIP() {
logic.needIP = true
}
if rule.ShouldFindProcess() {
logic.needProcess = true
}

rules = append(rules, rule)
}
|
||||
|
||||
|
|
|
@@ -2,6 +2,7 @@ package rules

import (
"fmt"

C "github.com/metacubex/mihomo/constant"
RC "github.com/metacubex/mihomo/rules/common"
"github.com/metacubex/mihomo/rules/logic"

@@ -37,6 +38,8 @@ func ParseRule(tp, payload, target string, params []string, subRules map[string]
parsed, parseErr = RC.NewPort(payload, target, C.DstPort)
case "IN-PORT":
parsed, parseErr = RC.NewPort(payload, target, C.InPort)
case "DSCP":
parsed, parseErr = RC.NewDSCP(payload, target)
case "PROCESS-NAME":
parsed, parseErr = RC.NewProcess(payload, target, true)
case "PROCESS-PATH":
|
||||
|
|
|
@@ -1,7 +1,7 @@
package provider

import (
"github.com/metacubex/mihomo/component/trie"
"github.com/metacubex/mihomo/component/cidr"
C "github.com/metacubex/mihomo/constant"
"github.com/metacubex/mihomo/log"
)
|
||||
|
@@ -9,7 +9,8 @@ import (
type ipcidrStrategy struct {
count int
shouldResolveIP bool
trie *trie.IpCidrTrie
cidrSet *cidr.IpCidrSet
//trie *trie.IpCidrTrie
}

func (i *ipcidrStrategy) ShouldFindProcess() bool {

@@ -17,7 +18,8 @@ func (i *ipcidrStrategy) ShouldFindProcess() bool {
}

func (i *ipcidrStrategy) Match(metadata *C.Metadata) bool {
return i.trie != nil && i.trie.IsContain(metadata.DstIP.AsSlice())
// return i.trie != nil && i.trie.IsContain(metadata.DstIP.AsSlice())
return i.cidrSet != nil && i.cidrSet.IsContain(metadata.DstIP)
}

func (i *ipcidrStrategy) Count() int {
|
||||
|
@@ -29,13 +31,15 @@ func (i *ipcidrStrategy) ShouldResolveIP() bool {
}

func (i *ipcidrStrategy) Reset() {
i.trie = trie.NewIpCidrTrie()
// i.trie = trie.NewIpCidrTrie()
i.cidrSet = cidr.NewIpCidrSet()
i.count = 0
i.shouldResolveIP = false
}

func (i *ipcidrStrategy) Insert(rule string) {
err := i.trie.AddIpCidrForString(rule)
//err := i.trie.AddIpCidrForString(rule)
err := i.cidrSet.AddIpCidrForString(rule)
if err != nil {
log.Warnln("invalid Ipcidr:[%s]", rule)
} else {

@@ -44,7 +48,9 @@ func (i *ipcidrStrategy) Insert(rule string) {
}
}

func (i *ipcidrStrategy) FinishInsert() {}
func (i *ipcidrStrategy) FinishInsert() {
i.cidrSet.Merge()
}

func NewIPCidrStrategy() *ipcidrStrategy {
return &ipcidrStrategy{}
|
||||
|
|
|
@@ -402,10 +402,11 @@ func (c *quicPktConn) WriteTo(p []byte, addr string) error {
_ = struc.Pack(&msgBuf, &msg)
err = c.Session.SendDatagram(msgBuf.Bytes())
if err != nil {
if errSize, ok := err.(quic.ErrMessageTooLarge); ok {
var errSize *quic.DatagramTooLargeError
if errors.As(err, &errSize) {
// need to frag
msg.MsgID = uint16(fastrand.Intn(0xFFFF)) + 1 // msgID must be > 0 when fragCount > 1
fragMsgs := fragUDPMessage(msg, int(errSize))
fragMsgs := fragUDPMessage(msg, int(errSize.PeerMaxDatagramFrameSize))
for _, fragMsg := range fragMsgs {
msgBuf.Reset()
_ = struc.Pack(&msgBuf, &fragMsg)
|
||||
|
|
|
@@ -364,7 +364,7 @@ func (t *clientImpl) ListenPacketWithDialer(ctx context.Context, metadata *C.Met
return nil, common.TooManyOpenStreams
}

pipe1, pipe2 := net.Pipe()
pipe1, pipe2 := N.Pipe()
var connId uint32
for {
connId = fastrand.Uint32()
|
||||
|
|
|
@@ -123,7 +123,7 @@ func (q *quicStreamPacketConn) WaitReadFrom() (data []byte, put func(), addr net

func (q *quicStreamPacketConn) WriteTo(p []byte, addr net.Addr) (n int, err error) {
if q.udpRelayMode != common.QUIC && len(p) > q.maxUdpRelayPacketSize {
return 0, quic.ErrMessageTooLarge(q.maxUdpRelayPacketSize)
return 0, &quic.DatagramTooLargeError{PeerMaxDatagramFrameSize: int64(q.maxUdpRelayPacketSize)}
}
if q.closed {
return 0, net.ErrClosed
|
||||
|
|
|
@@ -348,7 +348,7 @@ func (t *clientImpl) ListenPacketWithDialer(ctx context.Context, metadata *C.Met
return nil, common.TooManyOpenStreams
}

pipe1, pipe2 := net.Pipe()
pipe1, pipe2 := N.Pipe()
var connId uint16
for {
connId = uint16(fastrand.Intn(0xFFFF))
|
||||
|
|
|
@@ -12,7 +12,9 @@ import (
// MaxFragSize is a safe udp relay packet size
// because tuicv5 support udp fragment so we unneeded to do a magic modify for quic-go to increase MaxDatagramFrameSize
// it may not work fine in some platform
var MaxFragSize = 1200 - PacketOverHead
// "1200" from quic-go's MaxDatagramSize
// "-3" from quic-go's DatagramFrame.MaxDataLen
var MaxFragSize = 1200 - PacketOverHead - 3

func fragWriteNative(quicConn quic.Connection, packet Packet, buf *bytes.Buffer, fragSize int) (err error) {
fullPayload := packet.DATA
|
||||
|
|
|
@@ -137,7 +137,7 @@ func (q *quicStreamPacketConn) WaitReadFrom() (data []byte, put func(), addr net

func (q *quicStreamPacketConn) WriteTo(p []byte, addr net.Addr) (n int, err error) {
if len(p) > 0xffff { // uint16 max
return 0, quic.ErrMessageTooLarge(0xffff)
return 0, &quic.DatagramTooLargeError{PeerMaxDatagramFrameSize: 0xffff}
}
if q.closed {
return 0, net.ErrClosed
|
||||
|
@@ -187,9 +187,9 @@ func (q *quicStreamPacketConn) WriteTo(p []byte, addr net.Addr) (n int, err erro
err = q.quicConn.SendDatagram(data)
}

var tooLarge quic.ErrMessageTooLarge
var tooLarge *quic.DatagramTooLargeError
if errors.As(err, &tooLarge) {
err = fragWriteNative(q.quicConn, packet, buf, int(tooLarge)-PacketOverHead)
err = fragWriteNative(q.quicConn, packet, buf, int(tooLarge.PeerMaxDatagramFrameSize)-PacketOverHead)
}
if err != nil {
return
|
||||
|
|
|
@@ -82,6 +82,6 @@ func closeAllLocalCoon(lAddr string) {
})
}

func handleSocket(ctx C.ConnContext, outbound net.Conn) {
N.Relay(ctx.Conn(), outbound)
func handleSocket(inbound, outbound net.Conn) {
N.Relay(inbound, outbound)
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package tunnel
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
|
@@ -10,12 +11,11 @@ import (
"sync"
"time"

"github.com/jpillora/backoff"

N "github.com/metacubex/mihomo/common/net"
"github.com/metacubex/mihomo/component/nat"
P "github.com/metacubex/mihomo/component/process"
"github.com/metacubex/mihomo/component/resolver"
"github.com/metacubex/mihomo/component/slowdown"
"github.com/metacubex/mihomo/component/sniffer"
C "github.com/metacubex/mihomo/constant"
"github.com/metacubex/mihomo/constant/features"
|
||||
|
@@ -584,7 +584,7 @@ func handleTCPConn(connCtx C.ConnContext) {
peekMutex.Lock()
defer peekMutex.Unlock()
_ = conn.SetReadDeadline(time.Time{}) // reset
handleSocket(connCtx, remoteConn)
handleSocket(conn, remoteConn)
}

func shouldResolveIP(rule C.Rule, metadata *C.Metadata) bool {
|
||||
|
@@ -684,23 +684,33 @@ func getRules(metadata *C.Metadata) []C.Rule {
}
}

func retry[T any](ctx context.Context, ft func(context.Context) (T, error), fe func(err error)) (t T, err error) {
b := &backoff.Backoff{
Min: 10 * time.Millisecond,
Max: 1 * time.Second,
Factor: 2,
Jitter: true,
func shouldStopRetry(err error) bool {
if errors.Is(err, resolver.ErrIPNotFound) {
return true
}
if errors.Is(err, resolver.ErrIPVersion) {
return true
}
if errors.Is(err, resolver.ErrIPv6Disabled) {
return true
}
return false
}

func retry[T any](ctx context.Context, ft func(context.Context) (T, error), fe func(err error)) (t T, err error) {
s := slowdown.New()
for i := 0; i < 10; i++ {
t, err = ft(ctx)
if err != nil {
if fe != nil {
fe(err)
}
select {
case <-time.After(b.Duration()):
if shouldStopRetry(err) {
return
}
if s.Wait(ctx) == nil {
continue
case <-ctx.Done():
} else {
return
}
} else {
|
||||
|
|