14 changes: 1 addition & 13 deletions examples/vmess.client.toml

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions examples/vmess.server.toml

Large diffs are not rendered by default.

3 changes: 3 additions & 0 deletions go.mod
@@ -6,6 +6,7 @@ require (
github.com/BurntSushi/toml v1.2.1
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
github.com/biter777/countries v1.5.6
github.com/cloudflare/circl v1.3.1
github.com/dustin/go-humanize v1.0.0
github.com/e1732a364fed/ui v0.0.1-alpha.12
github.com/gobwas/ws v1.1.0
@@ -85,3 +86,5 @@ retract v1.0.2
retract v1.0.1

retract v1.0.0

replace github.com/cloudflare/circl => ..\octeep\circl
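Note that the new replace directive points the cloudflare/circl dependency at a local checkout using a Windows-style relative path, so the module only builds on a machine with that exact layout. For reference, a portable form of the same override (assuming the fork sits one directory above the repo) would use forward slashes, which go.mod accepts on every platform:

```
replace github.com/cloudflare/circl => ../octeep/circl
```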
144 changes: 38 additions & 106 deletions proxy/vmess/aead.go
@@ -2,7 +2,6 @@ package vmess

import (
"crypto/cipher"
"crypto/rand"
"encoding/binary"
"fmt"
"io"
@@ -17,18 +16,15 @@ type aeadWriter struct {
buf []byte
count uint16
iv []byte

shakeParser *ShakeSizeParser
}

func AEADWriter(w io.Writer, aead cipher.AEAD, iv []byte, shakeParser *ShakeSizeParser) io.Writer {
func AEADWriter(w io.Writer, aead cipher.AEAD, iv []byte) io.Writer {
return &aeadWriter{
Writer: w,
AEAD: aead,
buf: utils.GetPacket(), //make([]byte, lenSize+chunkSize),
nonce: make([]byte, aead.NonceSize()),
iv: iv,
shakeParser: shakeParser,
Writer: w,
AEAD: aead,
buf: utils.GetPacket(), //make([]byte, lenSize+chunkSize),
nonce: make([]byte, aead.NonceSize()),
iv: iv,
}
}

@@ -37,72 +33,43 @@ func (w *aeadWriter) Write(b []byte) (n int, err error) {
return
}

if w.shakeParser != nil {

encryptedSize := (len(b) + w.Overhead())
paddingSize := int(w.shakeParser.NextPaddingLen())
sizeBytes := 2
totalSize := 2 + encryptedSize + paddingSize

eb := w.buf[:totalSize]

w.shakeParser.Encode(uint16(encryptedSize+paddingSize), eb[:sizeBytes])
encryptBuf := eb[sizeBytes : sizeBytes+encryptedSize]

binary.BigEndian.PutUint16(w.nonce[:2], w.count)
copy(w.nonce[2:], w.iv[2:12])

w.Seal(encryptBuf[:0], w.nonce, b, nil)
w.count++

if paddingSize > 0 {
rand.Read(eb[sizeBytes+encryptedSize:])
}
buf := w.buf
//This assumes len(b) never exceeds 64k, otherwise it would crash; but since every buffer in this project is capped at 64k, that should never happen, so no check is added.
n = len(b)
buf = buf[:lenSize+n+w.Overhead()]

_, err = w.Writer.Write(eb)
n = len(b)
payloadBuf := buf[lenSize : lenSize+n]
binary.BigEndian.PutUint16(buf[:lenSize], uint16(n+w.Overhead()))

} else {
buf := w.buf
//This assumes len(b) never exceeds 64k, otherwise it would crash; but since every buffer in this project is capped at 64k, that should never happen, so no check is added.
n = len(b)
buf = buf[:lenSize+n+w.Overhead()]
binary.BigEndian.PutUint16(w.nonce[:2], w.count)
copy(w.nonce[2:], w.iv[2:12])

payloadBuf := buf[lenSize : lenSize+n]
binary.BigEndian.PutUint16(buf[:lenSize], uint16(n+w.Overhead()))
w.Seal(payloadBuf[:0], w.nonce, b, nil)
w.count++

binary.BigEndian.PutUint16(w.nonce[:2], w.count)
copy(w.nonce[2:], w.iv[2:12])

w.Seal(payloadBuf[:0], w.nonce, b, nil)
w.count++

_, err = w.Writer.Write(buf)
}
_, err = w.Writer.Write(buf)

return
}

type aeadReader struct {
io.Reader
cipher.AEAD
nonce []byte
buf []byte
leftover []byte
count uint16
iv []byte
shakeParser *ShakeSizeParser
done bool
nonce []byte
buf []byte
leftover []byte
count uint16
iv []byte
done bool
}

func AEADReader(r io.Reader, aead cipher.AEAD, iv []byte, shakeParser *ShakeSizeParser) io.Reader {
func AEADReader(r io.Reader, aead cipher.AEAD, iv []byte) io.Reader {
return &aeadReader{
Reader: r,
AEAD: aead,
buf: utils.GetPacket(),
nonce: make([]byte, aead.NonceSize()),
iv: iv,
shakeParser: shakeParser,
Reader: r,
AEAD: aead,
buf: utils.GetPacket(),
nonce: make([]byte, aead.NonceSize()),
iv: iv,
}
}

@@ -121,43 +88,20 @@ func (r *aeadReader) Read(b []byte) (int, error) {
// get length

var l uint16
var padding uint16
// var padding uint16
var err error

if r.shakeParser == nil {

_, err = io.ReadFull(r.Reader, r.buf[:lenSize])
if err != nil {
return 0, err
}

l = binary.BigEndian.Uint16(r.buf[:lenSize])
} else {
//Don't get the order wrong: read the padding length first, then the shake length, or decoding fails. In practice, v2ray's vmess has padding enabled by default.
padding = r.shakeParser.NextPaddingLen()

var sbA [2]byte
sb := sbA[:]

if _, err = io.ReadFull(r.Reader, sb); err != nil {
return 0, err
}
l, err = r.shakeParser.Decode(sb)
if err != nil {
return 0, err
}

if l == uint16(r.AEAD.Overhead())+padding {
r.done = true
return 0, io.EOF
}

_, err = io.ReadFull(r.Reader, r.buf[:lenSize])
if err != nil {
return 0, err
}

l = binary.BigEndian.Uint16(r.buf[:lenSize])

if l == 0 {
return 0, nil
}
if l > chunkSize && r.shakeParser == nil {
if l > chunkSize { // && r.shakeParser == nil
return 0, fmt.Errorf("l>chunkSize(16k), %d", l) //this can actually happen
}

@@ -170,28 +114,16 @@ func (r *aeadReader) Read(b []byte) (int, error) {
return 0, err
}

if r.shakeParser != nil {
buf = buf[:int(l)-int(padding)]
}

binary.BigEndian.PutUint16(r.nonce[:2], r.count)
copy(r.nonce[2:], r.iv[2:12])

returnedData, err := r.Open(buf[:0], r.nonce, buf, nil)
_, err = r.Open(buf[:0], r.nonce, buf, nil)
r.count++
if err != nil {
return 0, err
}

var dataLen int

if r.shakeParser == nil {
dataLen = int(l) - r.Overhead()

} else {
dataLen = len(returnedData)

}
dataLen := int(l) - r.Overhead()

m := copy(b, buf[:dataLen])
if m < int(dataLen) {
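To summarize the writer side after this change: each chunk is a 2-byte big-endian length prefix carrying len(payload)+Overhead, followed by the ciphertext, and the nonce is the 2-byte big-endian chunk counter joined with bytes 2..12 of the request IV. Below is a minimal standalone sketch of that framing, assuming AES-128-GCM with its 12-byte nonce; `sealChunk` is an illustrative name, not a function in this repo:

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"encoding/binary"
	"fmt"
)

// sealChunk frames one chunk the way aeadWriter.Write does after this
// change: a 2-byte big-endian length prefix = len(payload) + Overhead,
// then the ciphertext. The nonce is the big-endian chunk counter in the
// first 2 bytes, followed by bytes 2..12 of the request IV.
func sealChunk(aead cipher.AEAD, iv []byte, count uint16, payload []byte) []byte {
	nonce := make([]byte, aead.NonceSize()) // 12 bytes for GCM
	binary.BigEndian.PutUint16(nonce[:2], count)
	copy(nonce[2:], iv[2:12])

	buf := make([]byte, 2, 2+len(payload)+aead.Overhead())
	binary.BigEndian.PutUint16(buf, uint16(len(payload)+aead.Overhead()))
	return aead.Seal(buf, nonce, payload, nil) // appends after the prefix
}

func main() {
	key := make([]byte, 16) // all-zero demo key
	iv := make([]byte, 16)  // all-zero demo IV
	block, _ := aes.NewCipher(key)
	aead, _ := cipher.NewGCM(block)
	fmt.Printf("% x\n", sealChunk(aead, iv, 0, []byte("hello")))
}
```

The reader side mirrors this: read the 2-byte prefix, reject lengths above chunkSize plus the AEAD overhead, then Open with the same counter-based nonce.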
63 changes: 19 additions & 44 deletions proxy/vmess/anti_replay.go
@@ -14,15 +14,15 @@ const (
)

/*
We implement the authid anti-replay mechanism with a map, instead of reusing v2ray's code.

v2ray's approach of keeping two filters and swapping them every 120 seconds seems imprecise. During seconds 121-240, the first filter still holds everything recorded before, while only the second filter has been reset, so the first filter effectively rejects replays over a 240-second window; in the next 120-second window the second filter plays that role. In effect, time is partitioned into 120-second blocks: if an id is used at second 1 and used again at second 122, v2ray's implementation still flags it as a replay, which does not match the standard definition.

We simply store an expiry time per entry, in the style of v2ray's sessionHistory, and purge expired entries periodically.
*/
type authid_antiReplayMachine struct {
sync.RWMutex
@@ -74,33 +74,8 @@ func (arm *authid_antiReplayMachine) stop() {

}

func (arm *authid_antiReplayMachine) check(authid [16]byte) (ok bool) {
now := time.Now()
arm.RLock()
expireTime, has := arm.authidMap[authid]
arm.RUnlock()

if !has {
arm.Lock()
arm.authidMap[authid] = now.Add(authid_antiReplyDuration)
arm.Unlock()

return true
}
if expireTime.Before(now) {
arm.Lock()
arm.authidMap[authid] = now.Add(authid_antiReplyDuration)
arm.Unlock()

return true
}
return false
}

type sessionID struct {
user [16]byte
key [16]byte
nonce [16]byte
user [16]byte
}
type session_antiReplayMachine struct {
sync.RWMutex
@@ -153,21 +128,21 @@ func (h *session_antiReplayMachine) initCache() {
h.sessionMap = make(map[sessionID]time.Time, 128)
}

func (h *session_antiReplayMachine) check(session sessionID) bool {
h.Lock()
// func (h *session_antiReplayMachine) check(session sessionID) bool {
// h.Lock()

now := time.Now()
// now := time.Now()

if expire, found := h.sessionMap[session]; found && expire.After(now) {
h.Unlock()
return false
}
// if expire, found := h.sessionMap[session]; found && expire.After(now) {
// h.Unlock()
// return false
// }

h.sessionMap[session] = now.Add(sessionAntiReplayDuration)
h.Unlock()
// h.sessionMap[session] = now.Add(sessionAntiReplayDuration)
// h.Unlock()

return true
}
// return true
// }

func (h *session_antiReplayMachine) removeExpiredEntries(now time.Time) {

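For reference, the map-with-expiry scheme described in the anti_replay.go comment can be sketched as follows. This is an illustrative standalone version; the names antiReplay and newAntiReplay and the single fixed ttl are hypothetical, not the repo's types:

```go
package antireplay

import (
	"sync"
	"time"
)

// antiReplay maps each id to an expiry time; an id is accepted if it is
// unseen or already expired, and a background sweep removes stale entries.
type antiReplay struct {
	mu  sync.Mutex
	m   map[[16]byte]time.Time
	ttl time.Duration
}

func newAntiReplay(ttl time.Duration) *antiReplay {
	ar := &antiReplay{m: make(map[[16]byte]time.Time), ttl: ttl}
	go func() { // periodic cleanup, as the comment suggests
		for now := range time.Tick(ttl) {
			ar.mu.Lock()
			for id, exp := range ar.m {
				if exp.Before(now) {
					delete(ar.m, id)
				}
			}
			ar.mu.Unlock()
		}
	}()
	return ar
}

// check reports whether id is fresh, recording it if so.
func (ar *antiReplay) check(id [16]byte) bool {
	now := time.Now()
	ar.mu.Lock()
	defer ar.mu.Unlock()
	if exp, ok := ar.m[id]; ok && exp.After(now) {
		return false
	}
	ar.m[id] = now.Add(ar.ttl)
	return true
}
```

Unlike the two-filter swap, an entry here expires exactly ttl after it was recorded, so an id used at second 1 is accepted again at second 122 when ttl is 120 seconds.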
2 changes: 1 addition & 1 deletion proxy/vmess/chunk.go
@@ -9,7 +9,7 @@ import (

const (
lenSize = 2
chunkSize = 1 << 14 // 16384
chunkSize = 1 << 15 // 32768
)

type chunkedWriter struct {
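One sanity check worth noting for the doubled chunkSize: the 2-byte length prefix stores the payload length plus the AEAD overhead, so the hard ceiling is 65535, and 1<<15 plus a 16-byte GCM tag still fits. A small compile-time check, using illustrative constants rather than the repo's:

```go
package chunkcheck

const (
	lenSize     = 2
	chunkSize   = 1 << 15 // 32768, after this change
	gcmOverhead = 16      // AES-GCM tag size
)

// This conversion fails to compile if a full chunk plus its tag
// ever stops fitting in the 2-byte big-endian length prefix.
const _ = uint16(chunkSize + gcmOverhead)
```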