31 Commits

Author SHA1 Message Date
07d9f3afe6 refactor: replace Get/Set patterns with idiomatic Go interfaces
Docker Build and Push / build-and-push-tags (push) Successful in 10m59s
Docker Build and Push / build-and-push-branches (push) Has been skipped
- rename constructors to New
- remove Get/Set-style accessors
- replace string-based enums with iota-backed types
2026-01-14 15:28:17 +07:00
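The first two bullets above are mechanical renames; for the third, this is a minimal sketch of what an iota-backed replacement for a string enum can look like. The constant names mirror identifiers that appear in the diff further down (types.UNKNOWN, types.HTTP, types.TCP); the String() mapping is an assumption added for illustration.

package types

// TunnelType is an iota-backed enum replacing a string-based one.
type TunnelType int

const (
    UNKNOWN TunnelType = iota
    HTTP
    TCP
)

// String keeps logs readable after moving away from raw strings.
func (t TunnelType) String() string {
    switch t {
    case HTTP:
        return "http"
    case TCP:
        return "tcp"
    default:
        return "unknown"
    }
}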
abd103b5ab fix(port): add atomic ClaimPort() to prevent race condition
Docker Build and Push / build-and-push-tags (push) Successful in 3m23s
Docker Build and Push / build-and-push-branches (push) Has been skipped
- Replace GetPortStatus/SetPortStatus calls with an atomic ClaimPort() operation.
- Fix a logic error when handling headless tunneling.
2026-01-12 18:25:35 +07:00
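A minimal sketch of the atomic claim described above, assuming a mutex-guarded map inside the port registry (the registry's real internals are not shown on this page; only the ClaimPort name is taken from the diff):

package port

import "sync"

// Registry tracks which forwarded ports are currently taken.
type Registry struct {
    mu    sync.Mutex
    inUse map[uint16]bool
}

func NewRegistry() *Registry {
    return &Registry{inUse: make(map[uint16]bool)}
}

// ClaimPort checks and marks the port under a single lock, closing the window
// where two sessions could both observe the port as free via separate
// GetPortStatus/SetPortStatus calls.
func (r *Registry) ClaimPort(p uint16) bool {
    r.mu.Lock()
    defer r.mu.Unlock()
    if r.inUse[p] {
        return false
    }
    r.inUse[p] = true
    return true
}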
560c98b869 refactor: consolidate error handling with fail() function in session handlers
Docker Build and Push / build-and-push-tags (push) Successful in 3m21s
Docker Build and Push / build-and-push-branches (push) Has been skipped
- Replace repetitive error handling code with fail() function in HandleGlobalRequest
- Standardize error response pattern across all handler methods
- Improve code maintainability and reduce duplication
2026-01-12 14:42:42 +07:00
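The consolidated error path is visible in the session-handler file added further down this page; as a self-contained sketch of the pattern (request and lifecycle here are hypothetical stand-ins for *ssh.Request and the session's lifecycle manager):

package session

import (
    "fmt"
    "log"
)

type request interface{ Reply(ok bool, payload []byte) error }
type lifecycle interface{ Close() error }

func handleForward(req request, lc lifecycle, rawPort uint32) {
    // fail replaces the log/reply/close block that was previously repeated
    // in every error branch of the handler.
    fail := func(msg string) {
        log.Println(msg)
        if err := req.Reply(false, nil); err != nil {
            log.Println("Failed to reply to request:", err)
            return
        }
        if err := lc.Close(); err != nil {
            log.Printf("failed to close session: %v", err)
        }
    }

    if rawPort > 65535 {
        fail(fmt.Sprintf("Port %d is larger than allowed port of 65535", rawPort))
        return
    }
    // normal handling continues here
}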
e1f5d73e03 feat: add headless mode support for SSH -N connections
Docker Build and Push / build-and-push-branches (push) Has been skipped
Docker Build and Push / build-and-push-tags (push) Successful in 3m3s
- Use s.lifecycle.GetConnection().Wait() to block until the SSH connection closes
- Prevent premature session closure in headless mode

In headless mode (ssh -N), there's no channel interaction to block on,
so the session would immediately return and close. Now blocking on
conn.Wait() keeps the session alive until the client disconnects.
2026-01-11 15:21:11 +07:00
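A minimal sketch of the blocking pattern, assuming direct access to the underlying *ssh.ServerConn (the repository reaches it through a lifecycle manager, as the diff below shows):

package session

import (
    "log"

    "golang.org/x/crypto/ssh"
)

// runHeadless keeps an `ssh -N` session alive: with no channels to service,
// the only thing left to block on is the connection itself.
func runHeadless(conn *ssh.ServerConn) {
    // Wait returns once the client disconnects or the transport fails.
    if err := conn.Wait(); err != nil {
        log.Printf("ssh connection closed: %v", err)
    }
}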
19fd6d59d2 Merge pull request 'main' (#62) from main into staging
Docker Build and Push / build-and-push-tags (push) Has been skipped
Docker Build and Push / build-and-push-branches (push) Successful in 3m32s
Reviewed-on: #62
2026-01-09 12:15:30 +00:00
e3988b339f Merge pull request 'fix(deps): update module github.com/caddyserver/certmagic to v0.25.1' (#61) from renovate/github.com-caddyserver-certmagic-0.x into main
Docker Build and Push / build-and-push-tags (push) Has been skipped
Docker Build and Push / build-and-push-branches (push) Successful in 3m21s
Reviewed-on: #61
2026-01-09 12:15:05 +00:00
336948a397 fix(deps): update module github.com/caddyserver/certmagic to v0.25.1 2026-01-09 10:00:35 +00:00
50ae422de8 Merge pull request 'staging' (#60) from staging into main
Docker Build and Push / build-and-push-tags (push) Has been skipped
Docker Build and Push / build-and-push-branches (push) Successful in 3m20s
Reviewed-on: #60
2026-01-09 09:33:28 +00:00
8467ed555e revert 01ddc76f7e
Docker Build and Push / build-and-push-tags (push) Has been skipped
Docker Build and Push / build-and-push-branches (push) Has been cancelled
revert Merge pull request 'fix(deps): update module github.com/caddyserver/certmagic to v0.25.1' (#58) from renovate/github.com-caddyserver-certmagic-0.x into main
2026-01-09 09:33:04 +00:00
01ddc76f7e Merge pull request 'fix(deps): update module github.com/caddyserver/certmagic to v0.25.1' (#58) from renovate/github.com-caddyserver-certmagic-0.x into main
Docker Build and Push / build-and-push-branches (push) Waiting to run
Docker Build and Push / build-and-push-tags (push) Has been skipped
2026-01-09 09:30:23 +00:00
ffb3565ff5 fix(deps): update module github.com/caddyserver/certmagic to v0.25.1 2026-01-09 09:30:18 +00:00
6d700ef6dd Merge pull request 'feat/grpc-integration' (#59) from feat/grpc-integration into staging
Docker Build and Push / build-and-push-branches (push) Successful in 5m25s
Docker Build and Push / build-and-push-tags (push) Has been skipped
Reviewed-on: #59
2026-01-09 09:24:20 +00:00
b8acb6da4c ci: remove renovate
Docker Build and Push / build-and-push-tags (push) Has been skipped
Docker Build and Push / build-and-push-branches (push) Has been cancelled
2026-01-08 13:03:02 +07:00
6b4127f0ef feat: add authenticated user info and restructure handleConnection
Docker Build and Push / build-and-push-branches (push) Has been skipped
Docker Build and Push / build-and-push-tags (push) Successful in 3m21s
- Display authenticated username in welcome page information box
- Refactor handleConnection function for better structure and clarity
2026-01-07 23:07:02 +07:00
16d48ff906 refactor(grpc/client): simplify processEventStream with per-event handlers
Docker Build and Push / build-and-push-branches (push) Has been skipped
Docker Build and Push / build-and-push-tags (push) Successful in 3m20s
- Extract eventHandlers dispatch table
- Add per-event handlers: handleSlugChange, handleGetSessions, handleTerminateSession
- Introduce sendNode helper to centralize send/error handling and preserve connection-error propagation
- Add protoToTunnelType for tunnel-type validation
- Map unknown proto.TunnelType to types.UNKNOWN in protoToTunnelType and return a descriptive error
- Reduce boilerplate and improve readability of processEventStream
2026-01-06 20:14:56 +07:00
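A reduced, self-contained sketch of the dispatch-table shape described above; the real code keys on proto.EventType and hands the bidirectional gRPC stream to each handler, so the event and handler types below are stand-ins:

package events

import (
    "fmt"
    "log"
)

type EventType int

const (
    SlugChange EventType = iota
    GetSessions
    TerminateSession
)

type Event struct {
    Type EventType
    // payload fields omitted
}

// handlers maps each event type to its handler, replacing a growing switch
// inside the stream-processing loop.
var handlers = map[EventType]func(*Event) error{
    SlugChange:       func(e *Event) error { log.Println("handle slug change"); return nil },
    GetSessions:      func(e *Event) error { log.Println("handle get sessions"); return nil },
    TerminateSession: func(e *Event) error { log.Println("handle terminate session"); return nil },
}

func processEvent(e *Event) error {
    h, ok := handlers[e.Type]
    if !ok {
        return fmt.Errorf("no handler for event type %d", e.Type)
    }
    return h(e)
}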
6213ff8a30 feat: implement forwarder session termination
Docker Build and Push / build-and-push-branches (push) Has been skipped
Docker Build and Push / build-and-push-tags (push) Successful in 3m36s
2026-01-06 18:32:48 +07:00
4ffaec9d9a refactor: inject SessionRegistry interface instead of individual functions
Docker Build and Push / build-and-push-branches (push) Has been skipped
Docker Build and Push / build-and-push-tags (push) Successful in 4m16s
2026-01-05 16:49:17 +07:00
6de0a618ee update: proto file to v1.3.0
Docker Build and Push / build-and-push-branches (push) Has been skipped
Docker Build and Push / build-and-push-tags (push) Successful in 4m0s
2026-01-05 00:55:51 +07:00
8cc70fa45e feat(session): use session key for registry 2026-01-05 00:50:42 +07:00
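A sketch of keying the registry by a composite session key rather than a bare string; the SessionKey fields follow the types.SessionKey usage visible in the diff below, while the map-plus-mutex layout is an assumption:

package registry

import "sync"

type TunnelType int

const (
    HTTP TunnelType = iota
    TCP
)

// SessionKey identifies a session by its slug (or port id) and tunnel type,
// so an HTTP slug and a TCP port id can never collide.
type SessionKey struct {
    Id   string
    Type TunnelType
}

type Registry struct {
    mu       sync.Mutex
    sessions map[SessionKey]any
}

func NewRegistry() *Registry {
    return &Registry{sessions: make(map[SessionKey]any)}
}

// Register stores the session only if the key is not already taken.
func (r *Registry) Register(key SessionKey, s any) bool {
    r.mu.Lock()
    defer r.mu.Unlock()
    if _, exists := r.sessions[key]; exists {
        return false
    }
    r.sessions[key] = s
    return true
}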
d666ae5545 fix: use correct environment variable key
Docker Build and Push / build-and-push-branches (push) Has been skipped
Docker Build and Push / build-and-push-tags (push) Successful in 4m1s
2026-01-04 18:21:34 +07:00
5edb3c8086 fix: startup order
Docker Build and Push / build-and-push-branches (push) Has been skipped
Docker Build and Push / build-and-push-tags (push) Successful in 3m51s
2026-01-04 15:19:03 +07:00
5b603d8317 feat: implement sessions request from grpc server
Docker Build and Push / build-and-push-branches (push) Has been skipped
Docker Build and Push / build-and-push-tags (push) Successful in 4m7s
2026-01-03 21:17:01 +07:00
5ceade81db Merge pull request 'staging' (#57) from staging into main
Docker Build and Push / build-and-push-tags (push) Has been skipped
Docker Build and Push / build-and-push-branches (push) Successful in 3m57s
renovate / renovate (push) Failing after 34s
Reviewed-on: #57
2026-01-03 13:07:49 +00:00
8fd9f8b567 feat: implement sessions request from grpc server
Docker Build and Push / build-and-push-branches (push) Has been skipped
Docker Build and Push / build-and-push-tags (push) Has been cancelled
2026-01-03 20:06:14 +07:00
30e84ac3b7 feat: implement get sessions by user 2026-01-02 22:58:54 +07:00
fd6ffc2500 feat(grpc): integrate slug edit handling 2026-01-02 18:27:48 +07:00
e1cd4ed981 WIP: gRPC integration, initial implementation 2026-01-01 21:03:17 +07:00
96d2b88f95 WIP: gRPC integration, initial implementation 2026-01-01 21:01:15 +07:00
8a456d2cde Merge pull request 'staging' (#55) from staging into main
Docker Build and Push / build-and-push-tags (push) Has been skipped
Docker Build and Push / build-and-push-branches (push) Successful in 5m50s
renovate / renovate (push) Successful in 35s
Reviewed-on: #55
2025-12-31 08:51:25 +00:00
8841230653 Merge pull request 'fix: prevent subdomain change to already-in-use subdomains' (#54) from staging into main
Docker Build and Push / build-and-push (push) Successful in 5m20s
renovate / renovate (push) Successful in 38s
Reviewed-on: #54
2025-12-30 12:42:05 +00:00
4d0a7deaf2 Merge pull request 'staging' (#53) from staging into main
Docker Build and Push / build-and-push (push) Successful in 3m33s
renovate / renovate (push) Successful in 22s
Reviewed-on: #53
2025-12-29 17:18:25 +00:00
8 changed files with 377 additions and 335 deletions
+1 -1
@@ -1,4 +1,4 @@
-FROM golang:1.25.6-alpine AS go_builder
+FROM golang:1.25.5-alpine AS go_builder
 ARG VERSION=dev
 ARG BUILD_DATE=unknown
+3 -3
@@ -11,7 +11,7 @@ require (
 github.com/joho/godotenv v1.5.1
 github.com/libdns/cloudflare v0.2.2
 github.com/muesli/termenv v0.16.0
-golang.org/x/crypto v0.47.0
+golang.org/x/crypto v0.46.0
 google.golang.org/grpc v1.78.0
 google.golang.org/protobuf v1.36.11
 )
@@ -48,8 +48,8 @@ require (
 golang.org/x/mod v0.31.0 // indirect
 golang.org/x/net v0.48.0 // indirect
 golang.org/x/sync v0.19.0 // indirect
-golang.org/x/sys v0.40.0 // indirect
-golang.org/x/text v0.33.0 // indirect
+golang.org/x/sys v0.39.0 // indirect
+golang.org/x/text v0.32.0 // indirect
 golang.org/x/tools v0.40.0 // indirect
 google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect
 )
-6
@@ -120,8 +120,6 @@ go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=
 go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=
 golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
 golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
-golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
-golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
 golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
 golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
 golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
@@ -134,14 +132,10 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
 golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
-golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
 golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
 golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
 golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
-golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
-golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
 golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
 golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
 gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
+94 -39
@@ -2,6 +2,7 @@ package client
 import (
 "context"
+"crypto/tls"
 "errors"
 "fmt"
 "io"
@@ -15,6 +16,7 @@ import (
 proto "git.fossy.my.id/bagas/tunnel-please-grpc/gen"
 "google.golang.org/grpc"
 "google.golang.org/grpc/codes"
+"google.golang.org/grpc/credentials"
 "google.golang.org/grpc/credentials/insecure"
 "google.golang.org/grpc/health/grpc_health_v1"
 "google.golang.org/grpc/keepalive"
@@ -22,34 +24,83 @@
 "google.golang.org/protobuf/types/known/timestamppb"
 )
-type Client interface {
-SubscribeEvents(ctx context.Context, identity, authToken string) error
-ClientConn() *grpc.ClientConn
-AuthorizeConn(ctx context.Context, token string) (authorized bool, user string, err error)
-Close() error
-CheckServerHealth(ctx context.Context) error
+type GrpcConfig struct {
+Address string
+UseTLS bool
+InsecureSkipVerify bool
+Timeout time.Duration
+KeepAlive bool
+MaxRetries int
+KeepAliveTime time.Duration
+KeepAliveTimeout time.Duration
+PermitWithoutStream bool
 }
-type client struct {
+type Client struct {
 conn *grpc.ClientConn
-address string
+config *GrpcConfig
 sessionRegistry session.Registry
 eventService proto.EventServiceClient
 authorizeConnectionService proto.UserServiceClient
 closing bool
 }
-func New(address string, sessionRegistry session.Registry) (Client, error) {
-var opts []grpc.DialOption
-opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
-kaParams := keepalive.ClientParameters{
-Time: 2 * time.Minute,
+func DefaultConfig() *GrpcConfig {
+return &GrpcConfig{
+Address: "localhost:50051",
+UseTLS: false,
+InsecureSkipVerify: false,
 Timeout: 10 * time.Second,
+KeepAlive: true,
+MaxRetries: 3,
+KeepAliveTime: 2 * time.Minute,
+KeepAliveTimeout: 10 * time.Second,
 PermitWithoutStream: false,
 }
+}
+func New(config *GrpcConfig, sessionRegistry session.Registry) (*Client, error) {
+if config == nil {
+config = DefaultConfig()
+} else {
+defaults := DefaultConfig()
+if config.Address == "" {
+config.Address = defaults.Address
+}
+if config.Timeout == 0 {
+config.Timeout = defaults.Timeout
+}
+if config.MaxRetries == 0 {
+config.MaxRetries = defaults.MaxRetries
+}
+if config.KeepAliveTime == 0 {
+config.KeepAliveTime = defaults.KeepAliveTime
+}
+if config.KeepAliveTimeout == 0 {
+config.KeepAliveTimeout = defaults.KeepAliveTimeout
+}
+}
+var opts []grpc.DialOption
+if config.UseTLS {
+tlsConfig := &tls.Config{
+InsecureSkipVerify: config.InsecureSkipVerify,
+}
+creds := credentials.NewTLS(tlsConfig)
+opts = append(opts, grpc.WithTransportCredentials(creds))
+} else {
+opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+}
+if config.KeepAlive {
+kaParams := keepalive.ClientParameters{
+Time: config.KeepAliveTime,
+Timeout: config.KeepAliveTimeout,
+PermitWithoutStream: config.PermitWithoutStream,
+}
 opts = append(opts, grpc.WithKeepaliveParams(kaParams))
+}
 opts = append(opts,
 grpc.WithDefaultCallOptions(
@@ -58,24 +109,24 @@ func New(address string, sessionRegistry session.Registry) (Client, error) {
 ),
 )
-conn, err := grpc.NewClient(address, opts...)
+conn, err := grpc.NewClient(config.Address, opts...)
 if err != nil {
-return nil, fmt.Errorf("failed to connect to gRPC server at %s: %w", address, err)
+return nil, fmt.Errorf("failed to connect to gRPC server at %s: %w", config.Address, err)
 }
 eventService := proto.NewEventServiceClient(conn)
 authorizeConnectionService := proto.NewUserServiceClient(conn)
-return &client{
+return &Client{
 conn: conn,
-address: address,
+config: config,
 sessionRegistry: sessionRegistry,
 eventService: eventService,
 authorizeConnectionService: authorizeConnectionService,
 }, nil
 }
-func (c *client) SubscribeEvents(ctx context.Context, identity, authToken string) error {
+func (c *Client) SubscribeEvents(ctx context.Context, identity, authToken string) error {
 const (
 baseBackoff = time.Second
 maxBackoff = 30 * time.Second
@@ -158,7 +209,7 @@ func (c *client) SubscribeEvents(ctx context.Context, identity, authToken string) error {
 }
 }
-func (c *client) processEventStream(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events]) error {
+func (c *Client) processEventStream(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events]) error {
 handlers := c.eventHandlers(subscribe)
 for {
@@ -179,7 +230,7 @@ func (c *client) processEventStream(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events]) error {
 }
 }
-func (c *client) eventHandlers(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events]) map[proto.EventType]func(*proto.Events) error {
+func (c *Client) eventHandlers(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events]) map[proto.EventType]func(*proto.Events) error {
 return map[proto.EventType]func(*proto.Events) error{
 proto.EventType_SLUG_CHANGE: func(evt *proto.Events) error { return c.handleSlugChange(subscribe, evt) },
 proto.EventType_GET_SESSIONS: func(evt *proto.Events) error { return c.handleGetSessions(subscribe, evt) },
@@ -187,7 +238,7 @@ func (c *client) eventHandlers(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events]) map[proto.EventType]func(*proto.Events) error {
 }
 }
-func (c *client) handleSlugChange(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events], evt *proto.Events) error {
+func (c *Client) handleSlugChange(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events], evt *proto.Events) error {
 slugEvent := evt.GetSlugEvent()
 user := slugEvent.GetUser()
 oldSlug := slugEvent.GetOld()
@@ -221,7 +272,7 @@ func (c *client) handleSlugChange(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events], evt *proto.Events) error {
 }, "slug change success response")
 }
-func (c *client) handleGetSessions(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events], evt *proto.Events) error {
+func (c *Client) handleGetSessions(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events], evt *proto.Events) error {
 sessions := c.sessionRegistry.GetAllSessionFromUser(evt.GetGetSessionsEvent().GetIdentity())
 var details []*proto.Detail
@@ -245,7 +296,7 @@ func (c *client) handleGetSessions(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events], evt *proto.Events) error {
 }, "send get sessions response")
 }
-func (c *client) handleTerminateSession(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events], evt *proto.Events) error {
+func (c *Client) handleTerminateSession(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events], evt *proto.Events) error {
 terminate := evt.GetTerminateSessionEvent()
 user := terminate.GetUser()
 slug := terminate.GetSlug()
@@ -287,7 +338,7 @@ func (c *client) handleTerminateSession(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events], evt *proto.Events) error {
 }, "terminate session success response")
 }
-func (c *client) sendNode(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events], node *proto.Node, context string) error {
+func (c *Client) sendNode(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events], node *proto.Node, context string) error {
 if err := subscribe.Send(node); err != nil {
 if c.isConnectionError(err) {
 return err
@@ -297,7 +348,7 @@ func (c *client) sendNode(subscribe grpc.BidiStreamingClient[proto.Node, proto.Events], node *proto.Node, context string) error {
 return nil
 }
-func (c *client) protoToTunnelType(t proto.TunnelType) (types.TunnelType, error) {
+func (c *Client) protoToTunnelType(t proto.TunnelType) (types.TunnelType, error) {
 switch t {
 case proto.TunnelType_HTTP:
 return types.HTTP, nil
@@ -308,11 +359,11 @@ func (c *client) protoToTunnelType(t proto.TunnelType) (types.TunnelType, error) {
 }
 }
-func (c *client) ClientConn() *grpc.ClientConn {
+func (c *Client) GetConnection() *grpc.ClientConn {
 return c.conn
 }
-func (c *client) AuthorizeConn(ctx context.Context, token string) (authorized bool, user string, err error) {
+func (c *Client) AuthorizeConn(ctx context.Context, token string) (authorized bool, user string, err error) {
 check, err := c.authorizeConnectionService.Check(ctx, &proto.CheckRequest{AuthToken: token})
 if err != nil {
 return false, "UNAUTHORIZED", err
@@ -324,8 +375,17 @@ func (c *client) AuthorizeConn(ctx context.Context, token string) (authorized bool, user string, err error) {
 return true, check.GetUser(), nil
 }
-func (c *client) CheckServerHealth(ctx context.Context) error {
-healthClient := grpc_health_v1.NewHealthClient(c.ClientConn())
+func (c *Client) Close() error {
+if c.conn != nil {
+log.Printf("Closing gRPC connection to %s", c.config.Address)
+c.closing = true
+return c.conn.Close()
+}
+return nil
+}
+func (c *Client) CheckServerHealth(ctx context.Context) error {
+healthClient := grpc_health_v1.NewHealthClient(c.GetConnection())
 resp, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{
 Service: "",
 })
@@ -338,16 +398,11 @@ func (c *client) CheckServerHealth(ctx context.Context) error {
 return nil
 }
-func (c *client) Close() error {
-if c.conn != nil {
-log.Printf("Closing gRPC connection to %s", c.address)
-c.closing = true
-return c.conn.Close()
-}
-return nil
+func (c *Client) GetConfig() *GrpcConfig {
+return c.config
 }
-func (c *client) isConnectionError(err error) bool {
+func (c *Client) isConnectionError(err error) bool {
 if c.closing {
 return false
 }
+16 -15
@@ -49,7 +49,7 @@ func main() {
 sshConfig := &ssh.ServerConfig{
 NoClientAuth: true,
-ServerVersion: fmt.Sprintf("SSH-2.0-TunnelPlease-%s", version.GetShortVersion()),
+ServerVersion: fmt.Sprintf("SSH-2.0-TunnlPls-%s", version.GetShortVersion()),
 }
 sshKeyPath := "certs/ssh/id_rsa"
@@ -77,7 +77,7 @@ func main() {
 shutdownChan := make(chan os.Signal, 1)
 signal.Notify(shutdownChan, os.Interrupt, syscall.SIGTERM)
-var grpcClient client.Client
+var grpcClient *client.Client
 if isNodeMode {
 grpcHost := config.Getenv("GRPC_ADDRESS", "localhost")
 grpcPort := config.Getenv("GRPC_PORT", "8080")
@@ -87,13 +87,21 @@ func main() {
 log.Fatalf("NODE_TOKEN is required in node mode")
 }
-grpcClient, err = client.New(grpcAddr, sessionRegistry)
+c, err := client.New(&client.GrpcConfig{
+Address: grpcAddr,
+UseTLS: false,
+InsecureSkipVerify: false,
+Timeout: 10 * time.Second,
+KeepAlive: true,
+MaxRetries: 3,
+}, sessionRegistry)
 if err != nil {
 log.Fatalf("failed to create grpc client: %v", err)
 }
+grpcClient = c
 healthCtx, healthCancel := context.WithTimeout(ctx, 5*time.Second)
-if err = grpcClient.CheckServerHealth(healthCtx); err != nil {
+if err := grpcClient.CheckServerHealth(healthCtx); err != nil {
 healthCancel()
 log.Fatalf("gRPC health check failed: %v", err)
 }
@@ -101,15 +109,14 @@ func main() {
 go func() {
 identity := config.Getenv("DOMAIN", "localhost")
-if err = grpcClient.SubscribeEvents(ctx, identity, nodeToken); err != nil {
+if err := grpcClient.SubscribeEvents(ctx, identity, nodeToken); err != nil {
 errChan <- fmt.Errorf("failed to subscribe to events: %w", err)
 }
 }()
 }
-var app server.Server
 go func() {
-app, err = server.New(sshConfig, sessionRegistry, grpcClient)
+app, err := server.NewServer(sshConfig, sessionRegistry, grpcClient)
 if err != nil {
 errChan <- fmt.Errorf("failed to start server: %s", err)
 return
@@ -118,7 +125,7 @@ func main() {
 }()
 select {
-case err = <-errChan:
+case err := <-errChan:
 log.Printf("error happen : %s", err)
 case sig := <-shutdownChan:
 log.Printf("received signal %s, shutting down", sig)
@@ -126,14 +133,8 @@ func main() {
 cancel()
-if app != nil {
-if err = app.Close(); err != nil {
-log.Printf("failed to close server : %s", err)
-}
-}
 if grpcClient != nil {
-if err = grpcClient.Close(); err != nil {
+if err := grpcClient.Close(); err != nil {
 log.Printf("failed to close grpc conn : %s", err)
 }
 }
+9 -21
@@ -14,18 +14,14 @@ import (
 "golang.org/x/crypto/ssh"
 )
-type Server interface {
-Start()
-Close() error
-}
-type server struct {
-listener net.Listener
+type Server struct {
+conn *net.Listener
 config *ssh.ServerConfig
 sessionRegistry session.Registry
-grpcClient client.Client
+grpcClient *client.Client
 }
-func New(sshConfig *ssh.ServerConfig, sessionRegistry session.Registry, grpcClient client.Client) (Server, error) {
+func NewServer(sshConfig *ssh.ServerConfig, sessionRegistry session.Registry, grpcClient *client.Client) (*Server, error) {
 listener, err := net.Listen("tcp", fmt.Sprintf(":%s", config.Getenv("PORT", "2200")))
 if err != nil {
 log.Fatalf("failed to listen on port 2200: %v", err)
@@ -47,23 +43,19 @@ func New(sshConfig *ssh.ServerConfig, sessionRegistry session.Registry, grpcClient client.Client) (Server, error) {
 }
 }
-return &server{
-listener: listener,
+return &Server{
+conn: &listener,
 config: sshConfig,
 sessionRegistry: sessionRegistry,
 grpcClient: grpcClient,
 }, nil
 }
-func (s *server) Start() {
+func (s *Server) Start() {
 log.Println("SSH server is starting on port 2200...")
 for {
-conn, err := s.listener.Accept()
+conn, err := (*s.conn).Accept()
 if err != nil {
-if errors.Is(err, net.ErrClosed) {
-log.Println("listener closed, stopping server")
-return
-}
 log.Printf("failed to accept connection: %v", err)
 continue
 }
@@ -72,11 +64,7 @@ func (s *server) Start() {
 }
 }
-func (s *server) Close() error {
-return s.listener.Close()
-}
-func (s *server) handleConnection(conn net.Conn) {
+func (s *Server) handleConnection(conn net.Conn) {
 sshConn, chans, forwardingReqs, err := ssh.NewServerConn(conn, s.config)
 if err != nil {
 log.Printf("failed to establish SSH connection: %v", err)
+252
@@ -0,0 +1,252 @@
package session
import (
"bytes"
"encoding/binary"
"fmt"
"log"
"net"
portUtil "tunnel_pls/internal/port"
"tunnel_pls/internal/random"
"tunnel_pls/types"
"golang.org/x/crypto/ssh"
)
var blockedReservedPorts = []uint16{1080, 1433, 1521, 1900, 2049, 3306, 3389, 5432, 5900, 6379, 8080, 8443, 9000, 9200, 27017}
func (s *session) HandleGlobalRequest(GlobalRequest <-chan *ssh.Request) {
for req := range GlobalRequest {
switch req.Type {
case "shell", "pty-req":
err := req.Reply(true, nil)
if err != nil {
log.Println("Failed to reply to request:", err)
return
}
case "window-change":
p := req.Payload
if len(p) < 16 {
log.Println("invalid window-change payload")
err := req.Reply(false, nil)
if err != nil {
log.Println("Failed to reply to request:", err)
return
}
return
}
cols := binary.BigEndian.Uint32(p[0:4])
rows := binary.BigEndian.Uint32(p[4:8])
s.interaction.SetWH(int(cols), int(rows))
err := req.Reply(true, nil)
if err != nil {
log.Println("Failed to reply to request:", err)
return
}
default:
log.Println("Unknown request type:", req.Type)
err := req.Reply(false, nil)
if err != nil {
log.Println("Failed to reply to request:", err)
return
}
}
}
}
func (s *session) HandleTCPIPForward(req *ssh.Request) {
log.Println("Port forwarding request detected")
fail := func(msg string) {
log.Println(msg)
if err := req.Reply(false, nil); err != nil {
log.Println("Failed to reply to request:", err)
return
}
if err := s.lifecycle.Close(); err != nil {
log.Printf("failed to close session: %v", err)
}
}
reader := bytes.NewReader(req.Payload)
addr, err := readSSHString(reader)
if err != nil {
fail(fmt.Sprintf("Failed to read address from payload: %v", err))
return
}
var rawPortToBind uint32
if err = binary.Read(reader, binary.BigEndian, &rawPortToBind); err != nil {
fail(fmt.Sprintf("Failed to read port from payload: %v", err))
return
}
if rawPortToBind > 65535 {
fail(fmt.Sprintf("Port %d is larger than allowed port of 65535", rawPortToBind))
return
}
portToBind := uint16(rawPortToBind)
if isBlockedPort(portToBind) {
fail(fmt.Sprintf("Port %d is blocked or restricted", portToBind))
return
}
switch portToBind {
case 80, 443:
s.HandleHTTPForward(req, portToBind)
default:
s.HandleTCPForward(req, addr, portToBind)
}
}
func (s *session) HandleHTTPForward(req *ssh.Request, portToBind uint16) {
fail := func(msg string, key *types.SessionKey) {
log.Println(msg)
if key != nil {
s.registry.Remove(*key)
}
if err := req.Reply(false, nil); err != nil {
log.Println("Failed to reply to request:", err)
}
}
slug := random.GenerateRandomString(20)
key := types.SessionKey{Id: slug, Type: types.HTTP}
if !s.registry.Register(key, s) {
fail(fmt.Sprintf("Failed to register client with slug: %s", slug), nil)
return
}
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, uint32(portToBind))
if err != nil {
fail(fmt.Sprintf("Failed to write port to buffer: %v", err), &key)
return
}
log.Printf("HTTP forwarding approved on port: %d", portToBind)
err = req.Reply(true, buf.Bytes())
if err != nil {
fail(fmt.Sprintf("Failed to reply to request: %v", err), &key)
return
}
s.forwarder.SetType(types.HTTP)
s.forwarder.SetForwardedPort(portToBind)
s.slug.Set(slug)
s.lifecycle.SetStatus(types.RUNNING)
}
func (s *session) HandleTCPForward(req *ssh.Request, addr string, portToBind uint16) {
fail := func(msg string) {
log.Println(msg)
if err := req.Reply(false, nil); err != nil {
log.Println("Failed to reply to request:", err)
return
}
if err := s.lifecycle.Close(); err != nil {
log.Printf("failed to close session: %v", err)
}
}
cleanup := func(msg string, port uint16, listener net.Listener, key *types.SessionKey) {
log.Println(msg)
if key != nil {
s.registry.Remove(*key)
}
if port != 0 {
if setErr := portUtil.Default.SetPortStatus(port, false); setErr != nil {
log.Printf("Failed to reset port status: %v", setErr)
}
}
if listener != nil {
if closeErr := listener.Close(); closeErr != nil {
log.Printf("Failed to close listener: %v", closeErr)
}
}
if err := req.Reply(false, nil); err != nil {
log.Println("Failed to reply to request:", err)
}
_ = s.lifecycle.Close()
}
if portToBind == 0 {
unassigned, ok := portUtil.Default.GetUnassignedPort()
if !ok {
fail("No available port")
return
}
portToBind = unassigned
}
if claimed := portUtil.Default.ClaimPort(portToBind); !claimed {
fail(fmt.Sprintf("Port %d is already in use or restricted", portToBind))
return
}
log.Printf("Requested forwarding on %s:%d", addr, portToBind)
listener, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0:%d", portToBind))
if err != nil {
cleanup(fmt.Sprintf("Port %d is already in use or restricted", portToBind), portToBind, nil, nil)
return
}
key := types.SessionKey{Id: fmt.Sprintf("%d", portToBind), Type: types.TCP}
if !s.registry.Register(key, s) {
cleanup(fmt.Sprintf("Failed to register TCP client with id: %s", key.Id), portToBind, listener, nil)
return
}
buf := new(bytes.Buffer)
err = binary.Write(buf, binary.BigEndian, uint32(portToBind))
if err != nil {
cleanup(fmt.Sprintf("Failed to write port to buffer: %v", err), portToBind, listener, &key)
return
}
log.Printf("TCP forwarding approved on port: %d", portToBind)
err = req.Reply(true, buf.Bytes())
if err != nil {
cleanup(fmt.Sprintf("Failed to reply to request: %v", err), portToBind, listener, &key)
return
}
s.forwarder.SetType(types.TCP)
s.forwarder.SetListener(listener)
s.forwarder.SetForwardedPort(portToBind)
s.slug.Set(key.Id)
s.lifecycle.SetStatus(types.RUNNING)
go s.forwarder.AcceptTCPConnections()
}
func readSSHString(reader *bytes.Reader) (string, error) {
var length uint32
if err := binary.Read(reader, binary.BigEndian, &length); err != nil {
return "", err
}
strBytes := make([]byte, length)
if _, err := reader.Read(strBytes); err != nil {
return "", err
}
return string(strBytes), nil
}
func isBlockedPort(port uint16) bool {
if port == 80 || port == 443 {
return false
}
if port < 1024 && port != 0 {
return true
}
for _, p := range blockedReservedPorts {
if p == port {
return true
}
}
return false
}
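For reference, how the port gate in the file above behaves for a few representative values; this assumes the snippet lives in the same package, so isBlockedPort and blockedReservedPorts are in scope:

package session

import "fmt"

func portGateExamples() {
    fmt.Println(isBlockedPort(80))   // false: 80 and 443 are explicitly allowed
    fmt.Println(isBlockedPort(22))   // true: other ports below 1024 are rejected
    fmt.Println(isBlockedPort(5432)) // true: listed in blockedReservedPorts
    fmt.Println(isBlockedPort(9001)) // false: unprivileged and not on the blocklist
}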
+1 -249
@@ -1,17 +1,10 @@
package session package session
import ( import (
"bytes"
"encoding/binary"
"errors"
"fmt" "fmt"
"io"
"log" "log"
"net"
"time" "time"
"tunnel_pls/internal/config" "tunnel_pls/internal/config"
portUtil "tunnel_pls/internal/port"
"tunnel_pls/internal/random"
"tunnel_pls/session/forwarder" "tunnel_pls/session/forwarder"
"tunnel_pls/session/interaction" "tunnel_pls/session/interaction"
"tunnel_pls/session/lifecycle" "tunnel_pls/session/lifecycle"
@@ -52,8 +45,6 @@ type session struct {
 registry Registry
 }
-var blockedReservedPorts = []uint16{1080, 1433, 1521, 1900, 2049, 3306, 3389, 5432, 5900, 6379, 8080, 8443, 9000, 9200, 27017}
 func New(conn *ssh.ServerConn, initialReq <-chan *ssh.Request, sshChan <-chan ssh.NewChannel, sessionRegistry Registry, user string) Session {
 slugManager := slug.New()
 forwarderManager := forwarder.New(slugManager)
@@ -160,10 +151,7 @@ func (s *session) Start() error {
 s.HandleTCPIPForward(tcpipReq)
 s.interaction.Start()
-if err := s.lifecycle.Connection().Wait(); err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, net.ErrClosed) {
-log.Printf("ssh connection closed with error: %v", err)
-}
+s.lifecycle.Connection().Wait()
 if err := s.lifecycle.Close(); err != nil {
 log.Printf("failed to close session: %v", err)
 return err
@@ -191,239 +179,3 @@ func (s *session) waitForTCPIPForward() *ssh.Request {
 return nil
 }
 }
func (s *session) HandleGlobalRequest(GlobalRequest <-chan *ssh.Request) {
for req := range GlobalRequest {
switch req.Type {
case "shell", "pty-req":
err := req.Reply(true, nil)
if err != nil {
log.Println("Failed to reply to request:", err)
return
}
case "window-change":
p := req.Payload
if len(p) < 16 {
log.Println("invalid window-change payload")
err := req.Reply(false, nil)
if err != nil {
log.Println("Failed to reply to request:", err)
return
}
return
}
cols := binary.BigEndian.Uint32(p[0:4])
rows := binary.BigEndian.Uint32(p[4:8])
s.interaction.SetWH(int(cols), int(rows))
err := req.Reply(true, nil)
if err != nil {
log.Println("Failed to reply to request:", err)
return
}
default:
log.Println("Unknown request type:", req.Type)
err := req.Reply(false, nil)
if err != nil {
log.Println("Failed to reply to request:", err)
return
}
}
}
}
func (s *session) HandleTCPIPForward(req *ssh.Request) {
log.Println("Port forwarding request detected")
fail := func(msg string) {
log.Println(msg)
if err := req.Reply(false, nil); err != nil {
log.Println("Failed to reply to request:", err)
return
}
if err := s.lifecycle.Close(); err != nil {
log.Printf("failed to close session: %v", err)
}
}
reader := bytes.NewReader(req.Payload)
addr, err := readSSHString(reader)
if err != nil {
fail(fmt.Sprintf("Failed to read address from payload: %v", err))
return
}
var rawPortToBind uint32
if err = binary.Read(reader, binary.BigEndian, &rawPortToBind); err != nil {
fail(fmt.Sprintf("Failed to read port from payload: %v", err))
return
}
if rawPortToBind > 65535 {
fail(fmt.Sprintf("Port %d is larger than allowed port of 65535", rawPortToBind))
return
}
portToBind := uint16(rawPortToBind)
if isBlockedPort(portToBind) {
fail(fmt.Sprintf("Port %d is blocked or restricted", portToBind))
return
}
switch portToBind {
case 80, 443:
s.HandleHTTPForward(req, portToBind)
default:
s.HandleTCPForward(req, addr, portToBind)
}
}
func (s *session) HandleHTTPForward(req *ssh.Request, portToBind uint16) {
fail := func(msg string, key *types.SessionKey) {
log.Println(msg)
if key != nil {
s.registry.Remove(*key)
}
if err := req.Reply(false, nil); err != nil {
log.Println("Failed to reply to request:", err)
}
}
randomString := random.GenerateRandomString(20)
key := types.SessionKey{Id: randomString, Type: types.HTTP}
if !s.registry.Register(key, s) {
fail(fmt.Sprintf("Failed to register client with slug: %s", randomString), nil)
return
}
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, uint32(portToBind))
if err != nil {
fail(fmt.Sprintf("Failed to write port to buffer: %v", err), &key)
return
}
log.Printf("HTTP forwarding approved on port: %d", portToBind)
err = req.Reply(true, buf.Bytes())
if err != nil {
fail(fmt.Sprintf("Failed to reply to request: %v", err), &key)
return
}
s.forwarder.SetType(types.HTTP)
s.forwarder.SetForwardedPort(portToBind)
s.slug.Set(randomString)
s.lifecycle.SetStatus(types.RUNNING)
}
func (s *session) HandleTCPForward(req *ssh.Request, addr string, portToBind uint16) {
fail := func(msg string) {
log.Println(msg)
if err := req.Reply(false, nil); err != nil {
log.Println("Failed to reply to request:", err)
return
}
if err := s.lifecycle.Close(); err != nil {
log.Printf("failed to close session: %v", err)
}
}
cleanup := func(msg string, port uint16, listener net.Listener, key *types.SessionKey) {
log.Println(msg)
if key != nil {
s.registry.Remove(*key)
}
if port != 0 {
if setErr := portUtil.Default.SetPortStatus(port, false); setErr != nil {
log.Printf("Failed to reset port status: %v", setErr)
}
}
if listener != nil {
if closeErr := listener.Close(); closeErr != nil {
log.Printf("Failed to close listener: %v", closeErr)
}
}
if err := req.Reply(false, nil); err != nil {
log.Println("Failed to reply to request:", err)
}
_ = s.lifecycle.Close()
}
if portToBind == 0 {
unassigned, ok := portUtil.Default.GetUnassignedPort()
if !ok {
fail("No available port")
return
}
portToBind = unassigned
}
if claimed := portUtil.Default.ClaimPort(portToBind); !claimed {
fail(fmt.Sprintf("Port %d is already in use or restricted", portToBind))
return
}
log.Printf("Requested forwarding on %s:%d", addr, portToBind)
listener, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0:%d", portToBind))
if err != nil {
cleanup(fmt.Sprintf("Port %d is already in use or restricted", portToBind), portToBind, nil, nil)
return
}
key := types.SessionKey{Id: fmt.Sprintf("%d", portToBind), Type: types.TCP}
if !s.registry.Register(key, s) {
cleanup(fmt.Sprintf("Failed to register TCP client with id: %s", key.Id), portToBind, listener, nil)
return
}
buf := new(bytes.Buffer)
err = binary.Write(buf, binary.BigEndian, uint32(portToBind))
if err != nil {
cleanup(fmt.Sprintf("Failed to write port to buffer: %v", err), portToBind, listener, &key)
return
}
log.Printf("TCP forwarding approved on port: %d", portToBind)
err = req.Reply(true, buf.Bytes())
if err != nil {
cleanup(fmt.Sprintf("Failed to reply to request: %v", err), portToBind, listener, &key)
return
}
s.forwarder.SetType(types.TCP)
s.forwarder.SetListener(listener)
s.forwarder.SetForwardedPort(portToBind)
s.slug.Set(key.Id)
s.lifecycle.SetStatus(types.RUNNING)
go s.forwarder.AcceptTCPConnections()
}
func readSSHString(reader *bytes.Reader) (string, error) {
var length uint32
if err := binary.Read(reader, binary.BigEndian, &length); err != nil {
return "", err
}
strBytes := make([]byte, length)
if _, err := reader.Read(strBytes); err != nil {
return "", err
}
return string(strBytes), nil
}
func isBlockedPort(port uint16) bool {
if port == 80 || port == 443 {
return false
}
if port < 1024 && port != 0 {
return true
}
for _, p := range blockedReservedPorts {
if p == port {
return true
}
}
return false
}