diff --git a/.github/workflows/build_and_publish_test.yml b/.github/workflows/build_and_publish_test.yml index 935d6cd..4eb6522 100644 --- a/.github/workflows/build_and_publish_test.yml +++ b/.github/workflows/build_and_publish_test.yml @@ -128,7 +128,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: '1.25.1' + go-version: '1.25.3' cache: false # Install system dependencies based on runner OS @@ -198,18 +198,6 @@ jobs: echo "CGO_ENABLED=1" >> $GITHUB_ENV echo "GOOS=windows" >> $GITHUB_ENV - - name: "Build rapidyenc (macOS)" - if: matrix.os == 'darwin' - timeout-minutes: 5 - run: | - cd rapidyenc - if [ "${{ matrix.arch }}" = "amd64" ]; then - ./crossbuild_rapidyenc_darwin-amd64.sh || ./build_rapidyenc_linux-amd64.sh - else - # For arm64, we might need to build natively or cross-compile - ./build_rapidyenc_linux-amd64.sh - fi - - name: "Build rapidyenc (Alpine/musl)" if: matrix.libc == 'musl' timeout-minutes: 5 @@ -217,106 +205,14 @@ jobs: cd rapidyenc && ./build_rapidyenc_linux-amd64.sh # Note: May need Alpine-specific build script - # Run tests with race detector for supported platforms - - name: "Run race detector tests (amd64/linux only)" - if: matrix.os == 'linux' && matrix.arch == 'amd64' && !matrix.cross_compile - timeout-minutes: 5 - run: go test -race ./rapidyenc/ - - - name: "Run normal tests (non-cross-compile platforms)" - if: ${{ !(matrix.cross_compile) && !(matrix.os == 'linux' && matrix.arch == 'amd64') }} - timeout-minutes: 5 - run: go test ./rapidyenc/ - - - name: "Skip tests for cross-compiled platforms" - if: matrix.cross_compile - run: echo "Skipping tests for cross-compiled platform ${{ matrix.name }} (requires emulation or native hardware)" - - - name: "Test rapidyenc integration" - if: ${{ !(matrix.cross_compile) }} - timeout-minutes: 2 - run: | - # Build first if not cross-compiling - go build -o NZBreX -tags other . 
- ./NZBreX -testrapidyenc -# - name: Clean Go module cache -# run: | -# rm -rf ~/.cache/go-build ~/go/pkg/mod -# - name: Restore Go modules cache -# # your cache restore step here - - - name: "Set up Go" - uses: actions/setup-go@v5 - with: - go-version: '1.24.3' - cache: true - - #- name: Cache Go modules - # uses: actions/cache@v4 - # with: - # path: | - # ~/.cache/go-build - # ~/go/pkg/mod - # key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - # restore-keys: | - # ${{ runner.os }}-go- - - - name: "clone rapidyenc" - run: | - cd rapidyenc - if [ ! -e rapidyenc ]; then - ./clone_rapidyenc.sh - if [ ! -e rapidyenc/.git ]; then - echo "rapidyenc/ src not found, exiting" - exit 1 - fi - else - echo "rapidyenc/ src exists, skipping clone" - fi - - # Build rapidyenc for different platforms - - name: "Build rapidyenc (Linux amd64)" - if: matrix.os == 'linux' && matrix.arch == 'amd64' && !matrix.cross_compile - timeout-minutes: 5 - run: cd rapidyenc && ./build_rapidyenc_linux-amd64.sh - - - name: "Build rapidyenc (Linux arm64 cross-compile)" - if: matrix.os == 'linux' && matrix.arch == 'arm64' && matrix.cross_compile - timeout-minutes: 5 - run: | - cd rapidyenc && ./build_rapidyenc_linux-arm64.sh - echo "CC=aarch64-linux-gnu-gcc" >> $GITHUB_ENV - echo "CXX=aarch64-linux-gnu-g++" >> $GITHUB_ENV - echo "CGO_ENABLED=1" >> $GITHUB_ENV - echo "GOARCH=arm64" >> $GITHUB_ENV - - - name: "Build rapidyenc (Windows amd64 cross-compile)" - if: matrix.os == 'windows' && matrix.arch == 'amd64' - timeout-minutes: 5 - run: | - cd rapidyenc && ./crossbuild_rapidyenc_windows-amd64.sh - echo "CC=x86_64-w64-mingw32-gcc" >> $GITHUB_ENV - echo "CGO_ENABLED=1" >> $GITHUB_ENV - echo "GOOS=windows" >> $GITHUB_ENV - - name: "Build rapidyenc (macOS)" if: matrix.os == 'darwin' timeout-minutes: 5 run: | cd rapidyenc - if [ "${{ matrix.arch }}" = "amd64" ]; then - ./crossbuild_rapidyenc_darwin-amd64.sh || ./build_rapidyenc_linux-amd64.sh - else - # For arm64, we might need to build natively or 
cross-compile - ./build_rapidyenc_linux-amd64.sh - fi - - - name: "Build rapidyenc (Alpine/musl)" - if: matrix.libc == 'musl' - timeout-minutes: 5 - run: | - cd rapidyenc && ./build_rapidyenc_linux-amd64.sh - # Note: May need Alpine-specific build script + # macOS builds natively - the build_rapidyenc_linux-amd64.sh script + # is actually a native build script (despite the misleading name) + ./build_rapidyenc_linux-amd64.sh darwin # Run tests with race detector for supported platforms - name: "Run race detector tests (amd64/linux only)" @@ -388,11 +284,14 @@ jobs: mkdir -p builds/${{ matrix.name }}/usr/bin if [ "${{ matrix.os }}" == "windows" ]; then binary_name=NZBreX.exe + # For Windows: use static linking to avoid DLL dependencies + LDFLAGS="-X main.appVersion=${{ env.VERSION }} -linkmode external -extldflags '-static -static-libgcc -static-libstdc++'" else binary_name=NZBreX + LDFLAGS="-X main.appVersion=${{ env.VERSION }}" fi echo "BINARY=$binary_name" >> $GITHUB_ENV - GOARCH=${{ matrix.arch }} GOOS=${{ matrix.os }} go build -ldflags="-s -w -X main.appVersion=${{ env.VERSION }}" -o builds/${{ matrix.name }}/usr/bin/$binary_name + GOARCH=${{ matrix.arch }} GOOS=${{ matrix.os }} go build -ldflags="$LDFLAGS" -o builds/${{ matrix.name }}/usr/bin/$binary_name if [ ! -f cleanHeaders.txt ]; then echo "cleanHeaders.txt not found! Build cannot continue." 
>&2 exit 1 @@ -418,7 +317,7 @@ jobs: cat builds/${{ matrix.name }}/usr/bin/$binary_name.sha512sum fi # - # packing the build + # packing the build (only create zip and deb for now) # # .zip pwd && ls -lha && echo ".zip Packing builds/${{ matrix.name }}/usr/bin/$binary_name" @@ -427,18 +326,18 @@ jobs: LICENSE README.md rapidyenc/LICENSE rapidyenc/rapidyenc/README.md rapidyenc/rapidyenc/crcutil-1.0/LICENSE rapidyenc/rapidyenc/build/rapidyenc_* rapidyenc/rapidyenc/build/librapidyenc.* # # .tgz (tar + gzip) - pwd && ls -lha && echo ".tgz Packing builds/${{ matrix.name }}/usr/bin/$binary_name" - tar -czf "NZBreX_${{ env.VERSION }}-${{ env.SHA7 }}-${{ matrix.name }}${{ env.LIBC }}.tgz" \ - -C builds/${{ matrix.name }}/usr/bin $binary_name $binary_name.sha256sum $binary_name.sha512sum \ - -C ${{ github.workspace }} cleanHeaders.txt provider.sample.json provider.ygg.json \ - -C ${{ github.workspace }} LICENSE README.md rapidyenc/LICENSE rapidyenc/rapidyenc/README.md rapidyenc/rapidyenc/crcutil-1.0/LICENSE rapidyenc/rapidyenc/build/rapidyenc_* rapidyenc/rapidyenc/build/librapidyenc.* + #pwd && ls -lha && echo ".tgz Packing builds/${{ matrix.name }}/usr/bin/$binary_name" + #tar -czf "NZBreX_${{ env.VERSION }}-${{ env.SHA7 }}-${{ matrix.name }}${{ env.LIBC }}.tgz" \ + # -C builds/${{ matrix.name }}/usr/bin $binary_name $binary_name.sha256sum $binary_name.sha512sum \ + # -C ${{ github.workspace }} cleanHeaders.txt provider.sample.json provider.ygg.json \ + # -C ${{ github.workspace }} LICENSE README.md rapidyenc/LICENSE rapidyenc/rapidyenc/README.md rapidyenc/rapidyenc/crcutil-1.0/LICENSE rapidyenc/rapidyenc/build/rapidyenc_* rapidyenc/rapidyenc/build/librapidyenc.* # # .xz (tar + xz) - pwd && ls -lha && echo ".xz Packing builds/${{ matrix.name }}/usr/bin/$binary_name" - tar -cJf "NZBreX_${{ env.VERSION }}-${{ env.SHA7 }}-${{ matrix.name }}${{ env.LIBC }}.tar.xz" \ - -C builds/${{ matrix.name }}/usr/bin $binary_name $binary_name.sha256sum $binary_name.sha512sum \ - -C ${{ 
github.workspace }} cleanHeaders.txt provider.sample.json provider.ygg.json \ - -C ${{ github.workspace }} LICENSE README.md rapidyenc/LICENSE rapidyenc/rapidyenc/README.md rapidyenc/rapidyenc/crcutil-1.0/LICENSE rapidyenc/rapidyenc/build/rapidyenc_* rapidyenc/rapidyenc/build/librapidyenc.* + #pwd && ls -lha && echo ".xz Packing builds/${{ matrix.name }}/usr/bin/$binary_name" + #tar -cJf "NZBreX_${{ env.VERSION }}-${{ env.SHA7 }}-${{ matrix.name }}${{ env.LIBC }}.tar.xz" \ + # -C builds/${{ matrix.name }}/usr/bin $binary_name $binary_name.sha256sum $binary_name.sha512sum \ + # -C ${{ github.workspace }} cleanHeaders.txt provider.sample.json provider.ygg.json \ + # -C ${{ github.workspace }} LICENSE README.md rapidyenc/LICENSE rapidyenc/rapidyenc/README.md rapidyenc/rapidyenc/crcutil-1.0/LICENSE rapidyenc/rapidyenc/build/rapidyenc_* rapidyenc/rapidyenc/build/librapidyenc.* # # done packing # @@ -558,8 +457,8 @@ jobs: prerelease: ${{ contains(env.VERSION, 'test') || contains(env.VERSION, 'beta') || contains(env.VERSION, 'alpha') }} files: | NZBreX_${{ env.VERSION }}-${{ env.SHA7 }}-${{ matrix.name }}${{ env.LIBC }}.deb - NZBreX_${{ env.VERSION }}-${{ env.SHA7 }}-${{ matrix.name }}${{ env.LIBC }}.tgz - NZBreX_${{ env.VERSION }}-${{ env.SHA7 }}-${{ matrix.name }}${{ env.LIBC }}.tar.xz + #NZBreX_${{ env.VERSION }}-${{ env.SHA7 }}-${{ matrix.name }}${{ env.LIBC }}.tgz + #NZBreX_${{ env.VERSION }}-${{ env.SHA7 }}-${{ matrix.name }}${{ env.LIBC }}.tar.xz NZBreX_${{ env.VERSION }}-${{ env.SHA7 }}-${{ matrix.name }}${{ env.LIBC }}.zip NZBreX_${{ env.VERSION }}-${{ env.SHA7 }}-${{ matrix.name }}${{ env.LIBC }}.sha256sum NZBreX_${{ env.VERSION }}-${{ env.SHA7 }}-${{ matrix.name }}${{ env.LIBC }}.sha512sum diff --git a/.gitignore b/.gitignore index 12f3814..64e335c 100644 --- a/.gitignore +++ b/.gitignore @@ -36,6 +36,7 @@ test*.* *.pem *.key *.crt +.*passwd* bak/* log/ diff --git a/Cache.go b/Cache.go index 6df7892..a9a65e6 100644 --- a/Cache.go +++ b/Cache.go @@ -3,6 +3,7 @@ 
package main import ( "bufio" "fmt" + "log" "os" "path/filepath" "strings" @@ -156,19 +157,25 @@ func (c *Cache) GoCacheWriter(cid int) { } n := c.CacheWriter(item) wrote_bytes += uint64(n) + // Memory slot already released by GoDownsRoutine before Add2Cache } } // end func c.GoCacheWriter func (c *Cache) GoYencWriter(cid int) { var wrote_bytes uint64 + dlog(always, "GoYencWriter %d started", cid) for { + dlog(cfg.opt.DebugCache, "GoYencWriter %d waiting for item...", cid) yitem := <-c.yenc_writer_chan if yitem == nil { + dlog(always, "YencWriter %d received nil item. closing. wrote total %d bytes", cid, wrote_bytes) c.yenc_writer_chan <- nil return } + dlog(c.debug, "GoYencWriter %d processing seg.Id='%s'", cid, yitem.item.segment.Id) n := c.YencWriter(yitem) wrote_bytes += uint64(n) + dlog(c.debug, "GoYencWriter %d finished seg.Id='%s' wrote=%d bytes", cid, yitem.item.segment.Id, n) } } // end func c.GoCacheWriter @@ -295,20 +302,31 @@ func (c *Cache) WriteYenc(item *segmentChanItem, yPart *yenc.Part) { dlog(always, "ERROR WriteYenc: empty Body seg.Id='%s'", item.segment.Id) return } - GCounter.Incr("yencQueueCnt") - GCounter.Incr("TOTAL_yencQueueCnt") + + // Set flag BEFORE incrementing counter to avoid race item.mux.Lock() item.flaginYenc = true item.mux.Unlock() - c.yenc_writer_chan <- ¥c_item{ - item: item, - yPart: yPart, + +wait: + for { + select { + case c.yenc_writer_chan <- ¥c_item{item: item, yPart: yPart}: // enqueue + // Increment counter AFTER successful enqueue + GCounter.Incr("yencQueueCnt") + GCounter.Incr("TOTAL_yencQueueCnt") + break wait + default: + // chan full + log.Printf("WriteYenc waiting to enqueue seg.Id='%s' yenc_queue_cnt=%d channel=%d/%d", item.segment.Id, GCounter.GetValue("yencQueueCnt"), len(c.yenc_writer_chan), cap(c.yenc_writer_chan)) + time.Sleep(time.Second) + } } } // emd func WriteYenc func (c *Cache) YencWriter(yitem *yenc_item) (wrote_bytes int) { defer GCounter.Decr("yencQueueCnt") - + dlog(always, "YencWriter processing 
seg.Id='%s'", yitem.item.segment.Id) yitem.item.mux.Lock() if yitem.item.hashedId == "" { yitem.item.hashedId = SHA256str("<" + yitem.item.segment.Id + ">") @@ -318,53 +336,59 @@ func (c *Cache) YencWriter(yitem *yenc_item) (wrote_bytes int) { _, _, yencdir, fp, fp_tmp := c.GetYenc(yitem.item) if FileExists(fp) { - c.resetYencFlagsOnErr(yitem.item) + dlog(c.debug, "YencWriter file already exists, skipping: '%s'", fp) + yitem.item.mux.Lock() + yitem.item.flaginYenc = false + yitem.item.flagisYenc = true // File exists, so yenc IS done + yitem.item.mux.Unlock() return 0 } if !Mkdir(yencdir) { + dlog(always, "ERROR YencWriter Mkdir failed dir='%s'", yencdir) c.resetYencFlagsOnErr(yitem.item) return 0 } - dlog(c.debug, "Writing yenc part: '%s'", fp_tmp) - if file, err := os.OpenFile(fp_tmp, os.O_CREATE|os.O_WRONLY, 0644); err == nil { - defer func() { - if cerr := file.Close(); cerr != nil { - dlog(always, "ERROR YencWriter file.Close err='%v'", cerr) - } - }() - datawriter := bufio.NewWriterSize(file, DefaultYencWriteBuffer) - if n, err := datawriter.Write(yitem.yPart.Body); err != nil { - dlog(always, "ERROR YencWriter datawriter.Write err='%v'", err) - c.resetYencFlagsOnErr(yitem.item) - return 0 - } else { - wrote_bytes += n - } + dlog(always, "DEBUG: Writing yenc part: '%s'", fp_tmp) + file, err := os.OpenFile(fp_tmp, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + dlog(always, "ERROR YencWriter OpenFile failed fp_tmp='%s' err='%v'", fp_tmp, err) + c.resetYencFlagsOnErr(yitem.item) + return 0 + } - if err := datawriter.Flush(); err != nil { - dlog(always, "ERROR YencWriter datawriter.Flush err='%v'", err) - c.resetYencFlagsOnErr(yitem.item) - return 0 - } + datawriter := bufio.NewWriterSize(file, DefaultYencWriteBuffer) + if n, err := datawriter.Write(yitem.yPart.Body); err != nil { + dlog(always, "ERROR YencWriter datawriter.Write err='%v'", err) + c.resetYencFlagsOnErr(yitem.item) + return 0 + } else { + wrote_bytes += n + } - if err := os.Rename(fp_tmp, fp); 
err != nil { - dlog(always, "ERROR YencWriter move .tmp failed err='%v'", err) - c.resetYencFlagsOnErr(yitem.item) - return 0 - } + if err := datawriter.Flush(); err != nil { + dlog(always, "ERROR YencWriter datawriter.Flush err='%v'", err) + c.resetYencFlagsOnErr(yitem.item) + return 0 + } - yitem.item.mux.Lock() - yitem.item.flaginYenc = false - yitem.item.flagisYenc = true - /* // watch out for broken wings #99ffff! - if yitem.item.flaginUP { - doMemReturn = false - } - */ - yitem.item.mux.Unlock() - } // end OpenFile + if err := os.Rename(fp_tmp, fp); err != nil { + dlog(always, "ERROR YencWriter move .tmp failed err='%v'", err) + c.resetYencFlagsOnErr(yitem.item) + return 0 + } + + if err := file.Close(); err != nil { + dlog(always, "ERROR YencWriter file.Close err='%v'", err) + c.resetYencFlagsOnErr(yitem.item) + return 0 + } + + yitem.item.mux.Lock() + yitem.item.flaginYenc = false + yitem.item.flagisYenc = true + yitem.item.mux.Unlock() yitem.yPart.Body = nil yitem.yPart = nil return diff --git a/Config.go b/Config.go index c268cd1..3aa6d4b 100644 --- a/Config.go +++ b/Config.go @@ -7,7 +7,7 @@ import ( "github.com/go-while/go-loggedrwmutex" ) -const UseSharedCC = false // experimental devel flag, to test sharedConn between routines +const UseSharedCC = true // experimental devel flag, to test sharedConn between routines const UseReadDeadConn = false // experimental devel flag, to test reading from dead connections const UseNoDeadline = true // experimental devel flag, to test reading from dead connections @@ -39,9 +39,9 @@ const ( // DefaultConnectTimeout defines the timeout for connecting to a server DefaultConnectTimeout = 9 * time.Second // DefaultConnectErrSleep defines the time to wait before retrying a connection after an error - DefaultConnectErrSleep = 9 * time.Second + DefaultConnectErrSleep = 3 * time.Second // DefaultRequeueDelay defines the delay before requeuing an item in the segment channel - DefaultRequeueDelay = 9 * time.Second + 
//DefaultRequeueDelay = 9 * time.Second ) type Config struct { @@ -101,14 +101,19 @@ type CFG struct { DebugFlags bool `json:"DebugFlags"` // if true, enable printing item flags DebugRapidYenc bool `json:"DebugRapidYenc"` // if true, enable rapidyenc debug output //DebugSTREAM bool `json:"DebugSTREAM"` // if true, enable STREAM debug output - Verbose bool `json:"Verbose"` // if true, enable verbose output - Bar bool `json:"Bar"` // if true, show progress bar - Colors bool `json:"Colors"` // if true, enable colored output - MaxArtSize int `json:"MaxArtSize"` // maximum article size in bytes - SloMoC int `json:"SloMoC"` // slow motion for checking articles - SloMoD int `json:"SloMoD"` // slow motion for downloading articles - SloMoU int `json:"SloMoU"` // slow motion for uploading articles - SessThreshold int `json:"SessThreshold"` // max number of sessions a processor keeps open + Verbose bool `json:"Verbose"` // if true, enable verbose output + Bar bool `json:"Bar"` // if true, show progress bar + Colors bool `json:"Colors"` // if true, enable colored output + MaxArtSize int `json:"MaxArtSize"` // maximum article size in bytes + SloMoC int `json:"SloMoC"` // slow motion for checking articles + SloMoD int `json:"SloMoD"` // slow motion for downloading articles + SloMoU int `json:"SloMoU"` // slow motion for uploading articles + SessThreshold int `json:"SessThreshold"` // max number of sessions a processor keeps open + TLSCertPem string `json:"TLSCertPem"` // path to the TLS certificate in PEM format: /etc/letsencrypt/live/sub.domain.com/fullchain.pem + TLSPrivKey string `json:"TLSPrivKey"` // path to the TLS key in PEM format: /etc/letsencrypt/live/sub.domain.com/privkey.pem + ProxyTCP int `json:"ProxyTCP"` // port for the TCP proxy, 0 = no proxy + ProxyTLS int `json:"ProxyTLS"` // port for the TLS proxy, 0 = no proxy + ProxyPasswdFile string `json:"ProxyPasswdFile"` // path to the file with proxy passwords, format: "username|bcryptHash|MaxConns|ExpireAt(unix 
timestamp)|post" per line } // end CFG struct type Provider struct { @@ -116,6 +121,7 @@ type Provider struct { NoCheck bool // if true, no check will be done for this provider NoDownload bool // if true, no download will be done for this provider NoUpload bool // if true, no upload will be done for this provider + Newsreader bool // if true, provider allows newsreader commands (group, list, xhdr, xover,... etc.) Group string // group name is used internally to divide providers accounts into groups Name string // provider name, used for logging and identification Host string // provider host, used for connecting to the server @@ -126,6 +132,7 @@ type Provider struct { Username string // username for authentication Password string // password for authentication MaxConns int // maximum number of connections to the provider + OpenConns int // current number of open connections TCPMode string // TCP mode to use (tcp, tcp4, tcp6) PreferIHAVE bool // if true, prefer IHAVE over POST method MaxConnErrors int // maximum number of errors before giving up on a connection @@ -150,8 +157,26 @@ type Provider struct { refreshed uint64 // number of articles refreshed verified uint64 // number of articles verified } + // speed tracking (updated by Speedmeter every PrintStats seconds) + speed struct { + downloadSpeed float64 // bytes/sec (download) + uploadSpeed float64 // bytes/sec (upload) + lastUpdated int64 // unix timestamp of last update + } } // end Provider struct +func (p *Provider) IncrementOpenConns() { + p.mux.Lock() + p.OpenConns++ + p.mux.Unlock() +} + +func (p *Provider) DecrementOpenConns() { + p.mux.Lock() + p.OpenConns-- + p.mux.Unlock() +} + type segmentChanItem struct { // segmentChanItem is used to store information about a segment/article // that is being processed in the segment channel. 
@@ -198,6 +223,7 @@ type segmentChanItem struct { flaginYenc bool // if true, item is in writing to yenc cache flagisYenc bool // if true, item has been written to yenc cache flagCache bool // if true, item is cached + memlocked int // counts up if item is memlocked checkedOn int // counts up if item has been checked on a provider pushedDL int // a counter for debugging pushedUP int // a counter for debugging @@ -306,7 +332,7 @@ const ( cmdARTICLE = "ARTICLE" cmdHEAD = "HEAD" cmdBODY = "BODY" - cmdSTAT = "STAT" - cmdIHAVE = "IHAVE" - cmdPOST = "POST" + //cmdSTAT = "STAT" + //cmdIHAVE = "IHAVE" + //cmdPOST = "POST" ) diff --git a/ConnPool.go b/ConnPool.go index b4dc0d9..1c9e11c 100644 --- a/ConnPool.go +++ b/ConnPool.go @@ -71,17 +71,15 @@ type ConnPool struct { openConns int // counter nextconnId uint64 // unique connection id //mux sync.RWMutex - mux *loggedrwmutex.LoggedSyncRWMutex - counter *Counter_uint64 // used to count connections, parked conns, etc. - pool chan *ConnItem // idle/parked conns are in here - poolmap map[uint64]*ConnItem // idle/parked conns are in here - //wait []chan *ConnItem - rserver string // "host:port" + mux *loggedrwmutex.LoggedSyncRWMutex + Counter *Counter_uint64 // used to count connections, parked conns, etc. 
+ pool chan *ConnItem // idle/parked conns are in here + rserver string // "host:port" wants_auth bool // we have created an endless loop // provider.ConnPool.provider.ConnPool.provider.ConnPool.provider.ConnPool.provider.ConnPool.provider.ConnPool.CloseConn(provider, connitem) xD // if we want to call closeconn from outside the routine which keep a provider ptr too - // we cann call ConnPools[provider.id].CloseConn(provider, connitem) + // we can call ConnPools[provider.id].CloseConn(connitem) provider *Provider // pointer to the provider which created this ConnPool and holds the config s *SESSION // session pointer to the session which created this ConnPool (access via c.s.%%SESSIONVARIABLES%% OR provider.ConnPool.s.%%SESSIONVARIABLES%%). } @@ -125,9 +123,8 @@ func NewConnPool(s *SESSION, provider *Provider, workerWGconnReady *sync.WaitGro wants_auth := (provider.Username != "" && provider.Password != "") provider.ConnPool = &ConnPool{ - counter: NewCounter(10), // used to count connections, parked conns, etc. + Counter: NewCounter(10), // used to count connections, parked conns, etc. pool: make(chan *ConnItem, provider.MaxConns), - poolmap: make(map[uint64]*ConnItem, provider.MaxConns), // use map rserver: rserver, wants_auth: wants_auth, provider: provider, mux: &loggedrwmutex.LoggedSyncRWMutex{Name: fmt.Sprintf("ConnPool-%s", provider.Name)}, @@ -211,8 +208,7 @@ func (c *ConnPool) connect() (connitem *ConnItem, err error) { if isNetworkUnreachable(err) { return nil, fmt.Errorf("error connect Unreachable network! 
'%s' @ '%s' err='%v'", c.provider.Host, c.provider.Name, err) } - dlog(always, "ERROR connect Dial '%s' retry in %.0fs wants_ssl=%t err='%v'", c.provider.Name, DefaultConnectErrSleep.Seconds(), c.provider.SSL, err) - //time.Sleep(DefaultConnectErrSleep) + dlog(cfg.opt.DebugConnPool, "ERROR connect Dial '%s' no retry err='%v'", c.provider.Name, err) return nil, err } case true: @@ -228,8 +224,7 @@ func (c *ConnPool) connect() (connitem *ConnItem, err error) { if isNetworkUnreachable(err) { return nil, fmt.Errorf("error connect Unreachable network! '%s' @ '%s' err='%v'", c.provider.Host, c.provider.Name, err) } - dlog(always, "ERROR connect Dial '%s' retry in %.0fs wants_ssl=%t err='%v'", c.provider.Name, DefaultConnectErrSleep.Seconds(), c.provider.SSL, err) - //time.Sleep(DefaultConnectErrSleep) + dlog(cfg.opt.DebugConnPool, "ERROR connect Dial '%s' no-retry err='%v'", c.provider.Name, err) return nil, err } } // end switch @@ -247,8 +242,7 @@ func (c *ConnPool) connect() (connitem *ConnItem, err error) { if conn != nil { conn.Close() } - dlog(always, "ERROR connect welcome '%s' retry in %.0fs code=%d msg='%s' err='%v'", c.provider.Name, DefaultConnectErrSleep.Seconds(), code, msg, err) - //time.Sleep(DefaultConnectErrSleep) + dlog(always, "ERROR in connect '%s' welcome banner code=%d err='%v'", c.provider.Name, code, err) return nil, err } dlog(cfg.opt.DebugConnPool, "ConnPool connect welcome '%s' time0=(%d ms) time00=(%d ms) time000=(%d ms)", c.provider.Name, time.Since(time0).Milliseconds(), time.Since(time00).Milliseconds(), time.Since(time000).Milliseconds()) @@ -259,8 +253,8 @@ func (c *ConnPool) connect() (connitem *ConnItem, err error) { } // end func connect func (c *ConnPool) auth(srvtp *textproto.Conn, conn net.Conn, start time.Time) (connitem *ConnItem, err error) { - var code int - var msg string + //var code int + //var msg string time1 := time.Now() // send auth user sequence id, err := srvtp.Cmd("AUTHINFO USER %s", c.provider.Username) @@ -268,21 
+262,19 @@ func (c *ConnPool) auth(srvtp *textproto.Conn, conn net.Conn, start time.Time) ( if conn != nil { conn.Close() } - dlog(always, "ERROR AUTH#1 Cmd(AUTHINFO USER ...) '%s' retry in %.0fs err='%v' ", c.provider.Name, DefaultConnectErrSleep.Seconds(), err) - //time.Sleep(DefaultConnectErrSleep) + //dlog(always, "ERROR AUTH#1 Cmd(AUTHINFO USER ...) '%s' no retry err='%v' ", c.provider.Name, err) return nil, err } time2 := time.Now() // await response from server srvtp.StartResponse(id) - code, msg, err = srvtp.ReadCodeLine(nntpMoreInfoCode) // 381 is the code for "more information required" + _, _, err = srvtp.ReadCodeLine(nntpMoreInfoCode) // 381 is the code for "more information required" srvtp.EndResponse(id) if err != nil { if conn != nil { conn.Close() } - dlog(always, "ERROR AUTH#2 ReadCodeLine(381) step#2 '%s' retry in %.0fs code=%d msg='%s' err='%v'", c.provider.Name, DefaultConnectErrSleep.Seconds(), code, msg, err) - time.Sleep(DefaultConnectErrSleep) + //dlog(always, "ERROR AUTH#2 ReadCodeLine(381) step#2 '%s' code=%d msg='%s' err='%v'", c.provider.Name, code, msg, err) return nil, err } time3 := time.Now() @@ -292,21 +284,19 @@ func (c *ConnPool) auth(srvtp *textproto.Conn, conn net.Conn, start time.Time) ( if conn != nil { conn.Close() } - dlog(always, "ERROR AUTH#3 Cmd(AUTHINFO PASS ...) '%s' retry in %.0fs err='%v'", c.provider.Name, DefaultConnectErrSleep.Seconds(), err) - //time.Sleep(DefaultConnectErrSleep) + //dlog(always, "ERROR AUTH#3 Cmd(AUTHINFO PASS ...) 
'%s' no retry err='%v'", c.provider.Name, err) return nil, err } time4 := time.Now() // await response from server srvtp.StartResponse(id) - code, msg, err = srvtp.ReadCodeLine(nntpAuthSuccess) // 281 is the code for "authentication successful" + _, _, err = srvtp.ReadCodeLine(nntpAuthSuccess) // 281 is the code for "authentication successful" srvtp.EndResponse(id) if err != nil { if conn != nil { conn.Close() } - dlog(always, "ERROR AUTH#4 ReadCodeLine(281) '%s' retry in %.0fs code=%d msg='%s' err='%v'", c.provider.Name, DefaultConnectErrSleep.Seconds(), code, msg, err) - //time.Sleep(DefaultConnectErrSleep) + //dlog(always, "ERROR AUTH#4 ReadCodeLine(281) '%s' no retry err='%v'", c.provider.Name, err) return nil, err } time5 := time.Now() @@ -323,55 +313,33 @@ func (c *ConnPool) NewConn() (connitem *ConnItem, err error) { // another routine was faster... dlog(cfg.opt.DebugConnPool, "ConnPool NewConn: another routine was faster! openConns=%d provider.MaxConns=%d '%s'", c.openConns, c.provider.MaxConns, c.provider.Name) c.mux.Unlock() // mutex c459a - err = fmt.Errorf("retry getConn, not newConn! all conns are already established") - return + return nil, fmt.Errorf("retry getConn, not newConn! 
all conns are already established") } if c.openConns < c.provider.MaxConns { dlog(cfg.opt.DebugConnPool, "ConnPool NewConn: connect to '%s' openConns=%d provider.MaxConns=%d", c.provider.Name, c.openConns, c.provider.MaxConns) - c.openConns++ openConnsBefore := c.openConns + c.openConns++ c.mux.Unlock() // mutex c459a start := time.Now() // connect to the provider - if c.provider.MaxConnErrors <= 0 { - for { - connitem, err = c.connect() - if err != nil || connitem == nil || connitem.conn == nil { - continue // retry connect - } - if cfg.opt.DebugConnPool { - c.mux.RLock() // mutex c461 - dlog(always, "ConnPool NewConn: got new connid=%d '%s' openConns after=%d/%d before=%d connectTook=(%d ms) err='%v", connitem.connid, c.provider.Name, c.openConns, c.provider.MaxConns, openConnsBefore, time.Since(start).Milliseconds(), err) - c.mux.RUnlock() // mutex c461 - } - // we have a new connection! - GCounter.Incr("TOTAL_NewConns") - return // established new connection and returns connitem - } - } else { - for retried := 0; retried < c.provider.MaxConnErrors; retried++ { - connitem, err = c.connect() - if err != nil || connitem == nil || connitem.conn == nil { - continue // retry connect - } - if cfg.opt.DebugConnPool { - c.mux.RLock() // mutex c461 - dlog(always, "ConnPool NewConn: got new connid=%d '%s' openConns after=%d/%d before=%d connectTook=(%d ms) err='%v", connitem.connid, c.provider.Name, c.openConns, c.provider.MaxConns, openConnsBefore, time.Since(start).Milliseconds(), err) - c.mux.RUnlock() // mutex c461 - } - // we have a new connection! 
- GCounter.Incr("TOTAL_NewConns") - return // established new connection and returns connitem - } + connitem, err = c.connect() + if err != nil || connitem == nil || connitem.conn == nil { + c.mux.Lock() // mutex c460 + c.openConns-- + dlog(cfg.opt.DebugConnPool, "ERROR in ConnPool NewConn: connect failed '%s' openConns=%d connitem='%v' err='%v'", c.provider.Name, c.openConns, connitem, err) + c.mux.Unlock() // mutex c460 + return nil, fmt.Errorf("error in ConnPool NewConn: connect failed '%s' err='%v'", c.provider.Name, err) } - c.mux.Lock() // mutex c460 - c.openConns-- - dlog(always, "ERROR in ConnPool NewConn: connect failed '%s' openConns=%d connitem='%v' err='%v'", c.provider.Name, c.openConns, connitem, err) - c.mux.Unlock() // mutex c460 - return + c.mux.RLock() // mutex c461 + dlog(cfg.opt.DebugConnPool, "ConnPool NewConn: got new connid=%d '%s' openConns after=%d/%d before=%d connectTook=(%d ms) err='%v'", connitem.connid, c.provider.Name, c.openConns, c.provider.MaxConns, openConnsBefore, time.Since(start).Milliseconds(), err) + c.mux.RUnlock() // mutex c461 + // we have a new connection! 
+ GCounter.Incr("TOTAL_NewConns") + go c.provider.IncrementOpenConns() + return connitem, nil // established new connection and returns connitem } c.mux.Unlock() // mutex c459a - return + return nil, fmt.Errorf("error in ConnPool NewConn: openConns=%d > provider.MaxConns", c.openConns) } // end func NewConn func (c *ConnPool) GetConn() (connitem *ConnItem, err error) { @@ -427,7 +395,7 @@ getConnFromPool: dlog(always, "INFO ConnPool GetConn: got long idle=(%d ms) '%s', close and get NewConn", time.Since(connitem.parktime).Milliseconds(), c.provider.Name) c.CloseConn(connitem, nil) connitem, err = c.NewConn() - if connitem == nil || err != nil { + if err != nil || connitem == nil || connitem.conn == nil { continue getConnFromPool } // extend the read deadline of the new connection @@ -474,16 +442,13 @@ getConnFromPool: // try to open a new connection newconnitem, aerr := c.NewConn() - if newconnitem == nil || aerr != nil { + if aerr != nil || newconnitem == nil || newconnitem.conn == nil { dlog(cfg.opt.DebugConnPool, "WARN in ConnPool GetConn: NewConn failed '%s' connitem='%v' aerr='%v'", c.provider.Name, newconnitem, aerr) time.Sleep(DefaultConnectErrSleep) // wait a bit before retrying retried++ if retried >= c.provider.MaxConnErrors { // we have retried too often - dlog(always, "ERROR in ConnPool GetConn: retried %d times to get a new conn '%s' giving up! aerr='%v'", retried, c.provider.Name, aerr) - //c.mux.Lock() // mutex c457 - //c.openConns-- // decrease open conns as we failed to get a new one - //c.mux.Unlock() // mutex c457 + dlog(cfg.opt.DebugConnPool, "ERROR in ConnPool GetConn: retried %d times to get a new conn '%s' giving up! 
aerr='%v'", retried, c.provider.Name, aerr) err = aerr break getConnFromPool // break out of the for loop } @@ -575,6 +540,7 @@ func (c *ConnPool) CloseConn(connitem *ConnItem, sharedConnChan chan *ConnItem) c.openConns-- dlog(cfg.opt.DebugConnPool, "DisConn '%s' inPool=%d open=%d", c.provider.Name, len(c.pool), c.openConns) c.mux.Unlock() + go c.provider.DecrementOpenConns() // if a sharedConnChan is supplied we send a nil to the channel // a nil as connitem signals the routines to get a new conn // mostly because conn was closed by network, protocol error or timeout @@ -734,16 +700,15 @@ func isNetworkUnreachable(err error) bool { } // end func isNetworkUnreachable (written by AI! GPT-4o) // GetNewSharedConnChannel creates a new shared connection channel. -// The sharedCC channel is used to share connections between (anonymous) goroutines which will work on the same item. +// The sharedCC channel is used to share connections between (anonymous) goroutines // which will work on the same item. 
func GetNewSharedConnChannel(wid int, provider *Provider) (sharedCC chan *ConnItem, err error) { - sharedCC = make(chan *ConnItem, 1) // buffered channel with size 1 connitem, err := provider.ConnPool.GetConn() // get an idle or new connection from the pool if err != nil { - dlog(always, "ERROR a GoWorker (%d) failed to connect '%s' err='%v'", wid, provider.Name, err) + dlog(cfg.opt.DebugWorker, "ERROR in GoWorker (%d) GetNewSharedConnChannel failed to connect '%s' err='%v'", wid, provider.Name, err) return nil, err } - //sharedCC <- nil // put a nil item into the channel to signal that no connection is available yet + sharedCC = make(chan *ConnItem, 1) // shares 1 connection between goroutines sharedCC <- connitem return sharedCC, nil } @@ -774,9 +739,7 @@ func SharedConnGet(sharedCC chan *ConnItem, provider *Provider) (connitem *ConnI } if !needNewConn && aconnitem != nil && aconnitem.conn != nil { // we have a valid connection, use it - if cfg.opt.BUG { - dlog(cfg.opt.DebugConnPool, "SharedConnGet: got shared connection '%s' aconnitem='%#v'", provider.Name, aconnitem) - } + dlog(cfg.opt.DebugConnPool, "SharedConnGet: got shared connection '%s' aconnitem='%#v'", provider.Name, aconnitem) provider.ConnPool.ExtendConn(connitem) return aconnitem, nil } @@ -787,13 +750,13 @@ func SharedConnGet(sharedCC chan *ConnItem, provider *Provider) (connitem *ConnI newconnitem, err := provider.ConnPool.GetConn() if err != nil || newconnitem == nil || newconnitem.conn == nil { // unable to get a new connection, put nil back into sharedCC - dlog(always, "ERROR SharedConnGet connect failed Provider '%s' err='%v'", provider.Name, err) + dlog(cfg.opt.DebugConnPool, "ERROR SharedConnGet connect failed Provider '%s' err='%v'", provider.Name, err) sharedCC <- nil // put nil back into sharedCC - return + return nil, err } connitem = newconnitem // use the new connection item provider.ConnPool.ExtendConn(connitem) - return + return connitem, nil } // end func SharedConnGet // 
SharedConnReturn puts a connection back into the sharedCC channel. @@ -850,7 +813,7 @@ func Speedmeter(byteSize int64, cp *ConnPool, cnt *Counter_uint64, workerWGconnR case cp == nil && cnt == nil: name, group, counter = "Global", "Speedmeter", GCounter case cp != nil && cnt == nil: - name, group, counter = cp.provider.Name, cp.provider.Group, cp.counter + name, group, counter = cp.provider.Name, cp.provider.Group, cp.Counter case cp == nil && cnt != nil: name, group, counter = "Global", "Speedmeter", cnt default: @@ -873,6 +836,16 @@ func Speedmeter(byteSize int64, cp *ConnPool, cnt *Counter_uint64, workerWGconnR txSpeed, txMbps := ConvertSpeed(int64(tmpTX), PrintStats) dlPerc := int(float64(totalRX) / float64(byteSize) * 100) upPerc := int(float64(totalTX) / float64(byteSize) * 100) + + // Store speed values in provider struct for web UI (if this is a provider-specific speedmeter) + if cp != nil && cp.provider != nil { + cp.provider.mux.Lock() + cp.provider.speed.downloadSpeed = float64(rxSpeed) * 1024 // convert KiB/s to bytes/sec + cp.provider.speed.uploadSpeed = float64(txSpeed) * 1024 // convert KiB/s to bytes/sec + cp.provider.speed.lastUpdated = time.Now().Unix() + cp.provider.mux.Unlock() + } + printSpeedTable(dlPerc, upPerc, totalRX, totalTX, rxSpeed, txSpeed, rxMbps, txMbps, name, group) } diff --git a/Flags.go b/Flags.go index ac41c16..dc25e99 100644 --- a/Flags.go +++ b/Flags.go @@ -7,15 +7,20 @@ import ( "fmt" "io" "log" + mrand "math/rand" "os" "path/filepath" "runtime" + "strconv" + "strings" "time" "github.com/go-while/NZBreX/rapidyenc" prof "github.com/go-while/go-cpu-mem-profiler" ) +var AddUserDataToProxy string // AddUserToProxy is a flag to add user to proxy + func ParseFlags() { flag.BoolVar(&version, "version", false, "prints app version") // essentials @@ -80,6 +85,12 @@ func ParseFlags() { flag.IntVar(&cfg.opt.MaxArtSize, "maxartsize", DefaultMaxArticleSize, "limits article size to 1M (mostly articles have ~700K only)") 
flag.BoolVar(&testmode, "zzz-shr-testmode", false, "[true|false] only used to test compilation on self-hosted runners (default: false)") flag.BoolVar(&testrapidyenc, "testrapidyenc", false, "[true|false] will test rapidyenc testfiles on boot and exit (default: false)") + flag.IntVar(&cfg.opt.ProxyTCP, "proxytcp", 0, "if set: use this port (e.g.: 1119) for TCP proxying (default: 0 = no proxy)") + flag.IntVar(&cfg.opt.ProxyTLS, "proxytls", 0, "if set: use this port (e.g.: 1563) for TLS proxying (default: 0 = no proxy)") + flag.StringVar(&cfg.opt.TLSCertPem, "tlscrt", "fullchain.pem", "path to TLS certificate file for proxy (default: fullchain.pem in current dir)") + flag.StringVar(&cfg.opt.TLSPrivKey, "tlskey", "privkey.pem", "path to TLS private key file for proxy (default: privkey.pem in current dir)") + flag.StringVar(&AddUserDataToProxy, "proxyadduser", "", "if set: adds this user to proxy ( \ne.g.: -proxyadduser='username1234|password4567|maxconns|expires|(no)post'\n ) \nPassword entered MUST be cleartext, hashing is done when writing to .passwd file!\n Expiration: you can use a 'number of days' with 'd' at the end (e.g. 7d, 30d, 365d) or 'h' for hours (1h, 12h, ...) or 'm' minutes (1m, 30m, ...), it will be converted to a valid unixtimestamp expiring in the future from now on! (default: empty = no user added)\nExample -proxyadduser \"HelloWorld|NotAsecurePassword|5|42d|nopost\" creates a user with 5 conns and 42 days to expiration") + flag.StringVar(&cfg.opt.ProxyPasswdFile, "proxypasswdfile", ".proxypasswd", "if set: use this file for proxy (default: .proxypasswd in current dir)") // cosmetics: segmentBar needs fixing: only when everything else works! 
//flag.BoolVar(&cfg.opt.Bar, "bar", false, "show progress bars") // FIXME TODO //flag.BoolVar(&cfg.opt.Colors, "colors", false, "adds colors to s") // FIXME TODO @@ -94,6 +105,83 @@ func ParseFlags() { Prof = prof.NewProf() RunProf() } + if cfg.opt.ProxyPasswdFile == "" { + cfg.opt.ProxyPasswdFile = ".proxypasswd" // default passwd file for proxy + } + if AddUserDataToProxy != "" { + // Format: 'user|password|maxconns|expires|(no)post' (expires is unix timestamp) + parts := strings.SplitN(AddUserDataToProxy, "|", 5) + if len(parts) != 5 { + log.Fatalf("-proxyadduser expects format: user|password|maxconns|expires|(no)post (got: %q)", AddUserDataToProxy) + } + var username, password string + Ausername := strings.TrimSpace(parts[0]) + Apassword := strings.TrimSpace(parts[1]) + if Ausername == "auto" && Apassword == "auto" { + // create auto user with random password + username, password, _ = generateRandomHexCredentials() + } else { + username = Ausername + password = Apassword + } + if len(username) < 10 { + log.Fatalf("-proxyadduser: username must be at least 10 characters long, got %d characters", len(username)) + } + if len(password) < 10 { + log.Fatalf("-proxyadduser: password must be at least 10 characters long, got %d characters", len(password)) + } + if Ausername == "auto" && Apassword == "auto" { + dlog(always, "-proxyadduser: auto generated credentials: username='%s' password='%s'", username, password) + } + maxconns, err1 := strconv.Atoi(parts[2]) + expiresStr := strings.TrimSpace(parts[3]) + var expires int64 + if strings.HasSuffix(expiresStr, "d") { + days, err := strconv.ParseInt(strings.TrimSuffix(expiresStr, "d"), 10, 64) + if err != nil { + log.Fatalf("-proxyadduser: invalid expires (days): %s", expiresStr) + } + expires = time.Now().Add(time.Duration(days) * 24 * time.Hour).Unix() + } else if strings.HasSuffix(expiresStr, "h") { + hours, err := strconv.ParseInt(strings.TrimSuffix(expiresStr, "h"), 10, 64) + if err != nil { + 
log.Fatalf("-proxyadduser: invalid expires (hours): %s", expiresStr) + } + expires = time.Now().Add(time.Duration(hours) * time.Hour).Unix() + } else if strings.HasSuffix(expiresStr, "m") { + minutes, err := strconv.ParseInt(strings.TrimSuffix(expiresStr, "m"), 10, 64) + if err != nil { + log.Fatalf("-proxyadduser: invalid expires (minutes): %s", expiresStr) + } + expires = time.Now().Add(time.Duration(minutes) * time.Minute).Unix() + } else { + expiresInt, err2 := strconv.ParseInt(parts[3], 10, 64) + if err1 != nil || err2 != nil { + log.Fatalf("-proxyadduser: invalid maxconns or expires: maxconns='%s' expires='%s'", parts[2], parts[3]) + } + expires = expiresInt + } + if username == "" || password == "" { + log.Fatalf("-proxyadduser: username and password must not be empty") + } + if expires < time.Now().Unix() { + log.Fatalf("-proxyadduser: expires must be a future timestamp, got %d", expires) + } + // Compose new passwd file name + newPasswdFile := fmt.Sprintf("%s.new.%d.%d", cfg.opt.ProxyPasswdFile, time.Now().Unix(), mrand.Intn(65535)) + userData := &UserData{ + Username: username, + Password: password, + MaxConns: maxconns, + ExpireAt: expires, + Posting: strings.HasPrefix(strings.ToLower(parts[4]), "post"), + } + if err := addUserToProxyPasswdFile(userData, newPasswdFile); err != nil { + log.Fatalf("Failed to add user to %s: %v", newPasswdFile, err) + } + // Redact password before logging + os.Exit(0) + } // test rapidyenc decoder if testrapidyenc { // this is only for testing rapidyenc decoder @@ -218,6 +306,10 @@ func ParseFlags() { os.Exit(1) } + if cfg.opt.ProxyTCP > 0 || cfg.opt.ProxyTLS > 0 { + proxy = true + } + dlog(cfg.opt.Verbose, "Settings: '%#v'", *cfg.opt) } // end func ParseFlags diff --git a/MemLimit.go b/MemLimit.go index 90450a0..696ae8e 100644 --- a/MemLimit.go +++ b/MemLimit.go @@ -15,6 +15,7 @@ package main */ import ( + "log" "os" "github.com/go-while/go-loggedrwmutex" @@ -71,6 +72,13 @@ func (m *MemLimiter) MemAvail() (retbool bool) { 
} func (m *MemLimiter) MemLockWait(item *segmentChanItem, who string) { + item.mux.Lock() + if item.memlocked > 0 { + item.mux.Unlock() + dlog(always, "MemLockWait called on already memlocked item seg.Id='%s' who='%s'", item.segment.Id, who) + return + } + item.mux.Unlock() GCounter.Incr("MemLockWait") defer GCounter.Decr("MemLockWait") @@ -102,6 +110,9 @@ func (m *MemLimiter) MemLockWait(item *segmentChanItem, who string) { } // end for waithere */ <-m.memchan // infinite wait to get a slot from chan + item.mux.Lock() + item.memlocked++ + item.mux.Unlock() m.mux.Lock() m.waiting-- @@ -114,7 +125,13 @@ func (m *MemLimiter) MemLockWait(item *segmentChanItem, who string) { // it is also called if no upload and is written to cache func (m *MemLimiter) MemReturn(who string, item *segmentChanItem) { //dlog(cfg.opt.DebugMemlim, "MemReturn free seg.Id='%s' who='%s'", item.segment.Id, who) - defer GCounter.Incr("TOTAL_MemReturned") + item.mux.RLock() + if item.memlocked == 0 { + dlog(always, "MemReturn called on non-memlocked item seg.Id='%s' who='%s'", item.segment.Id, who) + item.mux.RUnlock() + return // not memlocked, nothing to do + } + item.mux.RUnlock() // remove map entry from mem m.mux.Lock() @@ -124,12 +141,14 @@ func (m *MemLimiter) MemReturn(who string, item *segmentChanItem) { // return the slot select { case m.memchan <- struct{}{}: // return mem slot into chan - //pass + GCounter.Incr("TOTAL_MemReturned") default: // wtf chan is full?? that's a bug! - dlog(always, "ERROR on MemReturn chan is full seg.Id='%s' who='%s'", item.segment.Id, who) - os.Exit(1) // this is a bug! we should never return a slot to a full chan! + // this is a bug! we should never return a slot to a full chan! 
+ log.Fatalf("ERROR on MemReturn chan is full seg.Id='%s' who='%s'", item.segment.Id, who) } - + item.mux.Lock() + item.memlocked-- + item.mux.Unlock() dlog(cfg.opt.DebugMemlim, "MemReturned seg.Id='%s' who='%s'", item.segment.Id, who) } // end func memlim.MemReturn diff --git a/NetConn.go b/NetConn.go index 9137595..e999743 100644 --- a/NetConn.go +++ b/NetConn.go @@ -47,16 +47,16 @@ var itemReadLineCommands = map[string]struct{}{ } // CMD_STAT checks if the article exists on the server -// and returns the status code or an error if it fails. -func CMD_STAT(connitem *ConnItem, item *segmentChanItem) (int, error) { +// and returns the status code, message and an error if it fails. +func CMD_STAT(connitem *ConnItem, item *segmentChanItem) (int, string, error) { if connitem == nil || connitem.conn == nil || connitem.srvtp == nil { - return 0, fmt.Errorf("error CMD_STAT srvtp=nil") + return 0, "", fmt.Errorf("error CMD_STAT srvtp=nil") } start := time.Now() - id, err := connitem.srvtp.Cmd("STAT <%s>", item.segment.Id) + id, err := connitem.srvtp.Cmd("STAT %s", item.segment.Id) if err != nil { dlog(always, "ERROR checkMessageID @ '%s' srvtp.Cmd err='%v'", connitem.c.provider.Name, err) - return 0, err + return 0, "", err } connitem.srvtp.StartResponse(id) code, msg, err := connitem.srvtp.ReadCodeLine(223) @@ -65,67 +65,72 @@ func CMD_STAT(connitem *ConnItem, item *segmentChanItem) (int, error) { case 223: // article exists... or should! 
dlog(cfg.opt.DebugSTAT, "CMD_STAT +OK+ seg.Id='%s' @ '%s'", item.segment.Id, connitem.c.provider.Name) - return code, nil + return code, msg, nil case 430: // "430 No Such Article" dlog(cfg.opt.DebugSTAT, "CMD_STAT -NO- seg.Id='%s' @ '%s'", item.segment.Id, connitem.c.provider.Name) - return code, nil + return code, msg, nil case 451: dlog(cfg.opt.DebugSTAT, "CMD_STAT got DMCA code=451 seg.Id='%s' @ '%s' msg='%s'", item.segment.Id, connitem.c.provider.Name, msg) - return code, nil + return code, msg, nil } - return code, fmt.Errorf("error CMD_STAT returned unknown code=%d msg='%s' @ '%s' reqTook='%v' err='%v'", code, msg, connitem.c.provider.Name, time.Since(start), err) + return code, msg, fmt.Errorf("error CMD_STAT returned unknown code=%d msg='%s' @ '%s' reqTook='%v' err='%v'", code, msg, connitem.c.provider.Name, time.Since(start), err) } // end func CMD_STAT // CMD_ARTICLE fetches the article from the server. // It returns the response code, message, size of the article, and any error encountered. // If the article is successfully fetched, it will be stored in item.article. 
-func CMD_ARTICLE(connitem *ConnItem, item *segmentChanItem) (int, string, uint64, error) { +func CMD(connitem *ConnItem, item *segmentChanItem, command string) (int, string, uint64, error) { if connitem == nil || connitem.conn == nil || connitem.srvtp == nil { - return 0, "", 0, fmt.Errorf("error in CMD_ARTICLE srvtp=nil") + return 0, "", 0, fmt.Errorf("error in CMD srvtp=nil") } - if debugthis { - return 220, "fake article", 1234, nil + rcode := 0 + switch command { + case cmdARTICLE: + rcode = 220 + case cmdHEAD: + rcode = 221 + case cmdBODY: + rcode = 222 } - start := time.Now() - id, aerr := connitem.srvtp.Cmd("ARTICLE <%s>", item.segment.Id) + id, aerr := connitem.srvtp.Cmd("%s %s", command, item.segment.Id) if aerr != nil { - dlog(always, "ERROR in CMD_ARTICLE srvtp.Cmd @ '%s' err='%v'", connitem.c.provider.Name, aerr) + dlog(always, "ERROR in CMD srvtp.Cmd @ '%s' err='%v'", connitem.c.provider.Name, aerr) return 0, "", 0, aerr } connitem.srvtp.StartResponse(id) - code, msg, err := connitem.srvtp.ReadCodeLine(220) + code, msg, err := connitem.srvtp.ReadCodeLine(rcode) connitem.srvtp.EndResponse(id) switch code { - case 220: - // article is coming + case 220, 221, 222: + // article/head/body is coming // old textproto.ReadDotLines replaced with new function: readArticleDotLines // to clean up headers directly while fetching from network // and decoding yenc on the fly - rcode, rxb, _, err := readDotLines(connitem, item, cmdARTICLE) + rcode, rxb, _, err := readDotLines(connitem, item, command) if err != nil { - dlog(always, "ERROR in CMD_ARTICLE srvtp.ReadDotLines @ '%s' err='%v' code=%d rcode=%d", connitem.c.provider.Name, err, code, rcode) + dlog(always, "ERROR in CMD %s srvtp.ReadDotLines @ '%s' err='%v' code=%d rcode=%d", command, connitem.c.provider.Name, err, code, rcode) return code, "", uint64(rxb), err } - dlog(cfg.opt.DebugARTICLE, "CMD_ARTICLE seg.Id='%s' @ '%s' msg='%s' rxb=%d lines=%d code=%d dlcnt=%d fails=%d", item.segment.Id, 
connitem.c.provider.Name, msg, item.size, len(item.article), code, item.dlcnt, item.fails) + dlog(cfg.opt.DebugARTICLE, "CMD %s seg.Id='%s' @ '%s' msg='%s' rxb=%d lines=%d code=%d dlcnt=%d fails=%d", command, item.segment.Id, connitem.c.provider.Name, msg, item.size, len(item.article), code, item.dlcnt, item.fails) if rcode == 99932 { code = 99932 // bad crc32 } return code, msg, uint64(rxb), nil case 430: - dlog(cfg.opt.DebugARTICLE, "INFO CMD_ARTICLE:430 seg.Id='%s' @ '%s' msg='%s' err='%v' dlcnt=%d fails=%d", item.segment.Id, connitem.c.provider.Name, msg, err, item.dlcnt, item.fails) + dlog(cfg.opt.DebugARTICLE, "INFO CMD %s:430 seg.Id='%s' @ '%s' msg='%s' err='%v' dlcnt=%d fails=%d", command, item.segment.Id, connitem.c.provider.Name, msg, err, item.dlcnt, item.fails) return code, msg, 0, nil // not an error, just no such article case 451: - dlog((cfg.opt.Verbose || cfg.opt.Print430), "INFO CMD_ARTICLE:451 seg.Id='%s' @ '%s' msg='%s' err='%v' dlcnt=%d fails=%d", item.segment.Id, connitem.c.provider.Name, msg, err, item.dlcnt, item.fails) + dlog((cfg.opt.Verbose || cfg.opt.Print430), "INFO CMD %s:451 seg.Id='%s' @ '%s' msg='%s' err='%v' dlcnt=%d fails=%d", command, item.segment.Id, connitem.c.provider.Name, msg, err, item.dlcnt, item.fails) return code, msg, 0, nil // not an error, just DMCA default: // returns the unknown code with an error! } - return code, msg, 0, fmt.Errorf("error in CMD_ARTICLE got unknown code=%d msg='%s' @ '%s' reqTook='%v' err='%v'", code, msg, connitem.c.provider.Name, time.Since(start), err) + return code, msg, 0, fmt.Errorf("error in CMD %s got unknown code=%d msg='%s' @ '%s' reqTook='%v' err='%v'", command, code, msg, connitem.c.provider.Name, time.Since(start), err) } // end func CMD_ARTICLE // CMD_IHAVE sends an article to the server using the IHAVE command. 
@@ -154,7 +159,7 @@ func CMD_IHAVE(connitem *ConnItem, item *segmentChanItem) (int, string, uint64, * 437 Transfer rejected; do not retry */ wireformat := false // not implemented. read below in: case true - id, err := connitem.srvtp.Cmd("IHAVE <%s>", item.segment.Id) + id, err := connitem.srvtp.Cmd("IHAVE %s", item.segment.Id) if err != nil { dlog(always, "ERROR CMD_IHAVE @ '%s' srvtp.Cmd err='%v'", connitem.c.provider.Name, err) return 0, "", 0, err @@ -327,26 +332,26 @@ func CMD_POST(connitem *ConnItem, item *segmentChanItem) (int, string, uint64, e // The function supports parsing headers, handling continued lines, and cleaning headers based on the configuration. // It also supports yenc decoding if enabled in the configuration. // WARNING: TODO! Be careful when reading X/OVER or X/HDR on large groups because you may run out of memory! -func readDotLines(connitem *ConnItem, item *segmentChanItem, what string) (code int, rxb int, content []string, err error) { +func readDotLines(connitem *ConnItem, item *segmentChanItem, command string) (code int, rxb int, content []string, err error) { if connitem.conn == nil || connitem.srvtp == nil { connitem.c.CloseConn(connitem, nil) return 0, 0, nil, fmt.Errorf("error readArticleDotLines: conn or srvtp nil @ '%s'", connitem.c.provider.Name) } - if !IsItemCommand(what) && !IsOtherCommand(what) { + if !IsItemCommand(command) && !IsOtherCommand(command) { // if not an item command or other command! this is a bug! // we just die here because returning an error will f***up the connection as it is already receiving lines from remote - log.Printf("error readArticleDotLines: invalid command '%s'", what) + log.Printf("error readArticleDotLines: invalid command '%s'", command) os.Exit(1) } - if IsOtherCommand(what) && item != nil { + if IsOtherCommand(command) && item != nil { // do not submit an item for other commands! this is a bug! 
// we just die here because returning an error will f***up the connection as it is already receiving lines from remote - log.Printf("error readArticleDotLines: do not submit an item for command '%s'", what) + log.Printf("error readArticleDotLines: do not submit an item for command '%s'", command) os.Exit(1) } var decoder *yenc.Decoder var parseHeader, ignoreNextContinuedLine, gotYencHeader, gotMultipart, brokenYenc bool // = false, false, false, false - if what == cmdARTICLE || what == cmdHEAD { + if command == cmdARTICLE || command == cmdHEAD { parseHeader = true } async, sentLinesToDecoder := false, 0 // initialize async and sentLinesToDecoder, will be set depending on -yenctest=1,2,3,4 @@ -473,28 +478,32 @@ readlines: // see every line thats coming in //dlog( "readArticleDotLines: seg.Id='%s' line='%s'", segment.Id, line) rxb += len(line) - if IsItemCommand(what) && rxb > cfg.opt.MaxArtSize { + if IsItemCommand(command) && rxb > cfg.opt.MaxArtSize { // max article size reached, stop reading // this is a DoS protection, so we do not read more than maxartsize - err = fmt.Errorf("error readDotLines: maxartsize=%d > rxb=%d seg.Id='%s' what='%s'", cfg.opt.MaxArtSize, rxb, item.segment.Id, what) + err = fmt.Errorf("error readDotLines: maxartsize=%d > rxb=%d seg.Id='%s' what='%s'", cfg.opt.MaxArtSize, rxb, item.segment.Id, command) log.Print(err) connitem.c.CloseConn(connitem, nil) return 0, rxb, nil, err } - // found final dot in line, break here - if len(line) == 1 && line == "." 
{ - break - } - - if parseHeader && len(line) == 0 { + if (parseHeader && len(line) == 0) || (command == cmdHEAD && len(line) == 1 && line == ".") { // reading header ends here parseHeader = false + if !proxy { + // add new headers for ignored ones + now := time.Now().Format(time.RFC1123Z) + datestr := fmt.Sprintf("Date: %s", now) + content = append(content, datestr) + } else { + //content = append(content, "X-NZBreX: "+appVersion) + } + content = append(content, "Path: not-for-mail") + } - // add new headers for ignored ones - now := time.Now().Format(time.RFC1123Z) - datestr := fmt.Sprintf("Date: %s", now) - content = append(content, datestr) + // found final dot in line, break here + if len(line) == 1 && line == "." { + break } if parseHeader { @@ -511,6 +520,11 @@ readlines: if cfg.opt.CleanHeaders { // ignore headers from cleanHeader slice for _, key := range cleanHeader { + if proxy && key == "Date:" { + // if we are a proxy, we do not want to clean the Date header + // because we want to keep the original Date header from the article + continue + } if strings.HasPrefix(line, key) { ignoreNextContinuedLine = true dlog(cfg.opt.DebugARTICLE, "cleanHeader: seg.ID='%s' ignore key='%s'", item.segment.Id, key) @@ -522,9 +536,9 @@ readlines: content = append(content, line) } // end parseHeader - if !parseHeader && what != cmdHEAD { + if !parseHeader && command != cmdHEAD { i++ // counts body lines - if what == cmdARTICLE || what == cmdBODY { + if command == cmdARTICLE || command == cmdBODY { // dot-stuffing on received lines /* Receiver Side: How to Handle Dot-Stuffing @@ -540,12 +554,12 @@ readlines: } } - if what == cmdARTICLE || what == cmdBODY || IsOtherCommand(what) { + if command == cmdARTICLE || command == cmdBODY || IsOtherCommand(command) { // if we are in ARTICLE or BODY or any other multiline command, we store the line content = append(content, line) } - if cfg.opt.YencCRC && !brokenYenc && (what == cmdARTICLE || what == cmdBODY) { + if cfg.opt.YencCRC && 
!brokenYenc && (command == cmdARTICLE || command == cmdBODY) { switch cfg.opt.YencTest { case 1: // case 1 needs double the memory @@ -668,9 +682,9 @@ readlines: } else if async && decodeBodyChan != nil { // case 3: close(decodeBodyChan) // close the channel to signal we are done with reading lines } - dlog(cfg.opt.Debug, "readDotLines: seg.Id='%s' rxb=%d content=(%d lines) took=(%d µs) what='%s'", item.segment.Id, rxb, len(content), time.Since(startReadLines).Microseconds(), what) + dlog(cfg.opt.Debug, "readDotLines: seg.Id='%s' rxb=%d content=(%d lines) took=(%d µs) what='%s'", item.segment.Id, rxb, len(content), time.Since(startReadLines).Microseconds(), command) - if cfg.opt.YencCRC && !brokenYenc && (what == cmdARTICLE || what == cmdBODY) { + if cfg.opt.YencCRC && !brokenYenc && (command == cmdARTICLE || command == cmdBODY) { yencstart := time.Now() var startReadSignals time.Time var isBadCrc bool @@ -862,8 +876,14 @@ readlines: dlog(cfg.opt.DebugRapidYenc, "readDotLines: rapidyenc yenc.Part.Validate OK seg.Id='%s' @ '%s' part.Body=%d Number=%d crc32=%x", item.segment.Id, connitem.c.provider.Name, len(part.Body), part.Number, part.Crc32) } // end if cfg.opt.DoubleCheckRapidYencCRC - // Now write to cache - cache.WriteYenc(item, part) + // Now write to cache (only if yencout flag is enabled) + if cfg.opt.YencWrite && cacheON { + cache.WriteYenc(item, part) + } else { + // Free memory if not writing to cache + part.Body = nil + part = nil + } } // end switch yencTest dlog(cfg.opt.DebugWorker, "readDotLines: YencCRC yenctest=%d brokenYenc=%t seg.Id='%s' @ '%s' rxb=%d content=(%d lines) Part.Validate:took=(%d µs) readDotLines:took=(%d µs) startReadSignals:took=(%d µs) cfg.opt.YencWrite=%t err='%v'", cfg.opt.YencTest, brokenYenc, item.segment.Id, connitem.c.provider.Name, rxb, len(content), time.Since(startReadLines).Microseconds(), time.Since(yencstart).Microseconds(), time.Since(startReadSignals).Microseconds(), cfg.opt.YencWrite, err) if isBadCrc || brokenYenc { 
@@ -880,10 +900,12 @@ readlines: // so we can return the content dlog(cfg.opt.DebugARTICLE, "readDotLines: seg.Id='%s' @ '%s' rxb=%d content=(%d lines)", item.segment.Id, connitem.c.provider.Name, rxb, len(content)) - item.mux.Lock() - defer item.mux.Unlock() + if !proxy && item.mux != nil { + item.mux.Lock() + defer item.mux.Unlock() + } - switch what { + switch command { case cmdARTICLE: item.article = content item.size = rxb @@ -896,9 +918,9 @@ readlines: item.headsize = rxb default: // handle multi-line dot-terminated command - if !IsOtherCommand(what) { + if !IsOtherCommand(command) { // error unknown command - return 0, rxb, nil, fmt.Errorf("error readDotLines parsed unknown command '%s' @ '%s'", what, connitem.c.provider.Name) + return 0, rxb, nil, fmt.Errorf("error readDotLines parsed unknown command '%s' @ '%s'", command, connitem.c.provider.Name) } // these commands are not stored in item, but will be returned as content // pass @@ -906,7 +928,7 @@ readlines: // reaching here means we have read the article or body or head // or any other command that is not stored in the item - if !IsOtherCommand(what) { + if !IsOtherCommand(command) { // clears content on head, body or article content = nil // clear content if not an other command } // else pass diff --git a/Proxy.go b/Proxy.go new file mode 100644 index 0000000..60b5e82 --- /dev/null +++ b/Proxy.go @@ -0,0 +1,792 @@ +package main + +import ( + "bufio" + "crypto/tls" + "fmt" + "log" + mrand "math/rand" + "net" + "net/textproto" + "strconv" + "strings" + "sync" + "time" +) + +var ( + nntpWelcomeMessage = "ready" // 20x 'NNTP Welcome message' for NNTP clients. x will be set by the server: globalAllowPosting true|false + welcomeCode = 201 // don't change, will be set by server on boot to 200 or 201 depending on globalAllowPosting + // Allow posting by default, can be set to false to disable posting (e.g., for read-only mode) + // this is a global flag. if user/passwd config does not allow posting. 
user will not be able to post, even if this is true. + globalAllowPosting = true + + CID = uint64(0) // Global connection ID counter, can be used for session tracking - currently unused + + proxyMutex = &sync.RWMutex{} // proxyMutex is used to synchronize access to passwdMap, ProxySessions and CountConns + proxyCron = time.Now() // reload passwdMap every minute + passwdMap = make(map[string]*UserData) // passwdMap holds user credentials (k is username, v is UserData) + ProxySessions = make(map[string]*ProxySession) // ProxySessions map to hold active user sessions (k is username, v is ProxySession) + CountConns = make(map[string]int) // CountConns keeps track of active connections per user (k is username, v is count) + ProxyParent *SESSION // ProxyParent is the parent session for the proxy, used to link sessions to the main loop + CliRxTxCounter = make(map[string]*Counter_uint64) // RxTxCounter is a global counter for received and sent bytes (used for statistics) + CliRxTxMux = &sync.RWMutex{} // CliRxTxMux is used to synchronize access to CliRxTxCounter + statsChan = make(chan *statsItem, 1000) // statsChan is used to send segment items for statistics processing + articleNotFound = &ArticleNotFound{Map: make(map[string]map[string]*A430)} // Global variable to track articles not found by provider +) + +// ArticleNotFound is a map to track articles not found by provider (k is provider group, v is map of message IDs) +type ArticleNotFound struct { + mux sync.RWMutex // Mutex to protect access to the map + Map map[string]map[string]*A430 // Map of provider groups to message IDs not found +} + +type A430 struct { + expires time.Time // Expiration time for the A430 article not found +} +type statsItem struct { + username string // Username of the client + rxbytes uint64 // Received bytes + txbytes uint64 // Sent bytes + clear error // Clear is used to indicate that the stats item should be cleared +} + +// UserData holds user information. 
+// When loading from .passwd, Password will be the bcrypt hash string. +// When adding a new user via addUserToPasswdFile, Password should be plaintext to be hashed. +type UserData struct { + Username string + Password string // For loading: bcrypt hash. For adding new user: plaintext. + MaxConns int // Optional: max connections per user, if needed + ExpireAt int64 // Optional: expiration time for the user, if needed (Unix timestamp) + Posting bool // Optional: indicates if the user is allowed to post articles +} + +// ProxySession represents an active user session (currently a placeholder, expand as needed) +type ProxySession struct { + id uint64 // Unique session ID, can be used for tracking + mux sync.RWMutex // Mutex for session data access + cmdmux sync.RWMutex // Mutex for command handling + Authed bool // Indicates if the user is authenticated + Username string // Username of the authenticated user + Password string // password for the session, can be used for re-authentication + ExpireAt int64 // session expiration time (Unix timestamp) + Conn net.Conn // The user's network connection + Writer *bufio.Writer // bufio writer for the client connection to send articles, headers, bodies, list, xover, xhdr, ... 
(big data) + //tpReader *textproto.Reader // textproto reader for easier command handling + //tpWriter *textproto.Writer // textproto writer for easier command handling + CliTp *textproto.Conn // textproto connection for easier command handling + tmpRXBytes uint64 // proxy has RECEIVED this amount of bytes FROM CLIENT via POST/IHAVE/TAKETHIS in last 60 seconds + RXBytes uint64 // proxy has RECEIVED this amount of bytes FROM CLIENT via POST/IHAVE/TAKETHIS in total this session + tmpTXBytes uint64 // proxy has SENT this amount of bytes TO CLIENT via ARTICLE/HEAD/BODY in last 60 seconds + TXBytes uint64 // proxy has SENT this amount of bytes TO CLIENT via ARTICLE/HEAD/BODY in total this session + ConnectedAt time.Time // Timestamp when the session was created + LastCmd time.Time // Timestamp of the last command received + Group string // current group the user is in (used by GROUP command) + MsgNum int64 // current message number in the group (used by STAT, ARTICLE, etc. commands) + Cron time.Time // last run of periodic tasks, e.g., checking session expiration + selectedProvider *Provider // The provider selected for this session, used for routing commands + // Add other session-specific data here, e.g., current group, article pointer, etc. +} + +// StartNNTPServer initializes and starts the NNTP server. +// addr is the listen address (e.g., ":1119"). +// passwdFilePath is the path to the .passwd file. +// certFile is the path to the TLS certificate file (optional). +// keyFile is the path to the TLS private key file (optional). +func StartNNTPServer(addr string, passwdFilePath string, certFile string, keyFile string) { + time.Sleep(time.Duration(mrand.Intn(128)) * time.Millisecond) // Random delay to simulate server startup + err := loadPasswdFile(passwdFilePath) + if err != nil { + // Decide if server should start if passwd file is missing/corrupt. + // For this example, it logs a warning and continues (no users will be able to auth). 
+ dlog(always, "WARNING: Could not find passwd file '%s': %v. Server starting without users.", passwdFilePath, err) + dlog(always, "To manually add users, use the '-proxyadduser' command or craft a new .passwd file manually") + dlog(always, "Example -proxyadduser \"HelloWorld|NotAsecurePassword|5|42d\" creates a user with 5 conns and 42 days to expiration") + + } else if len(passwdMap) == 0 { + dlog(always, "Warning: Passwd file '%s' loaded, but no users found or all entries were invalid.", passwdFilePath) + } + + var listener net.Listener + + if certFile != "" && keyFile != "" { + dlog(always, "Attempting to start TLS NNTP server on %s", addr) + cer, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + log.Fatalf("Failed to load TLS key pair: %v. Falling back to non-TLS or specify valid cert/key.", err) + // As a fallback, could attempt non-TLS, but for now, we exit if TLS is configured but fails to load. + // To fallback, you might set a flag and then proceed to the non-TLS listener block. + return // Or handle fallback more gracefully + } + + config := &tls.Config{Certificates: []tls.Certificate{cer}} + listener, err = tls.Listen("tcp", addr, config) + if err != nil { + log.Fatalf("Failed to start TLS NNTP server on %s: %v", addr, err) + } + dlog(always, "TLS NNTP server listening on %s", addr) + } else { + dlog(always, "Starting non-TLS NNTP server on %s", addr) + listener, err = net.Listen("tcp", addr) + if err != nil { + log.Fatalf("Failed to start non-TLS NNTP server on %s: %v", addr, err) + } + dlog(always, "Non-TLS NNTP server listening on %s", addr) + } + + defer listener.Close() + + for { + conn, err := listener.Accept() + if err != nil { + // Check if the error is due to the listener being closed. 
+ if opError, ok := err.(*net.OpError); ok && opError.Err.Error() == "use of closed network connection" { + dlog(always, "Listener closed, shutting down accept loop.") + break // Exit loop if listener is closed + } + dlog(always, "Error accepting connection: %v", err) + continue // Continue to try accepting other connections + } + globalmux.RLock() // Lock the global mutex to ensure thread-safe access to ProxyParent + if ProxyParent != nil { + go handleConnection(conn) // Handle each client in a new goroutine + } else { + log.Printf("No ProxyParent available / not booted ... closing connection from %s", conn.RemoteAddr()) + conn.Close() // If ProxyParent is nil, we cannot handle this connection, so close it + } + globalmux.RUnlock() // Unlock the global mutex after checking ProxyParent + } // end for listener.Accept() + dlog(always, "NNTP server on %s stopped.", addr) +} // end func StartNNTPServer + +// StartProxyServers launches the NNTP server on configured TCP and/or TLS ports. +// It uses the global cfg variable for configuration parameters. +func StartProxyServers(appOpt *CFG) { + if appOpt == nil { + dlog(always, "CRITICAL: Application configuration (appOpt) is nil. Cannot start proxy servers.") + return + } + if globalAllowPosting { + welcomeCode = 200 // If posting is allowed, use 200 + } + started := false + + if appOpt.ProxyTCP > 0 { + tcpAddr := fmt.Sprintf(":%d", appOpt.ProxyTCP) + dlog(always, "Attempting to start non-TLS NNTP proxy on port %d", appOpt.ProxyTCP) + go StartNNTPServer(tcpAddr, appOpt.ProxyPasswdFile, "", "") + started = true + } else { + dlog(always, "Non-TLS NNTP proxy (ProxyTCP) not configured or port is 0, skipping.") + } + + if appOpt.ProxyTLS > 0 { + if appOpt.TLSCertPem == "" || appOpt.TLSPrivKey == "" { + dlog(always, "TLS NNTP proxy (ProxyTLS) configured for port %d, but TLSCertPem or TLSPrivKey is missing. 
Skipping TLS proxy.", appOpt.ProxyTLS) + } else { + tlsAddr := fmt.Sprintf(":%d", appOpt.ProxyTLS) + dlog(always, "Attempting to start TLS NNTP proxy (NNTPS) on port %d", appOpt.ProxyTLS) + go StartNNTPServer(tlsAddr, appOpt.ProxyPasswdFile, appOpt.TLSCertPem, appOpt.TLSPrivKey) + started = true + } + } else { + dlog(always, "TLS NNTP proxy (ProxyTLS) not configured or port is 0, skipping.") + } + + if !started { + dlog(always, "No NNTP proxy servers were started (neither ProxyTCP nor ProxyTLS were configured with valid ports/settings).") + } +} // end func StartProxyServers + +// GroupInfo holds information about a newsgroup +type GroupInfo struct { + Name string // Group name + Description string // Group description + Count int // Article count + Low int64 // Low water mark (oldest article number) + High int64 // High water mark (newest article number) + Posting bool // Posting allowed flag +} + +// ArticleOverview holds fields for the XOVER/OVER response +type ArticleOverview struct { + Number int64 // Article number + Subject string // Subject header + From string // From header + Date string // Date header + MessageID string // Message-ID header + References string // References header + Bytes int // Size in bytes + Lines int // Lines count + ExtraFields map[string]string // Additional fields +} + +// handleGroupCommand processes the GROUP command by selecting a newsgroup +func (ps *ProxySession) handleGroupCommand(args []string) error { + if len(args) < 1 { + ps.CliTp.PrintfLine("501 Syntax error: GROUP ") + return nil + } + groupName := args[0] + + // Find a provider that has this group + var group *GroupInfo + + for _, provider := range ProxyParent.providerList { + if provider.NoDownload || !provider.Newsreader { + continue // Skip providers that don't allow downloads + } + + connitem, err := provider.ConnPool.GetConn() + if err != nil { + dlog(always, "ERROR GetConn for provider %s: %v", provider.Name, err) + continue // Try next provider + } + + id, err 
:= connitem.srvtp.Cmd("GROUP %s", groupName) + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) + continue // Try next provider + } + + connitem.srvtp.StartResponse(id) + code, msg, err := connitem.srvtp.ReadCodeLine(211) + connitem.srvtp.EndResponse(id) + + // 211 count low high group_name + if err == nil && code == 211 { + parts := strings.Fields(msg) + if len(parts) >= 4 { + count, _ := strconv.Atoi(parts[0]) + low, _ := strconv.ParseInt(parts[1], 10, 64) + high, _ := strconv.ParseInt(parts[2], 10, 64) + + group = &GroupInfo{ + Name: groupName, + Count: count, + Low: low, + High: high, + } + + ps.selectedProvider = provider // Set the selected provider for this session when requesting GROUP + provider.ConnPool.ParkConn(0, connitem, "proxy") + break + } + } + + provider.ConnPool.ParkConn(0, connitem, "proxy") + } + + if group == nil { + ps.selectedProvider = nil // Reset selected provider if no group found + ps.CliTp.PrintfLine("411 No such newsgroup") + return nil + } + + // Update the session with the selected group info + ps.Group = group.Name + ps.MsgNum = group.High // Set to the high water mark as default + + // Return a successful GROUP response: 211 count low high group_name + ps.CliTp.PrintfLine("211 %d %d %d %s", + group.Count, group.Low, group.High, group.Name) + + dlog(always, "%s | Selected group: %s (articles: %d, range: %d-%d)", + ps.Username, group.Name, group.Count, group.Low, group.High) + + return nil +} + +// handleListCommand processes the LIST command +func (ps *ProxySession) handleListCommand(args []string) error { + var variant string + if len(args) >= 1 { + variant = strings.ToUpper(args[0]) + } + // Find a suitable provider + selected := false // Track if we found a provider for this session + for _, provider := range ProxyParent.providerList { + if provider.NoDownload || !provider.Newsreader { + continue + } + if ps.selectedProvider != nil && provider != ps.selectedProvider { + continue // Skip if this provider is not the 
selected one for this session + } + + connitem, err := provider.ConnPool.GetConn() + if err != nil { + dlog(always, "ERROR GetConn for provider %s: %v", provider.Name, err) + continue + } + + // Different LIST variants + var command string + if variant == "" { + command = "LIST" + } else { + command = fmt.Sprintf("LIST %s", variant) + } + + id, err := connitem.srvtp.Cmd("%s", command) + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + + connitem.srvtp.StartResponse(id) + code, _, err := connitem.srvtp.ReadCodeLine(215) + if err != nil || code != 215 { + connitem.srvtp.EndResponse(id) + provider.ConnPool.CloseConn(connitem, nil) + continue + } + + // Read the dot-delimited response directly into a list of strings + lines, err := connitem.srvtp.ReadDotLines() + connitem.srvtp.EndResponse(id) + + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + provider.ConnPool.ParkConn(0, connitem, "proxy") + selected = true + if ps.selectedProvider == nil { + ps.selectedProvider = provider // Set the selected provider for this session when requesting LIST + } + + // Start our response to the client + ps.CliTp.PrintfLine("215 List of newsgroups follows") + dw := ps.CliTp.DotWriter() + + // Pass through the lines + for _, line := range lines { + fmt.Fprintln(dw, line) + } + + dw.Close() + return nil + } + + // If we couldn't find any provider or all failed + if !selected { + if ps.selectedProvider != nil { + ps.selectedProvider = nil + } + ps.CliTp.PrintfLine("503 No providers available for LIST command") + return nil + } + return nil +} + +// handleXOverCommand processes the XOVER/OVER command +func (ps *ProxySession) handleXOverCommand(args []string, isXOVER bool) error { + // Must have a selected group first + if ps.Group == "" { + ps.CliTp.PrintfLine("412 No newsgroup selected") + return nil + } + + var rangeArg string + if len(args) >= 1 { + rangeArg = args[0] + } else { + // If no range specified, use current article 
number + if ps.MsgNum <= 0 { + ps.CliTp.PrintfLine("420 No current article selected") + return nil + } + rangeArg = fmt.Sprintf("%d", ps.MsgNum) + } + + // Process the range argument + var startNum, endNum int64 + + if strings.Contains(rangeArg, "-") { + parts := strings.Split(rangeArg, "-") + if len(parts) == 2 { + var err error + startNum, err = strconv.ParseInt(parts[0], 10, 64) + if err != nil { + ps.CliTp.PrintfLine("501 Invalid article range") + return nil + } + + if parts[1] == "" { + // Format like "1000-" means "1000 to the end" + endNum = 0 // Will be handled as "to the end" by the server + } else { + endNum, err = strconv.ParseInt(parts[1], 10, 64) + if err != nil { + ps.CliTp.PrintfLine("501 Invalid article range") + return nil + } + } + } + } else { + var err error + startNum, err = strconv.ParseInt(rangeArg, 10, 64) + if err != nil { + ps.CliTp.PrintfLine("501 Invalid article number") + return nil + } + endNum = startNum // Just one article + } + + // Find provider with this group + selected := false // Track if we found a provider for this session + for _, provider := range ProxyParent.providerList { + if provider.NoDownload || !provider.Newsreader { + continue + } + if ps.selectedProvider != nil && provider != ps.selectedProvider { + continue // Skip if this provider is not the selected one for this session + } + connitem, err := provider.ConnPool.GetConn() + if err != nil { + dlog(always, "ERROR GetConn for provider %s: %v", provider.Name, err) + continue + } + + // Select the group first (required before XOVER/OVER) + id, err := connitem.srvtp.Cmd("GROUP %s", ps.Group) + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + + connitem.srvtp.StartResponse(id) + code, _, err := connitem.srvtp.ReadCodeLine(211) + connitem.srvtp.EndResponse(id) + + if err != nil || code != 211 { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + + // Now run the XOVER/OVER command + var command string + if isXOVER { + command = 
"XOVER" + } else { + command = "OVER" + } + + if endNum > 0 { + command = fmt.Sprintf("%s %d-%d", command, startNum, endNum) + } else if endNum == 0 { + command = fmt.Sprintf("%s %d-", command, startNum) + } else { + command = fmt.Sprintf("%s %d", command, startNum) + } + + id, err = connitem.srvtp.Cmd("%s", command) + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + + connitem.srvtp.StartResponse(id) + code, _, err = connitem.srvtp.ReadCodeLine(224) + if err != nil || code != 224 { + connitem.srvtp.EndResponse(id) + provider.ConnPool.CloseConn(connitem, nil) + continue + } + + // Read the dot-delimited response + lines, err := connitem.srvtp.ReadDotLines() + connitem.srvtp.EndResponse(id) + + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + selected = true // We found a provider that can handle this command + // Start our response to the client + ps.CliTp.PrintfLine("224 Overview information follows") + dw := ps.CliTp.DotWriter() + + // Pass through the lines + for _, line := range lines { + fmt.Fprintln(dw, line) + } + + dw.Close() + provider.ConnPool.ParkConn(0, connitem, "proxy") + return nil + } + if !selected { + if ps.selectedProvider != nil { + ps.selectedProvider = nil + } + ps.CliTp.PrintfLine("503 No providers have the selected group") + return nil + } + return nil +} + +// handleXHdrCommand processes the XHDR/HDR command +func (ps *ProxySession) handleXHdrCommand(args []string, isXHDR bool) error { + if len(args) < 1 { + ps.CliTp.PrintfLine("501 Syntax error: XHDR
[range|]") + return nil + } + + // Must have a selected group first, unless message-id is specified + if ps.Group == "" && !strings.HasPrefix(args[len(args)-1], "<") { + ps.CliTp.PrintfLine("412 No newsgroup selected") + return nil + } + + headerField := args[0] + + // Get the message ID or range + var messageID string + var rangeSpec string + + if len(args) >= 2 { + if strings.HasPrefix(args[1], "<") { + // It's a message ID + messageID = args[1] + } else { + // It's a range spec + rangeSpec = args[1] + } + } else { + // If no range/msgid specified, use current article number + if ps.MsgNum <= 0 { + ps.CliTp.PrintfLine("420 No current article selected") + return nil + } + rangeSpec = fmt.Sprintf("%d", ps.MsgNum) + } + + // Find provider with this group + selected := false // Track if we found a provider for this session + for _, provider := range ProxyParent.providerList { + if provider.NoDownload || !provider.Newsreader { + continue + } + if ps.selectedProvider != nil && provider != ps.selectedProvider { + continue // Skip if this provider is not the selected one for this session + } + + connitem, err := provider.ConnPool.GetConn() + if err != nil { + dlog(always, "ERROR GetConn for provider %s: %v", provider.Name, err) + continue + } + + // Select the group first if using an article number range + if messageID == "" { + id, err := connitem.srvtp.Cmd("GROUP %s", ps.Group) + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + + connitem.srvtp.StartResponse(id) + code, _, err := connitem.srvtp.ReadCodeLine(211) + connitem.srvtp.EndResponse(id) + + if err != nil || code != 211 { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + } + + // Now run the XHDR/HDR command + var command string + if isXHDR { + command = "XHDR" + } else { + command = "HDR" + } + + if messageID != "" { + command = fmt.Sprintf("%s %s %s", command, headerField, messageID) + } else { + command = fmt.Sprintf("%s %s %s", command, headerField, rangeSpec) + } + + 
id, err := connitem.srvtp.Cmd("%s", command) + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + + connitem.srvtp.StartResponse(id) + code, _, err := connitem.srvtp.ReadCodeLine(221) + if err != nil || code != 221 { + connitem.srvtp.EndResponse(id) + provider.ConnPool.CloseConn(connitem, nil) + continue + } + + // Read the dot-delimited response + lines, err := connitem.srvtp.ReadDotLines() + connitem.srvtp.EndResponse(id) + + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + selected = true + // Start our response to the client + ps.CliTp.PrintfLine("221 Header follows") + dw := ps.CliTp.DotWriter() + + // Pass through the lines + for _, line := range lines { + fmt.Fprintln(dw, line) + } + + dw.Close() + provider.ConnPool.ParkConn(0, connitem, "proxy") + return nil + } + if !selected { + if ps.selectedProvider != nil { + ps.selectedProvider = nil + } + ps.CliTp.PrintfLine("503 No providers have the selected group or article") + return nil + } + // If we couldn't find any provider or all failed + return nil +} + +// handleNextOrLastCommand processes the NEXT/LAST command +func (ps *ProxySession) handleNextOrLastCommand(isNext bool) error { + // Must have a selected group first + if ps.Group == "" { + ps.CliTp.PrintfLine("412 No newsgroup selected") + return nil + } + + // Must have a current article selected + if ps.MsgNum <= 0 { + ps.CliTp.PrintfLine("420 No current article selected") + return nil + } + + // Find provider with this group + selected := false // Track if we found a provider for this session + for _, provider := range ProxyParent.providerList { + if provider.NoDownload || !provider.Newsreader { + continue + } + if ps.selectedProvider != nil && provider != ps.selectedProvider { + continue // Skip if this provider is not the selected one for this session + } + + connitem, err := provider.ConnPool.GetConn() + if err != nil { + dlog(always, "ERROR GetConn for provider %s: %v", provider.Name, err) + 
continue + } + + // Select the group first + id, err := connitem.srvtp.Cmd("GROUP %s", ps.Group) + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + + connitem.srvtp.StartResponse(id) + code, _, err := connitem.srvtp.ReadCodeLine(211) + connitem.srvtp.EndResponse(id) + + if err != nil { + if code > 0 { + provider.ConnPool.ParkConn(0, connitem, "proxy") + } else { + provider.ConnPool.CloseConn(connitem, nil) + } + continue + } + + // Set the current article + id, err = connitem.srvtp.Cmd("STAT %d", ps.MsgNum) + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + + connitem.srvtp.StartResponse(id) + code, _, err = connitem.srvtp.ReadCodeLine(223) + connitem.srvtp.EndResponse(id) + + if err != nil { + if code > 0 { + provider.ConnPool.ParkConn(0, connitem, "proxy") + } else { + provider.ConnPool.CloseConn(connitem, nil) + } + continue + } + + // Now run the NEXT/LAST command + var command string + if isNext { + command = "NEXT" + } else { + command = "LAST" + } + + id, err = connitem.srvtp.Cmd("%s", command) + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) + continue + } + + connitem.srvtp.StartResponse(id) + code, msg, err := connitem.srvtp.ReadCodeLine(223) + connitem.srvtp.EndResponse(id) + if code > 0 { + selected = true + provider.ConnPool.ParkConn(0, connitem, "proxy") + } else { + provider.ConnPool.CloseConn(connitem, nil) + } + if err != nil || code != 223 { + if code == 421 { + // No next/previous article in the group + var which string + if isNext { + which = "next" + } else { + which = "previous" + } + ps.CliTp.PrintfLine("421 No %s article to retrieve", which) + return nil + } + } else { + // 223 article_number message_id + parts := strings.Fields(msg) + if len(parts) >= 2 { + newNum, _ := strconv.ParseInt(parts[0], 10, 64) + messageID := parts[1] + + // Update the current article pointer + ps.MsgNum = newNum + + // Return successful response + ps.CliTp.PrintfLine("223 %d %s", newNum, 
messageID) + } else { + ps.CliTp.PrintfLine("503 Invalid response from server") + } + } + return nil + } + if !selected { + if ps.selectedProvider != nil { + ps.selectedProvider = nil + } + ps.CliTp.PrintfLine("503 No providers have the selected group") + return nil + } + return nil +} diff --git a/ProxyHelper.go b/ProxyHelper.go new file mode 100644 index 0000000..498326b --- /dev/null +++ b/ProxyHelper.go @@ -0,0 +1,457 @@ +package main + +import ( + "bufio" + "crypto/rand" + "fmt" + "io" + "log" + mrand "math/rand" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "golang.org/x/crypto/bcrypt" +) + +func LinesWriter(cliwriter *bufio.Writer, conn net.Conn, code int, item *segmentChanItem) (txb uint64, err error) { + defer func() { + if cliwriter != nil { + err := cliwriter.Flush() // Ensure all buffered data is written to the client + if err != nil { + log.Printf("Error in DotWriter: %v", err) + } + } + }() + var lines *[]string // lines to be sent to the client + switch code { + case 220: + lines = &item.article + case 221: + lines = &item.head + case 222: // Valid response codes for ARTICLE, HEAD, BODY + lines = &item.body + // These codes indicate successful retrieval of article, header, or body + } + // code Num + n, err := io.WriteString(cliwriter, fmt.Sprintf("%d 0 %s", code, item.segment.Id)+CRLF) + txb += uint64(n) + if err != nil { + return txb, fmt.Errorf("error DotWriter WriteString writer @ '%s' err='%v'", conn.RemoteAddr(), err) + } + // Write lines to the client connection using our own buffered dot writer + for _, line := range *lines { + // dot-stuffing when sending to client + if strings.HasPrefix(line, ".") { + line = "." 
+ line // prepend a dot to the line + } + n, err := io.WriteString(cliwriter, line+CRLF) + txb += uint64(n) + if err != nil { + return txb, fmt.Errorf("error DotWriter WriteString writer err='%v'", err) + } + + } + // final sequence + n, err = io.WriteString(cliwriter, DOT+CRLF) + txb += uint64(n) + if err != nil { + return txb, fmt.Errorf("error DotWriter WriteString writer err='%v'", err) + } + return txb, nil +} // end func LinesWriter + +func (ps *ProxySession) printCapabilities() { + // Respond with server capabilities (RFC 3977 Section 5.3) + ps.CliTp.PrintfLine("101 Capability list:") + dw := ps.CliTp.DotWriter() + fmt.Fprintln(dw, "VERSION 2") // Indicates RFC 3977 support + fmt.Fprintln(dw, "READER") // Indicates MODE READER support + fmt.Fprintln(dw, "AUTHINFO USER PASS") // Supports AUTHINFO USER and AUTHINFO PASS + // Add other capabilities like LIST, IHAVE, POST, etc., as implemented + dw.Close() +} + +// Passwords in the file are expected to be bcrypt hash strings. +// File format: username:bcrypt_hash:maxconns:ExpireAt_unix_timestamp +func loadPasswdFile(filename string) error { + // Before loading, merge any .new.* files into the main passwd file + dir := filepath.Dir(filename) + base := filepath.Base(filename) + if dir == "" { + dir = "." 
+ } + pattern := fmt.Sprintf("%s.new.", base) + entries, err := os.ReadDir(dir) + if err == nil { + for _, entry := range entries { + if entry.IsDir() { + continue + } + name := entry.Name() + if strings.HasPrefix(name, pattern) { + newfile := filepath.Join(dir, name) + // Append contents to main passwd file + func() { + mainf, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0640) + if err != nil { + log.Printf("Could not open %s for appending: %v", filename, err) + return + } + defer mainf.Close() + f, err := os.Open(newfile) + if err != nil { + log.Printf("Could not open %s: %v", newfile, err) + return + } + defer f.Close() + if _, err := io.Copy(mainf, f); err != nil { + log.Printf("Failed to import %s into %s: %v", newfile, filename, err) + } else { + log.Printf("Imported new user(s) from %s into %s", newfile, filename) + os.Remove(newfile) + } + }() + } + } + } + + file, err := os.Open(filename) + if err != nil { + return fmt.Errorf("failed to open passwd file %s: %w", filename, err) + } + defer file.Close() + + proxyMutex.Lock() + defer proxyMutex.Unlock() + + // Clear existing map before loading to support reloading + for k := range passwdMap { + delete(passwdMap, k) + } + + scanner := bufio.NewScanner(file) + lineNumber := 0 + usersLoaded := 0 + newpasswdMap := make(map[string]*UserData) // Temporary map to hold new users + for scanner.Scan() { + lineNumber++ + line := strings.TrimSpace(scanner.Text()) + if line == "" || strings.HasPrefix(line, "#") { // Skip empty lines and comments + continue + } + parts := strings.SplitN(line, "|", 5) // username|bcrypt_hash|maxconns|ExpireAt|(no)post + if len(parts) != 5 { + log.Printf("Skipping malformed line in passwd file (%d) (expected 5 parts, got %d): %s", lineNumber, len(parts), line) + continue + } + // check the splitted line parts + username := strings.TrimSpace(parts[0]) + bcryptHashFromFile := parts[1] + maxconnsStr := parts[2] + ExpireAtStr := parts[3] + posting := 
strings.HasPrefix(strings.ToLower(parts[4]), "post") + + maxconns, err := strconv.ParseInt(maxconnsStr, 10, 64) + if err != nil { + log.Printf("Skipping line in passwd file (%d) for user '%s' due to invalid maxconns ('%s'): %v", lineNumber, username, maxconnsStr, err) + continue + } + ExpireAt, err := strconv.ParseInt(ExpireAtStr, 10, 64) + if err != nil { + log.Printf("Skipping line in passwd file (%d) for user '%s' due to invalid ExpireAt ('%s'): %v", lineNumber, username, ExpireAtStr, err) + continue + } + if ExpireAt < time.Now().Unix() { + log.Printf("Skipping line in passwd file (%d) for user '%s' due to expired account (ExpireAt: %d, Current: %d)", lineNumber, username, ExpireAt, time.Now().Unix()) + continue + } + + if len(username) < 10 { + log.Printf("Skipping line in passwd file (%d) due to short username: %s", lineNumber, line) + continue + } + if len(bcryptHashFromFile) < 60 { + // Bcrypt hashes are typically 60 characters long, so we check for that. + // If the hash is empty or too short, we skip this user. 
+ log.Printf("Skipping user '%s' on line in passwd file (%d) due to empty/short password hash.", username, lineNumber) + continue + } + + // check if the username already exists in the map + if _, exists := newpasswdMap[username]; exists { + delete(newpasswdMap, username) // Remove existing entry to avoid duplicates + log.Printf("WARNING: Skipping duplicate user '%s' on line %d in passwd file.", username, lineNumber) + continue // Skip duplicate usernames + } + newpasswdMap[username] = &UserData{ + Username: username, + Password: bcryptHashFromFile, // Store the bcrypt hash directly + MaxConns: int(maxconns), + ExpireAt: ExpireAt, + Posting: posting, + } + usersLoaded++ + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading passwd file %s: %w", filename, err) + } + + if usersLoaded == 0 { + log.Printf("Warning: No users loaded from passwd file '%s'.", filename) + } else { + passwdMap = newpasswdMap + log.Printf("Successfully loaded %d users from '%s'.", usersLoaded, filename) + } + return nil +} + +// verifyPassword checks if the provided plaintext password matches the stored bcrypt hash for the user. +func verifyPassword(user string, plainPassword string) bool { + proxyMutex.RLock() + userData, ok := passwdMap[user] + proxyMutex.RUnlock() + + if !ok { + return false // User not found + } + time.Sleep(time.Duration(mrand.Intn(128)) * time.Millisecond) // small delay + // userData.Password holds the full bcrypt hash string from the .passwd file + err := bcrypt.CompareHashAndPassword([]byte(userData.Password), []byte(plainPassword)) + return err == nil // If err is nil, the password matches +} + +// addUserToPasswdFile adds a new user to the passwd file with a bcrypt hashed password. +// The UserData.Password field should contain the plaintext password for the new user. 
+func addUserToProxyPasswdFile(userData *UserData, filename string) error { + if userData.Password == "" { + return fmt.Errorf("cannot add user '%s': plaintext password is empty", userData.Username) + } + + // Generate bcrypt hash from the plaintext password + // bcrypt.DefaultCost is 10. You can increase this for more security (e.g., 12-14), + // but it will also be slower to hash and verify. + hashedPasswordBytes, err := bcrypt.GenerateFromPassword([]byte(userData.Password), bcrypt.DefaultCost) + if err != nil { + return fmt.Errorf("failed to generate bcrypt hash for user '%s': %w", userData.Username, err) + } + bcrpytHashString := string(hashedPasswordBytes) + + // Format the line to be appended to the file + // username:bcrypt_hash_string:maxconns:ExpireAt + posting := "nopost" // Default ACL for new users, can be changed if needed + if userData.Posting { + posting = "post" // Set ACL to post if userData.Posting is true + } + newUserLine := fmt.Sprintf("%s|%s|%d|%d|%s\n", userData.Username, bcrpytHashString, userData.MaxConns, userData.ExpireAt, posting) + + // Open the file in append mode, create if it doesn't exist + file, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0640) + if err != nil { + return fmt.Errorf("failed to open passwd file '%s' for appending: %w", filename, err) + } + defer file.Close() + + if _, err := file.WriteString(newUserLine); err != nil { + return fmt.Errorf("failed to write new user '%s' to passwd file '%s': %w", userData.Username, filename, err) + } + + log.Printf("Successfully added user '%s' to '%s' with a bcrypt hashed password.", userData.Username, filename) + // Optionally, reload passwdMap or add the new user directly to the in-memory map + // For simplicity here, we assume a restart or separate reload mechanism if immediate in-memory update is needed. 
+ // To update in-memory map immediately: + /* TODO + proxyMutex.Lock() + passwdMap[userData.Username] = &UserData{ + Username: userData.Username, + Password: bcrpytHashString, // Store the newly generated hash + MaxConns: userData.MaxConns, + ExpireAt: userData.ExpireAt, + } + proxyMutex.Unlock() + log.Printf("User '%s' also updated in the in-memory passwdMap.", userData.Username) + */ + return nil +} + +const DefaultPasswordLength = 8 // 8 bytes will give us 16 hex characters, which is a common length for usernames/passwords + +// generateRandomHexCredentials generates a random username and password, each 8 bytes, output as hex strings. +func generateRandomHexCredentials() (username string, password string, err error) { + // Use crypto/rand for cryptographically secure random bytes + // If you want longer usernames/passwords, adjust the length accordingly. + // For example, 10 bytes would give 20 hex characters. + buf := make([]byte, DefaultPasswordLength) + if _, err := rand.Read(buf); err != nil { + return "", "", fmt.Errorf("failed to generate random username: %w", err) + } + username = fmt.Sprintf("%x", buf) + if _, err := rand.Read(buf); err != nil { + return "", "", fmt.Errorf("failed to generate random password: %w", err) + } + password = fmt.Sprintf("%x", buf) + return username, password, nil +} + +func (ps *ProxySession) Close() { + ps.mux.Lock() + defer ps.mux.Unlock() + if ps.CliTp != nil { + ps.CliTp.Close() // Close the textproto connection + ps.CliTp = nil // Clear textproto connection to avoid dangling pointer + } + if ps.Conn != nil { + ps.Conn.Close() // Close the network connection + ps.Conn = nil // Clear the connection to avoid dangling pointer + } + ps.Authed = false // Mark session as unauthenticated + ps.Username = "" // Clear username to avoid dangling pointer + ps.Password = "" // Clear username to avoid dangling pointer + ps.ExpireAt = 0 // Clear expiration time + username := ps.Username // Store username for logging + ps.Username = "" 
// Clear username to avoid dangling pointer + ps.Password = "" // Clear password to avoid dangling pointer + ps.ExpireAt = 0 // Clear expiration time + log.Printf("Closed session for user '%s'", username) +} + +func (ps *ProxySession) IsExpired() (isExpired bool) { + ps.mux.RLock() + isExpired = ps.ExpireAt > 0 && time.Now().Unix() > ps.ExpireAt + ps.mux.RUnlock() + if isExpired { + ps.Close() // Close the session if it has expired + } + return +} // end func IsExpired + +func GoCliRxTxCounter() { + // GoCliRxTxCounter starts a goroutine to periodically log received and sent bytes per user + go func() { + for { + si := <-statsChan // Wait for stats items from the channel + CliRxTxMux.Lock() // Lock the global mutex to ensure thread-safe access to CliRxTxCounter + if si.clear != nil { + delete(CliRxTxCounter, si.username) + CliRxTxMux.Unlock() + continue + } + if _, ok := CliRxTxCounter[si.username]; !ok { + CliRxTxCounter[si.username] = NewCounter(4) // Initialize counter for the user if it doesn't exist + } + if si.rxbytes > 0 { + CliRxTxCounter[si.username].Add("tmpRXBytes", si.rxbytes) + } + if si.txbytes > 0 { + CliRxTxCounter[si.username].Add("tmpTXBytes", si.txbytes) + } + CliRxTxMux.Unlock() // Unlock the global mutex + } + }() + go func() { + for { + time.Sleep(time.Minute) + CliRxTxMux.Lock() + for username, counter := range CliRxTxCounter { + if counter == nil { + continue + } + rxbytes := counter.GetReset("tmpRXBytes") // Get and reset temporary RX bytes + txbytes := counter.GetReset("tmpTXBytes") // Get and reset temporary TX bytes + var RXspeedInKB, TXspeedInKB float64 + if rxbytes > 0 { + counter.Add("RXBytes", rxbytes) + RXspeedInKB = float64(rxbytes) / 1024 / 60 // Calculate users upload speed in KB/s (proxy received) + } + if txbytes > 0 { + counter.Add("TXBytes", txbytes) + TXspeedInKB = float64(txbytes) / 1024 / 60 // Calculate users download speed in KB/s (proxy transceived) + } + var rxMiB, txMiB float64 + trx := counter.GetValue("RXBytes") + 
ttx := counter.GetValue("TXBytes") + if trx > 1024*1024 { + rxMiB = float64(trx) / 1024 / 1024 + } + if ttx > 1024*1024 { + txMiB = float64(ttx) / 1024 / 1024 + } + if RXspeedInKB > 0 || TXspeedInKB > 0 { + log.Printf(" %s | session DL speed: %.0f KiB/s (%.0f MiB) [%d bytes] | session UL speed: %.0f KiB/s (%.0f MiB) [%d bytes]", username, TXspeedInKB, txMiB, ttx, RXspeedInKB, rxMiB, trx) + } else { + log.Printf(" %s | idle, no data transfer in last minute", username) + // TODO close session if no data transfer in last minute? + } + } + CliRxTxMux.Unlock() + } + }() +} + +func IsArticleNotFoundAtProviderGroup(messageId string, providerGroup string) bool { + // Check if the article is not found at the provider group + articleNotFound.mux.RLock() + defer articleNotFound.mux.RUnlock() + if providerGroupMap, exists := articleNotFound.Map[providerGroup]; exists { + if a430, found := providerGroupMap[messageId]; found { + if time.Now().Before(a430.expires) { + log.Printf("cache: a430 isflag messageId '%s' not found at provider group '%s'", messageId, providerGroup) + return true // messageId flagged as not found and entry not expired + } + log.Printf("cache: a430 expired messageId '%s' at provider group '%s'", messageId, providerGroup) + go ClearArticleNotFoundAtProviderGroup(messageId, providerGroup) // Clear expired article not found entry + } + } + return false // not cached +} + +func ClearArticleNotFoundAtProviderGroup(messageId string, providerGroup string) { + // Clear the article not found at the provider group + articleNotFound.mux.Lock() + defer articleNotFound.mux.Unlock() + if providerGroupMap, exists := articleNotFound.Map[providerGroup]; exists { + if _, found := providerGroupMap[messageId]; found { + delete(providerGroupMap, messageId) // Remove the article not found entry + log.Printf("Cleared article not found for message ID '%s' at provider group '%s'", messageId, providerGroup) + } else { + log.Printf("Article '%s' not found at provider group '%s' 
(nothing to clear)", messageId, providerGroup) + } + } else { + log.Printf("Provider group '%s' does not exist in article not found map", providerGroup) + } +} + +func SetArticleNotFoundAtProviderGroup(messageId string, providerGroup string) { + // Set the article not found at the provider group + articleNotFound.mux.Lock() + defer articleNotFound.mux.Unlock() + if _, exists := articleNotFound.Map[providerGroup]; !exists { + articleNotFound.Map[providerGroup] = make(map[string]*A430) + } + articleNotFound.Map[providerGroup][messageId] = &A430{ + expires: time.Now().Add(1 * time.Minute), // Set expiration to 1 minute from now + } + log.Printf("Set article not found for message ID '%s' at provider group '%s'", messageId, providerGroup) +} + +// isValidMessageID checks if the provided message ID is valid according to NNTP standards. +func isValidMessageID(ps *ProxySession, messageID string) (isvalid bool, num int64) { + // Placeholder for message ID validation logic + if strings.HasPrefix(messageID, "<") && strings.HasSuffix(messageID, ">") { + // we dont check for @ in the message ID, as it is not required by RFC 5536 + isvalid = true + return + } + number, err := strconv.ParseInt(messageID, 10, 64) + if err == nil && number > 0 { + num = number + // If it's a number, we consider it valid + return + } + log.Printf(" %s | Invalid message ID: %s", ps.Username, messageID) + return +} diff --git a/README.md b/README.md index 3781219..1a8d0d0 100644 --- a/README.md +++ b/README.md @@ -75,9 +75,10 @@ The tool uses the NNTP commands: ## Installation -1. Compile from source or download the latest executable from the [Releases](../../releases) page. -2. Configure `provider.json` with your Usenet provider details. -3. Run the program from the command line: +1. **Download pre-built binaries** from the [Releases](../../releases) page, or +2. **Compile from source** (see Build Instructions below) +3. Configure `provider.json` with your Usenet provider details. +4. 
Run the program from the command line: ```sh ./nzbrex -checkonly -nzb nzbs/ubuntu-24.04-live-server-amd64.iso.nzb.gz @@ -85,6 +86,37 @@ The tool uses the NNTP commands: ./nzbrex --cd=cache --checkfirst --nzb=nzbs/ubuntu-24.04-live-server-amd64.iso.nzb.gz ``` +### Build Instructions + +NZBreX requires the rapidyenc C++ library to be compiled before building the Go application. + +#### Linux Build +```sh +# Build rapidyenc library +cd rapidyenc && ./build_rapidyenc_linux-amd64.sh && cd .. + +# Build NZBreX +go build . +``` + +#### Windows Native Build +```cmd +REM Install dependencies: Go, MinGW-w64, CMake +REM Then run: +build_windows.bat +``` + +#### Windows Cross-Compilation (from Linux) +```sh +# Install dependencies +sudo apt install gcc-mingw-w64-x86-64 g++-mingw-w64-x86-64 cmake + +# Run cross-compilation script +./local_crossbuild_windows-amd64.sh +``` + +The Windows executable will be statically linked and only depend on core Windows system libraries. + --- ## Running NZBreX @@ -218,6 +250,26 @@ Example output: --- +## Troubleshooting + +### Windows Executable Issues + +If you encounter an error like "The code execution cannot proceed because libc++.so.6.dll was not found" on Windows: + +1. **Download the latest release** - This issue has been fixed in recent builds +2. **Ensure you're using the correct Windows version** - Download the Windows-specific executable from the releases page +3. **The executable is self-contained** - No additional runtime libraries should be required + +The Windows executable is statically linked and only depends on core Windows system libraries that are guaranteed to be available. 
+ +### Build Issues + +- **rapidyenc library missing**: Run the appropriate rapidyenc build script before building NZBreX +- **Cross-compilation fails**: Ensure MinGW-w64 and cmake are installed +- **Link errors**: Make sure the rapidyenc library matches your target platform + +--- + ## Contributing & Reporting Issues Please open an issue on [GitHub Issues](../../issues) or a [Discussion](../../discussions) if you encounter problems. diff --git a/Routines.go b/Routines.go index 5388adb..5acd747 100644 --- a/Routines.go +++ b/Routines.go @@ -40,17 +40,18 @@ func (s *SESSION) GoCheckRoutine(wid int, provider *Provider, item *segmentChanI connitem, err = provider.ConnPool.GetConn() } if err != nil { - return 0, fmt.Errorf("ERROR in GoCheckRoutine: ConnGet '%s' connitem='%v' sharedCC='%v' err='%v'", provider.Name, connitem, sharedCC, err) + return 0, fmt.Errorf("error in GoCheckRoutine: ConnGet '%s' connitem='%v' sharedCC='%v' err='%v'", provider.Name, connitem, sharedCC, err) } if connitem == nil || connitem.conn == nil { - return 0, fmt.Errorf("ERROR in GoCheckRoutine: ConnGet got nil item or conn '%s' connitem='%v' sharedCC='%v' err='%v'", provider.Name, connitem, sharedCC, err) + return 0, fmt.Errorf("error in GoCheckRoutine: ConnGet got nil item or conn '%s' connitem='%v' sharedCC='%v' err='%v'", provider.Name, connitem, sharedCC, err) } - code, err = CMD_STAT(connitem, item) + code, msg, err := CMD_STAT(connitem, item) if code == 0 && err != nil { // connection problem, closed? 
+ item.FlagError(provider.id) provider.ConnPool.CloseConn(connitem, sharedCC) // close conn on error - dlog(always, "WARN checking seg.Id='%s' failed @ '%s' err='%v'", item.segment.Id, provider.Name, err) + dlog(always, "WARN checking seg.Id='%s' failed @ '%s' code=%d msg='%s' err='%v'", item.segment.Id, provider.Name, code, msg, err) return code, err } @@ -151,9 +152,7 @@ func (s *SESSION) GoDownsRoutine(wid int, provider *Provider, item *segmentChanI // check cache before download if cacheON && cache.ReadCache(item) > 0 { - // item has been read from cache - //DecreaseDLQueueCnt() // decrease when read from cache // DISABLED - //memlim.MemReturn(who+":cacheRead", item) + dlog(cfg.opt.DebugDR, "GoDownsRoutine: cache hit seg.Id='%s' @ '%s'#'%s'", item.segment.Id, provider.Name, provider.Group) return 920, nil } start := time.Now() // start time for this routine @@ -166,20 +165,20 @@ func (s *SESSION) GoDownsRoutine(wid int, provider *Provider, item *segmentChanI connitem, err = provider.ConnPool.GetConn() } if err != nil { - return 0, fmt.Errorf("ERROR in GoDownsRoutine: ConnGet '%s' connitem='%v' sharedCC='%v' err='%v'", provider.Name, connitem, sharedCC, err) + return 0, fmt.Errorf("error in GoDownsRoutine: ConnGet '%s' connitem='%v' sharedCC='%v' err='%v'", provider.Name, connitem, sharedCC, err) } if connitem == nil || connitem.conn == nil { - return 0, fmt.Errorf("ERROR in GoDownsRoutine: ConnGet got nil item or conn '%s' connitem='%v' sharedCC='%v' err='%v'", provider.Name, connitem, sharedCC, err) + return 0, fmt.Errorf("error in GoDownsRoutine: ConnGet got nil item or conn '%s' connitem='%v' sharedCC='%v' err='%v'", provider.Name, connitem, sharedCC, err) } dlog(cfg.opt.DebugWorker, "GoDownsRoutine got connitem='%v' sharedCC='%v' --> CMD_ARTICLE seg.Id='%s'", connitem, sharedCC, item.segment.Id) startArticle := time.Now() - code, msg, rxb, err := CMD_ARTICLE(connitem, item) - + code, msg, rxb, err := CMD(connitem, item, cmdARTICLE) if err != nil { - 
dlog(always, "ERROR in GoDownsRoutine: CMD_ARTICLE seg.Id='%s' @ '%s'#'%s' err='%v'", item.segment.Id, provider.Name, provider.Group, err) - // handle connection problem / closed connection + // connection problem, closed? + item.FlagError(provider.id) provider.ConnPool.CloseConn(connitem, sharedCC) // close conn on error + dlog(always, "ERROR in GoDownsRoutine: CMD_ARTICLE seg.Id='%s' @ '%s'#'%s' err='%v'", item.segment.Id, provider.Name, provider.Group, err) return 0, fmt.Errorf("error in GoDownsRoutine: CMD_ARTICLE seg.Id='%s' @ '%s'#'%s' err='%v'", item.segment.Id, provider.Name, provider.Group, err) } @@ -198,8 +197,8 @@ func (s *SESSION) GoDownsRoutine(wid int, provider *Provider, item *segmentChanI s.counter.Add("TOTAL_RXbytes", uint64(item.size)) // to calulate total download speed of this provider - provider.ConnPool.counter.Add("TMP_RXbytes", uint64(item.size)) - provider.ConnPool.counter.Add("TOTAL_RXbytes", uint64(item.size)) + provider.ConnPool.Counter.Add("TMP_RXbytes", uint64(item.size)) + provider.ConnPool.Counter.Add("TOTAL_RXbytes", uint64(item.size)) // to calulate global total download speed GCounter.Add("TMP_RXbytes", uint64(item.size)) @@ -242,6 +241,7 @@ func (s *SESSION) GoDownsRoutine(wid int, provider *Provider, item *segmentChanI // pass item to cache. 
dlog(cfg.opt.DebugWorker, "DEBUG GoDownsRoutine CMD_ARTICLE: reached cache.Add2Cache seg.Id='%s' @ '%s'#'%s'", item.segment.Id, provider.Name, provider.Group) + cache.Add2Cache(item) // pass to ParkConn @@ -326,6 +326,15 @@ func (s *SESSION) GoReupsRoutine(wid int, provider *Provider, item *segmentChanI defer GCounter.Decr("GoReupsRoutines") //who := fmt.Sprintf("UR=%d@'%s' seg.Id='%s'", wid, provider.Name, item.segment.Id) // DISABLED MEMRETURN + item.mux.Lock() + memlocked := item.memlocked > 0 + item.mux.Unlock() + + if !memlocked { + dlog(cfg.opt.DebugWorker, "GoReupsRoutine: item not memlocked seg.Id='%s' @ '%s'#'%s'", item.segment.Id, provider.Name, provider.Group) + memlim.MemLockWait(item, "GoUR") // gets memlock here + } + var err error var connitem *ConnItem if sharedCC != nil { @@ -334,10 +343,10 @@ func (s *SESSION) GoReupsRoutine(wid int, provider *Provider, item *segmentChanI connitem, err = provider.ConnPool.GetConn() } if err != nil { - return 0, fmt.Errorf("ERROR in GoReupsRoutine: ConnGet '%s' connitem='%v' sharedCC='%v' err='%v'", provider.Name, connitem, sharedCC, err) + return 0, fmt.Errorf("error in GoReupsRoutine: ConnGet '%s' connitem='%v' sharedCC='%v' err='%v'", provider.Name, connitem, sharedCC, err) } if connitem == nil || connitem.conn == nil || connitem.srvtp == nil { - return 0, fmt.Errorf("ERROR in GoReupsRoutine: ConnGet got nil item or conn '%s' connitem='%v' sharedCC='%v' err='%v'", provider.Name, connitem, sharedCC, err) + return 0, fmt.Errorf("error in GoReupsRoutine: ConnGet got nil item or conn '%s' connitem='%v' sharedCC='%v' err='%v'", provider.Name, connitem, sharedCC, err) } var uploaded, unwanted, retry bool @@ -351,9 +360,10 @@ func (s *SESSION) GoReupsRoutine(wid int, provider *Provider, item *segmentChanI } else if provider.capabilities.ihave { cmd = 2 } else { - //provider.mux.RUnlock() // FIXME TODO #b8bd287b: + // connection problem, closed? 
+ item.FlagError(provider.id) provider.ConnPool.CloseConn(connitem, sharedCC) // close conn on error - return 0, fmt.Errorf("WARN selecting upload mode failed '%s' caps='%#v'", provider.Name, provider.capabilities) + return 0, fmt.Errorf("selecting upload mode failed '%s' caps='%#v'", provider.Name, provider.capabilities) } //provider.mux.RUnlock() // FIXME TODO #b8bd287b: @@ -367,6 +377,12 @@ func (s *SESSION) GoReupsRoutine(wid int, provider *Provider, item *segmentChanI switch cmd { case 1: code, msg, txb, err = CMD_POST(connitem, item) + if code == 0 && err != nil { + // connection problem, closed? + item.FlagError(provider.id) + provider.ConnPool.CloseConn(connitem, sharedCC) + return 0, fmt.Errorf("error in GoReupsRoutine: CMD_POST seg.Id='%s' @ '%s'#'%s' err='%v'", item.segment.Id, provider.Name, provider.Group, err) + } switch code { case 240: uploaded = true @@ -378,15 +394,23 @@ func (s *SESSION) GoReupsRoutine(wid int, provider *Provider, item *segmentChanI case 2: code, msg, txb, err = CMD_IHAVE(connitem, item) + if code == 0 && err != nil { + // connection problem, closed? 
+ item.FlagError(provider.id) + provider.ConnPool.CloseConn(connitem, sharedCC) // close conn on error + return 0, fmt.Errorf("error in GoReupsRoutine: CMD_IHAVE seg.Id='%s' @ '%s'#'%s' err='%v'", item.segment.Id, provider.Name, provider.Group, err) + } switch code { case 235: uploaded = true // pass case 436: retry = true + err = nil // pass case 437: unwanted = true + err = nil // pass default: dlog(always, "ERROR in GoReupsRoutine: CMD_IHAVE seg.Id='%s' @ '%s'#'%s' code=%d msg='%s' err='%v'", item.segment.Id, provider.Name, provider.Group, code, msg, err) @@ -402,8 +426,8 @@ func (s *SESSION) GoReupsRoutine(wid int, provider *Provider, item *segmentChanI s.counter.Add("TOTAL_TXbytes", uint64(item.size)) // to calulate total upload speed of this provider - provider.ConnPool.counter.Add("TMP_TXbytes", uint64(item.size)) - provider.ConnPool.counter.Add("TOTAL_TXbytes", uint64(item.size)) + provider.ConnPool.Counter.Add("TMP_TXbytes", uint64(item.size)) + provider.ConnPool.Counter.Add("TOTAL_TXbytes", uint64(item.size)) // to calulate global total upload speed GCounter.Add("TMP_TXbytes", uint64(item.size)) @@ -423,6 +447,7 @@ func (s *SESSION) GoReupsRoutine(wid int, provider *Provider, item *segmentChanI } item.flaginUP = false item.flagisUP = true + item.fails = 0 item.mux.Unlock() // update provider statistics provider.mux.Lock() // mutex #87c9 articles.refreshed++ @@ -485,9 +510,10 @@ func (s *SESSION) GoReupsRoutine(wid int, provider *Provider, item *segmentChanI } if err != nil { - dlog(always, "ERROR in GoReupsRoutine: seg.Id='%s' @ '%s'#'%s' err='%v'", item.segment.Id, provider.Name, provider.Group, err) // handle connection problem / closed connection + item.FlagError(provider.id) provider.ConnPool.CloseConn(connitem, sharedCC) // close conn on error + dlog(always, "ERROR in GoReupsRoutine: seg.Id='%s' @ '%s'#'%s' err='%v'", item.segment.Id, provider.Name, provider.Group, err) return 0, fmt.Errorf("error in GoReupsRoutine: seg.Id='%s' @ '%s'#'%s' err='%v'", 
item.segment.Id, provider.Name, provider.Group, err) } @@ -507,7 +533,7 @@ func (s *SESSION) StopRoutines() { } // pushing nil into the segment chans will stop the routines for _, provider := range s.providerList { - closeSegmentChannel(s.segmentChansCheck[provider.Group]) + //closeSegmentChannel(s.segmentChansCheck[provider.Group]) closeSegmentChannel(s.segmentChansDowns[provider.Group]) closeSegmentChannel(s.segmentChansReups[provider.Group]) diff --git a/Server.go b/Server.go new file mode 100644 index 0000000..db1d104 --- /dev/null +++ b/Server.go @@ -0,0 +1,538 @@ +package main + +import ( + "bufio" // Added for random salt generation + // Added for TLS support + "time" + + "github.com/Tensai75/nzbparser" + // Added for bcrypt password hashing + // "encoding/hex" // Uncomment if you want to log/debug hashes as hex strings + "fmt" + "io" + "log" // Added for random delays in password verification (to mitigate timing attacks) + mrand "math/rand" // Added for random delays in password verification (to mitigate timing attacks) + "net" + "net/textproto" // Added for textproto + "strings" +) + +// handleConnection manages a single NNTP proxy client connection. +func handleConnection(conn net.Conn) { + //log.Printf("Handling connection from %s", conn.RemoteAddr()) + cliTp := textproto.NewConn(conn) + var currentUser string // Stores username after successful AUTHINFO USER + authenticated := false + now := time.Now() // Get the current time for session initialization + var ps = &ProxySession{ + Conn: conn, // Store the connection in the session + LastCmd: now, // Initialize last command time + ConnectedAt: now, // Set the connection time + } // ProxySession to hold user session data + + // Ensure connection is closed and proxy session cleaned up when done + defer func(s *ProxySession) { + if ps.Authed && s.Username != "" { + proxyMutex.Lock() + if CountConns[s.Username] > 0 { + CountConns[s.Username]-- + log.Printf("Decremented connection count for user '%s'. 
Active connections: %d", currentUser, CountConns[currentUser]) + } else { + log.Printf("Connection count for user '%s' was already 0 or less, not decrementing. This might indicate an issue.", s.Username) + } + // ProxySessions is used to track this specific session, remove it here. + delete(ProxySessions, s.Username) + statsChan <- &statsItem{ + username: s.Username, + clear: fmt.Errorf("1"), + } + proxyMutex.Unlock() + } + if ps.Authed { + dlog(always, "Closed connection for user '%s'", s.Username) + } + if ps.CliTp != nil { + ps.CliTp.Close() // Close the textproto connection + } else if s.Conn != nil { + ps.Conn.Close() + } + ps.Authed = false // Clear authentication status + ps.Username = "" // Clear username to avoid dangling pointer + ps.ConnectedAt = time.Time{} // Clear timestamp to avoid dangling pointer + ps.LastCmd = time.Time{} // Clear last command time to avoid dangling pointer + }(ps) + + // Send initial welcome message (RFC 3977: 200 or 201) + // 200 service available, posting allowed + // 201 service available, posting prohibited + time.Sleep(time.Duration(mrand.Intn(128)) * time.Millisecond) // Random delay to simulate server startup + cliTp.PrintfLine("%d %s", welcomeCode, nntpWelcomeMessage) + // incoming client connection is captured in this for loop until QUIT command or error occurs + //cliTp.W.Flush() // Ensure the welcome message is sent immediately + +forever: + for { + // Read commands from the client in a loop + line, err := cliTp.ReadLine() // Use ReadLine from textproto + if err != nil { + if err != io.EOF { + // Check for common textproto errors, like malformed lines or read errors + if perr, ok := err.(*textproto.Error); ok { + dlog(always, "Textproto error from client err='%v'", perr) + // You might want to send a specific NNTP error code back to the client here + // For example, if it's a syntax error related to line endings or length. 
+ // tpWriter.PrintfLine("501 Syntax error or line too long") + } else { + //DEBUG log.Printf("Error reading from client %s: %v", conn.RemoteAddr(), err) + } + } else { + //DEBUG log.Printf("Client %s disconnected (EOF).", conn.RemoteAddr()) + } + return + } + if len(line) > 128 { // only command lines are captured here + // reading a line longer than 128 characters is not allowed by RFC 3977 + // Line is too long, send error response + cliTp.PrintfLine("501 Syntax error: cmd line too long") + return + } + line = strings.TrimSpace(line) // TrimSpace is still useful + parts := strings.Fields(line) + if len(parts) == 0 { + // just close on an empty line + return + } + var args []string + command := strings.ToUpper(parts[0]) + if len(parts) >= 2 { + args = parts[1:] // Get all parts after the command as arguments + } + + //dlog(cfg.opt.Bug, "Client %s command: %s args='%v'", conn.RemoteAddr(), line, args) + + ps.LastCmd = time.Now() // Update last command timestamp for unauthenticated users + + if authenticated { + ps.cmdmux.Lock() + if err := ps.handleRequest(command, args); err != nil { + log.Printf("Error handling command '%s' for user '%s': %v", command, ps.Username, err) + ps.cmdmux.Unlock() + break + } + ps.cmdmux.Unlock() + continue forever // Continue to handle further commands after handling the request + } + + switch command { + + case "CAPABILITIES": + ps.printCapabilities() + + case "AUTHINFO": + if authenticated { + cliTp.PrintfLine("502 Already authenticated") + return + } + if len(parts) < 2 { + cliTp.PrintfLine("501 Syntax error in AUTHINFO command") + return + } + authCmd := strings.ToUpper(parts[1]) + switch authCmd { + + case "USER": + if len(parts) < 3 { + cliTp.PrintfLine("501 Syntax error: AUTHINFO USER ") + return + } + currentUser = parts[2] + // RFC 3977 suggests 381 if user is valid, otherwise 481/502 or proceed and fail at PASS. + // To avoid user enumeration, some servers always respond 381. 
+ time.Sleep(time.Duration(mrand.Intn(128)) * time.Millisecond) // Random delay + cliTp.PrintfLine("381 Password required") + continue forever // Continue to handle further commands after AUTHINFO USER + + case "PASS": + if currentUser == "" { + cliTp.PrintfLine("482 Authentication commands out of sequence (AUTHINFO USER first)") + return + } + if len(parts) < 3 { + cliTp.PrintfLine("501 Syntax error: AUTHINFO PASS ") + return + } + time.Sleep(time.Duration(mrand.Intn(128)) * time.Millisecond) // Random delay + passwordToVerify := parts[2] + + if !verifyPassword(currentUser, passwordToVerify) { + cliTp.PrintfLine("481 Authentication failed") + log.Printf("Failed authentication attempt for user '%s' from %s", currentUser, conn.RemoteAddr()) + return + } + + proxyMutex.RLock() // Lock before checking and updating CountConns and user data + reloadCron := time.Since(proxyCron) > time.Minute + proxyMutex.RUnlock() + + if reloadCron { + proxyMutex.Lock() + proxyCron = time.Now() + proxyMutex.Unlock() + if err := loadPasswdFile(cfg.opt.ProxyPasswdFile); err != nil { + // Unlock before sleeping + time.Sleep(time.Duration(mrand.Intn(1000)) * time.Millisecond) // Random delay + log.Printf("Failed to reload passwd file: %v", err) + cliTp.PrintfLine("481 Authentication failed (passwd file reload error)") + return + } + } + + proxyMutex.RLock() + + userData, userExists := passwdMap[currentUser] + if !userExists { + // This case should ideally not be reached if verifyPassword relies on passwdMap + proxyMutex.RUnlock() + time.Sleep(time.Duration(mrand.Intn(1000)) * time.Millisecond) // Random delay + cliTp.PrintfLine("481 Authentication failed (user data inconsistency)") + log.Printf("User data not found for '%s' in passwdMap after successful verifyPassword. 
Potential data inconsistency.", currentUser) + return + } + + // Check if account is expired + if userData.ExpireAt > 0 && time.Now().Unix() > userData.ExpireAt { + proxyMutex.RUnlock() + time.Sleep(time.Duration(mrand.Intn(1000)) * time.Millisecond) // Random delay + cliTp.PrintfLine("481 Authentication failed (account expired)") + log.Printf("Authentication failed for user '%s' from %s: account expired (ExpireAt: %d, Current: %d)", currentUser, conn.RemoteAddr(), userData.ExpireAt, time.Now().Unix()) + return + } + proxyMutex.RUnlock() + + proxyMutex.Lock() + // Check connection limit + if userData.MaxConns > 0 && CountConns[currentUser] >= userData.MaxConns { + proxyMutex.Unlock() + time.Sleep(time.Duration(mrand.Intn(1000)) * time.Millisecond) // Random delay + cliTp.PrintfLine("452 Too many connections for this user. Please try again later.") + log.Printf("Connection denied for user '%s' from %s: too many connections (current: %d, max: %d)", currentUser, conn.RemoteAddr(), CountConns[currentUser], userData.MaxConns) + // Do not set authenticated to true, connection is rejected before full authentication. 
+ return + } + // Connection allowed, increment count and flag authenticate + CountConns[currentUser]++ + authenticated = true + + // Create a new ProxySession for the authenticated user + + CID++ // Increment global connection ID counter + ps.id = CID // Assign a unique session ID + ps.Authed = true + ps.Username = currentUser + ps.Password = passwordToVerify // Store the hashed password in the session so we can check every now and then if password has changed and close the session + ps.ExpireAt = userData.ExpireAt // Set session expiration time from user data + ps.Conn = conn // Store the connection in the session + ps.CliTp = cliTp // Create a textproto connection for easier command handling + ps.Writer = bufio.NewWriter(conn) // Create a bufio writer for the client connection + ps.Cron = ps.ConnectedAt // Initialize cron time for periodic tasks + + // Store the session in the global ProxySessions map + ProxySessions[currentUser] = ps + + cliTp.PrintfLine("281 Welcome to NZBreX Proxy! Your conns: %d/%d. Exp: '%v'", + CountConns[currentUser], userData.MaxConns, time.Unix(userData.ExpireAt, 0).Format(time.RFC1123Z)) + + dlog(cfg.opt.Debug, "User '%s' authenticated. 
Active connections for user: %d/%d", currentUser, CountConns[currentUser], userData.MaxConns) + + proxyMutex.Unlock() // Unlock after updating CountConns and user data + + continue forever // Continue to handle further commands after successful authentication + + default: + cliTp.PrintfLine("501 Unknown AUTHINFO subcommand: %s", authCmd) + return + } + + case "MODE": + if len(parts) < 2 { + cliTp.PrintfLine("501 Syntax error in MODE command") + return + } + switch strings.ToUpper(parts[1]) { + case "READER": + cliTp.PrintfLine("201 Posting prohibited") + + case "STREAM": + if !authenticated { + cliTp.PrintfLine("480 Authentication required for MODE STREAM") + return + } + cliTp.PrintfLine("200 Switching to STREAM mode") + default: + cliTp.PrintfLine("501 Unknown mode") + return + } + continue forever // Continue to handle further commands after MODE command + + case "QUIT": + cliTp.PrintfLine("205 Closing connection - goodbye.") + log.Printf("Client %s issued QUIT.", conn.RemoteAddr()) + return + + default: + if authenticated { + // Handle other NNTP commands for authenticated users here + // e.g., GROUP, ARTICLE, LIST, NEXT, LAST, DATE, etc. + cliTp.PrintfLine("500 Unknown command: %s (authenticated)", command) + } else { + // RFC 3977: "480 Authentication required" for most commands if not authenticated + cliTp.PrintfLine("480 Authentication required") + } + } // end switch command + } // end for forever +} // end func handleConnection + +// handleRequest processes NNTP commands for an authenticated user session. +func (ps *ProxySession) handleRequest(command string, args []string) error { + // Placeholder for handling specific NNTP commands in the session + // This function can be expanded to handle commands like GROUP, ARTICLE, etc. + // For now, we just log the command received. 
+ // returning any error (e.g.: via fmt.Errorf) will disconnect the user + var retry error = nil // retry is used to indicate that the command should be retried + if !ps.Authed || (ps.Username == "") || (ps.CliTp == nil) || ps.ExpireAt < time.Now().Unix() { + ps.CliTp.PrintfLine("480 Authentication required for %s command", command) + return fmt.Errorf("authentication required") + } + //dlog(always, "Handling command '%s' for user '%s' in session.", command, ps.Username) + // Handle these commands for authenticated users + if time.Since(ps.Cron) > time.Second*15 { // every 15s + // calulate download speed of this user session + //txspeedinKB := float64(ps.tmpTXBytes) / 1024 / float64(time.Since(ps.Cron)) // speed in KB + //rxspeedinKB := float64(ps.tmpRXBytes) / 1024 / float64(time.Since(ps.Cron)) // speed in KB + //log.Printf(" %s | session DL speed: %.0f KB/s | session UL speed: %.0f KB/s", ps.Username, rxspeedinKB, txspeedinKB) + if ps.tmpTXBytes > 0 || ps.tmpRXBytes > 0 { + statsChan <- &statsItem{ + username: ps.Username, + rxbytes: ps.tmpRXBytes, + txbytes: ps.tmpTXBytes, + } + ps.tmpTXBytes = 0 + ps.tmpRXBytes = 0 + } + ps.Cron = time.Now() // Reset cron time for the next speed calculation + + } + pass := false + var item *segmentChanItem // segmentChanItem to hold the message ID or number + switch command { //switch command1 + + // extract messageId from command + case "STAT", "ARTICLE", "BODY", "HEAD": + if len(args) == 0 { + ps.CliTp.PrintfLine("501 Syntax error: %s requires a message ID or number", command) + return fmt.Errorf("syntax error: %s requires a message ID or number", command) + } + isvalid, num := isValidMessageID(ps, args[0]) + if !isvalid && num <= 0 { + // protocol error, message ID is not valid + ps.CliTp.PrintfLine("501 Syntax error: command %s requires a valid message ID", command) + return fmt.Errorf("syntax error: command %s requires a valid message ID", command) + + } else if num > 0 && ps.Group == "" { + // TODO select newsgroup + 
ps.CliTp.PrintfLine("412 No newsgroup selected to read messageid: %d", num) + return retry + + } else if isvalid { + item = &segmentChanItem{ + segment: &nzbparser.NzbSegment{ + Id: args[0], + }, + } + ps.MsgNum, ps.Group = 0, "" // Reset message number and group on valid + pass = true // we have a valid message ID, so we can pass it to a provider + // TODO: add disk caching here? + } else if num > 0 { + // newsreader message number + ps.MsgNum = num // Store the message number in the session + pass = true // we have a valid message number, so we can pass it to a provider + item = &segmentChanItem{ + segment: &nzbparser.NzbSegment{ + Id: args[0], + }, + } + } + + case "CAPABILITIES": + ps.printCapabilities() + return nil // No error, capabilities printed + + case "DATE": + ps.CliTp.PrintfLine("111 %s", time.Now().Format(time.RFC1123Z)) + return nil // No error, date printed + + case "LIST": + return ps.handleListCommand(args) + + case "XOVER", "OVER": + return ps.handleXOverCommand(args, command == "XOVER") + + case "XHDR", "HDR": + return ps.handleXHdrCommand(args, command == "XHDR") + + case "GROUP": + return ps.handleGroupCommand(args) + + case "NEXT": + return ps.handleNextOrLastCommand(true) + + case "LAST": + return ps.handleNextOrLastCommand(false) + + case "QUIT": + ps.CliTp.PrintfLine("205 Closing connection - goodbye. 
uploaded=%d downloaded=%d connected='%v'", ps.RXBytes, ps.TXBytes, time.Since(ps.ConnectedAt)) + log.Printf(" %s | quit", ps.Username) + return fmt.Errorf("client quit") + + default: + ps.CliTp.PrintfLine("502 Unknown command") + return fmt.Errorf("unknown command: %s", command) + } // end switch command1 + + if !pass { + ps.CliTp.PrintfLine("501 Syntax error: command %s requires a valid message ID", command) + return fmt.Errorf("syntax error: command %s requires a valid message ID", command) + } + // Now we have a valid command and messageId (if applicable), proceed to handle the request + var response string // response to be sent to the client after loopProvider has completed + checkedProviderGroups := make(map[string]bool) +loopProvider: + for _, provider := range ProxyParent.providerList { + switch command { + case "ARTICLE", "BODY", "HEAD", "STAT": + if provider.NoDownload || (ps.Group != "" && ps.MsgNum > 0 && !provider.Newsreader) { + // yes, doing stat on a provider that does not allow downloading articles does not make sense + response = "430 NODL" // 430 No Download, provider does not allow downloading articles + continue loopProvider // Skip this provider if it does not allow downloading articles + } + } + + if checkedProviderGroups[provider.Group] || + IsArticleNotFoundAtProviderGroup(item.segment.Id, provider.Group) { + response = "430 NOPG" // 430 cached Not Found in ProviderGroup + // Skip this provider if it has already been checked or is not available for download + continue loopProvider + } + connitem, err := provider.ConnPool.GetConn() // providerconn / proxyconn + if err != nil { + dlog(always, "ERROR GetConn for provider %s: %v", provider.Name, err) + continue loopProvider // Skip this provider if connection fails + } + if ps.Group != "" && ps.MsgNum > 0 { + // execute GROUP command on remote if we have a group selected + id, err := connitem.srvtp.Cmd("GROUP %s", ps.Group) + if err != nil { + dlog(always, "ERROR CMD_GROUP for provider %s: %v", 
provider.Name, err) + provider.ConnPool.CloseConn(connitem, nil) + continue loopProvider + } + connitem.srvtp.StartResponse(id) + code, _, err := connitem.srvtp.ReadCodeLine(211) + connitem.srvtp.EndResponse(id) + + if err != nil { + dlog(always, "ERROR CMD_GROUP command=%s for provider %s: %v", provider.Name, command, err) + if code > 0 { + provider.ConnPool.ParkConn(0, connitem, "proxy") + } else { + provider.ConnPool.CloseConn(connitem, nil) + } + continue + } + } + dlog(cfg.opt.BUG, " %s | provider %s: %s got pc='%v'", ps.Username, provider.Name, command, connitem) + + // got connection to a provider + switch command { // switch command2 + case "ARTICLE", "BODY", "HEAD": + // Handle commands ARTICLE, BODY, HEAD + code, msg, rxb, err := CMD(connitem, item, command) + if rxb > 0 { + // to calculate total download speed of this provider + // Update provider's RXBytes + provider.ConnPool.Counter.Add("TMP_RXbytes", rxb) + provider.ConnPool.Counter.Add("TOTAL_RXbytes", rxb) + } + if err != nil { + provider.ConnPool.CloseConn(connitem, nil) // Close the connection on error + dlog(always, "ERROR CMD_ARTICLE for provider %s: code=%d msg='%s' err='%v'", provider.Name, code, msg, err) + continue loopProvider + } + switch code { + case 220, 221, 222: // Valid response codes for ARTICLE, HEAD, BODY + provider.ConnPool.ParkConn(0, connitem, "proxy") // Park the connection for reuse + // send data to client + txb, err := LinesWriter(ps.Writer, ps.Conn, code, item) // Send data to client + if txb > 0 { + // Update bytes sent to client + ps.tmpTXBytes += txb + ps.TXBytes += txb + } + if err != nil { + return fmt.Errorf("error writing data command %s to client: %v", command, err) + } + response = "0" // full response already sent, set response to 0 to indicate success + break loopProvider // Break out of the provider loop after handling the command + + case 423, 430, 451: // messageid not found + provider.ConnPool.ParkConn(0, connitem, "proxy") + 
checkedProviderGroups[provider.Group] = true + response = fmt.Sprintf("%d %s", code, msg) + SetArticleNotFoundAtProviderGroup(item.segment.Id, provider.Group) // Set article not found at provider group + continue loopProvider + default: + provider.ConnPool.CloseConn(connitem, nil) + dlog(always, "ERROR in CMD for provider %s: cmd=%s code=%d msg='%s'", provider.Name, command, code, msg) + response = fmt.Sprintf("502 Unknown Response: %d %s", code, msg) + continue loopProvider + } + case "STAT": + // Handle STAT command + code, msg, err := CMD_STAT(connitem, item) + if err != nil { + dlog(always, "ERROR CMD_STAT for provider %s: err='%v'", provider.Name, err) + provider.ConnPool.CloseConn(connitem, nil) // Close the connection on error + continue loopProvider + } + switch code { + case 223: // Article found + provider.ConnPool.ParkConn(0, connitem, "proxy") // Park the connection for reuse + response = fmt.Sprintf("%d 0 %s", code, item.segment.Id) + break loopProvider // Break out of the provider loop after handling the command + + case 423, 430, 451: // messageid not found + provider.ConnPool.ParkConn(0, connitem, "proxy") + checkedProviderGroups[provider.Group] = true + response = fmt.Sprintf("%d nf", code) + SetArticleNotFoundAtProviderGroup(item.segment.Id, provider.Group) // Set article not found at provider group + continue loopProvider + + default: + provider.ConnPool.CloseConn(connitem, nil) + response = fmt.Sprintf("%d %s", code, msg) + // Handle other error codes + dlog(always, "ERROR CMD_STAT for provider %s: code=%d msg='%s'", provider.Name, code, msg) + continue loopProvider // Continue to the next provider + } + } // end switch command2 + } // end for loopProvider + if response != "0" { + if response != "" { + ps.CliTp.PrintfLine("%s", response) + } else { + dlog(always, " %s | ERROR response to client empty", ps.Username) + ps.CliTp.PrintfLine("500 Unknown error occurred while processing command %s", command) + } + } + return nil // Return nil to 
indicate the command was handled successfully. an error will disconnect the user +} // end func handleRequest diff --git a/Session.go b/Session.go index c41ed33..2a80637 100644 --- a/Session.go +++ b/Session.go @@ -82,6 +82,8 @@ type SESSION struct { WorkDividerChan chan *WrappedItem // channel to send items to the work divider checkFeedDone bool // checkDone is set to true when the segment feeder has finished feeding to channel, check may still be activly running! segcheckdone bool // segcheckdone is set to true when the segment check is done + proxy bool // flag to signal proxy is used in LaunchSession + bootedWorkers int // counts how many workers have been booted } // end type SESSION struct func (p *PROCESSOR) NewProcessor() error { @@ -107,14 +109,14 @@ func (p *PROCESSOR) NewProcessor() error { } //p.refresh = time.Duration(0 * time.Second) // default. call processor.SetDirRefresh after NewProcessor p.nzbDir = cfg.opt.NzbDir - p.seenFiles = make(map[string]bool, 128) + p.seenFiles = make(map[string]bool) go p.watchDirThread() } - p.sessMap = make(map[uint64]*SESSION, 128) + p.sessMap = make(map[uint64]*SESSION) p.stopChan = make(chan struct{}, 1) go p.processorThread() p.IsRunning = true - dlog(always, "NewProcessor: nzbDir='%s' refresh=%d", p.nzbDir, p.refresh) + dlog(always, "NewProcessor: nzbDir='%s' refresh=%d proxy=%t", p.nzbDir, p.refresh, proxy) return nil } // end func NewProcessor @@ -130,35 +132,41 @@ func (p *PROCESSOR) LaunchSession(s *SESSION, nzbfilepath string, waitSession *s defer waitSession.Done() } - // checks for the correct inputs - if nzbfilepath == "" && s == nil { - // we have no nzbfilepath and no session - // this is a bug in the code, we can't handle both at the same time! - return fmt.Errorf("error LaunchSession: need nzbfilepath or session") - - } else if nzbfilepath != "" && s != nil { - // we have a nzbfilepath and a session - // this is a bug in the code, we can't handle both at the same time! 
- return fmt.Errorf("error LaunchSession: nzbfilepath and session supplied! can only take one") - - } else if nzbfilepath != "" && s == nil { - // we have a nzbfilepath but no session - // no session supplied, create one! - if sessId, newsession := p.newSession(nzbfilepath); sessId <= 0 || newsession == nil { - return fmt.Errorf("error LaunchSession: sessId <= 0 ?! nzbfilepath='%s' newsession='%#v' err='%v'", nzbfilepath, newsession, err) + if !proxy { // the normal case, we are processing an nzb file. don't use the session var here! + // not booting proxy, no session supplied, create a new session + // checks for the correct inputs + if nzbfilepath == "" && s == nil { + // we have no nzbfilepath and no session + // this is a bug in the code, we can't handle both at the same time! + return fmt.Errorf("error LaunchSession: need nzbfilepath or session") + + } else if nzbfilepath != "" && s != nil { + // we have a nzbfilepath and a session + // this is a bug in the code, we can't handle both at the same time! + return fmt.Errorf("error LaunchSession: nzbfilepath and session supplied! can only take one") + + } else if nzbfilepath != "" && s == nil { + // we have a nzbfilepath but no session + // no session supplied, create one! + if sessId, newsession := p.newSession(nzbfilepath); sessId <= 0 || newsession == nil { + return fmt.Errorf("error LaunchSession: sessId <= 0 ?! nzbfilepath='%s' newsession='%#v' err='%v'", nzbfilepath, newsession, err) + } else { + // we have a new session + dlog(cfg.opt.Verbose, "LaunchSession: created new session with sessId=%d nzbfilepath='%s'", sessId, nzbfilepath) + s = newsession + } + + } else if nzbfilepath == "" && s != nil { + // we have no nzbfilepath but a session + // pass: we have "s" as session! 
} else { - // we have a new session - dlog(cfg.opt.Verbose, "LaunchSession: created new session with sessId=%d nzbfilepath='%s'", sessId, nzbfilepath) - s = newsession + return fmt.Errorf("error LaunchSession: uncatched bug in launch checks: nzbfilepath='%s' s='%#v'", nzbfilepath, s) } + } // end if !proxy - } else if nzbfilepath == "" && s != nil { - // we have no nzbfilepath but a session - // pass: we have "s" as session! - } else { - return fmt.Errorf("error LaunchSession: uncatched bug in launch checks: nzbfilepath='%s' s='%#v'", nzbfilepath, s) + if s == nil { + return fmt.Errorf("error LaunchSession: session is nil, can't start") } - s.mux.Lock() if s.preBoot || s.active { // we are already booting or active @@ -189,82 +197,90 @@ func (p *PROCESSOR) LaunchSession(s *SESSION, nzbfilepath string, waitSession *s defer SetLogToTerminal() // reset log output to stdout after the session is done } - dlog(cfg.opt.Verbose, "LaunchSession Settings: '%#v'", *cfg.opt) // - s.preparationStartTime = time.Now() - dlog(cfg.opt.Verbose, "LaunchSession Loading NZB: '%s'", s.nzbPath) + if !s.proxy { + dlog(cfg.opt.Verbose, "LaunchSession Settings: '%#v'", *cfg.opt) // + s.preparationStartTime = time.Now() + dlog(cfg.opt.Verbose, "LaunchSession Loading NZB: '%s'", s.nzbPath) - if s.nzbFile != nil { - // nzbfile is still open and loaded, pass + if s.nzbFile != nil { + // nzbfile is still open and loaded, pass - } else if s.nzbFile == nil && nzbfilepath != "" { - // we have a nzbfilepath and no nzbFile loaded yet - // nzbfile has not been opened and parsed before (or has been closed) - nzbfile, err := loadNzbFile(s.nzbPath) - if err != nil || nzbfile == nil { - return fmt.Errorf("error unable to load file s.nzbPath='%s' err=%v'", s.nzbPath, err) - } - s.nzbFile = nzbfile + } else if s.nzbFile == nil && nzbfilepath != "" { + // we have a nzbfilepath and no nzbFile loaded yet + // nzbfile has not been opened and parsed before (or has been closed) + nzbfile, err := 
loadNzbFile(s.nzbPath) + if err != nil || nzbfile == nil { + return fmt.Errorf("error unable to load file s.nzbPath='%s' err=%v'", s.nzbPath, err) + } + s.nzbFile = nzbfile - dlog(cfg.opt.Debug, "nzbfile='%#v'", s.nzbFile) + dlog(cfg.opt.Debug, "nzbfile='%#v'", s.nzbFile) - // loop through all file tags within the NZB file - for _, file := range nzbfile.Files { - if cfg.opt.Debug { - fmt.Printf(">> nzbfile file='%#v'\n\n", file) - } - s.fileStatLock.Lock() - s.fileStat[file.Filename] = new(fileStatistic) - s.fileStat[file.Filename].available = make(providerStatistic) - s.fileStat[file.Filename].totalSegments = uint64(file.TotalSegments) - s.fileStatLock.Unlock() - // loop through all segment tags within each file tag - if cfg.opt.Debug { - for _, agroup := range file.Groups { - if agroup != "" && !slices.Contains(s.nzbGroups, agroup) { - s.nzbGroups = append(s.nzbGroups, agroup) + // loop through all file tags within the NZB file + for _, file := range nzbfile.Files { + if cfg.opt.Debug { + fmt.Printf(">> nzbfile file='%#v'\n\n", file) + } + s.fileStatLock.Lock() + s.fileStat[file.Filename] = new(fileStatistic) + s.fileStat[file.Filename].available = make(providerStatistic) + s.fileStat[file.Filename].totalSegments = uint64(file.TotalSegments) + s.fileStatLock.Unlock() + // loop through all segment tags within each file tag + if cfg.opt.Debug { + for _, agroup := range file.Groups { + if agroup != "" && !slices.Contains(s.nzbGroups, agroup) { + s.nzbGroups = append(s.nzbGroups, agroup) + } } + dlog(always, "NewsGroups: %v", s.nzbGroups) + } + // filling s.segmentList + for _, segment := range file.Segments { + dlog(cfg.opt.BUG, "append nzb to segmentList: Id='%s' file='%s'", segment.Id, file.Filename) + // If you add more fields to the 'segmentChanItem' struct, the compiler will catch missing initializations here and crash on compilation. 
+ // mux := new(sync.RWMutex) + segmux := &loggedrwmutex.LoggedSyncRWMutex{Name: "segment-" + segment.Id} // use a logged sync mutex to log locking and unlocking + // create a new segmentChanItem for each segment + item := &segmentChanItem{ + segmux, s, &segment, &file, // sync.RWMutex, *SESSION, *nzbparser.Segment, *nzbparser.File + "", // string fields: 1. hashedId will be filled below + &s.nzbHashedname, // *string fields + make(chan int, 1), make(chan bool, 1), // chan fields + // map fields for segment status + make(map[int]bool, len(s.providerList)), + make(map[int]bool, len(s.providerList)), + make(map[int]bool, len(s.providerList)), + make(map[int]bool, len(s.providerList)), + make(map[int]bool, len(s.providerList)), + make(map[int]bool, len(s.providerList)), + make(map[int]bool, len(s.providerList)), + make(map[int]bool, len(s.providerList)), + make(map[int]bool, len(s.providerList)), + []string{}, []string{}, []string{}, // []string fields + false, false, false, false, false, false, false, false, false, // bool fields + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // int fields + if len(segment.Id) < 3 { // a@a + dlog(always, "ERROR LaunchSession: segment.Id='%s' is too short! file='%s'", segment.Id, file.Filename) + continue + } + if !strings.HasPrefix(segment.Id, "<") && !strings.HasSuffix(segment.Id, ">") { + item.segment.Id = "<" + segment.Id + ">" // ensure segment.Id is always in the format "" for consistency + } + item.hashedId = SHA256str(item.segment.Id) + s.segmentList = append(s.segmentList, item) } - dlog(always, "NewsGroups: %v", s.nzbGroups) - } - // filling s.segmentList - for _, segment := range file.Segments { - dlog(cfg.opt.BUG, "append nzb to segmentList: Id='%s' file='%s'", segment.Id, file.Filename) - // If you add more fields to the 'segmentChanItem' struct, the compiler will catch missing initializations here and crash on compilation. 
- // mux := new(sync.RWMutex) - segmux := &loggedrwmutex.LoggedSyncRWMutex{Name: "segment-" + segment.Id} // use a logged sync mutex to log locking and unlocking - // create a new segmentChanItem for each segment - item := &segmentChanItem{ - segmux, s, &segment, &file, // sync.RWMutex, *SESSION, *nzbparser.Segment, *nzbparser.File - SHA256str("<" + segment.Id + ">"), // string fields - &s.nzbHashedname, // *string fields - make(chan int, 1), make(chan bool, 1), // chan fields - // map fields for segment status - make(map[int]bool, len(s.providerList)), - make(map[int]bool, len(s.providerList)), - make(map[int]bool, len(s.providerList)), - make(map[int]bool, len(s.providerList)), - make(map[int]bool, len(s.providerList)), - make(map[int]bool, len(s.providerList)), - make(map[int]bool, len(s.providerList)), - make(map[int]bool, len(s.providerList)), - make(map[int]bool, len(s.providerList)), - []string{}, []string{}, []string{}, // []string fields - false, false, false, false, false, false, false, false, false, // bool fields - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // int fields - s.segmentList = append(s.segmentList, item) } - } - - mibsize := float64(nzbfile.Bytes) / 1024 / 1024 - artsize := mibsize / float64(len(s.segmentList)) * 1024 - s.nzbSize = nzbfile.Bytes // store the size of the nzb file in bytes - dlog(always, "%s [%s] loaded NZB: '%s' [%d/%d] ( %.02f MiB | ~%.0f KiB/segment )", appName, appVersion, s.nzbName, len(s.segmentList), nzbfile.TotalSegments, mibsize, artsize) - - } // end if s.nzbFile - // left-padding for log output - s.digStr = fmt.Sprintf("%d", len(s.segmentList)) - s.D = fmt.Sprintf("%d", len(s.digStr)) + mibsize := float64(nzbfile.Bytes) / 1024 / 1024 + artsize := mibsize / float64(len(s.segmentList)) * 1024 + s.nzbSize = nzbfile.Bytes // store the size of the nzb file in bytes + dlog(always, "%s [%s] loaded NZB: '%s' [%d/%d] ( %.02f MiB | ~%.0f KiB/segment )", appName, appVersion, s.nzbName, len(s.segmentList), nzbfile.TotalSegments, 
mibsize, artsize) + // left-padding for log output + s.digStr = fmt.Sprintf("%d", len(s.segmentList)) + s.D = fmt.Sprintf("%d", len(s.digStr)) + } // end if s.nzbFile + } // end if !s.proxy // re-load the provider list var workerWGconnReady sync.WaitGroup // workerWGconnReady is used to wait for all connections to be established before starting the work @@ -317,7 +333,14 @@ func (p *PROCESSOR) LaunchSession(s *SESSION, nzbfilepath string, waitSession *s } } globalmux.Unlock() - + if s.proxy { + // infinite block here for proxy server. if a wg is supplied it will never be released + _, ok := <-s.stopChan + if !ok { + dlog(always, "LaunchSession: proxy server stopChan closed, exiting LaunchSession") + return + } + } dlog(cfg.opt.Debug, "Loaded s.providerList: %d ... preparation took '%v' | cfg.opt.MemMax=%d totalMaxConns=%d", len(s.providerList), time.Since(s.preparationStartTime).Milliseconds(), cfg.opt.MemMax, totalMaxConns) // setup wait groups @@ -345,17 +368,20 @@ func (p *PROCESSOR) LaunchSession(s *SESSION, nzbfilepath string, waitSession *s s.segmentCheckStartTime = time.Now() // booting work divider go s.GoWorkDivider(&waitDivider, &waitDividerDone) - dlog(cfg.opt.Debug, "sess: waitDividerDone.Wait()") + dlog(always, "sess: waiting on waitDividerDone.Wait()") waitDividerDone.Wait() - dlog(cfg.opt.Debug, "sess: waitDividerDone.Wait() released") + dlog(always, "sess: released waitDividerDone.Wait()") s.StopRoutines() - dlog(cfg.opt.Debug, "sess: waitWorker.Wait()") + dlog(always, "sess: waiting on waitWorker.Wait()") waitWorker.Wait() - dlog(cfg.opt.Debug, "sess: waitWorker.Wait() released, waiting on waitPool.Wait()") + dlog(always, "sess: released waitWorker.Wait(), waiting on waitPool.Wait()") waitPool.Wait() - dlog(cfg.opt.Debug, "sess: waitPool.Wait() released") + dlog(always, "sess: released waitPool.Wait() closing all provider connections") + for _, provider := range s.providerList { + KillConnPool(provider) // close the connection pool for this 
provider + } result, runtime_info := s.Results(s.preparationStartTime) diff --git a/Utils.go b/Utils.go index 0ee854f..4035634 100644 --- a/Utils.go +++ b/Utils.go @@ -3,6 +3,7 @@ package main import ( //"bufio" //"bytes" + "archive/zip" "bufio" "compress/gzip" "crypto/sha256" @@ -79,6 +80,38 @@ func loadNzbFile(path string) (*nzbparser.Nzb, error) { } defer gzReader.Close() parsedReader = gzReader + } else if strings.HasSuffix(strings.ToLower(path), ".zip") { + // ZIP requires random access, can't stream directly + // Need to read file info first + fi, err := f.Stat() + if err != nil { + return nil, err + } + zipReader, err := zip.NewReader(f, fi.Size()) + if err != nil { + return nil, err + } + // Find first .nzb file in ZIP + if len(zipReader.File) == 0 { + return nil, fmt.Errorf("zip file is empty") + } + var nzbFile *zip.File + for _, zf := range zipReader.File { + if strings.HasSuffix(strings.ToLower(zf.Name), ".nzb") { + nzbFile = zf + dlog(always, "loadNzbFile: found nzb file in zip: '%s'", zf.Name) + break + } + } + if nzbFile == nil { + return nil, fmt.Errorf("no .nzb file found in zip archive") + } + rc, err := nzbFile.Open() + if err != nil { + return nil, err + } + defer rc.Close() + parsedReader = rc } else { parsedReader = f } @@ -226,11 +259,10 @@ func LoadHeadersFromFile(path string) ([]string, error) { // AppendFileBytes appends null bytes to the end of a file. // It opens the file in append mode, creates it if it does not exist, and writes the specified number of null bytes. -// If nullbytes is 0, it does nothing. -// If nullbytes is negative, it returns an error. +// If nullbytes is 0 or negative, it returns an error. // If the file does not exist, it creates a new file with the specified number of null bytes. // If the file exists, it appends the specified number of null bytes to the end of the file. 
-func AppendFileBytes(nullbytes int, dstPath string) error { +func AppendFileBytes(nullbytes int, dstPath string) (err error) { if nullbytes <= 0 { return fmt.Errorf("error AppendFileBytes nullbytes=%d must be greater than 0", nullbytes) } @@ -239,24 +271,38 @@ func AppendFileBytes(nullbytes int, dstPath string) error { } // Open destination file in append mode, create if not exists - dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) - if err != nil { - return err + dstFile, openErr := os.OpenFile(dstPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) + if openErr != nil { + return openErr // Return opening error directly } + // Defer Close and handle its error. + // If err is already set (e.g., from Write or Flush), this deferred Close error will not overwrite it. + // If err is nil, and Close fails, err will be set to the Close error. defer func() { - cerr := dstFile.Close() - if err == nil && cerr != nil { - err = cerr + if closeErr := dstFile.Close(); closeErr != nil { + if err == nil { // Only assign closeErr if no other error has occurred + err = fmt.Errorf("error closing file '%s': %v", dstPath, closeErr) + } + // Optional: Log closeErr if err was already set, e.g.: + // else { log.Printf("AppendFileBytes: additionally failed to close '%s' during error handling: %v", dstPath, closeErr) } } }() - nul := make([]byte, nullbytes) + + writer := bufio.NewWriter(dstFile) for i := 0; i < nullbytes; i++ { - nul = append(nul, 0x00) + if err = writer.WriteByte(0x00); err != nil { + // err is assigned (it's the named return), will be returned. Defer will run. + return err + } } - if _, writeErr := dstFile.Write(nul); writeErr != nil { - return writeErr + + // Ensure all buffered data is written to the file + if err = writer.Flush(); err != nil { + // err is assigned, will be returned. Defer will run. 
+ return err } - return nil + + return nil // If everything is successful, err remains nil (or gets set by defer if Close fails) } // end func AppendFileBytes // AppendFile appends (merges) the file contents of srcPath to dstPath. @@ -268,54 +314,64 @@ func AppendFileBytes(nullbytes int, dstPath string) error { // If the destination file does not exist, it creates a new file. // If the source file is empty, it does nothing. func AppendFile(srcPath string, dstPath string, delsrc bool) (err error) { - if srcPath == "" || dstPath == "" { - return fmt.Errorf("error AppendFile srcPath='%s' or dstPath='%s' empty", srcPath, dstPath) + if srcPath == "" { + return fmt.Errorf("error AppendFile srcPath is empty") + } + if dstPath == "" { + return fmt.Errorf("error AppendFile dstPath is empty") } - srcFile, err := os.Open(srcPath) - if err != nil { - return err + // Open source file for reading + srcFile, openSrcErr := os.Open(srcPath) + if openSrcErr != nil { + return fmt.Errorf("error AppendFile opening source file '%s': %w", srcPath, openSrcErr) } defer func() { - cerr := srcFile.Close() - if err == nil && cerr != nil { - err = cerr + if closeErr := srcFile.Close(); closeErr != nil { + if err == nil { // Only assign closeErr if no other error has occurred + err = fmt.Errorf("error AppendFile closing source file '%s': %w", srcPath, closeErr) + } + // Optional: Log closeErr if err was already set + // else { log.Printf("AppendFile: additionally failed to close source '%s' during error handling: %v", srcPath, closeErr) } } }() - dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) - if err != nil { - return err + // Open destination file in append mode, create if not exists + dstFile, openDstErr := os.OpenFile(dstPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) + if openDstErr != nil { + return fmt.Errorf("error AppendFile opening destination file '%s': %w", dstPath, openDstErr) } defer func() { - cerr := dstFile.Close() - if err == nil && cerr != 
nil { - err = cerr + if closeErr := dstFile.Close(); closeErr != nil { + if err == nil { // Only assign closeErr if no other error has occurred + err = fmt.Errorf("error AppendFile closing destination file '%s': %w", dstPath, closeErr) + } + // Optional: Log closeErr if err was already set + // else { log.Printf("AppendFile: additionally failed to close destination '%s' during error handling: %v", dstPath, closeErr) } } }() - buf := make([]byte, DefaultYencWriteBuffer) - for { - n, readErr := srcFile.Read(buf) - if n > 0 { - if _, writeErr := dstFile.Write(buf[:n]); writeErr != nil { - return writeErr - } - } - if readErr == io.EOF { - break - } - if readErr != nil { - return readErr - } + // Create a buffer and copy in chunks using io.CopyBuffer for efficiency + // DefaultYencWriteBuffer is assumed to be defined elsewhere, e.g., const DefaultYencWriteBuffer = 32 * 1024 + buf := make([]byte, DefaultYencWriteBuffer) // Ensure DefaultYencWriteBuffer is a reasonable size + if _, copyErr := io.CopyBuffer(dstFile, srcFile, buf); copyErr != nil { + return fmt.Errorf("error AppendFile copying from '%s' to '%s': %w", srcPath, dstPath, copyErr) } + + // Ensure all buffered data for dstFile is written to disk before attempting to remove srcFile + if syncErr := dstFile.Sync(); syncErr != nil { + return fmt.Errorf("error AppendFile syncing destination file '%s': %w", dstPath, syncErr) + } + if delsrc { - if err := os.Remove(srcPath); err != nil { - return fmt.Errorf("error Yenc AppendFile Remove err='%v'", err) + if removeErr := os.Remove(srcPath); removeErr != nil { + // Assign to err, so it's returned by the named return, and defer for dstFile can still run. + err = fmt.Errorf("error AppendFile removing source file '%s': %w", srcPath, removeErr) + return err } } - return -} // end func AppendFile (written by AI! GPT-4o, complaint and changed by GPT-4.1!) 
+ return nil // If everything is successful, err remains nil (or gets set by defers if Close fails) +} // end func AppendFile func SHA256SumFile(path string) (string, error) { // Open the file for reading @@ -341,79 +397,83 @@ func SHA256SumFile(path string) (string, error) { func (s *SESSION) writeCsvFile() (err error) { // not tested since rewrite if !cfg.opt.Csv { - return + return nil // Not an error, just not enabled + } + if s.nzbPath == "" { + return fmt.Errorf("writeCsvFile: nzbPath is empty, cannot determine CSV filename") } + csvFileName := strings.TrimSuffix(filepath.Base(s.nzbPath), filepath.Ext(filepath.Base(s.nzbPath))) + ".csv" - f, err := os.Create(csvFileName) - if err != nil { - return fmt.Errorf("unable to open csv file: %v", err) + f, createErr := os.Create(csvFileName) + if createErr != nil { + return fmt.Errorf("writeCsvFile: unable to create csv file '%s': %w", csvFileName, createErr) } defer func() { - if cerr := f.Close(); cerr != nil { - log.Printf("ERROR closing file %s: %v", csvFileName, cerr) + if closeErr := f.Close(); closeErr != nil { + log.Printf("writeCsvFile: error closing csv file '%s': %v", csvFileName, closeErr) // Log the close error regardless + if err == nil { // Only assign closeErr to the return if no other error has occurred + err = fmt.Errorf("writeCsvFile: error closing csv file '%s': %w", csvFileName, closeErr) + } } }() - log.Println("writing csv file...") - fmt.Print("Writing csv file... ") + + log.Println("writing csv file...", csvFileName) // Added filename to log + fmt.Printf("Writing csv file '%s'... 
", csvFileName) // Added filename to print + csvWriter := csv.NewWriter(f) - firstLine := true - // make sorted provider name slice + + // Prepare header + // Make sorted provider name slice providers := make([]string, 0, len(s.providerList)) - for n := range s.providerList { - providers = append(providers, s.providerList[n].Name) - } - sort.Strings(providers) - for fileName, file := range s.fileStat { - // write first line - if firstLine { - line := make([]string, len(providers)+2) - line[0] = "Filename" - line[1] = "Total segments" - for n, providerName := range providers { - line[n+2] = providerName - } - if err := csvWriter.Write(line); err != nil { - return fmt.Errorf("unable to write to the csv file: %v", err) - } - firstLine = false - } - // write line + for _, p := range s.providerList { // More idiomatic loop + providers = append(providers, p.Name) + } + sort.Strings(providers) // Ensure consistent column order + + header := make([]string, len(providers)+2) + header[0] = "Filename" + header[1] = "TotalSegments" + copy(header[2:], providers) + + if writeErr := csvWriter.Write(header); writeErr != nil { + return fmt.Errorf("writeCsvFile: unable to write header to csv file '%s': %w", csvFileName, writeErr) + } + + // Write data rows + // To ensure consistent row order if fileStat is a map, consider sorting keys + fileNames := make([]string, 0, len(s.fileStat)) + for fn := range s.fileStat { + fileNames = append(fileNames, fn) + } + sort.Strings(fileNames) // Sort filenames for consistent output order + + for _, fileName := range fileNames { + file := s.fileStat[fileName] line := make([]string, len(providers)+2) line[0] = fileName - line[1] = fmt.Sprintf("%v", file.totalSegments) - for n, providerName := range providers { + line[1] = fmt.Sprintf("%d", file.totalSegments) // Use %d for integers + for i, providerName := range providers { if value, ok := file.available[providerName]; ok { - line[n+2] = fmt.Sprintf("%v", value) + line[i+2] = fmt.Sprintf("%d", 
value) // Use %d for integers } else { - line[n+2] = "0" + line[i+2] = "0" } } - if err := csvWriter.Write(line); err != nil { - return fmt.Errorf("unable to write to the csv file: %v", err) + if writeErr := csvWriter.Write(line); writeErr != nil { + return fmt.Errorf("writeCsvFile: unable to write line for '%s' to csv file '%s': %w", fileName, csvFileName, writeErr) } } + csvWriter.Flush() - if err := csvWriter.Error(); err != nil { - return fmt.Errorf("unable to write to the csv file: %v", err) + if flushErr := csvWriter.Error(); flushErr != nil { + return fmt.Errorf("writeCsvFile: error flushing csv writer for '%s': %w", csvFileName, flushErr) } - dlog(cfg.opt.Csv, "writeCsv: done") - return -} // end func writeCsv -/* - func setGlobalTimerNow(timer *time.Time) { - globalmux.Lock() - *timer = time.Now() - globalmux.Unlock() - } + fmt.Println("Done.") // Indicate completion for the fmt.Print above + dlog(cfg.opt.Csv, "writeCsvFile: done for '%s'", csvFileName) + return nil // err is nil if execution reaches here successfully +} // end func writeCsv - func getGlobalTimerSince(timer time.Time) time.Duration { - globalmux.RLock() - duration := time.Since(timer) - globalmux.RUnlock() - return duration - } -*/ func ConvertSpeed(bytes int64, durationSeconds int64) (kibPerSec int64, mbps float64) { if durationSeconds <= 0 { return 0, 0 diff --git a/Workers.go b/Workers.go index 3d489d4..36dc22f 100644 --- a/Workers.go +++ b/Workers.go @@ -37,13 +37,8 @@ func (s *SESSION) GoBootWorkers(waitDivider *sync.WaitGroup, workerWGconnReady * dlog(always, "Cached: %d/%d", cached, len(s.segmentList)) } - if cfg.opt.ChanSize > 0 { - if len(s.segmentList) < cfg.opt.ChanSize { - cfg.opt.ChanSize = len(s.segmentList) - } - } else { + if cfg.opt.ChanSize <= 0 { cfg.opt.ChanSize = len(s.segmentList) - //cfg.opt.ChanSize = DefaultChanSize } // loop over the providerList and boot up anonymous workers for each provider @@ -104,6 +99,7 @@ func (s *SESSION) GoBootWorkers(waitDivider 
*sync.WaitGroup, workerWGconnReady * // the check routine may still be activly checking! s.checkFeedDone = true s.mux.Unlock() + close(segmentChanCheck) // close the chan to signal no more items will come }(s.segmentChansCheck[provider.Group]) } globalmux.Unlock() @@ -117,6 +113,13 @@ func (s *SESSION) GoBootWorkers(waitDivider *sync.WaitGroup, workerWGconnReady * connitem, err := provider.ConnPool.GetConn() if err != nil { dlog(always, "ERROR Boot Provider '%s' err='%v'", provider.Name, err) + // Release waitWorker and waitPool counters for all workers that won't be started + for i := 0; i < provider.MaxConns; i++ { + waitWorker.Done() // 3 times per worker + waitWorker.Done() + waitWorker.Done() + waitPool.Done() // 1 time per worker + } return } // check of capabilities @@ -146,7 +149,7 @@ func (s *SESSION) GoBootWorkers(waitDivider *sync.WaitGroup, workerWGconnReady * dlog(cfg.opt.Debug, "GoBootWorkers PreBoot Provider '%s' launch wid=%d/%d", provider.Name, wid, provider.MaxConns) } // GoWorker connecting.... 
- go s.GoWorker(wid, provider, waitWorker, workerWGconnReady, waitPool) + go s.GoWorker(wid, provider, waitWorker, waitPool) time.Sleep(time.Duration(rand.Intn(50*provider.MaxConns)) * time.Millisecond) // random sleep to avoid all workers connecting at the same time } @@ -158,181 +161,252 @@ func (s *SESSION) GoBootWorkers(waitDivider *sync.WaitGroup, workerWGconnReady * dlog(cfg.opt.DebugWorker, "GoBootWorkers: all workers connected (or died if no conn could be established)") } // end func GoBootWorkers -func (s *SESSION) GoWorker(wid int, provider *Provider, waitWorker *sync.WaitGroup, workerWGconnReady *sync.WaitGroup, waitPool *sync.WaitGroup) { +func (s *SESSION) GoWorker(wid int, provider *Provider, waitWorker *sync.WaitGroup, waitPool *sync.WaitGroup) { dlog(cfg.opt.DebugWorker, "GoWorker (%d) launching routines '%s'", wid, provider.Name) - + var thisWorkerWG sync.WaitGroup + thisWorkerWG.Add(3) // we have 3 routines per worker // Obtain a connection for this worker and share it among the check, download, and reupload routines. // The connection is not returned to the pool until all three routines have finished. 
var sharedCC chan *ConnItem = nil var err error if UseSharedCC { - // Get a shared connection channel for the provider} + dlog(cfg.opt.DebugWorker, "GoWorker (%d) trying to get shared connection channel for '%s'", wid, provider.Name) + // Get a shared connection channel for the provider pre filled with an established connection sharedCC, err = GetNewSharedConnChannel(wid, provider) if err != nil || len(sharedCC) == 0 { - dlog(always, "ERROR GoWorker (%d) failed to get shared connection channel for '%s' err='%v'", wid, provider.Name, err) + // release for the 3 routines we won't start + waitWorker.Done() + waitWorker.Done() + waitWorker.Done() + // release this GoWorker from the pool + waitPool.Done() + dlog(always, "ERROR GoWorker (%d) failed to get shared conn for '%s' err='%v' close worker", wid, provider.Name, err) return } } - + s.mux.Lock() + s.bootedWorkers++ + s.mux.Unlock() /* new worker code CheckRoutine */ segCC := s.segmentChansCheck[provider.Group] - go func(wid int, provider *Provider, waitWorker *sync.WaitGroup, sharedCC chan *ConnItem, segCC chan *segmentChanItem) { + go func(wid int, provider *Provider, waitWorker *sync.WaitGroup, thisWorkerWG *sync.WaitGroup, sharedCC chan *ConnItem, segCC chan *segmentChanItem) { + var processed uint64 defer waitWorker.Done() + defer thisWorkerWG.Done() + timeOut := time.Now().Add(15 * time.Second) + lastGood := time.Now() + toChan := make(chan struct{}, 1) + go func() { + for { + time.Sleep(3 * time.Second) + select { + case toChan <- struct{}{}: + default: + } + } + }() forGoCheckRoutine: for { - item, ok := <-segCC - if !ok { - return // channel is closed, exit the routine - } - if item == nil { - if cfg.opt.DebugWorker { - log.Print("CheckRoutine received a nil pointer to quit") + select { + case <-toChan: + if time.Now().After(timeOut) { + dlog(always, "CheckRoutine: (%d@'%s') timeout reached, exiting. 
lastGood: %v", wid, provider.Name, time.Since(lastGood)) + break forGoCheckRoutine } - segCC <- nil // refill the nil so others will die too - break forGoCheckRoutine - } - - switch cfg.opt.ByPassSTAT { - case false: - code, err := s.GoCheckRoutine(wid, provider, item, sharedCC) - item.PrintItemFlags(cfg.opt.DebugFlags, true, fmt.Sprintf("post-GoCheckRoutine: code=%d", code)) - if err != nil { // re-queue? - dlog(always, "ERROR in GoCheckRoutine err='%v'", err) + dlog(time.Since(lastGood) > 9*time.Second, "CheckRoutine: (%d@'%s') waiting on segCC len=%d timeout in: %v (lastGood: %v)", wid, provider.Name, len(segCC), time.Until(timeOut), time.Since(lastGood)) + case item, ok := <-segCC: + if !ok { + dlog(cfg.opt.DebugWorker, "CheckRoutine: channel closed (%d@'%s')", wid, provider.Name) + break forGoCheckRoutine // channel is closed, exit the routine } - case true: - log.Fatal("you should not be here! Quitting...") // FIXME TODO: remove this fatal error - /* - item.mux.Lock() - item.flaginDL = true - - //if !item.pushedDL { - IncreaseDLQueueCnt() // cfg.opt.ByPassSTAT - //GCounter.IncrMax("dlQueueCnt", uint64(len(s.segmentList)), "CheckRoutine:ByPassSTAT") // cfg.opt.ByPassSTAT - //GCounter.IncrMax("TOTAL_dlQueueCnt", uint64(len(s.segmentList)), "CheckRoutine:ByPassSTAT") //cfg.opt.ByPassSTAT - //} - item.pushedDL++ // mark as pushed to download queue ByPassSTAT - item.mux.Unlock() - // ! TODO FIXME : use s.WorkDividerChan ? 
- s.segmentChansDowns[provider.Group] <- item // bypass STAT: GoCheckRoutine push to download queue - */ + if item == nil { + dlog(always, "CheckRoutine received a nil pointer to quit") + segCC <- nil // refill the nil so others will die too + break forGoCheckRoutine + } + timeOut = time.Now().Add(15 * time.Second) + lastGood = time.Now() + switch cfg.opt.ByPassSTAT { + case false: + code, err := s.GoCheckRoutine(wid, provider, item, sharedCC) + item.PrintItemFlags(cfg.opt.DebugFlags, true, fmt.Sprintf("post-GoCheckRoutine: code=%d", code)) + if err != nil { // re-queue? + dlog(always, "ERROR in GoCheckRoutine err='%v'", err) + } + case true: + log.Fatal("you should not be here! Quitting...") // FIXME TODO: remove this fatal error + } + processed++ + continue forGoCheckRoutine } - continue forGoCheckRoutine } // end forGoCheckRoutine - dlog(always, "CheckRoutine: wid=%d provider='%s' exiting", wid, provider.Name) - }(wid, provider, waitWorker, sharedCC, segCC) // end go func() + dlog(processed > 0, "CheckRoutine exiting (%d@'%s') processed: %d", wid, provider.Name, processed) + }(wid, provider, waitWorker, &thisWorkerWG, sharedCC, segCC) // end go func() /* new worker code DownsRoutine */ segCD := s.segmentChansDowns[provider.Group] - go func(wid int, provider *Provider, waitWorker *sync.WaitGroup, sharedCC chan *ConnItem, segCD chan *segmentChanItem) { + go func(wid int, provider *Provider, waitWorker *sync.WaitGroup, thisWorkerWG *sync.WaitGroup, sharedCC chan *ConnItem, segCD chan *segmentChanItem) { + var processed uint64 defer waitWorker.Done() + defer thisWorkerWG.Done() + timeOut := time.Now().Add(15 * time.Second) + lastGood := time.Now() + toChan := make(chan struct{}, 1) + go func() { + for { + time.Sleep(3 * time.Second) + select { + case toChan <- struct{}{}: + default: + } + } + }() forGoDownsRoutine: for { - dlog(cfg.opt.DebugWorker, "GoDownsRoutine: wid=%d provider='%s' wait on segCD len=%d", wid, provider.Name, len(segCD)) - item, ok := <-segCD - if 
!ok { - return // channel is closed, exit the routine - } - if item == nil { - if cfg.opt.DebugWorker { - log.Print("DownsRoutine received a nil pointer to quit") - } - segCD <- nil // refill the nil so others will die too - break forGoDownsRoutine - } - dlog(cfg.opt.DebugWorker, "GoDownsRoutine: wid=%d provider='%s' start seg.Id='%s'", wid, provider.Name, item.segment.Id) - - start := time.Now() - who := fmt.Sprintf("DR=%d@'%s'#'%s' seg.Id='%s'", wid, provider.Name, provider.Group, item.segment.Id) - memlim.MemLockWait(item, who) - dlog(cfg.opt.DebugWorker && cfg.opt.DebugMemlim, "GoDownsRoutine got MemCheckWait who='%s' waited=(%d ms)", who, time.Since(start).Milliseconds()) - errStr := "" - StartDowns := time.Now() - code, err := s.GoDownsRoutine(wid, provider, item, sharedCC) - item.PrintItemFlags(cfg.opt.DebugFlags, true, fmt.Sprintf("post-GoDownsRoutine: code=%d", code)) - DecreaseDLQueueCnt() - if err != nil || (code != 220 && code != 920) { - if code != 430 { - // 430 is a normal error code for GoDownsRoutine, so we don't log it as an error - errStr = fmt.Sprintf("ERROR in GoDownsRoutine code='%d' err='%v'", code, err) - dlog(always, "%s", errStr) + dlog(cfg.opt.DebugWorker, "GoDownsRoutine: (%d@'%s') wait on segCD len=%d", wid, provider.Name, len(segCD)) + select { + case <-toChan: + s.mux.RLock() + segcheckdone := s.segcheckdone // get the segcheckdone state + s.mux.RUnlock() + if !segcheckdone { + timeOut = time.Now().Add(15 * time.Second) + lastGood = time.Now() // reset lastGood if segcheck is not done + continue forGoDownsRoutine } - memlim.MemReturn("MemRetOnERR: "+errStr, item) // memfree GoDownsRoutine on error - continue forGoDownsRoutine - } - var speedInKBytes float64 - mode := "downloaded" - if item.size > 0 { - speedInKBytes = (float64(item.size) / 1024) / float64(time.Since(StartDowns).Seconds()) - } - switch code { - case 220: - // pass + if segcheckdone && time.Now().After(timeOut) { + dlog(always, "GoDownsRoutine: (%d@'%s') timeout 
reached, exiting. lastGood: %v", wid, provider.Name, time.Since(lastGood)) + break forGoDownsRoutine + } + dlog(time.Since(lastGood) > 9*time.Second && always && segcheckdone, "GoDownsRoutine: (%d@'%s') waiting on segCD len=%d timeout in: %v (lastGood: %v)", wid, provider.Name, len(segCD), time.Until(timeOut), time.Since(lastGood)) + case item, ok := <-segCD: + if !ok { + dlog(cfg.opt.DebugWorker, "GoDownsRoutine: channel closed (%d@'%s')", wid, provider.Name) + break forGoDownsRoutine + } + if item == nil { + dlog(cfg.opt.DebugWorker, "DownsRoutine received a nil pointer to quit") + segCD <- nil // refill the nil so others will die too + break forGoDownsRoutine + } + timeOut = time.Now().Add(15 * time.Second) + lastGood = time.Now() + dlog(cfg.opt.DebugWorker, "GoDownsRoutine: (%d@'%s') start seg.Id='%s'", wid, provider.Name, item.segment.Id) - case 920: // 920 is a special code for GoDownsRoutine to indicate that the item has been read from cache - mode = "cache read" - } + start := time.Now() + who := fmt.Sprintf("DR=%d@'%s'#'%s' seg.Id='%s'", wid, provider.Name, provider.Group, item.segment.Id) + memlim.MemLockWait(item, who) // gets memlock here + dlog(cfg.opt.DebugWorker && cfg.opt.DebugMemlim, "GoDownsRoutine got MemCheckWait who='%s' waited=(%d ms)", who, time.Since(start).Milliseconds()) + errStr := "" + StartDowns := time.Now() + code, err := s.GoDownsRoutine(wid, provider, item, sharedCC) + item.PrintItemFlags(cfg.opt.DebugFlags, true, fmt.Sprintf("post-GoDownsRoutine: code=%d", code)) + processed++ + DecreaseDLQueueCnt() + if err != nil || (code != 220 && code != 920) { + if code != 430 { + // 430 is a normal error code for GoDownsRoutine, so we don't log it as an error + errStr = fmt.Sprintf("ERROR in GoDownsRoutine code='%d' err='%v'. 
no retry", code, err) + dlog(always, "%s", errStr) + } + memlim.MemReturn("MemRetOnERR: "+errStr, item) // memfree GoDownsRoutine on error + continue forGoDownsRoutine + } + var speedInKBytes float64 + mode := "downloaded" + if item.size > 0 { + speedInKBytes = (float64(item.size) / 1024) / float64(time.Since(StartDowns).Seconds()) + } + switch code { + case 220: + // pass + + case 920: // 920 is a special code for GoDownsRoutine to indicate that the item has been read from cache + mode = "cache read" + // Memory was already returned in GoDownsRoutine for cache reads + } - dlog(cfg.opt.DebugWorker && cfg.opt.BUG, "GoDownsRoutine: %s (wid=%d) seg.Id='%s' @ '%s' took='%v' speedInKBytes=%.2f", mode, wid, item.segment.Id, provider.Name, time.Since(StartDowns), speedInKBytes) + dlog(cfg.opt.DebugWorker && cfg.opt.BUG, "GoDownsRoutine: segId='%s' (wid=%d) seg.Id='%s' @ '%s' took='%v' speedInKBytes=%.2f", mode, wid, item.segment.Id, provider.Name, time.Since(StartDowns), speedInKBytes) - // back to top + // back to top + } // end select } // end forGoDownsRoutine - dlog(always, "GoDownsRoutine: wid=%d provider='%s' exiting", wid, provider.Name) - }(wid, provider, waitWorker, sharedCC, segCD) // end go func() + dlog(processed > 0, "GoDownsRoutine exiting (%d@'%s') processed: %d", wid, provider.Name, processed) + }(wid, provider, waitWorker, &thisWorkerWG, sharedCC, segCD) // end go func() /* new worker code ReupsRoutine */ segCR := s.segmentChansReups[provider.Group] - go func(wid int, provider *Provider, waitWorker *sync.WaitGroup, sharedCC chan *ConnItem, segCR chan *segmentChanItem) { + go func(wid int, provider *Provider, waitWorker *sync.WaitGroup, thisWorkerWG *sync.WaitGroup, sharedCC chan *ConnItem, segCR chan *segmentChanItem) { + var processed uint64 defer waitWorker.Done() + defer thisWorkerWG.Done() + timeOut := time.Now().Add(15 * time.Second) + lastGood := time.Now() + toChan := make(chan struct{}, 1) + go func() { + for { + time.Sleep(3 * time.Second) + 
select { + case toChan <- struct{}{}: + default: + } + } + }() forGoReupsRoutine: for { - item, ok := <-segCR - if !ok { - return // channel is closed, exit the routine - } - if item == nil { - if cfg.opt.DebugWorker { - log.Print("ReupsRoutine received a nil pointer to quit") + dlog(cfg.opt.DebugWorker, "ReupsRoutine: (%d@'%s') wait on segCD len=%d", wid, provider.Name, len(segCD)) + select { + case <-toChan: + s.mux.RLock() + segcheckdone := s.segcheckdone // get the segcheckdone state + s.mux.RUnlock() + if !segcheckdone { + timeOut = time.Now().Add(15 * time.Second) + lastGood = time.Now() // reset lastGood if segcheck is not done + continue forGoReupsRoutine } - s.segmentChansReups[provider.Group] <- nil // refill the nil so others will die too - break forGoReupsRoutine - } + if segcheckdone && time.Now().After(timeOut) { + dlog(always, "ReupsRoutine: (%d@'%s') timeout reached, exiting. lastGood: %v", wid, provider.Name, time.Since(lastGood)) + break forGoReupsRoutine + } + dlog(time.Since(lastGood) > 9*time.Second, "ReupsRoutine: (%d@'%s') waiting on segCR len=%d timeout in: %v (lastgood: %v)", wid, provider.Name, len(segCR), time.Until(timeOut), time.Since(lastGood)) - // we might get an item still locked for setting flags, so we lock too and wait for upper layer to release first. 
- /* - start := time.Now() - item.mux.Lock() - if cfg.opt.DebugWorker { - dlog(cfg.opt.DebugWorker && cfg.opt.BUG, "WorkerReup: got lock (wid=%d) seg.Id='%s' @ '%s'", wid, item.segment.Id, provider.Name) + case item, ok := <-segCR: + if !ok { + dlog(cfg.opt.DebugWorker, "ReupsRoutine: channel closed (%d@'%s')", wid, provider.Name) + break forGoReupsRoutine } - item.mux.Unlock() - if cfg.opt.DebugWorker { - dlog(cfg.opt.DebugWorker && cfg.opt.BUG, "WorkerReup: unlocked (wid=%d) (waited=%d µs), process seg.Id='%s' @ '%s'", wid, time.Since(start).Microseconds(), item.segment.Id, provider.Name) - } - */ - // TODO handle memlim freemem here - StartReUps := time.Now() - code, err := s.GoReupsRoutine(wid, provider, item, sharedCC) - item.PrintItemFlags(cfg.opt.DebugFlags, true, fmt.Sprintf("post-GoReupsRoutine: code=%d", code)) - - DecreaseUPQueueCnt() - if err != nil { - errStr := fmt.Sprintf("ERROR in GoReupsRoutine code='%d' err='%v'", code, err) - dlog(always, "%s", errStr) - memlim.MemReturn("MemRetOnERR: "+errStr, item) // memfree GoReupsRoutine on error - continue forGoReupsRoutine - } - speedInKBytes := (float64(item.size) / 1024) / float64(time.Since(StartReUps).Seconds()) - dlog(cfg.opt.DebugWorker && cfg.opt.BUG, "ReupsRoutine: finished item (wid=%d) seg.Id='%s' @ '%s' took='%v' speedInKBytes=%.2f", wid, item.segment.Id, provider.Name, time.Since(StartReUps), speedInKBytes) - - memlim.MemReturn("UR", item) // memfree GoReupsRoutine on success - // back to top + if item == nil { + dlog(cfg.opt.DebugWorker, "ReupsRoutine received a nil pointer to quit") + s.segmentChansReups[provider.Group] <- nil // refill the nil so others will die too + break forGoReupsRoutine + } + timeOut = time.Now().Add(15 * time.Second) + lastGood = time.Now() + StartReUps := lastGood + code, err := s.GoReupsRoutine(wid, provider, item, sharedCC) + item.PrintItemFlags(cfg.opt.DebugFlags, true, fmt.Sprintf("post-GoReupsRoutine: code=%d", code)) + processed++ + DecreaseUPQueueCnt() + if err 
!= nil || code == 0 { + errStr := fmt.Sprintf("ERROR in GoReupsRoutine code='%d' err='%v' no retry", code, err) + dlog(always, "%s", errStr) + memlim.MemReturn("MemRetOnERR: "+errStr, item) // memfree GoReupsRoutine on error + continue forGoReupsRoutine + } + speedInKBytes := (float64(item.size) / 1024) / float64(time.Since(StartReUps).Seconds()) + dlog(cfg.opt.DebugWorker && cfg.opt.BUG, "ReupsRoutine: finished item (wid=%d) seg.Id='%s' @ '%s' took='%v' speedInKBytes=%.2f", wid, item.segment.Id, provider.Name, time.Since(StartReUps), speedInKBytes) + + memlim.MemReturn("UR", item) // memfree GoReupsRoutine on success + // back to top + } // end select } // end forGoReupsRoutine - dlog(always, "ReupsRoutine: wid=%d provider='%s' exiting", wid, provider.Name) - }(wid, provider, waitWorker, sharedCC, segCR) // end go func() + dlog(processed > 0, "ReupsRoutine exiting: (%d@'%s') processed: %d", wid, provider.Name, processed) + }(wid, provider, waitWorker, &thisWorkerWG, sharedCC, segCR) // end go func() dlog(cfg.opt.DebugWorker, "GoWorker (%d) waitWorker.Wait for routines to complete '%s'", wid, provider.Name) - waitWorker.Wait() // wait for all 3 routines to finish - dlog(cfg.opt.BUG, "GoWorker (%d) closing @ '%s'", wid, provider.Name) + thisWorkerWG.Wait() // wait for all 3 routines to finish + dlog(cfg.opt.DebugWorker, "GoWorker closing (%d@'%s')", wid, provider.Name) if sharedCC != nil { select { @@ -347,11 +421,13 @@ func (s *SESSION) GoWorker(wid int, provider *Provider, waitWorker *sync.WaitGro dlog(cfg.opt.DebugWorker, "GoWorker (%d) no sharedConn there...? @ '%s' .. 
ciao", wid, provider.Name) } } - - KillConnPool(provider) // close the connection pool for this provider - waitPool.Done() // release this GoWorker from the pool - - dlog(cfg.opt.BUG, "GoWorker (%d) quit @ '%s'", wid, provider.Name) + s.mux.Lock() + s.bootedWorkers-- + s.mux.Unlock() + dlog(cfg.opt.DebugWorker, "GoWorker (%d@'%s') done with routines, releasing pool", wid, provider.Name) + waitPool.Done() // release this GoWorker from the pool + waitWorker.Wait() + dlog(cfg.opt.DebugWorker, "GoWorker quit (%d@'%s') ", wid, provider.Name) } // end func GoWorker // matchThisDL checks if the item is a candidate for download @@ -373,6 +449,40 @@ func matchThisDL(item *segmentChanItem) bool { return (!item.flaginDL && !item.flagisDL && !item.flaginUP && !item.flagisUP && !item.flaginDLMEM) } // end func matchThisDL +func (item *segmentChanItem) FlagError(providerId int) { + log.Printf("FlagError called for providerId=%d on segment.Id='%s' waiting to lock item mutex", providerId, item.segment.Id) + item.mux.Lock() + defer item.mux.Unlock() + //item.ignoreDlOn[providerId] = true + //item.ignoreUlOn[providerId] = true + //item.errorOn[providerId] = true + if item.flaginUP { + if item.fails < 3 { + item.retryIn = int64(time.Now().Add(5 * time.Second).Second()) + } + item.flaginUP = false + } + + item.flaginDL = false + item.fails++ + delete(item.availableOn, providerId) + dlog(always, "Flagged item error, will not retry '%s' on providerId=%d", item.segment.Id, providerId) +} + +func (item *segmentChanItem) FlagDLFailed(providerList []*Provider, providerGroup string) { + item.mux.Lock() + defer item.mux.Unlock() + for id, prov := range providerList { + if prov.Group != providerGroup { + continue + } + item.ignoreDlOn[id] = true + item.missingOn[id] = true + item.errorOn[id] = true + delete(item.availableOn, id) + } +} + // matchThisUP checks if the item is a candidate for upload // it returns true if the item is a candidate for upload func matchThisUP(item *segmentChanItem) 
bool { @@ -411,7 +521,7 @@ func (s *SESSION) pushDL(allowDl bool, item *segmentChanItem) (pushed bool, nodl return false, 1, nil // not a match, item is already in DL or UP or has article } if !memlim.MemAvail() { - return + return false, 1, nil // not enough memory available to download } // if we are here, we are allowed to push the item to download queue // loop over the availableOn map and check if we can push the item to download @@ -429,6 +539,10 @@ providerDl: dlog(cfg.opt.DebugWorker, " | [DV-pushDL] (nodl) item missingOn but should be avail!? seg.Id='%s' @ #'%s'", item.segment.Id, s.providerList[pid].Group) continue providerDl } + if item.errorOn[pid] { + dlog(cfg.opt.DebugWorker, " | [DV-pushDL] (nodl) item errorOn seg.Id='%s' @ #'%s'", item.segment.Id, s.providerList[pid].Group) + continue providerDl + } if s.providerList[pid].NoDownload { nodl++ item.ignoreDlOn[pid] = true @@ -457,9 +571,9 @@ providerDl: item.pushedDL++ // mark as pushed to download queue (in pushDL) IncreaseDLQueueCnt() dlog(cfg.opt.DebugWorker, " | [DV-pushDL] pushed to dlchan seg.Id='%s' @ #'%s'", item.segment.Id, s.providerList[pid].Group) - return // return after 1st push! + return pushed, 0, nil // return after 1st push! default: - dlog(cfg.opt.BUG, "DEBUG SPAM pushDL: chan is full @ #'%s'", s.providerList[pid].Group) + dlog(cfg.opt.BUG, "DEBUG SPAM pushDL: chan is full @ #'%s', retry next", s.providerList[pid].Group) // chan is full means we cannot push the item to the download queue to this provider group // either app is blocked or we're just checking faster than we can download at all... 
//err = fmt.Errorf(" | [DV-pushDL] chans full @ '%s'#'%s'", s.providerList[pid].Name, s.providerList[pid].Group) @@ -468,19 +582,19 @@ providerDl: } // end select //} } // end for providerDl - return + return false, 1, nil } // end func pushDL func (s *SESSION) pushUP(allowUp bool, item *segmentChanItem) (pushed bool, noup uint64, inretry uint64, err error) { if !allowUp { - return + return false, 1, 0, fmt.Errorf("pushUP not allowed") } s.mux.RLock() segcheckdone := s.segcheckdone // get the segcheckdone state s.mux.RUnlock() if cfg.opt.CheckFirst && !segcheckdone { - return + return false, 1, 0, nil // still checking, do not push to upload yet } item.mux.Lock() // LOCK item for the duration of this function @@ -507,6 +621,11 @@ providerUp: dlog(cfg.opt.DebugWorker, " | [DV-pushUP] (noup) flagNoUp seg.Id='%s' @ #'%s'", item.segment.Id, s.providerList[pid].Group) continue providerUp } + if item.ignoreUlOn[pid] { + noup++ + dlog(cfg.opt.DebugWorker, " | [DV-pushUP] (noup) ignoreUlOn seg.Id='%s' @ #'%s'", item.segment.Id, s.providerList[pid].Group) + continue providerUp + } if item.uploadedTo[pid] { noup++ dlog(cfg.opt.DebugWorker, " | [DV-pushUP] (noup) uploadedTo seg.Id='%s' @ #'%s'", item.segment.Id, s.providerList[pid].Group) @@ -527,13 +646,18 @@ providerUp: dlog(cfg.opt.DebugWorker, " | [DV-pushUP] (noup) dmcaOn seg.Id='%s' @ #'%s'", item.segment.Id, s.providerList[pid].Group) continue providerUp } + if item.errorOn[pid] { + noup++ + dlog(cfg.opt.DebugWorker, " | [DV-pushUP] (noup) errorOn seg.Id='%s' @ #'%s'", item.segment.Id, s.providerList[pid].Group) + continue providerUp + } if item.retryOn[pid] { if item.retryIn > time.Now().Unix() { inretry++ - dlog(always, " | [DV-pushUP] (noup) retryOn seg.Id='%s' @ #'%s' retryIn=%d > now=%d", item.segment.Id, s.providerList[pid].Group, item.retryIn, time.Now().Unix()) + dlog(always, " | [DV-pushUP] (noup) retryOn seg.Id='%s' @ #'%s' retryIn=%d > now=%d, continue", item.segment.Id, s.providerList[pid].Group, 
item.retryIn, time.Now().Unix()) continue providerUp } else { - dlog(cfg.opt.DebugWorker, " | [DV-pushUP] (noup) retryOn seg.Id='%s' @ #'%s' retryIn=%d <= now=%d", item.segment.Id, s.providerList[pid].Group, item.retryIn, time.Now().Unix()) + dlog(cfg.opt.DebugWorker, " | [DV-pushUP] (noup) retryOn seg.Id='%s' @ #'%s' retryIn=%d <= now=%d, pass", item.segment.Id, s.providerList[pid].Group, item.retryIn, time.Now().Unix()) delete(item.retryOn, pid) // remove retryOn flag for this provider // pass } @@ -571,15 +695,10 @@ type WrappedItem struct { const testing = false // set to true to test the worker loop // GoWorkDivider is the main worker loop that processes segments func (s *SESSION) GoWorkDivider(waitDivider *sync.WaitGroup, waitDividerDone *sync.WaitGroup) { - if cfg.opt.DebugWorker { - log.Print("go GoWorkDivider() waitDivider.Wait()") - } + dlog(cfg.opt.DebugWorker, "go GoWorkDivider() waiting on waitDivider.Wait()") waitDivider.Wait() // waits for all conns to establish defer waitDividerDone.Done() - - if cfg.opt.DebugWorker { - log.Print("go GoWorkDivider() Starting!") - } + dlog(cfg.opt.DebugWorker, "go GoWorkDivider() Starting!") //segcheckdone := false closeWait, closeCase := 1, "" @@ -594,70 +713,21 @@ func (s *SESSION) GoWorkDivider(waitDivider *sync.WaitGroup, waitDividerDone *sy // strings var logstring, log00, log01, log02, log03, log04, log05, log06, log07, log08, log09, log10, log11, log99 string - /* - go func(s *SESSION) { - if !testing { - return - } - //var funcmux sync.Mutex - //inup, indl, nodl, noup, inretry := uint64(0), uint64(0), uint64(0), uint64(0), uint64(0) - //replychan := make(chan bool, 1) // internal replychan for the worker loop - for { - wrappedItem := <-s.WorkDividerChan - item := wrappedItem.wItem // unwrap the item from the channel - item.mux.RLock() - if item.checkedOn != providersCnt { - //dlog( " | [DV] WorkDividerChan debug#1 received item seg.Id='%s' checkedOn=%d", item.segment.Id, item.checkedOn) - item.mux.RUnlock() - 
continue // ignore item, will retry next run - } - item.mux.RUnlock() - if wrappedItem.src != "CR" { - dlog( " | [DV] WorkDividerChan debug#2 received item seg.Id='%s' checkedOn=%d src=%s", item.segment.Id, item.checkedOn, wrappedItem.src) - } - - switch wrappedItem.src { - - case "DR": - go func(item *segmentChanItem) { - for { - pushedUp, nNoUp, nInRetry := s.pushUP(allowUp, item) - if pushedUp { - dlog( " | [DV] PUSHED to Up seg.Id='%s' pushedUp=%t nNoUp=%d nInRetry=%d", item.segment.Id, pushedUp, nNoUp, nInRetry) - break - } - dlog( " | [DV] WorkDividerChan retrying pushUP seg.Id='%s'", item.segment.Id) - time.Sleep(1000 * time.Millisecond) // wait a bit before retrying - } - }(item) - - case "CR": - go func(item *segmentChanItem) { - for { - pushedDl, nNoDl := s.pushDL(allowDl, item) - if pushedDl { - if cfg.opt.BUG { - dlog( " | [DV] PUSHED to DL seg.Id='%s' pushedDl=%t nNoDl=%d", item.segment.Id, pushedDl, nNoDl) - } - break - } - dlog( " | [DV] WorkDividerChan retrying pushDL seg.Id='%s'", item.segment.Id) - time.Sleep(1000 * time.Millisecond) // wait a bit before retrying - } - }(item) - } // end switch wrappedItem.src - } - }(s) - */ + // loops forever over the s.segmentList and checks if there is anything to do for an item - var minsleep int64 = 10 // 0.01 second in milliseconds - var baseline int64 = 1000 - var maxsleep int64 = 6000 // 5 seconds in milliseconds + var minsleep int64 = 1000 + var baseline int64 = 1500 + var maxsleep int64 = 6000 microsleep := baseline fetchedtoDL, fetchedtoUP, backlogDL, refillUP, pushedDL, pushedUP := uint64(0), uint64(0), uint64(0), uint64(0), uint64(0), uint64(0) startLoop := time.Now() + deadWorkersDeadline := 9 + deadcounter := 10 + var capture_done, capture_segm, capture_isdl, capture_isup float64 + forever: for { + dvlastRunTook := time.Since(startLoop) pushed := pushedDL + pushedUP @@ -667,8 +737,24 @@ forever: } time.Sleep(time.Duration((dvlastRunTook.Milliseconds() * 2)) + (time.Duration(microsleep) * 
time.Millisecond)) + if deadWorkersDeadline <= 0 { + dlog(always, "GoWorkDivider: no booted workers for too long, exiting") + closeCase = "noworkers" + break forever + } + s.mux.Lock() + dlog(cfg.opt.DebugWorker, "GoWorkDivider: booted workers: %d", s.bootedWorkers) + + if s.bootedWorkers == 0 { + deadWorkersDeadline-- + dlog(always, "GoWorkDivider: no booted workers, deadWorkersDeadline=%d", deadWorkersDeadline) + } else { + deadWorkersDeadline = 9 // reset + } + s.mux.Unlock() + // uint64 - var segm, allOk, done, dead, isdl, indl, inup, isup, checked, dmca, nodl, noup, cached, inretry, inyenc, isyenc, dlQ, upQ, yeQ uint64 + var segm, allOk, done, fails, dead, isdl, indl, inup, isup, checked, dmca, nodl, noup, cached, inretry, inyenc, isyenc, dlQ, upQ, yeQ uint64 // Tnodl, Tnoup counts segments that not have been downloaded // or uploaded because provider has config flat NoDownload or NoUpload set @@ -680,6 +766,12 @@ forever: for _, item := range s.segmentList { item.mux.PrintStatus(false) + if deadcounter <= 0 { + dlog(always, "GoWorkDivider: counters not increasing for too long, exiting") + closeCase = "noProgress" + break forever + } + item.mux.RLock() // RLOCKS HERE #824d if item.flagCache { cached++ @@ -716,6 +808,9 @@ forever: inup++ doContinue = true } + if item.fails >= 3 { + fails++ + } if len(item.availableOn) > 0 { segm++ // counts overall availability of segments @@ -744,39 +839,40 @@ forever: } item.mux.RUnlock() // RUNLOCKS HERE #824d - if !testing { - pushedUp, nNoUp, nInRetry, a1err := s.pushUP(allowUp, item) - if a1err != nil { - dlog(always, "ERROR pushUP err='%v' (seg.Id='%s')", a1err, item.segment.Id) + pushedUp, nNoUp, nInRetry, a1err := s.pushUP(allowUp, item) + if a1err != nil { + dlog(cfg.opt.DebugWorker, "ERROR pushUP err='%v' (seg.Id='%s')", a1err, item.segment.Id) + continue forsegmentList + } + if pushedUp { + inup++ + dlog(cfg.opt.DebugWorker, " | [DV] PUSHEDup seg.Id='%s' pushedUp=%t inup=%d", item.segment.Id, pushedUp, inup) + } + 
noup += nNoUp + inretry += nInRetry + + if !pushedUp && allowDl { + pushedDl, nNoDl, b1err := s.pushDL(allowDl, item) + if b1err != nil { + dlog(always, "ERROR pushDL err='%v' (seg.Id='%s')", b1err, item.segment.Id) continue forsegmentList } - if pushedUp { - inup++ - dlog(cfg.opt.DebugWorker, " | [DV] PUSHEDup seg.Id='%s' pushedUp=%t inup=%d", item.segment.Id, pushedUp, inup) + nodl += nNoDl + Tnodl += uint64(len(item.ignoreDlOn)) + if pushedDl { + indl++ + //if cfg.opt.BUG { + dlog(cfg.opt.DebugWorker, " | [DV] PUSHEDdl seg.Id='%s' pushedDl=%t indl=%d", item.segment.Id, pushedDl, indl) + //} } - noup += nNoUp - //Tnoup += len(item.ignoreDlOn) - inretry += nInRetry - - if !pushedUp && allowDl { - pushedDl, nNoDl, b1err := s.pushDL(allowDl, item) - if b1err != nil { - dlog(always, "ERROR pushDL err='%v' (seg.Id='%s')", b1err, item.segment.Id) - continue forsegmentList - } - nodl += nNoDl - Tnodl += uint64(len(item.ignoreDlOn)) - if pushedDl { - indl++ - //if cfg.opt.BUG { - dlog(cfg.opt.DebugWorker, " | [DV] PUSHEDdl seg.Id='%s' pushedDl=%t indl=%d", item.segment.Id, pushedDl, indl) - //} - } - } // end pushDL - } // if !testing { + } // end pushDL } // end for forsegmentList //dlog(cfg.opt.DebugWorker, " | [DV] lastRunTook='%d ms' '%v", lastRunTook.Milliseconds(), lastRunTook) + s.mux.RLock() + checkFeedDone := s.checkFeedDone // read value of s.checkDone + segcheckdone := s.segcheckdone // read value of s.segcheckdone + s.mux.RUnlock() // part to print stats begins here upQ = GCounter.GetValue("upQueueCnt") @@ -797,9 +893,37 @@ forever: isup_perc := float64(isup) / float64(todo) * 100 dmca_perc := float64(dmca) / float64(todo) * 100 yenc_perc := float64(isyenc) / float64(todo) * 100 - used_slots, max_slots := memlim.Usage() + if segm_perc > capture_segm { + capture_segm = segm_perc + deadcounter = 10 // reset deadcounter + } else if segm_perc < 100 && segm_perc == capture_segm { + deadcounter-- + dlog(always, "GoWorkDivider: segm_perc not increasing, 
deadcounter=%d", deadcounter) + + } else if segcheckdone && done_perc > capture_done { + capture_done = done_perc + deadcounter = 10 // reset deadcounter + } else if segcheckdone && done_perc < 100 && done_perc == capture_done { + deadcounter-- + dlog(always, "GoWorkDivider: done_perc not increasing, deadcounter=%d", deadcounter) + + } else if segcheckdone && isdl_perc > capture_isdl { + capture_isdl = isdl_perc + deadcounter = 10 // reset deadcounter + } else if segcheckdone && isdl_perc < 100 && isdl_perc == capture_isdl { + deadcounter-- + dlog(always, "GoWorkDivider: isdl_perc not increasing, deadcounter=%d", deadcounter) + + } else if segcheckdone && isup_perc > capture_isup { + capture_isup = isup_perc + deadcounter = 10 // reset deadcounter + } else if segcheckdone && isup_perc < 100 && isup_perc == capture_isup { + deadcounter-- + dlog(always, "GoWorkDivider: isup_perc not increasing, deadcounter=%d", deadcounter) + } + //log00 = fmt.Sprintf(" TODO: %d ", todo) if (cfg.opt.CheckFirst || cfg.opt.CheckOnly) || (checked > 0 && checked != segm && checked != done) { @@ -841,8 +965,8 @@ forever: if dmca > 0 { log06 = fmt.Sprintf(" | DMCA:[%03.3f%%] (%"+s.D+"d)", dmca_perc, dmca) } - if inretry > 0 { - log07 = fmt.Sprintf(" | ERRS:(%"+s.D+"d)", inretry) + if fails > 0 || inretry > 0 { + log07 = fmt.Sprintf(" | ERRS:[%d] (inRetry: %"+s.D+"d)", fails, inretry) } if indl > 0 || isdl > 0 || dlQ > 0 || TdlQ > 0 { log08 = fmt.Sprintf(" | DL:[%03.3f%%] (%"+s.D+"d / %"+s.D+"d Q:%d=%d)", isdl_perc, isdl, TdlQ, dlQ, indl) @@ -887,23 +1011,18 @@ forever: logstring = log00 + log01 + log02 + log03 + log04 + log05 + log06 + log07 + log08 + log09 + log10 + log11 + log99 if cfg.opt.Verbose && cfg.opt.PrintStats >= 0 && logstring != "" && nextLogPrint < time.Now().Unix() { nextLogPrint = time.Now().Unix() + cfg.opt.PrintStats - log.Print(logstring) + dlog(always, "%s", logstring) } } // print some stats ends here - s.mux.RLock() - checkDone := s.checkFeedDone // read value of 
s.checkDone - segcheckdone := s.segcheckdone // read value of s.segcheckdone - s.mux.RUnlock() - - if !segcheckdone && (checked == todo && checkDone) { + if !segcheckdone && checked == todo && checkFeedDone { s.mux.Lock() s.segmentCheckEndTime = time.Now() took := time.Since(s.segmentCheckStartTime) s.segmentCheckTook = took s.segcheckdone = true s.mux.Unlock() - dlog(cfg.opt.Verbose, " | [DV] | Segment Check Done: took='%.1f sec'", took.Seconds()) + dlog(always, " | Segment Check Done: took='%.1f sec'", took.Seconds()) } // continue as long as any of this triggers because stuff is still in queues and processing @@ -923,47 +1042,47 @@ forever: // figure out if all jobs are done globalmux.RLock() - closeCase0 := (done == todo) + closeCase0 := (done == todo || fails == todo || fails+done == todo) closeCase1 := (cfg.opt.CheckOnly) closeCase2 := (cacheON && (GCounter.GetValue("postProviders") == 0 && cached == todo)) closeCase3 := (cacheON && (dead+cached == todo && dead+isup == todo)) closeCase4 := (isup == todo) - closeCase5 := (dead+isup == todo) - closeCase6 := (dead+done == todo) + closeCase5 := (dead+isup == todo || dead+isup+fails == todo) + closeCase6 := (dead+done == todo || dead+done+fails == todo) closeCase7 := false // placeholder globalmux.RUnlock() if closeCase0 { closeWait-- - closeCase = closeCase + "|Debug#0@" + fmt.Sprintf("%d", time.Now().Unix()) + closeCase = closeCase + "|Debug#0" } else if closeCase1 { closeWait-- - closeCase = closeCase + "|Debug#1@" + fmt.Sprintf("%d", time.Now().Unix()) + closeCase = closeCase + "|Debug#1" } else if closeCase2 { closeWait-- - closeCase = closeCase + "|Debug#2@" + fmt.Sprintf("%d", time.Now().Unix()) + closeCase = closeCase + "|Debug#2" } else if closeCase3 { closeWait-- - closeCase = closeCase + "|Debug#3@" + fmt.Sprintf("%d", time.Now().Unix()) + closeCase = closeCase + "|Debug#3" } else if closeCase4 { closeWait-- - closeCase = closeCase + "|Debug#4@" + fmt.Sprintf("%d", time.Now().Unix()) + closeCase = 
closeCase + "|Debug#4" } else if closeCase5 { closeWait-- - closeCase = closeCase + "|Debug#5@" + fmt.Sprintf("%d", time.Now().Unix()) + closeCase = closeCase + "|Debug#5" } else if closeCase6 { closeWait-- - closeCase = closeCase + "|Debug#6@" + fmt.Sprintf("%d", time.Now().Unix()) + closeCase = closeCase + "|Debug#6" } else if closeCase7 { closeWait-- - closeCase = closeCase + "|Debug#7@" + fmt.Sprintf("%d", time.Now().Unix()) + closeCase = closeCase + "|Debug#7" } else { if closeWait >= 10 { @@ -974,7 +1093,7 @@ forever: } closeWait++ if closeWait >= 15 { - log.Print("... force quit ...") + dlog(always, "... force quit ...") return } closeCase = "" diff --git a/build_windows.bat b/build_windows.bat new file mode 100644 index 0000000..d4c5452 --- /dev/null +++ b/build_windows.bat @@ -0,0 +1,93 @@ +@echo off +REM Windows build script for NZBreX +REM Requires: Git, Go 1.19+, MinGW-w64, CMake + +echo Building NZBreX for Windows... + +REM Check if Go is available +go version >nul 2>&1 +if errorlevel 1 ( + echo ERROR: Go not found. Please install Go 1.24.3 or later. + echo Download from: https://golang.org/dl/ + pause + exit /b 1 +) + +REM Check if CMake is available +cmake --version >nul 2>&1 +if errorlevel 1 ( + echo ERROR: CMake not found. Please install CMake. + echo Download from: https://cmake.org/download/ + pause + exit /b 1 +) + +REM Check if we're in the right directory +if not exist "rapidyenc" ( + echo ERROR: rapidyenc directory not found. Please run this script from the NZBreX root directory. + pause + exit /b 1 +) + +echo Building rapidyenc library for Windows... +cd rapidyenc +if not exist "rapidyenc" ( + echo ERROR: rapidyenc submodule not initialized. Please run: git submodule update --init --recursive + pause + exit /b 1 +) + +REM Build rapidyenc +rmdir /s /q rapidyenc\build 2>nul +mkdir rapidyenc\build +cd rapidyenc\build + +REM Try to configure with MinGW Makefiles generator +cmake .. 
-G "MinGW Makefiles" +if errorlevel 1 ( + REM Try Ninja generator if MinGW Makefiles fails + cmake .. -G "Ninja" + if errorlevel 1 ( + REM Try MSYS Makefiles generator if Ninja fails + cmake .. -G "MSYS Makefiles" + if errorlevel 1 ( + echo ERROR: CMake configuration failed with all known generators. + echo Please ensure MinGW-w64, Ninja, or MSYS2 are properly installed. + echo You may need to install MSYS2 and MinGW-w64 from: https://www.msys2.org/ + echo Or install Ninja from: https://ninja-build.org/ + pause + cd ..\..\..\ + exit /b 1 + ) + ) +) + +cmake --build . --config Release +if errorlevel 1 ( + echo ERROR: Failed to build rapidyenc library. + pause + cd ..\..\..\ + exit /b 1 +) + +REM Copy library files +copy rapidyenc_static\librapidyenc.a ..\..\librapidyenc.a +cd ..\..\..\ + +echo rapidyenc library built successfully. + +echo Building NZBreX executable... +set CGO_ENABLED=1 +go build -o NZBreX_ry.exe -tags "windows rapidyenc" . +if errorlevel 1 ( + echo ERROR: Failed to build NZBreX executable. + pause + exit /b 1 +) + +echo. +echo Build completed successfully! +echo Executable: NZBreX_ry.exe +echo. 
+echo To test the build, run: NZBreX_ry.exe -version +pause diff --git a/go.mod b/go.mod index da33cc5..bf76bc1 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,11 @@ module github.com/go-while/NZBreX -go 1.24.3 +go 1.25.3 require ( github.com/Tensai75/nzbparser v0.1.0 github.com/go-while/go-cpu-mem-profiler v0.0.0-20240612221627-856954a5fc83 + golang.org/x/crypto v0.43.0 ) require ( @@ -14,10 +15,10 @@ require ( ) require ( - github.com/Tensai75/subjectparser v0.1.0 // indirect + github.com/Tensai75/subjectparser v0.1.1 // indirect github.com/go-while/go-loggedrwmutex v0.0.0-20250601032232-b0aa20cdcb2c github.com/gorilla/mux v1.8.1 // indirect github.com/stretchr/testify v1.10.0 - golang.org/x/net v0.40.0 // indirect - golang.org/x/text v0.25.0 + golang.org/x/net v0.46.0 // indirect + golang.org/x/text v0.30.0 ) diff --git a/go.sum b/go.sum index 76db6a2..aa98f0d 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ github.com/Tensai75/nzbparser v0.1.0 h1:6RppAuWFahqu/kKjWO5Br0xuEYcxGz+XBTxYc+qvPo4= github.com/Tensai75/nzbparser v0.1.0/go.mod h1:IUIIaeGaYp2dLAAF29BWYeKTfI4COvXaeQAzQiTOfMY= -github.com/Tensai75/subjectparser v0.1.0 h1:6fEWnRov8lDHxJS2EWqY6VonwYfrIRN+k8h8H7fFwHA= -github.com/Tensai75/subjectparser v0.1.0/go.mod h1:PNBFBnkOGbVDfX+56ZmC4GKSpqoRMCF1Y44xYd7NLGI= +github.com/Tensai75/subjectparser v0.1.1 h1:SAlaEKUmaalt4QH+UFBzEP6iHqA994iouFqvFPSM9y0= +github.com/Tensai75/subjectparser v0.1.1/go.mod h1:PNBFBnkOGbVDfX+56ZmC4GKSpqoRMCF1Y44xYd7NLGI= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-while/go-cpu-mem-profiler v0.0.0-20240612221627-856954a5fc83 h1:vehfiL7LsK8bJQZdVuJxcMem4AP2HwPEQ8orUUnIA+E= @@ -14,10 +14,12 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.10.0 
h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= -golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= -golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/grep.sh b/grep.sh new file mode 100755 index 0000000..894d4bf --- /dev/null +++ b/grep.sh @@ -0,0 +1,3 @@ +#!/bin/bash +# tool to search for strings in GO source files +find . -iname "*.go" -exec grep -in "$1" {} + diff --git a/local_build_linux-amd64.sh b/local_build_linux-amd64.sh index dae80ff..1878474 100755 --- a/local_build_linux-amd64.sh +++ b/local_build_linux-amd64.sh @@ -1,7 +1,10 @@ -rm -rf rapidyenc/rapidyenc/build -mkdir -p rapidyenc/rapidyenc/build -cd rapidyenc && ./build_rapidyenc_linux-amd64.sh && cd ../ +#!/bin/bash +if [ "$1" != "quick" ]; then + rm -rf rapidyenc/rapidyenc/build + mkdir -p rapidyenc/rapidyenc/build + cd rapidyenc && ./build_rapidyenc_linux-amd64.sh && cd ../ +fi export GOOS=linux export GOARCH=amd64 -go build -o NZBreX -tags other . 
+go build -race -o NZBreX -tags other . && echo "built ok" exit $? diff --git a/local_crossbuild_windows-amd64.sh b/local_crossbuild_windows-amd64.sh index 571a25e..5116874 100755 --- a/local_crossbuild_windows-amd64.sh +++ b/local_crossbuild_windows-amd64.sh @@ -1,11 +1,82 @@ +#!/usr/bin/env bash + +# Windows cross-compilation script for NZBreX +# Requires: gcc-mingw-w64-x86-64 g++-mingw-w64-x86-64 cmake + +set -e # Exit on any error + +echo "Building NZBreX for Windows (amd64)..." + +# Check if required tools are available +if ! command -v x86_64-w64-mingw32-gcc &> /dev/null; then + echo "ERROR: x86_64-w64-mingw32-gcc not found. Please install MinGW-w64:" + echo " Ubuntu/Debian: sudo apt install gcc-mingw-w64-x86-64 g++-mingw-w64-x86-64" + echo " CentOS/RHEL: sudo yum install mingw64-gcc mingw64-gcc-c++" + exit 1 +fi + +if ! command -v cmake &> /dev/null; then + echo "ERROR: cmake not found. Please install cmake:" + echo " Ubuntu/Debian: sudo apt install cmake" + echo " CentOS/RHEL: sudo yum install cmake" + exit 1 +fi + +# Build rapidyenc for Windows +echo "Building rapidyenc library for Windows..." rm -rf rapidyenc/rapidyenc/build mkdir -p rapidyenc/rapidyenc/build -cd rapidyenc && ./crossbuild_rapidyenc_windows-amd64.sh && cd ../ +cd rapidyenc +if ! ./crossbuild_rapidyenc_windows-amd64.sh; then + echo "ERROR: Failed to build rapidyenc library for Windows" + exit 1 +fi +cd ../ +# Check if rapidyenc library was built successfully +if [ ! -f "rapidyenc/librapidyenc_windows_amd64.a" ]; then + echo "ERROR: rapidyenc static library not found at rapidyenc/librapidyenc_windows_amd64.a" + exit 1 +fi + +echo "rapidyenc library built successfully" + +# Set up cross-compilation environment export GOOS=windows export GOARCH=amd64 export CGO_ENABLED=1 export CC=x86_64-w64-mingw32-gcc -go build -o NZBreX_ry.exe -tags "windows rapidyenc" . -exit $? +echo "Cross-compiling Go application for Windows..." 
+# Build with static linking to avoid dependency on MinGW runtime DLLs +# -static: Link all libraries statically (including libgcc, libstdc++, winpthread) +# -static-libgcc: Static link libgcc only +# -static-libstdc++: Static link libstdc++ only +if ! go build -o NZBreX.exe -ldflags "-linkmode external -extldflags '-static -static-libgcc -static-libstdc++'" -tags "windows rapidyenc" .; then + echo "ERROR: Failed to build Windows executable" + exit 1 +fi + +# Verify the executable was created +if [ ! -f "NZBreX.exe" ]; then + echo "ERROR: Windows executable not created" + exit 1 +fi + +echo "Windows executable built successfully: NZBreX.exe" + +# Check dependencies (optional, for debugging) +if command -v x86_64-w64-mingw32-objdump &> /dev/null; then + echo "DLL dependencies:" + if ! x86_64-w64-mingw32-objdump -p "NZBreX.exe" > objdump_output.txt; then + echo " ERROR: objdump failed to analyze NZBreX.exe" + elif ! grep -q "DLL Name" objdump_output.txt; then + echo " (none found)" + else + grep "DLL Name" objdump_output.txt + fi + rm -f objdump_output.txt +fi + +echo "Build completed successfully!" +exit 0 diff --git a/local_crossbuild_windows-amd64_old.sh b/local_crossbuild_windows-amd64_old.sh new file mode 100755 index 0000000..571a25e --- /dev/null +++ b/local_crossbuild_windows-amd64_old.sh @@ -0,0 +1,11 @@ +rm -rf rapidyenc/rapidyenc/build +mkdir -p rapidyenc/rapidyenc/build +cd rapidyenc && ./crossbuild_rapidyenc_windows-amd64.sh && cd ../ + +export GOOS=windows +export GOARCH=amd64 +export CGO_ENABLED=1 +export CC=x86_64-w64-mingw32-gcc + +go build -o NZBreX_ry.exe -tags "windows rapidyenc" . +exit $? 
diff --git a/main.go b/main.go index b63d0a8..c9e042c 100644 --- a/main.go +++ b/main.go @@ -55,6 +55,7 @@ var ( nzbfile string // flag testmode bool // flag: used to test compilation testrapidyenc bool // flag: used to test rapidyenc decoder + proxy bool // flag: used to enable proxy server ) func init() { @@ -75,9 +76,8 @@ func main() { wg := new(sync.WaitGroup) thisProcessor := &PROCESSOR{} go GoMutexStatus() - if err := thisProcessor.NewProcessor(); err != nil { - dlog(always, "ERROR NewProcessor: err='%v'", err) + dlog(always, "ERROR NewProcessor: proxy err='%v'", err) os.Exit(1) } else { if cfg.opt.NzbDir != "" { @@ -86,8 +86,32 @@ func main() { select {} // infinite wait! } } - - if cfg.opt.NzbDir == "" && nzbfile != "" { + if proxy { + // boot proxy server + wg.Add(1) // waitSession + go func(wg *sync.WaitGroup) { + globalmux.Lock() + // set up a session as like for nzb files + // but only join launch the session to load the providerlist + // to establish connections to the providers and have pools running + dlog(cfg.opt.Debug, "pre:thisProcessor.LaunchSession: proxy server") + s := &SESSION{ + proc: thisProcessor, + mux: &loggedrwmutex.LoggedSyncRWMutex{Name: "PROXYSESSION"}, + proxy: true, + } + ProxyParent = s // set global proxy session so proxy has access to provider list and pools + go GoCliRxTxCounter() + globalmux.Unlock() + go StartProxyServers(cfg.opt) + if err := thisProcessor.LaunchSession(s, "", wg); err != nil { + dlog(always, "ERROR NewProcessor Boot Proxy Server: err='%v'", err) + os.Exit(1) + } + }(wg) + dlog(cfg.opt.Debug, "main: wg.Wait()") + wg.Wait() + } else if cfg.opt.NzbDir == "" && nzbfile != "" { wg.Add(1) // waitSession go func(nzbfile string, wg *sync.WaitGroup) { dlog(cfg.opt.Debug, "pre:thisProcessor.LaunchSession: nzbfile='%s'", nzbfile) diff --git a/rapidyenc/build_rapidyenc_linux-amd64.sh b/rapidyenc/build_rapidyenc_linux-amd64.sh index 234b8d4..cc46140 100755 --- a/rapidyenc/build_rapidyenc_linux-amd64.sh +++ 
b/rapidyenc/build_rapidyenc_linux-amd64.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +echo "run: $0 $1" cd rapidyenc || exit 2 rm -rf build mkdir -p build @@ -7,5 +8,11 @@ cmake .. || exit 4 cmake --build . --config Release || exit 5 ls . rapidyenc_static/ cd ../../ || exit 6 -cp -v rapidyenc/build/rapidyenc_static/librapidyenc.a . || exit 7 +if [ "$1" = "darwin" ]; then + cp -v rapidyenc/build/rapidyenc_static/librapidyenc.a ./librapidyenc_darwin.a || exit 7 + ln -sfv ./librapidyenc_darwin.a ./librapidyenc.a +else + cp -v rapidyenc/build/rapidyenc_static/librapidyenc.a ./librapidyenc_linux_amd64.a || exit 7 + ln -sfv ./librapidyenc_linux_amd64.a ./librapidyenc.a +fi #rm -rf rapidyenc/build diff --git a/rapidyenc/build_rapidyenc_linux-arm64.sh b/rapidyenc/build_rapidyenc_linux-arm64.sh index 19a453f..d9b03f7 100755 --- a/rapidyenc/build_rapidyenc_linux-arm64.sh +++ b/rapidyenc/build_rapidyenc_linux-arm64.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +echo "run: $0 $1" cd rapidyenc || exit 2 rm -rf build mkdir -p build @@ -7,5 +8,11 @@ cmake .. -DCMAKE_TOOLCHAIN_FILE=../toolchain-linux-arm64.cmake || exit 4 cmake --build . --config Release || exit 5 ls . rapidyenc_static/ cd ../../ || exit 6 -cp -v rapidyenc/build/rapidyenc_static/librapidyenc.a . 
|| exit 7 +if [ "$1" = "darwin" ]; then + cp -v rapidyenc/build/rapidyenc_static/librapidyenc.a ./librapidyenc_darwin.a || exit 7 + ln -sfv ./librapidyenc_darwin.a ./librapidyenc.a +else + cp -v rapidyenc/build/rapidyenc_static/librapidyenc.a ./librapidyenc_linux_arm64.a || exit 7 + ln -sfv ./librapidyenc_linux_arm64.a ./librapidyenc.a +fi #rm -rf rapidyenc/build diff --git a/rapidyenc/clone_rapidyenc-org.sh b/rapidyenc/clone_rapidyenc-org.sh new file mode 100755 index 0000000..b80d71d --- /dev/null +++ b/rapidyenc/clone_rapidyenc-org.sh @@ -0,0 +1 @@ +git clone https://github.com/animetosho/rapidyenc.git || exit 1 diff --git a/rapidyenc/clone_rapidyenc.sh b/rapidyenc/clone_rapidyenc.sh index b80d71d..fefd3d5 100755 --- a/rapidyenc/clone_rapidyenc.sh +++ b/rapidyenc/clone_rapidyenc.sh @@ -1 +1 @@ -git clone https://github.com/animetosho/rapidyenc.git || exit 1 +# test ! -d rapidyenc && git clone https://github.com/animetosho/rapidyenc.git diff --git a/rapidyenc/crossbuild_rapidyenc_darwin-amd64.sh b/rapidyenc/crossbuild_rapidyenc_darwin-amd64.sh index 42a6b30..ee39767 100755 --- a/rapidyenc/crossbuild_rapidyenc_darwin-amd64.sh +++ b/rapidyenc/crossbuild_rapidyenc_darwin-amd64.sh @@ -7,6 +7,6 @@ cmake .. -DCMAKE_TOOLCHAIN_FILE=../../toolchain-darwin.cmake || exit 4 cmake --build . --config Release || exit 5 ls . rapidyenc_static/ cd ../../ || exit 6 -cp -v rapidyenc/build/rapidyenc_static/librapidyenc.a . || exit 7 +cp -v rapidyenc/build/rapidyenc_static/librapidyenc.a ./librapidyenc_darwin.a || exit 7 cp -v rapidyenc/build/librapidyenc.dylib . || exit 8 #rm -rf rapidyenc/build diff --git a/rapidyenc/crossbuild_rapidyenc_windows-amd64.sh b/rapidyenc/crossbuild_rapidyenc_windows-amd64.sh index 286bf0c..9150026 100755 --- a/rapidyenc/crossbuild_rapidyenc_windows-amd64.sh +++ b/rapidyenc/crossbuild_rapidyenc_windows-amd64.sh @@ -7,6 +7,7 @@ cmake .. -DCMAKE_TOOLCHAIN_FILE=../../toolchain-mingw64.cmake || exit 4 cmake --build . --config Release || exit 5 ls . 
rapidyenc_static/ cd ../../ || exit 6 -cp -v rapidyenc/build/rapidyenc_static/librapidyenc.a . || exit 7 +cp -v rapidyenc/build/rapidyenc_static/librapidyenc.a ./librapidyenc_windows_amd64.a || exit 7 +ln -sfv ./librapidyenc_windows_amd64.a ./librapidyenc.a cp -v rapidyenc/build/librapidyenc.dll . || exit 8 #rm -rf rapidyenc/build diff --git a/rapidyenc/decoder.go b/rapidyenc/decoder.go index 5b86f5d..a2a3fb1 100644 --- a/rapidyenc/decoder.go +++ b/rapidyenc/decoder.go @@ -2,14 +2,10 @@ package rapidyenc /* #cgo CFLAGS: -I${SRCDIR}/src -#cgo darwin LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo windows,amd64 LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo windows,386 LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo windows,arm LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo linux,amd64 LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo linux,386 LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo linux,arm LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo linux,arm64 LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ +#cgo darwin LDFLAGS: ${SRCDIR}/librapidyenc_darwin.a -lstdc++ +#cgo windows,amd64 LDFLAGS: ${SRCDIR}/librapidyenc_windows_amd64.a -lstdc++ -static-libstdc++ -static-libgcc +#cgo linux,amd64 LDFLAGS: ${SRCDIR}/librapidyenc_linux_amd64.a -lstdc++ +#cgo linux,arm64 LDFLAGS: ${SRCDIR}/librapidyenc_linux_arm64.a -lstdc++ #include "rapidyenc.h" */ import "C" @@ -30,6 +26,8 @@ import ( "golang.org/x/text/transform" ) +/* compile test ... github actions is using cached files... 
*/ + const constBufSize = 4096 // const buffer size for Decoder instances var ( @@ -259,7 +257,32 @@ func (d *Decoder) Read(p []byte) (int, error) { // It then incrementally decodes chunks of yEnc encoded data before returning to line-by-line processing // for the footer and EOF pattern (.\r\n) func (d *Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { -transform: +loop: + if d.body && d.format == FormatYenc { + nd, ns, end, _ := DecodeIncremental(dst[nDst:], src[nSrc:], &d.State) + if nd > 0 { + d.hash.Write(dst[nDst : nDst+nd]) + d.actualSize += int64(nd) + nDst += nd + } + + switch end { + case EndControl: + nSrc += ns - 2 + d.body = false + case EndArticle: + nSrc += ns - 3 + d.body = false + default: + if d.State == StateCRLFEQ { + d.State = StateCRLF + nSrc += ns - 1 + } else { + nSrc += ns + } + return nDst, nSrc, transform.ErrShortSrc + } + } // Line by line processing for { if nSrc < 0 { @@ -270,23 +293,23 @@ transform: d.m.Hash = d.hash.Sum32() if !d.begin { switch d.format { - case FormatUnknown: - err = fmt.Errorf("[rapidyenc] FormatUnknown end of article without finding any *begin header: %w", ErrDataMissing) case FormatYenc: err = fmt.Errorf("[rapidyenc] FormatYenc end of article without finding \"=ybegin\" header: %w", ErrDataMissing) case FormatUU: - err = fmt.Errorf("[rapidyenc] FormatUU end of article without finding \"begin\" header: %w", ErrDataCorruption) + err = fmt.Errorf("[rapidyenc] FormatUU end of article without finding \"begin\" header: %w", ErrDataMissing) + default: + err = fmt.Errorf("[rapidyenc] FormatUnknown end of article without finding any *begin header: %w", ErrDataMissing) } } else if !d.end { switch d.format { - case FormatUnknown: - err = fmt.Errorf("[rapidyenc] FormatUnknown end of article without finding any *end header: %w", ErrDataMissing) case FormatYenc: err = fmt.Errorf("[rapidyenc] FormatYenc end of article without finding \"=yend\" trailer: %w", ErrDataMissing) case FormatUU: err = 
fmt.Errorf("[rapidyenc] FormatUU end of article without finding \"end\" trailer: %w", ErrDataCorruption) + default: + err = fmt.Errorf("[rapidyenc] FormatUnknown end of article without finding any *end header: %w", ErrDataMissing) } - } else if (d.format != FormatUU && d.format != FormatUnknown) && ((!d.part && d.m.Size != d.endSize) || (d.endSize != d.actualSize)) { + } else if d.format == FormatYenc && ((!d.part && d.m.Size != d.endSize) || (d.endSize != d.actualSize)) { err = fmt.Errorf("[rapidyenc] expected size %d but got %d: %w", d.m.Size, d.actualSize, ErrDataCorruption) } else if d.format == FormatYenc && d.crc && d.expectedCrc != d.m.Hash { // If we have a segment ID, use it for debugging otherwise use an empty string. @@ -311,59 +334,20 @@ transform: switch d.format { case FormatYenc: - // Header/trailer lines - if bytes.HasPrefix(line, []byte("=ybegin ")) || - bytes.HasPrefix(line, []byte("=ypart ")) || - bytes.HasPrefix(line, []byte("=yend ")) { - d.processYenc(line) - goto transform - } - // If we're in the body, decode this line - if d.body { - // Remove trailing \r\n for decoding - bodyLine := line - if len(bodyLine) >= 2 && bodyLine[len(bodyLine)-2] == '\r' && bodyLine[len(bodyLine)-1] == '\n' { - bodyLine = bodyLine[:len(bodyLine)-2] - } - - dlog(d.debugSpam, "DecodeIncremental input: %q\n", bodyLine) - nd, ns, end, derr := DecodeIncremental(dst[nDst:], bodyLine, &d.State) - if derr != nil && derr != io.EOF { - dlog(always, "ERROR in rapidyenc.DecodeIncremental: nd=%d ns=%d end=%v err=%v\n", nd, ns, end, derr) - } - - if nd > 0 { - d.hash.Write(dst[nDst : nDst+nd]) - d.actualSize += int64(nd) - nDst += nd - } - goto transform - } - + d.processYenc(line) + goto loop case FormatUU: - if bytes.HasPrefix(line, []byte("begin ")) || bytes.Equal(line, []byte("end\r\n")) { - d.processYenc(line) - goto transform - } - if d.body { - bodyLine := line - if len(bodyLine) >= 2 && bodyLine[len(bodyLine)-2] == '\r' && bodyLine[len(bodyLine)-1] == '\n' { - bodyLine 
= bodyLine[:len(bodyLine)-2] - } - // Decode the UUencoded line - decoded, err := UUdecode(bodyLine) + d.processYenc(line) // Process UU headers (begin/end) + if d.body && !bytes.HasPrefix(line, []byte("begin ")) && !bytes.HasPrefix(line, []byte("end")) { + // Decode UU data line + decoded, err := UUdecode(line) if err != nil { - d.err = fmt.Errorf("[rapidyenc] error decoding UUencoded line: %w", err) - return nDst, nSrc, d.err + return nDst, nSrc, fmt.Errorf("[rapidyenc] UUdecode error: %w", err) } - if len(decoded) > 0 { - d.hash.Write(decoded) - d.actualSize += int64(len(decoded)) - if nDst+len(decoded) > len(dst) { - d.err = fmt.Errorf("[rapidyenc] destination buffer too small for UUencoded data: %w", errDestinationTooSmall) - return nDst, nSrc, d.err - } + if decoded != nil { nDst += copy(dst[nDst:], decoded) + d.actualSize += int64(len(decoded)) + d.hash.Write(decoded) } } } @@ -374,12 +358,16 @@ transform: // ! REVIEW ! not sure if we even get here since the formatUnknown is checked above if !d.begin || !d.end { switch d.format { - case FormatUnknown: - return nDst, nSrc, fmt.Errorf("[rapidyenc] FormatUnknown end of article without finding any *begin or *end header: %w", ErrDataMissing) case FormatYenc: return nDst, nSrc, fmt.Errorf("[rapidyenc] FormatYenc end of article without finding \"=ybegin\" or \"=yend\" header: %w", ErrDataMissing) case FormatUU: - return nDst, nSrc, fmt.Errorf("[rapidyenc] FormatUU end of article without finding \"begin\" or \"end\" header: %w", ErrDataMissing) + if !d.begin { + return nDst, nSrc, fmt.Errorf("[rapidyenc] FormatUU end of article without finding \"begin\" header: %w", ErrDataMissing) + } else { + return nDst, nSrc, fmt.Errorf("[rapidyenc] FormatUU end of article without finding \"end\" trailer: %w", ErrDataCorruption) + } + default: + return nDst, nSrc, fmt.Errorf("[rapidyenc] FormatUnknown end of article without finding any *begin or *end header: %w", ErrDataMissing) } } return nDst, nSrc, io.EOF diff --git 
a/rapidyenc/decoder_test.go b/rapidyenc/decoder_test.go index 49a8ca4..1972059 100644 --- a/rapidyenc/decoder_test.go +++ b/rapidyenc/decoder_test.go @@ -519,6 +519,40 @@ func RapidyencDecoderFilesTest(t *testing.T) (errs []error) { return errs } +// GenerateTestUUEncodedFiles creates uuencode/test1.uue and uuencode/test2.uue with test content. +func GenerateTestUUEncodedFiles(t *testing.T) error { + _ = os.MkdirAll("uuencode", 0755) + t.Logf("Generating test UUencoded files...") + // Create test content for uuencode files + // These contents are just examples, you can modify them as needed. + // The files will be created in the "uuencode" directory. + + // check if files already exist + if _, err := os.Stat(filepath.Join("uuencode", "test1.uue")); err == nil { + t.Logf("File uuencode/test1.uue already exists, skipping creation.") + return nil + } + if _, err := os.Stat(filepath.Join("uuencode", "test2.uue")); err == nil { + t.Logf("File uuencode/test2.uue already exists, skipping creation.") + return nil + } + content1 := []byte("Hello from test1!\nThis is a test file.\n") + content2 := []byte("Another file for test2.\nWith more lines.\n1234567890\r\n") + + uue1 := UUEncode(content1, "test1.txt", 644) + uue2 := UUEncode(content2, "test2.txt", 644) + + if err := os.WriteFile(filepath.Join("uuencode", "test1.uue"), uue1, 0644); err != nil { + t.Errorf("Failed to write uuencode/test1.uue: %v", err) + return err + } + if err := os.WriteFile(filepath.Join("uuencode", "test2.uue"), uue2, 0644); err != nil { + t.Errorf("Failed to write uuencode/test2.uue: %v", err) + return err + } + return nil +} + // TestUUdecodeFiles runs UUdecode tests on sample files. // It reads UUencoded files, decodes them, and checks for integrity. 
func TestUUdecodeFiles(t *testing.T) { @@ -573,38 +607,3 @@ func TestUUdecodeFiles(t *testing.T) { t.Logf("Successfully checked %d UUencoded files.", checked) } } - -// GenerateTestUUEncodedFiles creates uuencode/test1.uue and uuencode/test2.uue with test content. -func GenerateTestUUEncodedFiles(t *testing.T) error { - _ = os.MkdirAll("uuencode", 0755) - t.Logf("Generating test UUencoded files...") - // Create test content for uuencode files - // These contents are just examples, you can modify them as needed. - // The files will be created in the "uuencode" directory. - - // check if files already exist - if _, err := os.Stat(filepath.Join("uuencode", "test1.uue")); err == nil { - t.Logf("File uuencode/test1.uue already exists, skipping creation.") - return nil - } - if _, err := os.Stat(filepath.Join("uuencode", "test2.uue")); err == nil { - t.Logf("File uuencode/test2.uue already exists, skipping creation.") - return nil - } - - content1 := []byte("Hello from test1!\nThis is a test file.\n") - content2 := []byte("Another file for test2.\nWith more lines.\n1234567890\r\n") - - uue1 := UUEncode(content1, "test1.txt", 644) - uue2 := UUEncode(content2, "test2.txt", 644) - - if err := os.WriteFile(filepath.Join("uuencode", "test1.uue"), uue1, 0644); err != nil { - t.Errorf("Failed to write uuencode/test1.uue: %v", err) - return err - } - if err := os.WriteFile(filepath.Join("uuencode", "test2.uue"), uue2, 0644); err != nil { - t.Errorf("Failed to write uuencode/test2.uue: %v", err) - return err - } - return nil -} diff --git a/rapidyenc/encoder.go b/rapidyenc/encoder.go index 4d30baf..1e29278 100644 --- a/rapidyenc/encoder.go +++ b/rapidyenc/encoder.go @@ -2,21 +2,16 @@ package rapidyenc /* #cgo CFLAGS: -I${SRCDIR}/src -#cgo darwin LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo windows,amd64 LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo windows,386 LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo windows,arm LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo 
linux,amd64 LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo linux,386 LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo linux,arm LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ -#cgo linux,arm64 LDFLAGS: ${SRCDIR}/librapidyenc.a -lstdc++ +#cgo darwin LDFLAGS: ${SRCDIR}/librapidyenc_darwin.a -lstdc++ +#cgo windows,amd64 LDFLAGS: ${SRCDIR}/librapidyenc_windows_amd64.a -lstdc++ -static-libstdc++ -static-libgcc +#cgo linux,amd64 LDFLAGS: ${SRCDIR}/librapidyenc_linux_amd64.a -lstdc++ +#cgo linux,arm64 LDFLAGS: ${SRCDIR}/librapidyenc_linux_arm64.a -lstdc++ #include "rapidyenc.h" */ import "C" import ( "bytes" "fmt" - "os" "sync" "unsafe" ) @@ -87,9 +82,3 @@ func UUEncode(src []byte, filename string, mode int) []byte { buf.WriteString("end\r\n") return buf.Bytes() } - -// WriteUUEncodedFile encodes src and writes it as UUencoded data to the given file. -func WriteUUEncodedFile(filename string, src []byte, outPath string, mode int) error { - encoded := UUEncode(src, filename, mode) - return os.WriteFile(outPath, encoded, 0644) -}