gtsocial-umbx

throttling.go (4502B)


// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

/*
	The code in this file is adapted from MIT-licensed code in github.com/go-chi/chi. Thanks chi (thi)!

	See: https://github.com/go-chi/chi/blob/e6baba61759b26ddf7b14d1e02d1da81a4d76c08/middleware/throttle.go

	And: https://github.com/sponsors/pkieltyka
*/

package middleware

import (
	"net/http"
	"runtime"
	"strconv"
	"time"

	"github.com/gin-gonic/gin"
)

// token represents a request that is being processed.
type token struct{}

// Throttle returns a gin middleware that performs throttling of incoming requests,
// ensuring that only a certain number of requests are handled concurrently, to reduce
// congestion of the server.
//
// Limits are configured using available CPUs and the given cpuMultiplier value.
// Open request limit is available CPUs * multiplier; backlog limit is limit * multiplier.
//
// Example values for multiplier 8:
//
//	1 cpu = 08 open, 064 backlog
//	2 cpu = 16 open, 128 backlog
//	4 cpu = 32 open, 256 backlog
//
// Example values for multiplier 4:
//
//	1 cpu = 04 open, 016 backlog
//	2 cpu = 08 open, 032 backlog
//	4 cpu = 16 open, 064 backlog
//
// Callers will first attempt to get a backlog token. Once they have that, they will
// wait in the backlog queue until they can get a token to allow their request to be
// processed.
//
// If the backlog queue is full, this function will abort the request chain, write a
// JSON error into the response, set an appropriate Retry-After header, and set the
// HTTP response code to 429: Too Many Requests. If the request's context is canceled
// while waiting for a token, the middleware returns without handling the request.
//
// If the multiplier is <= 0, a noop middleware will be returned instead.
//
// retryAfter determines the Retry-After header value to be sent to throttled requests.
//
// Useful links:
//
//   - https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
//   - https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/429
func Throttle(cpuMultiplier int, retryAfter time.Duration) gin.HandlerFunc {
	if cpuMultiplier <= 0 {
		// throttling is disabled, return a noop middleware
		return func(c *gin.Context) {}
	}

	var (
		limit              = runtime.GOMAXPROCS(0) * cpuMultiplier
		backlogLimit       = limit * cpuMultiplier
		backlogChannelSize = limit + backlogLimit
		tokens             = make(chan token, limit)
		backlogTokens      = make(chan token, backlogChannelSize)
		retryAfterStr      = strconv.FormatUint(uint64(retryAfter/time.Second), 10)
	)

	// prefill token channels
	for i := 0; i < limit; i++ {
		tokens <- token{}
	}
	for i := 0; i < backlogChannelSize; i++ {
		backlogTokens <- token{}
	}

	return func(c *gin.Context) {
		// inside this select, the caller tries to get a backlog token
		select {
		case <-c.Request.Context().Done():
			// request context has been canceled already
			return
		case btok := <-backlogTokens:
			defer func() {
				// when we're finished, return the backlog token to the bucket
				backlogTokens <- btok
			}()

			// inside *this* select, the caller has a backlog token,
			// and they're waiting for their turn to be processed
			select {
			case <-c.Request.Context().Done():
				// the request context has been canceled already
				return
			case tok := <-tokens:
				// the caller gets a token, so their request can now be processed
				defer func() {
					// whatever happens to the request, put the
					// token back in the bucket when we're finished
					tokens <- tok
				}()
				c.Next() // <- finally process the caller's request
			}

		default:
			// we don't have space in the backlog queue
			c.Header("Retry-After", retryAfterStr)
			c.JSON(http.StatusTooManyRequests, gin.H{"error": "server capacity exceeded"})
			c.Abort()
		}
	}
}
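
Usage note (not part of throttling.go): the sketch below shows one way this middleware could be wired into a gin engine. The import path, route, multiplier, and retry-after values are illustrative assumptions for the example, not GoToSocial's actual server setup; in GoToSocial this package lives under the project's internal/ tree and is registered by the server itself.

package main

import (
	"net/http"
	"time"

	"github.com/gin-gonic/gin"

	// Illustrative import path only; substitute wherever this
	// middleware package actually lives in your module.
	"example.org/yourapp/middleware"
)

func main() {
	r := gin.New()

	// Open-request limit = GOMAXPROCS * 8, backlog limit = open limit * 8;
	// throttled clients are told to retry after 30 seconds.
	r.Use(middleware.Throttle(8, 30*time.Second))

	r.GET("/healthz", func(c *gin.Context) {
		c.String(http.StatusOK, "ok")
	})

	_ = r.Run(":8080")
}

With these example values, a 4-CPU machine would handle at most 32 requests concurrently with a backlog of 256; once the backlog is full, further requests immediately receive a 429 response carrying Retry-After: 30.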