gofmt -s -w *.go */*.go

6543 · 2021-11-25 16:12:28 +01:00
commit e800d2110e · parent 5ed8d0f129
GPG Key ID: C99B82E40B027BAE (no known key found for this signature in database)
6 changed files with 58 additions and 44 deletions

View File

@@ -101,7 +101,7 @@ var tlsConfig = &tls.Config{
 			}
 		}
-		err = keyCache.Set(sni, &tlsCertificate, 15 * time.Minute)
+		err = keyCache.Set(sni, &tlsCertificate, 15*time.Minute)
 		if err != nil {
 			panic(err)
 		}
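
For context: this hunk sits inside the tls.Config used by the HTTPS listener, where a GetCertificate callback resolves a certificate for the requested SNI and caches it for 15 minutes. A minimal sketch of that pattern follows; the sync.Map cache and the obtainCertFor helper are placeholders for illustration, not the project's actual mcache/pogreb code.

package server

import (
	"crypto/tls"
	"sync"
	"time"
)

// cached pairs a certificate with its expiry; keyCacheSketch stands in for
// the project's mcache-backed keyCache.
type cached struct {
	cert    *tls.Certificate
	expires time.Time
}

var keyCacheSketch sync.Map // SNI -> cached

// obtainCertFor is a placeholder for the real lookup (key database or ACME order).
func obtainCertFor(sni string) (*tls.Certificate, error) {
	pair, err := tls.LoadX509KeyPair(sni+".crt", sni+".key") // stand-in only
	if err != nil {
		return nil, err
	}
	return &pair, nil
}

var tlsConfig = &tls.Config{
	GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
		sni := hello.ServerName
		if v, ok := keyCacheSketch.Load(sni); ok {
			if c := v.(cached); time.Now().Before(c.expires) {
				return c.cert, nil // fresh cache hit, no DB or ACME round trip
			}
		}
		cert, err := obtainCertFor(sni)
		if err != nil {
			return nil, err
		}
		// keep it for 15 minutes, like the keyCache.Set call in the hunk above
		keyCacheSketch.Store(sni, cached{cert: cert, expires: time.Now().Add(15 * time.Minute)})
		return cert, nil
	},
}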
@@ -129,11 +129,11 @@ var tlsConfig = &tls.Config{
 var keyCache = mcache.New()
 var keyDatabase *pogreb.DB
 
-func CheckUserLimit(user string) (error) {
+func CheckUserLimit(user string) error {
 	userLimit, ok := acmeClientCertificateLimitPerUser[user]
 	if !ok {
 		// Each Codeberg user can only add 10 new domains per day.
-		userLimit = equalizer.NewTokenBucket(10, time.Hour * 24)
+		userLimit = equalizer.NewTokenBucket(10, time.Hour*24)
 		acmeClientCertificateLimitPerUser[user] = userLimit
 	}
 	if !userLimit.Ask() {
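
CheckUserLimit caps each user at 10 new certificate orders per day through a lazily created token bucket that is consumed with Ask(). The self-contained sketch below mimics those Ask semantics with a whole-window refill; the real equalizer bucket may refill differently, and the map here is not goroutine-safe — it is only meant to illustrate the per-user limiting pattern.

package limits

import (
	"fmt"
	"sync"
	"time"
)

// tokenBucket refills to full capacity once per window and hands out one
// token per Ask call; Ask reports false when the bucket is empty.
type tokenBucket struct {
	mu         sync.Mutex
	capacity   int
	tokens     int
	window     time.Duration
	lastRefill time.Time
}

func newTokenBucket(capacity int, window time.Duration) *tokenBucket {
	return &tokenBucket{capacity: capacity, tokens: capacity, window: window, lastRefill: time.Now()}
}

func (b *tokenBucket) Ask() bool {
	b.mu.Lock()
	defer b.mu.Unlock()
	if time.Since(b.lastRefill) >= b.window {
		b.tokens = b.capacity
		b.lastRefill = time.Now()
	}
	if b.tokens == 0 {
		return false
	}
	b.tokens--
	return true
}

// perUser mirrors acmeClientCertificateLimitPerUser: one bucket per user,
// created lazily with 10 tokens per 24 hours (sketch only, not goroutine-safe).
var perUser = map[string]*tokenBucket{}

func checkUserLimit(user string) error {
	bucket, ok := perUser[user]
	if !ok {
		bucket = newTokenBucket(10, 24*time.Hour)
		perUser[user] = bucket
	}
	if !bucket.Ask() {
		return fmt.Errorf("user %s has reached its daily limit of new certificates", user)
	}
	return nil
}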
@@ -149,8 +149,9 @@ type AcmeAccount struct {
 	Email        string
 	Registration *registration.Resource
 	Key          crypto.PrivateKey      `json:"-"`
 	KeyPEM       string                 `json:"Key"`
 }
+
 func (u *AcmeAccount) GetEmail() string {
 	return u.Email
 }
@@ -178,14 +179,17 @@ var acmeClientCertificateLimitPerUser = map[string]*equalizer.TokenBucket{}
 // rate limit is 300 / 3 hours, we want 200 / 2 hours but to refill more often, so that's 25 new domains every 15 minutes
 // TODO: when this is used a lot, we probably have to think of a somewhat better solution?
-var acmeClientOrderLimit = equalizer.NewTokenBucket(25, 15 * time.Minute)
+var acmeClientOrderLimit = equalizer.NewTokenBucket(25, 15*time.Minute)
 // rate limit is 20 / second, we want 10 / second
-var acmeClientRequestLimit = equalizer.NewTokenBucket(10, 1 * time.Second)
+var acmeClientRequestLimit = equalizer.NewTokenBucket(10, 1*time.Second)
 var challengeCache = mcache.New()
 type AcmeTLSChallengeProvider struct{}
 var _ challenge.Provider = AcmeTLSChallengeProvider{}
 func (a AcmeTLSChallengeProvider) Present(domain, _, keyAuth string) error {
 	return challengeCache.Set(domain, keyAuth, 1*time.Hour)
 }
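
To unpack the arithmetic in the rate-limit comment earlier in this hunk: 25 tokens per 15-minute window is 100 orders per hour, i.e. 200 per 2 hours, which stays under the upstream limit of 300 new orders per 3 hours. A throwaway check:

package main

import "fmt"

func main() {
	const tokensPerWindow, windowsPerHour = 25, 4 // refill every 15 minutes
	perHour := tokensPerWindow * windowsPerHour
	fmt.Printf("%d orders/hour = %d per 2h (upstream cap: 300 per 3h)\n", perHour, 2*perHour)
}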
@@ -193,10 +197,13 @@ func (a AcmeTLSChallengeProvider) CleanUp(domain, _, _ string) error {
 	challengeCache.Remove(domain)
 	return nil
 }
 type AcmeHTTPChallengeProvider struct{}
 var _ challenge.Provider = AcmeHTTPChallengeProvider{}
 func (a AcmeHTTPChallengeProvider) Present(domain, token, keyAuth string) error {
-	return challengeCache.Set(domain + "/" + token, keyAuth, 1*time.Hour)
+	return challengeCache.Set(domain+"/"+token, keyAuth, 1*time.Hour)
 }
 func (a AcmeHTTPChallengeProvider) CleanUp(domain, token, _ string) error {
 	challengeCache.Remove(domain + "/" + token)
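
Both providers only stash the keyAuth string in challengeCache — the TLS-ALPN provider keyed by domain, the HTTP-01 provider keyed by domain + "/" + token — and the plain-HTTP listener (see the func main() hunks at the end of this diff) later looks the value up and writes it as the challenge response body. A rough sketch of that serving side, using a plain map instead of mcache and a literal well-known path; the names and key layout are assumptions:

package server

import (
	"strings"

	"github.com/valyala/fasthttp"
)

// challengeStore stands in for challengeCache; keys follow the
// domain + "/" + token layout used by AcmeHTTPChallengeProvider.Present.
var challengeStore = map[string]string{}

// handleHTTP serves pending HTTP-01 challenges and redirects everything else
// to HTTPS, mirroring the shape of the func main() hunk at the end of this diff.
func handleHTTP(ctx *fasthttp.RequestCtx) {
	const prefix = "/.well-known/acme-challenge/"
	path := string(ctx.Path())
	if strings.HasPrefix(path, prefix) {
		token := strings.TrimPrefix(path, prefix)
		// assumption: Host() matches the domain the challenge was stored under
		if keyAuth, ok := challengeStore[string(ctx.Host())+"/"+token]; ok {
			ctx.SetBodyString(keyAuth) // ACME server validates this key authorization
			return
		}
		ctx.SetStatusCode(fasthttp.StatusNotFound)
		return
	}
	ctx.Redirect("https://"+string(ctx.Host())+string(ctx.RequestURI()), fasthttp.StatusMovedPermanently)
}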
@@ -248,6 +255,7 @@ func retrieveCertFromDB(sni []byte) (tls.Certificate, bool) {
 }
 var obtainLocks = sync.Map{}
 func obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Resource, user string) (tls.Certificate, error) {
 	name := strings.TrimPrefix(domains[0], "*")
 	if os.Getenv("DNS_PROVIDER") == "" && len(domains[0]) > 0 && domains[0][0] == '*' {
@@ -356,8 +364,8 @@ func setupCertificates() {
 		panic(err)
 	}
 	myAcmeAccount = AcmeAccount{
 		Email:  envOr("ACME_EMAIL", "noreply@example.email"),
 		Key:    privateKey,
 		KeyPEM: string(certcrypto.PEMEncode(privateKey)),
 	}
 	myAcmeConfig = lego.NewConfig(&myAcmeAccount)
@@ -375,8 +383,8 @@ func setupCertificates() {
 	} else {
 		reg, err := tempClient.Registration.RegisterWithExternalAccountBinding(registration.RegisterEABOptions{
 			TermsOfServiceAgreed: os.Getenv("ACME_ACCEPT_TERMS") == "true",
 			Kid:                  os.Getenv("ACME_EAB_KID"),
 			HmacEncoded:          os.Getenv("ACME_EAB_HMAC"),
 		})
 		if err != nil {
 			panic(err)

View File

@@ -14,9 +14,9 @@ var Logger = func(s string, i ...interface{}) {
 }
 type Stepper struct {
 	Name       string
 	Start      time.Time
 	LastStep   time.Time
 	Completion time.Time
 }
@@ -27,8 +27,8 @@ func Start(name string) *Stepper {
 	t := time.Now()
 	Logger("%s: started at %s\n", name, t.Format(time.RFC3339))
 	return &Stepper{
 		Name:     name,
 		Start:    t,
 		LastStep: t,
 	}
 }

View File

@@ -9,7 +9,8 @@ import (
 )
 // DnsLookupCacheTimeout specifies the timeout for the DNS lookup cache.
-var DnsLookupCacheTimeout = 15*time.Minute
+var DnsLookupCacheTimeout = 15 * time.Minute
 // dnsLookupCache stores DNS lookups for custom domains
 var dnsLookupCache = mcache.New()
@@ -61,9 +62,9 @@ func getTargetFromDNS(domain string) (targetOwner, targetRepo, targetBranch stri
 	return
 }
 // CanonicalDomainCacheTimeout specifies the timeout for the canonical domain cache.
-var CanonicalDomainCacheTimeout = 15*time.Minute
+var CanonicalDomainCacheTimeout = 15 * time.Minute
 // canonicalDomainCache stores canonical domains
 var canonicalDomainCache = mcache.New()
@@ -98,14 +99,14 @@ func checkCanonicalDomain(targetOwner, targetRepo, targetBranch, actualDomain st
 				}
 			}
 		}
-		domains = append(domains, targetOwner + string(MainDomainSuffix))
-		if domains[len(domains) - 1] == actualDomain {
+		domains = append(domains, targetOwner+string(MainDomainSuffix))
+		if domains[len(domains)-1] == actualDomain {
 			valid = true
 		}
 		if targetRepo != "" && targetRepo != "pages" {
-			domains[len(domains) - 1] += "/" + targetRepo
+			domains[len(domains)-1] += "/" + targetRepo
 		}
-		_ = canonicalDomainCache.Set(targetOwner + "/" + targetRepo + "/" + targetBranch, domains, CanonicalDomainCacheTimeout)
+		_ = canonicalDomainCache.Set(targetOwner+"/"+targetRepo+"/"+targetBranch, domains, CanonicalDomainCacheTimeout)
 	}
 	canonicalDomain = domains[0]
 	return
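
A worked example of the strings built here, with hypothetical values targetOwner "alice", targetRepo "blog", targetBranch "pages" and MainDomainSuffix ".codeberg.page": the fallback domain appended is "alice.codeberg.page", the repo-specific candidate becomes "alice.codeberg.page/blog", and the whole list is cached under the key "alice/blog/pages".

package main

import "fmt"

func main() {
	targetOwner, targetRepo, targetBranch := "alice", "blog", "pages" // hypothetical values
	mainDomainSuffix := ".codeberg.page"                              // hypothetical MainDomainSuffix

	var domains []string
	domains = append(domains, targetOwner+mainDomainSuffix) // "alice.codeberg.page"
	if targetRepo != "" && targetRepo != "pages" {
		domains[len(domains)-1] += "/" + targetRepo // "alice.codeberg.page/blog"
	}
	fmt.Println("cache key:", targetOwner+"/"+targetRepo+"/"+targetBranch)
	fmt.Println("last candidate:", domains[len(domains)-1])
}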

View File

@@ -118,7 +118,7 @@ func handler(ctx *fasthttp.RequestCtx) {
 			if targetRepo != "pages" {
 				canonicalPath = "/" + strings.SplitN(canonicalPath, "/", 3)[2]
 			}
-			ctx.Redirect("https://" + canonicalDomain + canonicalPath, fasthttp.StatusTemporaryRedirect)
+			ctx.Redirect("https://"+canonicalDomain+canonicalPath, fasthttp.StatusTemporaryRedirect)
 			return
 		}
 	}
@@ -185,7 +185,7 @@ func handler(ctx *fasthttp.RequestCtx) {
 		if len(pathElements) > 1 && strings.HasPrefix(pathElements[1], "@") {
 			if targetRepo == "pages" {
 				// example.codeberg.org/pages/@... redirects to example.codeberg.org/@...
-				ctx.Redirect("/" + strings.Join(pathElements[1:], "/"), fasthttp.StatusTemporaryRedirect)
+				ctx.Redirect("/"+strings.Join(pathElements[1:], "/"), fasthttp.StatusTemporaryRedirect)
 				return
 			}
@@ -299,30 +299,36 @@ func returnErrorPage(ctx *fasthttp.RequestCtx, code int) {
 }
 // BranchExistanceCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
-var DefaultBranchCacheTimeout = 15*time.Minute
+var DefaultBranchCacheTimeout = 15 * time.Minute
 // BranchExistanceCacheTimeout specifies the timeout for the branch timestamp & existance cache. It should be shorter
 // than FileCacheTimeout, as that gets invalidated if the branch timestamp has changed. That way, repo changes will be
 // picked up faster, while still allowing the content to be cached longer if nothing changes.
-var BranchExistanceCacheTimeout = 5*time.Minute
+var BranchExistanceCacheTimeout = 5 * time.Minute
 // branchTimestampCache stores branch timestamps for faster cache checking
 var branchTimestampCache = mcache.New()
 type branchTimestamp struct {
 	branch    string
 	timestamp time.Time
 }
 // FileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending
 // on your available memory.
-var FileCacheTimeout = 5*time.Minute
+var FileCacheTimeout = 5 * time.Minute
 // FileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default.
 var FileCacheSizeLimit = 1024 * 1024
 // fileResponseCache stores responses from the Gitea server
 // TODO: make this an MRU cache with a size limit
 var fileResponseCache = mcache.New()
 type fileResponse struct {
 	exists   bool
 	mimeType string
 	body     []byte
 }
 // getBranchTimestamp finds the default branch (if branch is "") and returns the last modification time of the branch
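
The reason BranchExistanceCacheTimeout can be shorter than FileCacheTimeout is visible further down in this file's hunks: file responses are cached under uri + "?timestamp=" + <branch commit time>, so once a fresher branch timestamp is observed, lookups switch to a new key and the stale entry simply expires on its own. A small sketch of that keying scheme (the URI value is hypothetical):

package main

import (
	"fmt"
	"strconv"
	"time"
)

// cacheKey mirrors the fileResponseCache key used in upstream(): the branch's
// last-commit timestamp is baked into the key, so a new commit yields a new
// key and the old cached response is never looked up again.
func cacheKey(uri string, branchTimestamp time.Time) string {
	return uri + "?timestamp=" + strconv.FormatInt(branchTimestamp.Unix(), 10)
}

func main() {
	uri := "alice/blog/main/index.html" // hypothetical raw-file URI
	before := time.Date(2021, 11, 25, 12, 0, 0, 0, time.UTC)
	after := before.Add(10 * time.Minute) // a new commit lands on the branch

	fmt.Println(cacheKey(uri, before)) // old entry stays but is no longer used
	fmt.Println(cacheKey(uri, after))  // different key -> fresh fetch from Gitea
}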
@@ -339,30 +345,30 @@ func getBranchTimestamp(owner, repo, branch string) *branchTimestamp {
 	if branch == "" {
 		// Get default branch
 		var body = make([]byte, 0)
-		status, body, err := fasthttp.GetTimeout(body, string(GiteaRoot)+"/api/v1/repos/"+owner+"/"+repo, 5 * time.Second)
+		status, body, err := fasthttp.GetTimeout(body, string(GiteaRoot)+"/api/v1/repos/"+owner+"/"+repo, 5*time.Second)
 		if err != nil || status != 200 {
-			_ = branchTimestampCache.Set(owner + "/" + repo + "/" + branch, nil, DefaultBranchCacheTimeout)
+			_ = branchTimestampCache.Set(owner+"/"+repo+"/"+branch, nil, DefaultBranchCacheTimeout)
 			return nil
 		}
 		result.branch = fastjson.GetString(body, "default_branch")
 	}
 	var body = make([]byte, 0)
-	status, body, err := fasthttp.GetTimeout(body, string(GiteaRoot)+"/api/v1/repos/"+owner+"/"+repo+"/branches/"+branch, 5 * time.Second)
+	status, body, err := fasthttp.GetTimeout(body, string(GiteaRoot)+"/api/v1/repos/"+owner+"/"+repo+"/branches/"+branch, 5*time.Second)
 	if err != nil || status != 200 {
 		return nil
 	}
 	result.timestamp, _ = time.Parse(time.RFC3339, fastjson.GetString(body, "commit", "timestamp"))
-	_ = branchTimestampCache.Set(owner + "/" + repo + "/" + branch, result, BranchExistanceCacheTimeout)
+	_ = branchTimestampCache.Set(owner+"/"+repo+"/"+branch, result, BranchExistanceCacheTimeout)
 	return result
 }
 var upstreamClient = fasthttp.Client{
 	ReadTimeout:        10 * time.Second,
 	MaxConnDuration:    60 * time.Second,
 	MaxConnWaitTimeout: 1000 * time.Millisecond,
 	MaxConnsPerHost:    128 * 16, // TODO: adjust bottlenecks for best performance with Gitea!
 }
// upstream requests a file from the Gitea API at GiteaRoot and writes it to the request context. // upstream requests a file from the Gitea API at GiteaRoot and writes it to the request context.
@@ -426,7 +432,7 @@ func upstream(ctx *fasthttp.RequestCtx, targetOwner string, targetRepo string, t
 		optionsForIndexPages.AppendTrailingSlash = true
 		for _, indexPage := range IndexPages {
 			if upstream(ctx, targetOwner, targetRepo, targetBranch, strings.TrimSuffix(targetPath, "/")+"/"+indexPage, &optionsForIndexPages) {
-				_ = fileResponseCache.Set(uri + "?timestamp=" + strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
+				_ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
 					exists: false,
 				}, FileCacheTimeout)
 				return true
@@ -436,7 +442,7 @@ func upstream(ctx *fasthttp.RequestCtx, targetOwner string, targetRepo string, t
 		ctx.Response.SetStatusCode(fasthttp.StatusNotFound)
 		if res != nil {
 			// Update cache if the request is fresh
-			_ = fileResponseCache.Set(uri + "?timestamp=" + strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
+			_ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
 				exists: false,
 			}, FileCacheTimeout)
 		}
@@ -496,7 +502,7 @@ func upstream(ctx *fasthttp.RequestCtx, targetOwner string, targetRepo string, t
 		cachedResponse.exists = true
 		cachedResponse.mimeType = mimeType
 		cachedResponse.body = cacheBodyWriter.Bytes()
-		_ = fileResponseCache.Set(uri + "?timestamp=" + strconv.FormatInt(options.BranchTimestamp.Unix(), 10), cachedResponse, FileCacheTimeout)
+		_ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), cachedResponse, FileCacheTimeout)
 	}
 	return true

View File

@@ -37,7 +37,6 @@ func TestHandlerPerformance(t *testing.T) {
 		t.Logf("request took %d milliseconds", end.Sub(start).Milliseconds())
 	}
 	ctx.Response.Reset()
 	ctx.Response.ResetBody()
 	ctx.Request.SetRequestURI("http://example.momar.xyz/")

View File

@@ -92,7 +92,7 @@ func main() {
 		NoDefaultServerHeader: true,
 		NoDefaultDate:         true,
 		ReadTimeout:           30 * time.Second, // needs to be this high for ACME certificates with ZeroSSL & HTTP-01 challenge
 		Concurrency:           1024 * 32,        // TODO: adjust bottlenecks for best performance with Gitea!
 		MaxConnsPerIP:         100,
 	}
@@ -116,7 +116,7 @@ func main() {
 			}
 			ctx.SetBodyString(challenge.(string))
 		} else {
-			ctx.Redirect("https://" + string(ctx.Host()) + string(ctx.RequestURI()), http.StatusMovedPermanently)
+			ctx.Redirect("https://"+string(ctx.Host())+string(ctx.RequestURI()), http.StatusMovedPermanently)
 		}
 	})
 	if err != nil {