mirror of
https://github.com/apricote/hcloud-upload-image.git
synced 2026-01-13 21:31:03 +00:00
In #68 I reduced the general limits for the backoff, thinking that it would speed up the upload on average because it was retrying faster. But because it was retrying faster, the 10 available retries were used up before SSH became available. The new 100 retries match the 3 minutes of total timeout that the previous solution had, and should fix all issues. In addition I discovered that my implementation in `hcloudimages/backoff.ExponentialBackoffWithLimit` has a bug where the calculated offset could overflow before the limit was applied, resulting in negative durations. I did not fix the issue because `hcloud-go` provides such a method natively nowadays. Instead I marked the method as deprecated, to be removed in a later release.
33 lines
897 B
Go
33 lines
897 B
Go
// SPDX-License-Identifier: MPL-2.0
|
|
// From https://github.com/hetznercloud/terraform-provider-hcloud/blob/v1.46.1/internal/control/retry.go
|
|
// Copyright (c) Hetzner Cloud GmbH
|
|
|
|
package backoff
|
|
|
|
import (
|
|
"math"
|
|
"time"
|
|
|
|
"github.com/hetznercloud/hcloud-go/v2/hcloud"
|
|
)
|
|
|
|
// ExponentialBackoffWithLimit returns a [hcloud.BackoffFunc] which implements an exponential
|
|
// backoff.
|
|
// It uses the formula:
|
|
//
|
|
// min(b^retries * d, limit)
|
|
//
|
|
// This function has a known overflow issue and should not be used anymore.
|
|
//
|
|
// Deprecated: Use BackoffFuncWithOpts from github.com/hetznercloud/hcloud-go/v2/hcloud instead.
|
|
func ExponentialBackoffWithLimit(b float64, d time.Duration, limit time.Duration) hcloud.BackoffFunc {
|
|
return func(retries int) time.Duration {
|
|
current := time.Duration(math.Pow(b, float64(retries))) * d
|
|
|
|
if current > limit {
|
|
return limit
|
|
} else {
|
|
return current
|
|
}
|
|
}
|
|
}
|