Backport of api/seal-status: fix deadlock when namespace is set on seal-status calls into release/1.14.x #23879

Merged
4 changes: 4 additions & 0 deletions changelog/23861.txt
@@ -0,0 +1,4 @@
+```release-note:bug
+api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured
+on the request.
+```
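
For context on the bug this entry describes: Go's sync.RWMutex is not reentrant, so a request path that already holds the read statelock and then calls a helper that takes the same read lock again can wedge as soon as a writer queues up in between. A minimal, self-contained sketch of that failure mode (illustrative only, not Vault code):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var mu sync.RWMutex

	mu.RLock() // outer read lock, like the request handler holding the statelock

	go func() {
		mu.Lock() // a writer queues up behind the outer read lock
		mu.Unlock()
	}()

	time.Sleep(100 * time.Millisecond) // give the writer time to queue

	fmt.Println("re-acquiring read lock...")
	mu.RLock() // blocks: new readers wait for the queued writer, which waits for us
	fmt.Println("never reached")
}
```
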
2 changes: 1 addition & 1 deletion command/server.go
@@ -1492,7 +1492,7 @@ func (c *ServerCommand) Run(args []string) int {
// Vault cluster with multiple servers is configured with auto-unseal but is
// uninitialized. Once one server initializes the storage backend, this
// goroutine will pick up the unseal keys and unseal this instance.
-if !core.IsInSealMigrationMode() {
+if !core.IsInSealMigrationMode(true) {
go runUnseal(c, core, context.Background())
}

2 changes: 1 addition & 1 deletion http/sys_seal.go
@@ -165,7 +165,7 @@ func handleSysSealStatus(core *vault.Core) http.Handler {

func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
-status, err := core.GetSealStatus(ctx)
+status, err := core.GetSealStatus(ctx, true)
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
38 changes: 25 additions & 13 deletions vault/core.go
@@ -1466,10 +1466,14 @@ func (c *Core) Sealed() bool {
return atomic.LoadUint32(c.sealed) == 1
}

-// SecretProgress returns the number of keys provided so far
-func (c *Core) SecretProgress() (int, string) {
-c.stateLock.RLock()
-defer c.stateLock.RUnlock()
+// SecretProgress returns the number of keys provided so far. Lock
+// should only be false if the caller is already holding the read
+// statelock (such as calls originating from switchedLockHandleRequest).
+func (c *Core) SecretProgress(lock bool) (int, string) {
+if lock {
+c.stateLock.RLock()
+defer c.stateLock.RUnlock()
+}
switch c.unlockInfo {
case nil:
return 0, ""
@@ -3073,22 +3077,30 @@ func (c *Core) unsealKeyToMasterKey(ctx context.Context, seal Seal, combinedKey
// configuration in storage is Shamir but the seal in HCL is not. In this
// mode we should not auto-unseal (even if the migration is done) and we will
// accept unseal requests with and without the `migrate` option, though the migrate
-// option is required if we haven't yet performed the seal migration.
-func (c *Core) IsInSealMigrationMode() bool {
-c.stateLock.RLock()
-defer c.stateLock.RUnlock()
+// option is required if we haven't yet performed the seal migration. Lock
+// should only be false if the caller is already holding the read
+// statelock (such as calls originating from switchedLockHandleRequest).
+func (c *Core) IsInSealMigrationMode(lock bool) bool {
+if lock {
+c.stateLock.RLock()
+defer c.stateLock.RUnlock()
+}
return c.migrationInfo != nil
}

// IsSealMigrated returns true if we're in seal migration mode but migration
// has already been performed (possibly by another node, or prior to this node's
-// current invocation.)
-func (c *Core) IsSealMigrated() bool {
-if !c.IsInSealMigrationMode() {
+// current invocation). Lock should only be false if the caller is already
+// holding the read statelock (such as calls originating from switchedLockHandleRequest).
+func (c *Core) IsSealMigrated(lock bool) bool {
+if !c.IsInSealMigrationMode(lock) {
return false
}
-c.stateLock.RLock()
-defer c.stateLock.RUnlock()
+
+if lock {
+c.stateLock.RLock()
+defer c.stateLock.RUnlock()
+}
done, _ := c.sealMigrated(context.Background())
return done
}
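
The boolean threaded through these helpers encodes one convention: pass true when nothing upstream holds the statelock, false when the call arrives on a path that already took stateLock.RLock(). A rough sketch of the two kinds of call sites (condensed from the hunks in this PR, not verbatim):

```go
// Startup path: no statelock is held, so the helper acquires it itself.
if !core.IsInSealMigrationMode(true) {
	go runUnseal(c, core, context.Background())
}

// Request path dispatched via switchedLockHandleRequest: the read statelock
// is already held, so re-locking (the old behavior) is what deadlocked.
progress, nonce := core.SecretProgress(false)
migrating := core.IsInSealMigrationMode(false) && !core.IsSealMigrated(false)
```
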
10 changes: 5 additions & 5 deletions vault/core_test.go
@@ -413,7 +413,7 @@ func TestCore_Unseal_MultiShare(t *testing.T) {
t.Fatalf("should be sealed")
}

-if prog, _ := c.SecretProgress(); prog != 0 {
+if prog, _ := c.SecretProgress(true); prog != 0 {
t.Fatalf("bad progress: %d", prog)
}

@@ -432,14 +432,14 @@ func TestCore_Unseal_MultiShare(t *testing.T) {
if !unseal {
t.Fatalf("should be unsealed")
}
-if prog, _ := c.SecretProgress(); prog != 0 {
+if prog, _ := c.SecretProgress(true); prog != 0 {
t.Fatalf("bad progress: %d", prog)
}
} else {
if unseal {
t.Fatalf("should not be unsealed")
}
-if prog, _ := c.SecretProgress(); prog != i+1 {
+if prog, _ := c.SecretProgress(true); prog != i+1 {
t.Fatalf("bad progress: %d", prog)
}
}
@@ -592,7 +592,7 @@ func TestCore_Unseal_Single(t *testing.T) {
t.Fatalf("should be sealed")
}

-if prog, _ := c.SecretProgress(); prog != 0 {
+if prog, _ := c.SecretProgress(true); prog != 0 {
t.Fatalf("bad progress: %d", prog)
}

@@ -604,7 +604,7 @@ func TestCore_Unseal_Single(t *testing.T) {
if !unseal {
t.Fatalf("should be unsealed")
}
-if prog, _ := c.SecretProgress(); prog != 0 {
+if prog, _ := c.SecretProgress(true); prog != 0 {
t.Fatalf("bad progress: %d", prog)
}

2 changes: 1 addition & 1 deletion vault/hcp_link/capabilities/node_status/node_status.go
@@ -33,7 +33,7 @@ func (c *NodeStatusReporter) GetNodeStatus(ctx context.Context) (retStatus nodes

var status nodestatus.NodeStatus

-sealStatus, err := c.NodeStatusGetter.GetSealStatus(ctx)
+sealStatus, err := c.NodeStatusGetter.GetSealStatus(ctx, true)
if err != nil {
return status, err
}
2 changes: 1 addition & 1 deletion vault/hcp_link/internal/wrapped_hcpLink.go
@@ -16,7 +16,7 @@ import (

type WrappedCoreNodeStatus interface {
ActiveTime() time.Time
-GetSealStatus(ctx context.Context) (*vault.SealStatusResponse, error)
+GetSealStatus(ctx context.Context, lock bool) (*vault.SealStatusResponse, error)
IsRaftVoter() bool
ListenerAddresses() ([]string, error)
LogLevel() string
2 changes: 1 addition & 1 deletion vault/init.go
@@ -451,7 +451,7 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error {
}

// Disallow auto-unsealing when migrating
-if c.IsInSealMigrationMode() && !c.IsSealMigrated() {
+if c.IsInSealMigrationMode(true) && !c.IsSealMigrated(true) {
return NewNonFatalError(errors.New("cannot auto-unseal during seal migration"))
}

8 changes: 4 additions & 4 deletions vault/logical_system.go
@@ -4710,7 +4710,7 @@ type SealStatusResponse struct {
Warnings []string `json:"warnings,omitempty"`
}

-func (core *Core) GetSealStatus(ctx context.Context) (*SealStatusResponse, error) {
+func (core *Core) GetSealStatus(ctx context.Context, lock bool) (*SealStatusResponse, error) {
sealed := core.Sealed()

initialized, err := core.Initialized(ctx)
@@ -4763,7 +4763,7 @@ func (core *Core) GetSealStatus(ctx context.Context) (*SealStatusResponse, error
clusterID = cluster.ID
}

-progress, nonce := core.SecretProgress()
+progress, nonce := core.SecretProgress(lock)

s := &SealStatusResponse{
Type: sealConfig.Type,
@@ -4775,7 +4775,7 @@ func (core *Core) GetSealStatus(ctx context.Context) (*SealStatusResponse, error
Nonce: nonce,
Version: version.GetVersion().VersionNumber(),
BuildDate: version.BuildDate,
-Migration: core.IsInSealMigrationMode() && !core.IsSealMigrated(),
+Migration: core.IsInSealMigrationMode(lock) && !core.IsSealMigrated(lock),
ClusterName: clusterName,
ClusterID: clusterID,
RecoverySeal: core.SealAccess().RecoveryKeySupported(),
@@ -4837,7 +4837,7 @@ func (core *Core) GetLeaderStatus() (*LeaderResponse, error) {
}

func (b *SystemBackend) handleSealStatus(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-status, err := b.Core.GetSealStatus(ctx)
+status, err := b.Core.GetSealStatus(ctx, false)
if err != nil {
return nil, err
}
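
This hunk is where the deadlock is actually broken: the raw handler in http/sys_seal.go (shown earlier) still asks GetSealStatus to take the statelock, while this logical-system handler is only reached after the request router has already taken the read statelock, which is the path a namespaced sys/seal-status call follows. Assuming that routing, the two entry points now contrast like this (condensed sketch, not full handlers):

```go
// http/sys_seal.go — raw endpoint, outside the request router: lock for itself.
status, err := core.GetSealStatus(ctx, true)

// vault/logical_system.go — namespaced request path, read statelock already
// held by switchedLockHandleRequest: must not re-acquire it.
status, err := b.Core.GetSealStatus(ctx, false)
```
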