2 changes: 2 additions & 0 deletions server/monitor.go
@@ -2364,6 +2364,7 @@ type LeafzOptions struct {
type LeafInfo struct {
    ID         uint64 `json:"id"`
    Name       string `json:"name"`
    Cluster    string `json:"cluster,omitempty"`
    IsSpoke    bool   `json:"is_spoke"`
    IsIsolated bool   `json:"is_isolated,omitempty"`
    Account    string `json:"account"`
@@ -2409,6 +2410,7 @@ func (s *Server) Leafz(opts *LeafzOptions) (*Leafz, error) {
lni := &LeafInfo{
    ID:         ln.cid,
    Name:       ln.leaf.remoteServer,
    Cluster:    ln.leaf.remoteCluster,
P2: Hide synthetic leaf cluster names from /leafz

When a remote leaf has leafnodes.remotes and a server_name but no cluster block, Server.New intentionally fabricates opts.Cluster.Name = opts.ServerName only for internal bookkeeping (server/server.go:820-825), and /varz already strips that back out (server/monitor.go:1708-1711). leafNodeResumeConnectProcess still sends this synthetic value in the leaf CONNECT (server/leafnode.go:3367-3375, server/leafnode.go:976-978), so exposing ln.leaf.remoteCluster here makes /leafz report standalone leaves as if they belonged to a real cluster named after the server. Any topology consumer using /leafz will misclassify non-clustered leaves whenever server_name is configured.
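
One possible mitigation, sketched below purely as an illustration (the cleanLeafCluster helper is hypothetical, not part of this PR): treat a remote cluster name that merely echoes the remote server name as synthetic and drop it, mirroring the stripping /varz already does. Caveat: a genuine cluster that happens to share its name with the remote server would be hidden as well.

// cleanLeafCluster sketches the idea above: report an empty cluster when the
// advertised name only echoes the remote server_name, so `omitempty` keeps it
// out of the /leafz JSON. Hypothetical helper, not code from this PR.
func cleanLeafCluster(remoteCluster, remoteServer string) string {
    if remoteCluster != "" && remoteCluster == remoteServer {
        return ""
    }
    return remoteCluster
}

// Possible call site in Leafz (illustrative only):
//   Cluster: cleanLeafCluster(ln.leaf.remoteCluster, ln.leaf.remoteServer),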

    IsSpoke:    ln.isSpokeLeafNode(),
    IsIsolated: ln.leaf.isolated,
    Account:    ln.acc.Name,
51 changes: 51 additions & 0 deletions server/monitor_test.go
@@ -4263,6 +4263,57 @@ func TestMonitorLeafz(t *testing.T) {
}
}

func TestMonitorLeafzCluster(t *testing.T) {
    hubConf := createConfFile(t, []byte(`
        server_name: "hub"
        listen: "127.0.0.1:-1"
        http: "127.0.0.1:-1"
        leafnodes {
            listen: "127.0.0.1:-1"
        }
    `))
    hub, hubOpts := RunServerWithConfig(hubConf)
    defer hub.Shutdown()

    leafConf := createConfFile(t, []byte(fmt.Sprintf(`
        server_name: "leaf1"
        listen: "127.0.0.1:-1"
        cluster {
            name: "leaf-cluster"
            listen: "127.0.0.1:-1"
        }
        leafnodes {
            remotes = [{
                url: "nats-leaf://127.0.0.1:%d"
            }]
        }
    `, hubOpts.LeafNode.Port)))
    leaf, _ := RunServerWithConfig(leafConf)
    defer leaf.Shutdown()

    checkLeafNodeConnected(t, leaf)

    // Make sure the hub sees the remote cluster name of the leaf node.
    l, err := hub.Leafz(nil)
    require_NoError(t, err)
    require_Equal(t, l.NumLeafs, 1)
    require_Equal(t, len(l.Leafs), 1)
    require_Equal(t, l.Leafs[0].Cluster, "leaf-cluster")
    require_Equal(t, l.Leafs[0].Name, "leaf1")

    // Make sure that, from the leaf's perspective, the hub has no cluster.
    l, err = leaf.Leafz(nil)
    require_NoError(t, err)
    require_Equal(t, l.NumLeafs, 1)
    require_Equal(t, len(l.Leafs), 1)
    require_Equal(t, l.Leafs[0].Cluster, "")
    require_Equal(t, l.Leafs[0].Name, "hub")

    // Make sure that the cluster is present in the HTTP response as well.
    body := readBody(t, fmt.Sprintf("http://127.0.0.1:%d/leafz", hub.MonitorAddr().Port))
    require_True(t, strings.Contains(string(body), `"cluster": "leaf-cluster"`))
}

func pollAccountz(t *testing.T, s *Server, mode int, url string, opts *AccountzOptions) *Accountz {
    t.Helper()
    if mode == 0 {