27 changes: 27 additions & 0 deletions .chloggen/44472.yaml
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: bug_fix

# The name of the component, or a single word describing the area of concern, (e.g. receiver/filelog)
component: exporter/loadbalancing

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: "Fix k8s resolver parsing so loadbalancing exporter works with service FQDNs"

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [44472]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: []
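
For context, the failure mode this entry covers shows up when the loadbalancing exporter's k8s resolver is pointed at a headless service by its full DNS name. A configuration sketch in the spirit of the linked issue (the service and namespace names are illustrative):

exporters:
  loadbalancing:
    protocol:
      otlp:
        tls:
          insecure: true
    resolver:
      k8s:
        service: collector-backend.default.svc.cluster.local
        ports:
          - 4317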
8 changes: 4 additions & 4 deletions exporter/loadbalancingexporter/resolver_k8s.go
@@ -87,10 +87,10 @@ func newK8sResolver(clt kubernetes.Interface,
		timeout = defaultListWatchTimeout
	}

-	nAddr := strings.SplitN(service, ".", 2)
-	name, namespace := nAddr[0], "default"
-	if len(nAddr) > 1 {
-		namespace = nAddr[1]
+	parts := strings.Split(service, ".")

Contributor

Thanks for the fix. According to the changes you made, the loadbalancing exporter should work correctly (even without your fix) when using the standard <svcName>.<namespace> domain name format of the headless service. I am still experiencing the same issue even when using collector-backend.default in the config mentioned in the issue. Am I missing something here? Thanks!

Contributor Author

Let me retest with that and see if I missed something during the fix.

Contributor Author

I just reran with the provided config against a local kind cluster, using a collector build that includes the FQDN parsing fix. With telemetrygen driving 50 traces through a port-forward, both backend pods received traffic, and kubectl logs deploy/lb-collector never produced the couldn’t find the exporter for the endpoint "" error. So I think <svc>.<namespace> now works as expected.

The front collector’s service account must be allowed to list/watch endpointslices in the namespace. Without the RBAC role binding, the informer never populates and the ring stays empty, which yields the same error message even with a correct service string.
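
A minimal RBAC sketch granting that access (the Role/RoleBinding names, service account, and namespace are illustrative, not taken from the PR):

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: endpointslice-reader
  namespace: default
rules:
  - apiGroups: ["discovery.k8s.io"]
    resources: ["endpointslices"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: endpointslice-reader
  namespace: default
subjects:
  - kind: ServiceAccount
    name: lb-collector
    namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: endpointslice-reader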

+	name, namespace := parts[0], "default"
+	if len(parts) > 1 && parts[1] != "" {
+		namespace = parts[1]
	} else {
		logger.Info("the namespace for the Kubernetes service wasn't provided, trying to determine the current namespace", zap.String("name", name))
		if ns, err := getInClusterNamespace(); err == nil {
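
To see the effect of the change above: the old strings.SplitN(service, ".", 2) put everything after the first dot into the namespace, so lb.custom.svc.cluster.local yielded namespace "custom.svc.cluster.local". The Split-based version keeps only the second label. A runnable sketch of the new logic (parseService is a hypothetical standalone helper; the in-cluster namespace fallback from the real code is omitted):

package main

import (
	"fmt"
	"strings"
)

// parseService mirrors the fixed resolver logic: the first DNS label is the
// service name, the second (when present and non-empty) is the namespace,
// and trailing labels such as "svc.cluster.local" are ignored.
func parseService(service string) (name, namespace string) {
	parts := strings.Split(service, ".")
	name, namespace = parts[0], "default"
	if len(parts) > 1 && parts[1] != "" {
		namespace = parts[1]
	}
	return name, namespace
}

func main() {
	for _, svc := range []string{"lb", "lb.custom", "lb.custom.svc.cluster.local"} {
		name, ns := parseService(svc)
		fmt.Printf("%-28s -> name=%s namespace=%s\n", svc, name, ns)
	}
}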
48 changes: 48 additions & 0 deletions exporter/loadbalancingexporter/resolver_k8s_test.go
@@ -257,6 +257,54 @@ func TestK8sResolve(t *testing.T) {
	}
}

func TestK8sResolveWithServiceFQDN(t *testing.T) {
	serviceName := "lb"
	namespace := "custom"
	serviceFQDN := fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, namespace)
	port := int32(4317)
	hostname := "pod-0"

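	// An EndpointSlice labeled with kubernetes.io/service-name, which is how the resolver's informer matches slices to the target service.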
	endpointSlice := &discoveryv1.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      serviceName,
			Namespace: namespace,
			Labels: map[string]string{
				"kubernetes.io/service-name": serviceName,
			},
		},
		Endpoints: []discoveryv1.Endpoint{
			{
				Addresses: []string{"10.0.0.1"},
				Hostname:  &hostname,
			},
		},
	}

	cl := fake.NewClientset(endpointSlice)
	_, tb := getTelemetryAssets(t)
	res, err := newK8sResolver(cl, zap.NewNop(), serviceFQDN, []int32{port}, defaultListWatchTimeout, true, tb)
	require.NoError(t, err)
	require.Equal(t, serviceName, res.svcName)
	require.Equal(t, namespace, res.svcNs)

	require.NoError(t, res.start(t.Context()))
	t.Cleanup(func() {
		require.NoError(t, res.shutdown(t.Context()))
	})

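	// The resolver publishes endpoints as <hostname>.<service>.<namespace>:<port>.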
	expected := []string{fmt.Sprintf("%s.%s.%s:%d", hostname, serviceName, namespace, port)}

	cErr := waitForCondition(t, 3*time.Second, 20*time.Millisecond, func(ctx context.Context) (bool, error) {
		if _, err := res.resolve(ctx); err != nil {
			return false, err
		}
		return slices.Equal(expected, res.Endpoints()), nil
	})
	if cErr != nil {
		t.Fatalf("timed out waiting for resolver endpoints to match expected: %v", cErr)
	}
}

// waitForCondition will poll the condition function until it returns true or times out.
// Any errors returned from the condition are treated as test failures.
func waitForCondition(t *testing.T, timeout, interval time.Duration, condition func(context.Context) (bool, error)) error {