Fix: ScrapeConfigs Selection Issue Across Different Namespaces #6390

Merged · 10 commits · Mar 25, 2024
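In short: the operators' `enqueueForNamespace` handlers checked the other namespace selectors (service monitors, probes, ...) but not `spec.scrapeConfigNamespaceSelector`, so a Prometheus selecting ScrapeConfigs in other namespaces was not re-reconciled when one of those namespaces changed. A minimal sketch of the kind of resource affected — the names and label values are invented, and placing the selector fields under `CommonPrometheusFields` is an assumption about the `monitoringv1` Go types, not taken from this PR:

```go
package main

import (
	"fmt"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A Prometheus in "monitoring" that should pick up ScrapeConfigs
	// labelled group=sc from any namespace labelled team=obs.
	p := &monitoringv1.Prometheus{
		ObjectMeta: metav1.ObjectMeta{Name: "prom", Namespace: "monitoring"},
		Spec: monitoringv1.PrometheusSpec{
			CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
				ScrapeConfigSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"group": "sc"},
				},
				ScrapeConfigNamespaceSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"team": "obs"},
				},
			},
		},
	}
	fmt.Printf("%s selects ScrapeConfigs across namespaces\n", p.Name)
}
```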
15 changes: 15 additions & 0 deletions pkg/prometheus/agent/operator.go
@@ -1167,6 +1167,22 @@ func (c *Operator) enqueueForNamespace(store cache.Store, nsName string) {
c.rr.EnqueueForReconciliation(p)
return
}
// Check for Prometheus instances selecting ScrapeConfigs in the NS.
scrapeConfigNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ScrapeConfigNamespaceSelector)
if err != nil {
level.Error(c.logger).Log(
"msg", fmt.Sprintf("failed to convert ScrapeConfigNamespaceSelector of %q to selector", p.Name),
"err", err,
)
return
}

if scrapeConfigNSSelector.Matches(labels.Set(ns.Labels)) {
c.rr.EnqueueForReconciliation(p)
return
}
})
if err != nil {
level.Error(c.logger).Log(
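Both this hunk and the matching one in pkg/prometheus/server/operator.go below rely on the same mechanism: convert the CR's `metav1.LabelSelector` into a `labels.Selector` and match it against the labels of the namespace being processed. A self-contained sketch of that mechanism, with invented label values:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// The namespace selector as it would appear on the Prometheus spec.
	nsSelector := &metav1.LabelSelector{
		MatchLabels: map[string]string{"group": "sc"},
	}

	// LabelSelectorAsSelector turns the API type into a matchable
	// labels.Selector. A nil *LabelSelector converts to a selector that
	// matches nothing, while an empty one matches everything.
	sel, err := metav1.LabelSelectorAsSelector(nsSelector)
	if err != nil {
		panic(err)
	}

	// Labels of the namespace that triggered enqueueForNamespace.
	nsLabels := labels.Set{"group": "sc", "team": "observability"}

	fmt.Println(sel.Matches(nsLabels)) // true
}
```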
15 changes: 15 additions & 0 deletions pkg/prometheus/server/operator.go
@@ -850,6 +850,21 @@ func (c *Operator) enqueueForNamespace(store cache.Store, nsName string) {
c.rr.EnqueueForReconciliation(p)
return
}
// Check for Prometheus instances selecting ScrapeConfigs in the NS.
scrapeConfigNSSelector, err := metav1.LabelSelectorAsSelector(p.Spec.ScrapeConfigNamespaceSelector)
if err != nil {
level.Error(c.logger).Log(
"msg", fmt.Sprintf("failed to convert ScrapeConfigNamespaceSelector of %q to selector", p.Name),
"err", err,
)
return
}

if scrapeConfigNSSelector.Matches(labels.Set(ns.Labels)) {
c.rr.EnqueueForReconciliation(p)
return
}
})
if err != nil {
level.Error(c.logger).Log(
13 changes: 7 additions & 6 deletions test/e2e/main_test.go
@@ -372,12 +372,13 @@ func TestDenylist(t *testing.T) {
func TestPromInstanceNs(t *testing.T) {
skipPrometheusTests(t)
testFuncs := map[string]func(t *testing.T){
"AllNs": testPrometheusInstanceNamespacesAllNs,
"AllowList": testPrometheusInstanceNamespacesAllowList,
"DenyList": testPrometheusInstanceNamespacesDenyList,
"NamespaceNotFound": testPrometheusInstanceNamespacesNamespaceNotFound,
"ScrapeConfigLifecycle": testScrapeConfigLifecycle,
"ConfigReloaderResources": testConfigReloaderResources,
"AllNs": testPrometheusInstanceNamespacesAllNs,
"AllowList": testPrometheusInstanceNamespacesAllowList,
"DenyList": testPrometheusInstanceNamespacesDenyList,
"NamespaceNotFound": testPrometheusInstanceNamespacesNamespaceNotFound,
"ScrapeConfigLifecycle": testScrapeConfigLifecycle,
"ScrapeConfigLifecycleInDifferentNs": testScrapeConfigLifecycleInDifferentNS,
"ConfigReloaderResources": testConfigReloaderResources,
}

for name, f := range testFuncs {
2 changes: 1 addition & 1 deletion test/e2e/prometheus_instance_namespaces_test.go
@@ -151,7 +151,7 @@ func testPrometheusInstanceNamespacesDenyList(t *testing.T) {

// create Prometheus custom resource in the "instance" namespace.
// This one must be reconciled.
// Let this Prometheus custom resource match service monitors in namespaces having the label `"monitored": "true"`.
// Let this Prometheus custom resource match service monitors in namespaces having the label `"group": "monitored"`.
// This will match the service monitors created in the "denied" namespace.
// Also create a service monitor in this namespace. This one must not be reconciled.
// Expose the created Prometheus service.
95 changes: 95 additions & 0 deletions test/e2e/scrapeconfig_test.go
@@ -224,6 +224,101 @@ func testScrapeConfigLifecycle(t *testing.T) {
require.NoError(t, err)
}

func testScrapeConfigLifecycleInDifferentNS(t *testing.T) {
skipPrometheusTests(t)

testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)

// The namespace where the Prometheus CR will reside.
promns := framework.CreateNamespace(context.Background(), t, testCtx)
// The namespace where the ScrapeConfig will reside.
scns := framework.CreateNamespace(context.Background(), t, testCtx)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, promns)
framework.SetupPrometheusRBAC(context.Background(), t, testCtx, scns)

_, err := framework.CreateOrUpdatePrometheusOperator(
context.Background(),
promns,
[]string{scns},
nil,
[]string{promns},
nil,
false,
true, // clusterrole
true,
)
require.NoError(t, err)

// Create a Prometheus object in promns that selects ScrapeConfig resources
// labeled "group": "sc" in namespaces labeled "kubernetes.io/metadata.name": <scns>.
p := framework.MakeBasicPrometheus(promns, "prom", scns, 1)
p.Spec.ScrapeConfigNamespaceSelector = &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubernetes.io/metadata.name": scns,
},
}
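// Note: recent Kubernetes releases (v1.21+) automatically set the immutable
// label "kubernetes.io/metadata.name" on every namespace to the namespace's
// name, so the selector above matches exactly the scns namespace.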

p.Spec.ScrapeConfigSelector = &metav1.LabelSelector{
MatchLabels: map[string]string{
"group": "sc",
},
}

// Disable all other selectors so that this Prometheus only selects ScrapeConfigs.
p.Spec.PodMonitorSelector = nil
p.Spec.PodMonitorNamespaceSelector = nil
p.Spec.ServiceMonitorSelector = nil
p.Spec.ServiceMonitorNamespaceSelector = nil
p.Spec.RuleSelector = nil
p.Spec.RuleNamespaceSelector = nil

_, err = framework.CreatePrometheusAndWaitUntilReady(context.Background(), promns, p)
require.NoError(t, err)

// 1. Create a ScrapeConfig in scns and check that its targets appear in Prometheus
sc := framework.MakeBasicScrapeConfig(scns, "scrape-config")
sc.ObjectMeta.Labels = map[string]string{"group": "sc"}

sc.Spec.StaticConfigs = []monitoringv1alpha1.StaticConfig{
{
Targets: []monitoringv1alpha1.Target{"target1:9090", "target2:9090"},
},
}
_, err = framework.CreateScrapeConfig(context.Background(), scns, sc)
require.NoError(t, err)

// Check that the targets appear in Prometheus
err = framework.WaitForActiveTargets(context.Background(), promns, "prometheus-operated", 2)
require.NoError(t, err)

// 2. Update the ScrapeConfig and add a target. Then, check that 3 targets appear in Prometheus.
sc, err = framework.GetScrapeConfig(context.Background(), scns, "scrape-config")
require.NoError(t, err)

sc.Spec.StaticConfigs = []monitoringv1alpha1.StaticConfig{
{
Targets: []monitoringv1alpha1.Target{"target1:9090", "target2:9090", "target3:9090"},
},
}

_, err = framework.UpdateScrapeConfig(context.Background(), scns, sc)
require.NoError(t, err)

// Check that the targets appear in Prometheus
err = framework.WaitForActiveTargets(context.Background(), promns, "prometheus-operated", 3)
require.NoError(t, err)

// 3. Remove the ScrapeConfig and check that the targets disappear in Prometheus
err = framework.DeleteScrapeConfig(context.Background(), scns, "scrape-config")
require.NoError(t, err)

// Check that the targets disappeared in Prometheus
err = framework.WaitForActiveTargets(context.Background(), promns, "prometheus-operated", 0)
require.NoError(t, err)
}
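For reference, an assertion like `WaitForActiveTargets` ultimately amounts to polling the Prometheus `/api/v1/targets` endpoint and counting active targets. A minimal hand-rolled sketch of one such check — the base URL is an assumption, and the actual framework helper may work differently:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// targetsResponse matches the relevant subset of the /api/v1/targets payload.
type targetsResponse struct {
	Data struct {
		ActiveTargets []json.RawMessage `json:"activeTargets"`
	} `json:"data"`
}

// countActiveTargets returns how many active targets Prometheus reports.
func countActiveTargets(baseURL string) (int, error) {
	resp, err := http.Get(baseURL + "/api/v1/targets")
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	var tr targetsResponse
	if err := json.NewDecoder(resp.Body).Decode(&tr); err != nil {
		return 0, err
	}
	return len(tr.Data.ActiveTargets), nil
}

func main() {
	n, err := countActiveTargets("http://localhost:9090")
	if err != nil {
		panic(err)
	}
	fmt.Printf("active targets: %d\n", n)
}
```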

// testPromOperatorStartsWithoutScrapeConfigCRD deletes the ScrapeConfig CRD from the cluster and then starts
// prometheus-operator to check that it doesn't crash.
func testPromOperatorStartsWithoutScrapeConfigCRD(t *testing.T) {
Expand Down