Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 17 additions & 3 deletions api/adc/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -505,6 +505,20 @@ func (n *UpstreamNodes) UnmarshalJSON(p []byte) error {
return nil
}

// MarshalJSON implements json.Marshaler for UpstreamNodes.
//
// A nil UpstreamNodes value is encoded as the empty JSON array ("[]")
// instead of Go's default "null" for nil slices, because the APISIX
// upstream nodes schema expects an array. Non-nil values are encoded
// exactly like an ordinary []UpstreamNode.
//
// Schema reference:
// https://github.com/apache/apisix/blob/77dacda31277a31d6014b4970e36bae2a5c30907/apisix/schema_def.lua#L295-L338
func (n UpstreamNodes) MarshalJSON() ([]byte, error) {
	if n != nil {
		return json.Marshal([]UpstreamNode(n))
	}
	return []byte("[]"), nil
}

// ComposeRouteName uses namespace, name and rule name to compose
// the route name.
func ComposeRouteName(namespace, name string, rule string) string {
Expand Down Expand Up @@ -621,9 +635,9 @@ type ResponseRewriteConfig struct {
}

type ResponseHeaders struct {
Set map[string]string `json:"set" yaml:"set"`
Add []string `json:"add" yaml:"add"`
Remove []string `json:"remove" yaml:"remove"`
Set map[string]string `json:"set,omitempty" yaml:"set,omitempty"`
Add []string `json:"add,omitempty" yaml:"add,omitempty"`
Remove []string `json:"remove,omitempty" yaml:"remove,omitempty"`
}

// RequestMirror is the rule config for proxy-mirror plugin.
Expand Down
82 changes: 70 additions & 12 deletions internal/adc/translator/httproute.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ import (

adctypes "github.com/apache/apisix-ingress-controller/api/adc"
"github.com/apache/apisix-ingress-controller/api/v1alpha1"
apiv2 "github.com/apache/apisix-ingress-controller/api/v2"
"github.com/apache/apisix-ingress-controller/internal/controller/label"
"github.com/apache/apisix-ingress-controller/internal/id"
"github.com/apache/apisix-ingress-controller/internal/provider"
Expand Down Expand Up @@ -285,7 +286,7 @@ func (t *Translator) fillHTTPRoutePolicies(routes []*adctypes.Route, policies []
}

func (t *Translator) translateEndpointSlice(portName *string, weight int, endpointSlices []discoveryv1.EndpointSlice, endpointFilter func(*discoveryv1.Endpoint) bool) adctypes.UpstreamNodes {
var nodes adctypes.UpstreamNodes
nodes := adctypes.UpstreamNodes{}
if len(endpointSlices) == 0 {
return nodes
}
Expand Down Expand Up @@ -466,32 +467,89 @@ func (t *Translator) TranslateHTTPRoute(tctx *provider.TranslateContext, httpRou
labels := label.GenLabel(httpRoute)

for ruleIndex, rule := range rules {
upstream := adctypes.NewDefaultUpstream()
var backendErr error
service := adctypes.NewDefaultService()
service.Labels = labels

service.Name = adctypes.ComposeServiceNameWithRule(httpRoute.Namespace, httpRoute.Name, fmt.Sprintf("%d", ruleIndex))
service.ID = id.GenID(service.Name)
service.Hosts = hosts

var (
upstreams = make([]*adctypes.Upstream, 0)
weightedUpstreams = make([]adctypes.TrafficSplitConfigRuleWeightedUpstream, 0)
backendErr error
)

for _, backend := range rule.BackendRefs {
if backend.Namespace == nil {
namespace := gatewayv1.Namespace(httpRoute.Namespace)
backend.Namespace = &namespace
}
upstream := adctypes.NewDefaultUpstream()
upNodes, err := t.translateBackendRef(tctx, backend.BackendRef, DefaultEndpointFilter)
if err != nil {
backendErr = err
continue
}
if len(upNodes) == 0 {
continue
}

t.AttachBackendTrafficPolicyToUpstream(backend.BackendRef, tctx.BackendTrafficPolicies, upstream)
upstream.Nodes = append(upstream.Nodes, upNodes...)
upstream.Nodes = upNodes
upstreams = append(upstreams, upstream)
}

// todo: support multiple backends
service := adctypes.NewDefaultService()
service.Labels = labels
// Handle multiple backends with traffic-split plugin
if len(upstreams) == 0 {
// Create a default upstream if no valid backends
upstream := adctypes.NewDefaultUpstream()
service.Upstream = upstream
} else if len(upstreams) == 1 {
// Single backend - use directly as service upstream
service.Upstream = upstreams[0]
} else {
// Multiple backends - use traffic-split plugin
service.Upstream = upstreams[0]
upstreams = upstreams[1:]

// Set weight in traffic-split for the default upstream
weight := apiv2.DefaultWeight
if rule.BackendRefs[0].Weight != nil {
weight = int(*rule.BackendRefs[0].Weight)
}
weightedUpstreams = append(weightedUpstreams, adctypes.TrafficSplitConfigRuleWeightedUpstream{
Weight: weight,
})

service.Name = adctypes.ComposeServiceNameWithRule(httpRoute.Namespace, httpRoute.Name, fmt.Sprintf("%d", ruleIndex))
service.ID = id.GenID(service.Name)
service.Hosts = hosts
service.Upstream = upstream
// Set other upstreams in traffic-split
for i, upstream := range upstreams {
weight := apiv2.DefaultWeight
// get weight from the backend refs starting from the second backend
if i+1 < len(rule.BackendRefs) && rule.BackendRefs[i+1].Weight != nil {
weight = int(*rule.BackendRefs[i+1].Weight)
}
weightedUpstreams = append(weightedUpstreams, adctypes.TrafficSplitConfigRuleWeightedUpstream{
Upstream: upstream,
Weight: weight,
})
}

if len(weightedUpstreams) > 0 {
if service.Plugins == nil {
service.Plugins = make(map[string]any)
}
service.Plugins["traffic-split"] = &adctypes.TrafficSplitConfig{
Rules: []adctypes.TrafficSplitConfigRule{
{
WeightedUpstreams: weightedUpstreams,
},
},
}
}
}

if backendErr != nil && len(upstream.Nodes) == 0 {
if backendErr != nil && (service.Upstream == nil || len(service.Upstream.Nodes) == 0) {
if service.Plugins == nil {
service.Plugins = make(map[string]any)
}
Expand Down
2 changes: 1 addition & 1 deletion internal/adc/translator/ingress.go
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ func (t *Translator) TranslateIngress(tctx *provider.TranslateContext, obj *netw

// translateEndpointSliceForIngress create upstream nodes from EndpointSlice
func (t *Translator) translateEndpointSliceForIngress(weight int, endpointSlices []discoveryv1.EndpointSlice, servicePort *corev1.ServicePort) adctypes.UpstreamNodes {
var nodes adctypes.UpstreamNodes
nodes := adctypes.UpstreamNodes{}
if len(endpointSlices) == 0 {
return nodes
}
Expand Down
41 changes: 24 additions & 17 deletions internal/provider/apisix/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,12 +38,20 @@ import (
"github.com/apache/apisix-ingress-controller/internal/controller/status"
"github.com/apache/apisix-ingress-controller/internal/manager/readiness"
"github.com/apache/apisix-ingress-controller/internal/provider"
"github.com/apache/apisix-ingress-controller/internal/provider/common"
"github.com/apache/apisix-ingress-controller/internal/types"
"github.com/apache/apisix-ingress-controller/internal/utils"
pkgutils "github.com/apache/apisix-ingress-controller/pkg/utils"
)

const ProviderTypeAPISIX = "apisix"
const (
ProviderTypeAPISIX = "apisix"

RetryBaseDelay = 1 * time.Second
RetryMaxDelay = 1000 * time.Second

MinSyncPeriod = 1 * time.Second
)

type apisixProvider struct {
provider.Options
Expand Down Expand Up @@ -229,33 +237,32 @@ func (d *apisixProvider) Start(ctx context.Context) error {

initalSyncDelay := d.InitSyncDelay
if initalSyncDelay > 0 {
time.AfterFunc(initalSyncDelay, func() {
if err := d.sync(ctx); err != nil {
log.Error(err)
return
}
})
time.AfterFunc(initalSyncDelay, d.syncNotify)
}

if d.SyncPeriod < 1 {
return nil
syncPeriod := d.SyncPeriod
if syncPeriod < MinSyncPeriod {
syncPeriod = MinSyncPeriod
}
ticker := time.NewTicker(d.SyncPeriod)
ticker := time.NewTicker(syncPeriod)
defer ticker.Stop()

retrier := common.NewRetrier(common.NewExponentialBackoff(RetryBaseDelay, RetryMaxDelay))

for {
synced := false
select {
case <-d.syncCh:
synced = true
case <-ticker.C:
synced = true
case <-retrier.C():
case <-ctx.Done():
retrier.Reset()
return nil
}
if synced {
if err := d.sync(ctx); err != nil {
log.Error(err)
}
if err := d.sync(ctx); err != nil {
log.Error(err)
retrier.Next()
} else {
retrier.Reset()
}
}
}
Expand Down
96 changes: 96 additions & 0 deletions internal/provider/common/retrier.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package common

import (
"sync"
"time"
)

// Backoff computes the successive delays to wait between retry attempts.
type Backoff interface {
	// Next returns the delay to wait before the next retry attempt and
	// advances the backoff's internal state.
	Next() time.Duration
	// Reset restores the backoff to its initial state so the next call
	// to Next starts the delay sequence over.
	Reset()
}

type ExponentialBackoff struct {
base, max, current time.Duration
}

func NewExponentialBackoff(base, max time.Duration) *ExponentialBackoff {
return &ExponentialBackoff{base: base, max: max, current: base}
}

func (b *ExponentialBackoff) Next() time.Duration {
delay := b.current
b.current *= 2
if b.current > b.max {
b.current = b.max
}
return delay
}

func (b *ExponentialBackoff) Reset() {
b.current = b.base
}

// Retrier schedules retry notifications according to a Backoff policy.
// Next arms (or re-arms) a timer for the next retry; when the timer fires,
// a signal is delivered on the channel returned by C. Reset cancels any
// pending retry and restores the backoff to its initial delay.
//
// All methods are safe for concurrent use.
type Retrier struct {
	mu      sync.Mutex
	ch      chan struct{}
	timer   *time.Timer
	backoff Backoff
}

// NewRetrier returns a Retrier driven by the given backoff policy.
// The notification channel has capacity 1 so a firing timer never blocks.
func NewRetrier(b Backoff) *Retrier {
	return &Retrier{
		ch:      make(chan struct{}, 1),
		backoff: b,
	}
}

// Reset cancels any scheduled retry and resets the backoff delays.
// It also discards a signal that a timer may already have delivered to
// the channel before Reset was called; without this drain, a stale retry
// signal queued just before a successful attempt would trigger one
// spurious extra retry afterwards.
func (r *Retrier) Reset() {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.timer != nil {
		r.timer.Stop()
		r.timer = nil
	}
	// Drain an already-delivered signal, if any (non-blocking).
	select {
	case <-r.ch:
	default:
	}
	r.backoff.Reset()
}

// Next schedules the next retry notification after the backoff's next
// delay, replacing any previously scheduled (but not yet fired) one.
func (r *Retrier) Next() {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.timer != nil {
		r.timer.Stop()
		r.timer = nil
	}

	delay := r.backoff.Next()
	r.timer = time.AfterFunc(delay, func() {
		// Non-blocking send: if a signal is already pending the
		// consumer will see it anyway, so dropping the duplicate
		// is safe.
		select {
		case r.ch <- struct{}{}:
		default:
		}
	})
}

// C returns the channel on which retry notifications are delivered.
func (r *Retrier) C() <-chan struct{} {
	return r.ch
}
Loading
Loading