-
Notifications
You must be signed in to change notification settings - Fork 175
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
fetch SAs from apiserver #242
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,43 @@ | ||
package cache | ||
|
||
import ( | ||
"sync" | ||
|
||
"k8s.io/klog/v2" | ||
) | ||
|
||
// notifications tracks per-cache-key waiters so that callers blocked on a
// service-account cache miss can be signalled once the entry has been fetched.
type notifications struct {
	// handlers maps a request cache key to a channel that is closed by
	// broadcast once the corresponding entry is available.
	handlers map[string]chan struct{}
	// mu guards handlers.
	mu sync.Mutex
	// fetchRequests receives requests for entries not yet seen, to be
	// fetched from the API server by a separate consumer.
	fetchRequests chan<- *Request
}
|
||
func newNotifications(saFetchRequests chan<- *Request) *notifications { | ||
return ¬ifications{ | ||
handlers: map[string]chan struct{}{}, | ||
fetchRequests: saFetchRequests, | ||
} | ||
} | ||
|
||
func (n *notifications) create(req Request) <-chan struct{} { | ||
n.mu.Lock() | ||
defer n.mu.Unlock() | ||
|
||
notifier, found := n.handlers[req.CacheKey()] | ||
if !found { | ||
notifier = make(chan struct{}) | ||
n.handlers[req.CacheKey()] = notifier | ||
n.fetchRequests <- &req | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We control the APIServer request rate through the size of the channel but it has two downsides:
A better choice could be use a larger channel size to minimize the chance of channel write blocking, and implement a more robust channel consumer which limit the consumption rate. In case of extremely high volumes of requests queued in the channel and the API requests could not be sent in time, the result would be either be the cache is synced before grace period and pod is mutated, or cache is not synced and the pod is not mutated. But no prolonged delay to pod creation or excessive requests to the APIServer |
||
} | ||
return notifier | ||
} | ||
|
||
func (n *notifications) broadcast(key string) { | ||
n.mu.Lock() | ||
defer n.mu.Unlock() | ||
if handler, found := n.handlers[key]; found { | ||
klog.V(5).Infof("Notifying handlers for %q", key) | ||
close(handler) | ||
delete(n.handlers, key) | ||
} | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -433,9 +433,10 @@ func (m *Modifier) buildPodPatchConfig(pod *corev1.Pod) *podPatchConfig { | |
} | ||
|
||
// Use the STS WebIdentity method if set | ||
request := cache.Request{Namespace: pod.Namespace, Name: pod.Spec.ServiceAccountName, RequestNotification: true} | ||
gracePeriodEnabled := m.saLookupGraceTime > 0 | ||
request := cache.Request{Namespace: pod.Namespace, Name: pod.Spec.ServiceAccountName, RequestNotification: gracePeriodEnabled} | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I added this change to toggle `RequestNotification` based on whether the grace period is enabled. Previously it was basically a no-op when the feature is disabled.
||
response := m.Cache.Get(request) | ||
if !response.FoundInCache && m.saLookupGraceTime > 0 { | ||
if !response.FoundInCache && gracePeriodEnabled { | ||
klog.Warningf("Service account %s not found in the cache. Waiting up to %s to be notified", request.CacheKey(), m.saLookupGraceTime) | ||
select { | ||
case <-response.Notifier: | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This would mean we are making only one request at a time to apiserver right?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Wrapped this in a goroutine - thanks for the catch 💯