memo.go
// Package memo (ex9.3) provides cancellable memoization of a function.
//
// Requests for different keys proceed in parallel. Concurrent requests for the
// same key block until the first completes. This implementation uses a monitor
// goroutine.
package memo

import (
	"fmt"
)
// Func is the type of the function to memoize.
type Func func(key string, done <-chan struct{}) (interface{}, error)

// A result is the result of calling a Func.
type result struct {
	value interface{}
	err   error
}

// An entry holds the memoized result for one key and a channel that is closed
// once that result is ready.
type entry struct {
	res   result
	ready chan struct{} // closed when res is ready
}

// A request is a message requesting that the Func be applied to key.
type request struct {
	key      string
	done     <-chan struct{}
	response chan<- result // the client wants a single result
}

// A Memo memoizes calls to a Func and is safe for concurrent use.
type Memo struct {
	requests, cancels chan request
}
// New returns a memoization of f. Clients must subsequently call Close.
func New(f Func) *Memo {
	memo := &Memo{make(chan request), make(chan request)}
	go memo.server(f)
	return memo
}

// Get returns the memoized result of applying the Func to key. If done is
// closed by the time the result arrives, Get still returns that result, but it
// also queues a cancellation so that a later request for the same key will
// call the Func again.
func (memo *Memo) Get(key string, done <-chan struct{}) (interface{}, error) {
	response := make(chan result)
	req := request{key, done, response}
	memo.requests <- req
	fmt.Println("get: waiting for response")
	res := <-response
	fmt.Println("get: checking if cancelled")
	select {
	case <-done:
		fmt.Println("get: queueing cancellation request")
		memo.cancels <- req
	default:
		// Not cancelled. Continue.
	}
	fmt.Println("get: return")
	return res.value, res.err
}

// Close shuts down the Memo's monitor goroutine.
func (memo *Memo) Close() { close(memo.requests) }
// server is the monitor goroutine: it owns the cache and serializes all access
// to it by handling request and cancellation messages.
func (memo *Memo) server(f Func) {
	cache := make(map[string]*entry)
Loop:
	for {
	Cancel:
		// Process all cancellations before requests.
		//
		// After Get has queued a cancellation for some key, any subsequent
		// request for that key should get the result of a new call to the
		// Func. If select were allowed to choose randomly between processing
		// requests and cancellations, whether a request is affected by an
		// earlier cancellation could not be predicted without inspecting the
		// cancels queue, which the client obviously cannot do.
		for {
			select {
			case req := <-memo.cancels:
				fmt.Println("server: deleting cancelled entry (early)")
				delete(cache, req.key)
			default:
				break Cancel
			}
		}
		// Wait for requests or cancellations, and start the loop over so that
		// any queued cancellations are processed first.
		select {
		case req := <-memo.cancels:
			fmt.Println("server: deleting cancelled entry")
			delete(cache, req.key)
			continue Loop
		case req, ok := <-memo.requests:
			if !ok {
				return // Close was called: shut down the monitor goroutine.
			}
			fmt.Println("server: request")
			e := cache[req.key]
			if e == nil {
				// This is the first request for this key.
				e = &entry{ready: make(chan struct{})}
				cache[req.key] = e
				go e.call(f, req.key, req.done) // call f(key, done)
			}
			go e.deliver(req.response)
		}
	}
}
func (e *entry) call(f Func, key string, done <-chan struct{}) {
	// Evaluate the function.
	e.res.value, e.res.err = f(key, done)
	fmt.Println("call: returned from f")
	// Broadcast the ready condition.
	close(e.ready)
}

func (e *entry) deliver(response chan<- result) {
	// Wait for the ready condition.
	<-e.ready
	// Send the result to the client.
	response <- e.res
}