
Merge pull request #160 from slingamn/cache.4

fix a race in regenerateMembersCache
tags/v0.10.1
Shivaram Lingamneni 6 years ago
commit d5832bf765
1 changed file with 22 additions and 16 deletions

irc/channel.go (+22, -16)

@@ -19,20 +19,21 @@ import (
 
 // Channel represents a channel that clients can join.
 type Channel struct {
-	flags          ModeSet
-	lists          map[Mode]*UserMaskSet
-	key            string
-	members        MemberSet
-	membersCache   []*Client // allow iteration over channel members without holding the lock
-	name           string
-	nameCasefolded string
-	server         *Server
-	createdTime    time.Time
-	stateMutex     sync.RWMutex
-	topic          string
-	topicSetBy     string
-	topicSetTime   time.Time
-	userLimit      uint64
+	flags             ModeSet
+	lists             map[Mode]*UserMaskSet
+	key               string
+	members           MemberSet
+	membersCache      []*Client  // allow iteration over channel members without holding the lock
+	membersCacheMutex sync.Mutex // tier 2; see `regenerateMembersCache`
+	name              string
+	nameCasefolded    string
+	server            *Server
+	createdTime       time.Time
+	stateMutex        sync.RWMutex
+	topic             string
+	topicSetBy        string
+	topicSetTime      time.Time
+	userLimit         uint64
 }
 
 // NewChannel creates a new channel from a `Server` and a `name`
@@ -67,10 +68,15 @@ func NewChannel(s *Server, name string, addDefaultModes bool) *Channel {
 }
 
 func (channel *Channel) regenerateMembersCache() {
-	// this is eventually consistent even without holding the writable Lock()
+	// this is eventually consistent even without holding stateMutex.Lock()
 	// throughout the update; all updates to `members` while holding Lock()
 	// have a serial order, so the call to `regenerateMembersCache` that
-	// happens-after the last one will see *all* the updates
+	// happens-after the last one will see *all* the updates. then,
+	// `membersCacheMutex` ensures that this final read is correctly paired
+	// with the final write to `membersCache`.
+	channel.membersCacheMutex.Lock()
+	defer channel.membersCacheMutex.Unlock()
+
 	channel.stateMutex.RLock()
 	result := make([]*Client, len(channel.members))
 	i := 0
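The hunk cuts off before the end of the function, but the comments spell out the pattern: writers mutate `members` under `stateMutex.Lock()`, each writer then regenerates the cache, and `membersCacheMutex` serializes the regenerations so that the last read of `members` is also the last write to `membersCache`. Below is a minimal, self-contained Go sketch of that pattern. The `Channel` struct is pared down to the relevant fields; `Client`, `MemberSet`, the `Join` helper, and the tail of `regenerateMembersCache` (the copy loop and the final write) are plausible stand-ins for illustration, not the code from this commit.

```go
// Minimal sketch of the "tier 2 cache mutex" pattern from this commit.
// Client, MemberSet, Join, and the tail of regenerateMembersCache are
// illustrative stand-ins, not the project's real code.
package main

import (
	"fmt"
	"sync"
)

type Client struct {
	nick string
}

type MemberSet map[*Client]bool

type Channel struct {
	members           MemberSet
	membersCache      []*Client  // snapshot readable without holding stateMutex for long
	membersCacheMutex sync.Mutex // tier 2: serializes cache regeneration
	stateMutex        sync.RWMutex
}

// regenerateMembersCache rebuilds membersCache from members. Without
// membersCacheMutex, two concurrent calls could read `members` in one
// order but install their snapshots in the opposite order, leaving a
// stale cache in place. Holding membersCacheMutex across both the read
// and the write pairs the final read with the final write.
func (channel *Channel) regenerateMembersCache() {
	channel.membersCacheMutex.Lock()
	defer channel.membersCacheMutex.Unlock()

	channel.stateMutex.RLock()
	result := make([]*Client, len(channel.members))
	i := 0
	for member := range channel.members {
		result[i] = member
		i++
	}
	channel.stateMutex.RUnlock()

	channel.stateMutex.Lock()
	channel.membersCache = result
	channel.stateMutex.Unlock()
}

// Join adds a client under the write lock, then refreshes the cache.
func (channel *Channel) Join(client *Client) {
	channel.stateMutex.Lock()
	channel.members[client] = true
	channel.stateMutex.Unlock()

	channel.regenerateMembersCache()
}

func main() {
	channel := &Channel{members: make(MemberSet)}

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			channel.Join(&Client{nick: fmt.Sprintf("nick%d", i)})
		}(i)
	}
	wg.Wait()

	// Every Join's insert happens-before its own regeneration, and the
	// regenerations are serialized by membersCacheMutex, so the last one
	// sees all eight members and its snapshot is the one that survives.
	channel.stateMutex.RLock()
	fmt.Printf("members: %d, cached: %d\n", len(channel.members), len(channel.membersCache))
	channel.stateMutex.RUnlock()
}
```

Note the lock ordering the sketch preserves: `membersCacheMutex` (the comment's "tier 2") is always acquired before, and never while holding, `stateMutex`. That reads as a lock-tier convention for deadlock avoidance, though this is an inference from the comment rather than something the hunk states.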
