state atomic.Uint64 // high 32 bits are counter, low 32 bits are waiter count. sema uint32 }
// Related interfaces and structs
// A Uint64 is an atomic uint64. The zero value is zero. // atomic/type.go type Uint64 struct { _ noCopy _ align64 v uint64 }
// noCopy may be added to structs which must not be copied
// after the first use. (copied from sync/cond.go)
//
// Note that it must not be embedded, due to the Lock and Unlock methods.
type noCopy struct{}

// Lock is a no-op used by -copylocks checker from `go vet`.
func (*noCopy) Lock() {}

// Unlock is the matching no-op; together with Lock it makes noCopy
// look like a sync.Locker to the copylocks analysis.
func (*noCopy) Unlock() {}
// Add adds delta, which may be negative, to the WaitGroup counter.
// If the counter becomes zero while waiters are blocked in Wait,
// they are all released; if the counter goes negative, Add panics.
func (wg *WaitGroup) Add(delta int) {
	// Check whether data-race detection is enabled.
	if race.Enabled {
		if delta < 0 {
			// Synchronize decrements with Wait.
			race.ReleaseMerge(unsafe.Pointer(wg))
		}
		race.Disable()
		defer race.Enable()
	}
	// state packs counter (high 32 bits) and waiter count (low 32 bits)
	// in one word, so both are read/updated by a single atomic op.
	state := wg.state.Add(uint64(delta) << 32)
	v := int32(state >> 32) // counter value after the add
	w := uint32(state)      // number of goroutines blocked in Wait
	if race.Enabled && delta > 0 && v == int32(delta) {
		// The first increment must be synchronized with Wait.
		// Need to model this as a read, because there can be
		// several concurrent wg.counter transitions from 0.
		race.Read(unsafe.Pointer(&wg.sema))
	}
	if v < 0 {
		panic("sync: negative WaitGroup counter")
	}
	// v == delta means the counter just went 0 -> delta, yet waiters
	// already exist: Add raced with Wait.
	if w != 0 && delta > 0 && v == int32(delta) {
		panic("sync: WaitGroup misuse: Add called concurrently with Wait")
	}
	// Fast path: counter still positive, or nobody is waiting.
	if v > 0 || w == 0 {
		return
	}
	// This goroutine has set counter to 0 when waiters > 0.
	// Now there can't be concurrent mutations of state:
	// - Adds must not happen concurrently with Wait,
	// - Wait does not increment waiters if it sees counter == 0.
	// Still do a cheap sanity check to detect WaitGroup misuse.
	if wg.state.Load() != state {
		panic("sync: WaitGroup misuse: Add called concurrently with Wait")
	}
	// Reset waiters count to 0.
	wg.state.Store(0)
	// Release the semaphore once per waiter to wake them all.
	for ; w != 0; w-- {
		runtime_Semrelease(&wg.sema, false, 0)
	}
}