class mutex_tt : nocopy_t {
    os_unfair_lock mLock;
    ...
}
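So mutex_tt is a thin wrapper around os_unfair_lock. For comparison, here is a minimal sketch of using os_unfair_lock directly from <os/lock.h>; the counter, lock variable, and function name are purely illustrative:

#import <os/lock.h>

// Illustrative globals: a counter protected by an unfair lock.
static os_unfair_lock gCounterLock = OS_UNFAIR_LOCK_INIT;
static long gCounter = 0;

void IncrementCounter(void) {
    os_unfair_lock_lock(&gCounterLock);   // blocks in the kernel instead of busy-waiting
    gCounter++;
    os_unfair_lock_unlock(&gCounterLock); // must be unlocked by the same thread that locked it
}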
The getter handles atomic in much the same way as the setter does:
id objc_getProperty(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic) {
    if (offset == 0) {
        return object_getClass(self);
    }

    // Retain release world
    id *slot = (id*) ((char*)self + offset);
    if (!atomic) return *slot;

    // Atomic retain release world
    spinlock_t& slotlock = PropertyLocks[slot];
    slotlock.lock();    // lock
    id value = objc_retain(*slot);
    slotlock.unlock();  // unlock

    // for performance, we (safely) issue the autorelease OUTSIDE of the spinlock.
    return objc_autoreleaseReturnValue(value);
}
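To connect this back to property declarations: for an atomic property, the compiler routes the synthesized getter through objc_getProperty with the ivar's offset. A minimal sketch, assuming a hypothetical Person class with a copy (and, by default, atomic) name property; the extern declaration simply mirrors the runtime function shown above:

#import <Foundation/Foundation.h>
#import <objc/runtime.h>

// Declared by the objc runtime; redeclared here for the sketch.
extern id objc_getProperty(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic);

@interface Person : NSObject
// atomic is the default, so the synthesized getter calls objc_getProperty(..., YES)
@property (copy) NSString *name;
@end

@implementation Person
@end

int main(void) {
    @autoreleasepool {
        Person *p = [Person new];
        p.name = @"objc";

        // Look up the ivar offset the compiler baked into the synthesized getter.
        Ivar ivar = class_getInstanceVariable([Person class], "_name");
        ptrdiff_t offset = ivar_getOffset(ivar);

        // Equivalent of what the synthesized atomic getter does.
        NSString *value = objc_getProperty(p, @selector(name), offset, YES);
        NSLog(@"%@", value);
    }
    return 0;
}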
// End synchronizing on 'obj'.
// Returns OBJC_SYNC_SUCCESS or OBJC_SYNC_NOT_OWNING_THREAD_ERROR
int objc_sync_exit(id obj) {
    int result = OBJC_SYNC_SUCCESS;

    if (obj) {  // obj is not nil
        SyncData* data = id2data(obj, RELEASE);
        if (!data) {
            result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR;
        } else {
            bool okay = data->mutex.tryUnlock();  // unlock
            if (!okay) {
                result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR;
            }
        }
    } else {  // when obj is nil, do nothing
        // @synchronized(nil) does nothing
    }

    return result;
}
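For reference, this enter/exit pair is what a @synchronized block expands to. A minimal sketch (the function and parameter names are just for illustration); the compiler additionally wraps the body in an exception handler so objc_sync_exit still runs on unwind:

#import <Foundation/Foundation.h>

// Declared by the runtime (objc-sync.h); redeclared here for the sketch.
extern int objc_sync_enter(id obj);
extern int objc_sync_exit(id obj);

void AppendItem(NSMutableArray *list, id item) {
    // Roughly what the compiler emits for:
    //   @synchronized (list) { [list addObject:item]; }
    objc_sync_enter(list);     // take the recursive lock in the SyncData for 'list'
    @try {
        [list addObject:item];
    } @finally {
        objc_sync_exit(list);  // returns OBJC_SYNC_SUCCESS on a balanced exit
    }
}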
#if SUPPORT_DIRECT_THREAD_KEYS  // TLS (Thread Local Storage): a per-thread cache
    // Check per-thread single-entry fast cache for matching object
    bool fastCacheOccupied = NO;
    // Read the SyncData bound to this thread out of thread-local storage
    SyncData *data = (SyncData *)tls_get_direct(SYNC_DATA_DIRECT_KEY);
    // If the thread's fast cache already holds a SyncData, take this branch
    if (data) {
        fastCacheOccupied = YES;

        // The fast cache only helps if it caches this same object
        if (data->object == object) {
            // Found a match in fast cache.
            uintptr_t lockCount;

            result = data;
            // lockCount records how many times this thread has taken the lock;
            // it is what makes the lock nestable (re-entrant)
            lockCount = (uintptr_t)tls_get_direct(SYNC_COUNT_DIRECT_KEY);
            if (result->threadCount <= 0  ||  lockCount <= 0) {
                _objc_fatal("id2data fastcache is buggy");
            }

            switch(why) {
            case ACQUIRE: {
                // objc_sync_enter ends up here (why == ACQUIRE)
                // Bump lockCount; without this bookkeeping, a recursive
                // @synchronized on the same object would deadlock
                lockCount++;
                tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)lockCount);  // store it back
                break;
            }
            case RELEASE:
                // objc_sync_exit ends up here (why == RELEASE)
                lockCount--;
                tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)lockCount);
                if (lockCount == 0) {
                    // remove from fast cache
                    tls_set_direct(SYNC_DATA_DIRECT_KEY, NULL);
                    // atomic because may collide with concurrent ACQUIRE
                    OSAtomicDecrement32Barrier(&result->threadCount);
                }
                break;
            case CHECK:
                // do nothing
                break;
            }

            return result;
        }
    }
#endif
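The lockCount bookkeeping above is exactly what makes @synchronized re-entrant: the same thread can nest blocks on the same object without deadlocking. A small sketch demonstrating this (the Account class and its methods are made up for the example):

#import <Foundation/Foundation.h>

// Hypothetical example type; the point is the nested @synchronized on one object.
@interface Account : NSObject
@property (nonatomic) double balance;
@end

@implementation Account
- (void)deposit:(double)amount {
    @synchronized (self) {      // lockCount 0 -> 1 (or one deeper if already held)
        self.balance += amount;
    }
}

- (void)depositTwice:(double)amount {
    @synchronized (self) {      // lockCount 0 -> 1
        [self deposit:amount];  // re-enters: lockCount 1 -> 2, no deadlock
        [self deposit:amount];
    }                           // lockCount back to 0, fast cache slot cleared
}
@end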
    // Check per-thread cache of already-owned locks for matching object
    SyncCache *cache = fetch_cache(NO);  // fetch this thread's lock cache (do not create one)
    // If the thread already has a cache, the handling mirrors the fast cache above
    if (cache) {
        unsigned int i;
        for (i = 0; i < cache->used; i++) {  // walk this thread's cached entries
            SyncCacheItem *item = &cache->list[i];
            if (item->data->object != object) continue;

            // Found a match.
            result = item->data;
            if (result->threadCount <= 0  ||  item->lockCount <= 0) {
                _objc_fatal("id2data cache is buggy");
            }

            switch(why) {
            case ACQUIRE:  // lock: one more nested acquisition by this thread
                item->lockCount++;
                break;
            case RELEASE:  // unlock
                item->lockCount--;
                if (item->lockCount == 0) {
                    // remove this entry from the per-thread cache
                    cache->list[i] = cache->list[--cache->used];
                    // atomic because may collide with concurrent ACQUIRE
                    OSAtomicDecrement32Barrier(&result->threadCount);
                }
                break;
            case CHECK:
                // do nothing
                break;
            }

            return result;
        }
    }
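For readability, the per-thread cache shapes this loop walks can be sketched as follows. The data, lockCount, used, and list fields follow their use in the code above; allocated is an assumed capacity field, and the real objc4 declarations may differ in detail:

typedef struct {
    SyncData *data;           // lock node this thread currently owns
    unsigned int lockCount;   // times THIS THREAD has locked data->mutex
} SyncCacheItem;

typedef struct SyncCache {
    unsigned int allocated;   // capacity of list (assumed field)
    unsigned int used;        // number of valid entries
    SyncCacheItem list[0];    // variable-length array of owned locks
} SyncCache;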
    // Thread cache didn't find anything.
    // Walk in-use list looking for matching object
    // Spinlock prevents multiple threads from creating multiple
    // locks for the same new object.
    // We could keep the nodes in some hash table if we find that there are
    // more than 20 or so distinct locks active, but we don't do that now.
    // First time in for this object on this thread: nothing was found in any cache
    lockp->lock();

    {
        SyncData* p;
        SyncData* firstUnused = NULL;
        for (p = *listp; p != NULL; p = p->nextData) {  // walk the global in-use list
            if ( p->object == object ) {  // a node is already associated with this object
                result = p;
                // atomic because may collide with concurrent RELEASE
                OSAtomicIncrement32Barrier(&result->threadCount);  // one more thread uses this node
                goto done;
            }
            if ( (firstUnused == NULL) && (p->threadCount == 0) )
                firstUnused = p;
        }

        // no SyncData currently associated with object
        if ( (why == RELEASE) || (why == CHECK) )
            goto done;

        // an unused one was found, use it (nothing matched, so recycle a free node)
        if ( firstUnused != NULL ) {
            result = firstUnused;
            result->object = (objc_object *)object;
            result->threadCount = 1;
            goto done;
        }
    }

    // Allocate a new SyncData and add to list.
    // XXX allocating memory with a global lock held is bad practice,
    // might be worth releasing the lock, allocating, and searching again.
    // But since we never free these guys we won't be stuck in allocation very often.
    posix_memalign((void **)&result, alignof(SyncData), sizeof(SyncData));  // allocate and initialize a new node
    result->object = (objc_object *)object;
    result->threadCount = 1;
    new (&result->mutex) recursive_mutex_t(fork_unsafe_lock);
    result->nextData = *listp;
    *listp = result;

 done:
    lockp->unlock();
    if (result) {
        // Only new ACQUIRE should get here.
        // All RELEASE and CHECK and recursive ACQUIRE are
        // handled by the per-thread caches above.
        if (why == RELEASE) {
            // Probably some thread is incorrectly exiting
            // while the object is held by another thread.
            return nil;
        }
        if (why != ACQUIRE) _objc_fatal("id2data is buggy");
        if (result->object != object) _objc_fatal("id2data is buggy");

#if SUPPORT_DIRECT_THREAD_KEYS
        if (!fastCacheOccupied) {
            // Save in fast thread cache: the single TLS slot is free, so bind the new node to it
            tls_set_direct(SYNC_DATA_DIRECT_KEY, result);
            tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)1);  // lockCount = 1
        } else
#endif
        {
            // Save in thread cache (the cache is created and bound to the thread on first use)
            if (!cache) cache = fetch_cache(YES);
            cache->list[cache->used].data = result;
            cache->list[cache->used].lockCount = 1;
            cache->used++;
        }
    }
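Putting the pieces together, each node on the in-use list has roughly this shape (types simplified for the sketch; in objc4 the object pointer is stored in disguised form and the lists live in a global table bucketed by object address, which is where listp and lockp come from):

typedef struct SyncData {
    struct SyncData *nextData;   // next node in this bucket's linked list
    id object;                   // object passed to @synchronized (stored disguised in objc4)
    int32_t threadCount;         // number of THREADS currently using this node
    recursive_mutex_t mutex;     // the recursive lock objc_sync_enter/exit actually take
} SyncData;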
open class NSConditionLock : NSObject, NSLocking {
    internal var _cond = NSCondition()
    internal var _value: Int
    internal var _thread: _swift_CFThreadRef?

    public convenience override init() {
        self.init(condition: 0)
    }

    public init(condition: Int) {
        _value = condition
    }

    open func lock() {
        let _ = lock(before: Date.distantFuture)
    }

    open func unlock() {
        _cond.lock()
        _thread = nil
        _cond.broadcast()
        _cond.unlock()
    }

    open var condition: Int {
        return _value
    }

    open func lock(whenCondition condition: Int) {
        let _ = lock(whenCondition: condition, before: Date.distantFuture)
    }
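For context, here is a minimal usage sketch of the condition handshake these methods implement. The condition values 0 (empty) and 1 (has data) are arbitrary choices for the example:

#import <Foundation/Foundation.h>

int main(void) {
    @autoreleasepool {
        NSConditionLock *lock = [[NSConditionLock alloc] initWithCondition:0];
        NSMutableArray *buffer = [NSMutableArray array];

        // Producer: may lock regardless of condition, leaves it as 1 ("has data").
        [NSThread detachNewThreadWithBlock:^{
            [lock lock];
            [buffer addObject:@"item"];
            [lock unlockWithCondition:1];
        }];

        // Consumer: blocks until some thread unlocks with condition 1.
        [lock lockWhenCondition:1];
        NSLog(@"consumed: %@", buffer.firstObject);
        [lock unlockWithCondition:0];
    }
    return 0;
}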