
void tbb::queuing_rw_mutex::scoped_lock::release()

Release the lock.

A method to release a queuing_rw_mutex lock.

Definition at line 230 of file queuing_rw_mutex.cpp.

References acquire_internal_lock(), tbb::FLAG, going, initialize(), internal_lock, next, prev, tbb::release, release_internal_lock(), state, try_acquire_internal_lock(), and unblock_or_wait_on_internal_lock().

Referenced by ~scoped_lock().
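
In normal use release() is not called directly: the destructor referenced above invokes it when the scoped_lock goes out of scope, and it may also be called explicitly to release the lock early. A minimal usage sketch (the mutex and data names below are illustrative, not part of TBB):

#include "tbb/queuing_rw_mutex.h"

tbb::queuing_rw_mutex rw_mutex;   // illustrative shared mutex
int shared_value = 0;             // illustrative shared state

void read_value() {
    // false = acquire for read; release() runs in ~scoped_lock()
    tbb::queuing_rw_mutex::scoped_lock lock(rw_mutex, /*write=*/false);
    int observed = shared_value;
    (void)observed;
}

void write_value(int v) {
    // true = acquire for write
    tbb::queuing_rw_mutex::scoped_lock lock(rw_mutex, /*write=*/true);
    shared_value = v;
    lock.release();               // explicit early release is also allowed
}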

{
    __TBB_ASSERT(this->mutex!=NULL, "no lock acquired");

    ITT_NOTIFY(sync_releasing, mutex);

    if( state == STATE_WRITER ) { // Acquired for write

        // The logic below is the same as "writerUnlock", but restructured to remove a "return" from the middle of the routine.
        // In the statement below, acquire semantics of reading 'next' is required
        // so that following operations with fields of 'next' are safe.
        scoped_lock* n = __TBB_load_with_acquire(next);
        if( !n ) {
            if( this == mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
                // this was the only item in the queue, and the queue is now empty.
                goto done;
            }
            SpinwaitWhileEq( next, (scoped_lock*)NULL );
            n = next;
        }
        n->going = 2; // protect next queue node from being destroyed too early
        if( n->state==STATE_UPGRADE_WAITING ) {
            // A successor waiting for an upgrade means this writer was upgraded earlier.
            acquire_internal_lock();
            queuing_rw_mutex::scoped_lock* tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), NULL);
            n->state = STATE_UPGRADE_LOSER;
            // The volatile here ensures release semantics on IPF, which is necessary
            // so that the user's critical section sends the correct values to the next
            // process that acquires the critical section.
            __TBB_store_with_release(n->going,1);
            unblock_or_wait_on_internal_lock(get_flag(tmp));
        } else {
            __TBB_ASSERT( state & (STATE_COMBINED_WAITINGREADER | STATE_WRITER), "unexpected state" );
            __TBB_ASSERT( !( tricky_pointer(n->prev) & FLAG ), "use of corrupted pointer!" );
            n->prev = NULL;
            // ensure release semantics on IPF
            __TBB_store_with_release(n->going,1);
        }

    } else { // Acquired for read

        queuing_rw_mutex::scoped_lock *tmp = NULL;
retry:
        // Addition to the original paper: Mark this->prev as in use
        queuing_rw_mutex::scoped_lock *pred = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->prev), FLAG);

        if( pred ) {
            if( !(pred->try_acquire_internal_lock()) )
            {
                // Failed to acquire the lock on pred. The predecessor is either unlinking or upgrading.
                // In the latter case, it may or may not have seen our "in use" flag, so we need to check.
                tmp = tricky_pointer::compare_and_swap<tbb::release>(&(this->prev), pred, tricky_pointer(pred)|FLAG );
                if( !(tricky_pointer(tmp)&FLAG) ) {
                    // Wait for the predecessor to change this->prev (e.g. during unlink)
                    SpinwaitWhileEq( this->prev, tricky_pointer(pred)|FLAG );
                    // Now owner of pred is waiting for _us_ to release its lock
                    pred->release_internal_lock();
                }
                else ; // The "in use" flag is back: the predecessor did not get it and will release itself; nothing to do

                tmp = NULL;
                goto retry;
            }
            __TBB_ASSERT(pred && pred->internal_lock==ACQUIRED, "predecessor's lock is not acquired");
            this->prev = pred;
            acquire_internal_lock();

            __TBB_store_with_release(pred->next,reinterpret_cast<scoped_lock *>(NULL));

            if( !next && this != mutex->q_tail.compare_and_swap<tbb::release>(pred, this) ) {
                SpinwaitWhileEq( next, (void*)NULL );
            }
            __TBB_ASSERT( !get_flag(next), "use of corrupted pointer" );

            // ensure acquire semantics of reading 'next'
            if( __TBB_load_with_acquire(next) ) { // I->next != nil
                // Equivalent to I->next->prev = I->prev but protected against (prev[n]&FLAG)!=0
                tmp = tricky_pointer::fetch_and_store<tbb::release>(&(next->prev), pred);
                // I->prev->next = I->next;
                __TBB_ASSERT(this->prev==pred, "");
                __TBB_store_with_release(pred->next,next);
            }
            // Safe to release in the order opposite to acquisition, which keeps the code simpler
            pred->release_internal_lock();

        } else { // No predecessor when we looked
            acquire_internal_lock();  // "exclusiveLock(&I->EL)"
            // ensure acquire semantics of reading 'next'
            scoped_lock* n = __TBB_load_with_acquire(next);
            if( !n ) {
                if( this != mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
                    SpinwaitWhileEq( next, (scoped_lock*)NULL );
                    n = next;
                } else {
                    goto unlock_self;
                }
            }
            n->going = 2; // protect next queue node from being destroyed too early
            tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), NULL);
            // ensure release semantics on IPF
            __TBB_store_with_release(n->going,1);
        }
unlock_self:
        unblock_or_wait_on_internal_lock(get_flag(tmp));
    }
done:
    SpinwaitWhileEq( going, 2 );

    initialize();
}
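
The writer and no-predecessor paths above both hinge on a single compare-and-swap against mutex->q_tail to decide whether this node is the last one in the queue. The following standalone sketch shows the same idiom with std::atomic; the names Node, q_tail and detach_from_tail are illustrative, not TBB's.

#include <atomic>

struct Node { std::atomic<Node*> next{nullptr}; };
std::atomic<Node*> q_tail{nullptr};

// On release, check whether 'self' is still the queue tail. If the CAS
// succeeds, the queue is now empty; otherwise a successor is mid-enqueue,
// so spin until it publishes itself through 'next'.
bool detach_from_tail(Node* self) {
    Node* expected = self;
    if (q_tail.compare_exchange_strong(expected, nullptr,
                                       std::memory_order_release,
                                       std::memory_order_relaxed))
        return true;                               // queue became empty
    while (self->next.load(std::memory_order_acquire) == nullptr)
        ;                                          // wait for successor link
    return false;                                  // successor is now visible
}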

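The FLAG arithmetic on the prev pointers (tricky_pointer::fetch_and_add, the tricky_pointer(...)&FLAG tests) steals the low bit of an aligned pointer to mark it "in use". A minimal sketch of that packing, under the assumption that queue nodes are at least 2-byte aligned (the helper names are illustrative):

#include <cstdint>

const std::uintptr_t FLAG = 0x1;   // low bit is free on aligned nodes

template<typename T> std::uintptr_t as_bits(T* p) {
    return reinterpret_cast<std::uintptr_t>(p);
}
template<typename T> T* strip_flag(T* p) {         // pointer without the mark
    return reinterpret_cast<T*>(as_bits(p) & ~FLAG);
}
template<typename T> T* with_flag(T* p) {          // pointer with the mark set
    return reinterpret_cast<T*>(as_bits(p) | FLAG);
}
template<typename T> bool flag_set(T* p) {         // is the mark present?
    return (as_bits(p) & FLAG) != 0;
}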

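SpinwaitWhileEq, used throughout the routine, busy-waits until a shared location changes. Assuming it pauses between polls the way TBB's internal backoff helpers do, an equivalent in standard C++ might look like this (the name spinwait_while_eq is illustrative):

#include <atomic>
#include <thread>

// Spin while 'location' still holds 'value'; yield between polls so the
// waiting thread does not starve the thread it is waiting on.
template<typename T>
void spinwait_while_eq(const std::atomic<T>& location, T value) {
    while (location.load(std::memory_order_acquire) == value)
        std::this_thread::yield();
}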