diff -Naur bench/Benchmark.cpp bench/Benchmark.cpp
--- bench/Benchmark.cpp	2008-05-02 23:36:52.000000000 -0400
+++ bench/Benchmark.cpp	2008-05-02 23:37:05.000000000 -0400
@@ -143,19 +143,25 @@
     barrier(args->id, args->threads);
 
 #if _MSC_VER
-    starttime = clock();
-    globalEndTime = starttime + (BMCONFIG.duration * 1000);
+    if (id == 0) {
+        starttime = clock();
+        globalEndTime = starttime + (BMCONFIG.duration * 1000);
+    }
 #else
-    // set the signal handler and kick off a timer unless execute is nonzero
-    if (BMCONFIG.execute == 0) {
-        signal(SIGALRM, catch_SIGALRM);
-        alarm(BMCONFIG.duration);
+    if (id == 0) {
+        // set the signal handler and kick off a timer unless execute is nonzero
+        if (BMCONFIG.execute == 0) {
+            signal(SIGALRM, catch_SIGALRM);
+            alarm(BMCONFIG.duration);
+        }
+        // get the start time of the benchmark run
+        starttime = getElapsedTime();
     }
-
-    // get the start time of the benchmark run
-    starttime = getElapsedTime();
 #endif
 
+    // everyone waits here, and then we run the experiment
+    barrier(args->id, args->threads);
+
     if (execute > 0) {
         for (int e = 0; e < execute; e++) {
             b->random_transaction(args, &seed, vals[i], chance[i]);
@@ -189,6 +195,18 @@
 #endif
     }
 
+    // in benchmarks that use retry(), there may be transactions that need to
+    // be woken up. wake them here via a call to wake_retriers()
+    //
+    // NB: exactly one thread should call the wake_retriers method, but we
+    // can't be sure that thread0 will be awake. However, any reasonably
+    // correct benchmark should have at least one thread not in retry() at any
+    // given time
+    static volatile unsigned long mtx = 0;
+    if (bool_cas(&mtx, 0, 1))
+        b->wake_retriers();
+
+    // now wait for everyone to wake up
     barrier(args->id, args->threads);
 
     // OK, everyone is done, so we can get the endtime here
@@ -261,9 +279,12 @@
 
     // Run the sanity check
     if (BMCONFIG.verify) {
-        assert((args[0].b)->sanity_check());
+        bool sanity = (args[0].b)->sanity_check();
         if (BMCONFIG.verbosity > 0)
-            cout << "Completed sanity check." << endl;
+            if (sanity)
+                cout << "Completed sanity check." << endl;
+            else
+                cout << "Sanity check failed." << endl;
     }
 
     // shut off transactions in thread 0
diff -Naur bench/Benchmark.hpp bench/Benchmark.hpp
--- bench/Benchmark.hpp	2008-05-02 23:36:51.000000000 -0400
+++ bench/Benchmark.hpp	2008-05-02 23:37:05.000000000 -0400
@@ -107,6 +107,8 @@
     void measure_speed();
     virtual bool verify(VerifyLevel_t v) = 0;
     virtual ~Benchmark() { }
+    // not usually needed, so provide a default
+    virtual void wake_retriers() { }
 };
 
 // small bit of infrastructure for shutting down livelocked benchmarks
diff -Naur bench/FeatureTest.hpp bench/FeatureTest.hpp
--- bench/FeatureTest.hpp	2008-05-02 23:36:51.000000000 -0400
+++ bench/FeatureTest.hpp	2008-05-02 23:37:05.000000000 -0400
@@ -161,7 +161,7 @@
         BEGIN_TRANSACTION {
             // we don't want things to keep running once time is up!
             if (!bench::early_tx_terminate) {
-                if (id%2 == 0) {
+                if (id % 2 == 0) {
                     FillRetryBuffer();
                 }
                 else {
@@ -172,6 +172,15 @@
     }
 
     /**
+     * Wake up anyone who is retrying, via a transaction that touches enough
+     * important stuff to get everyone to wake up
+     */
+    void wake_retriers()
+    {
+        retrylist.touch_head();
+    }
+
+    /**
      * Read the head of the list.  If the list head is null, then insert
      * zero.  If the head points to zero, then find the max and if the max
      * is less than 63, insert max+1.  Otherwise, retry.
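
The Benchmark.cpp / Benchmark.hpp / FeatureTest.hpp hunks above establish the wake-up protocol for retry()-based benchmarks: once time expires, exactly one thread (guarded by a bool_cas on a static flag) calls the benchmark's wake_retriers(), which performs a "bogus" transactional write to data that every sleeping transaction has read, forcing them out of stm::retry(). The sketch below shows that idiom in isolation; the class name (FlagBench), the INT payload, and the include path are illustrative assumptions and are not part of the patch -- real benchmarks override Benchmark::wake_retriers() as FeatureTest.hpp does, delegating to something like LinkedList::touch_head().

// Sketch only: illustrates the wake_retriers() idiom under assumed names;
// not taken from the patch itself.
#include <stm/stm.hpp>   // assumption: whichever header provides the RSTM API

class FlagBench
{
    // transactional integer that waiting threads watch via stm::retry()
    class INT : public stm::Object
    {
        GENERATE_FIELD(long, val);
      public:
        INT() : m_val(0) { }
    };

    stm::sh_ptr<INT> flag;

  public:
    FlagBench() : flag(new INT()) { }

    // waiter: block (via retry) until the flag becomes nonzero
    void wait_for_flag()
    {
        BEGIN_TRANSACTION {
            stm::rd_ptr<INT> r(flag);
            if (r->get_val(r) == 0)
                stm::retry();
        } END_TRANSACTION;
    }

    // wake_retriers()-style dummy write: store back the value just read.
    // The write set now intersects every waiter's read set, so the retry
    // machinery wakes the sleepers and their transactions re-execute.
    void wake_retriers()
    {
        BEGIN_TRANSACTION {
            stm::wr_ptr<INT> w(flag);
            w->set_val(w->get_val(w), w);
        } END_TRANSACTION;
    }
};

As in the Benchmark.cpp hunk, only one thread should issue this call, and any reasonable benchmark guarantees at least one thread is awake to do so.
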
diff -Naur bench/LinkedList.cpp bench/LinkedList.cpp --- bench/LinkedList.cpp 2008-05-02 23:36:51.000000000 -0400 +++ bench/LinkedList.cpp 2008-05-02 23:37:05.000000000 -0400 @@ -47,27 +47,20 @@ bool LinkedList::isSane(void) const { bool sane = false; + BEGIN_TRANSACTION { + sane = true; + rd_ptr prev(sentinel); + rd_ptr curr(prev->get_next(prev)); - BEGIN_TRANSACTION; - - sane = true; - rd_ptr prev(sentinel); - rd_ptr curr(prev->get_next(prev)); - - while (curr != NULL) - { - if (prev->get_val(prev) >= curr->get_val(curr)) - { - sane = false; - break; + while (curr != NULL) { + if (prev->get_val(prev) >= curr->get_val(curr)) { + sane = false; + break; + } + prev = curr; + curr = curr->get_next(curr); } - - prev = curr; - curr = curr->get_next(curr); - } - - END_TRANSACTION; - + } END_TRANSACTION; return sane; } @@ -76,28 +69,21 @@ bool LinkedList::extendedSanityCheck(verifier v, unsigned long v_param) const { bool sane = false; - - BEGIN_TRANSACTION; - - sane = true; - rd_ptr prev(sentinel); - rd_ptr curr(prev->get_next(prev)); - - while (curr != NULL) - { - if (!v(curr->get_val(curr), v_param) || - (prev->get_val(prev) >= curr->get_val(curr))) - { - sane = false; - break; - } - - prev = curr; - curr = prev->get_next(prev); - } - - END_TRANSACTION; - + BEGIN_TRANSACTION { + sane = true; + rd_ptr prev(sentinel); + rd_ptr curr(prev->get_next(prev)); + while (curr != NULL) { + if (!v(curr->get_val(curr), v_param) || + (prev->get_val(prev) >= curr->get_val(curr))) + { + sane = false; + break; + } + prev = curr; + curr = prev->get_next(prev); + } + } END_TRANSACTION; return sane; } @@ -105,29 +91,26 @@ // sorted order; if val is already in the list, exit without inserting void LinkedList::insert(int val) { - BEGIN_TRANSACTION; + BEGIN_TRANSACTION { + // traverse the list to find the insertion point + rd_ptr prev(sentinel); + rd_ptr curr(prev->get_next(prev)); + + while (curr != NULL) { + if (curr->get_val(curr) >= val) + break; + prev = curr; + curr = prev->get_next(prev); + } - // traverse the list to find the insertion point - rd_ptr prev(sentinel); - rd_ptr curr(prev->get_next(prev)); - - while (curr != NULL) - { - if (curr->get_val(curr) >= val) - break; - - prev = curr; - curr = prev->get_next(prev); - } - - // now insert new_node between prev and curr - if (!curr || (curr->get_val(curr) > val)) { - wr_ptr insert_point(prev); - insert_point->set_next(sh_ptr(new LLNode(val, curr)), - insert_point); - } + // now insert new_node between prev and curr + if (!curr || (curr->get_val(curr) > val)) { + wr_ptr insert_point(prev); + insert_point->set_next(sh_ptr(new LLNode(val, curr)), + insert_point); + } - END_TRANSACTION; + } END_TRANSACTION; } @@ -136,22 +119,18 @@ { bool found = false; - BEGIN_TRANSACTION; - - rd_ptr curr(sentinel); - curr = curr->get_next(curr); - - while (curr != NULL) - { - if (curr->get_val(curr) >= val) - break; - + BEGIN_TRANSACTION { + rd_ptr curr(sentinel); curr = curr->get_next(curr); - } - found = ((curr != NULL) && (curr->get_val(curr) == val)); + while (curr != NULL) { + if (curr->get_val(curr) >= val) + break; + curr = curr->get_next(curr); + } - END_TRANSACTION; + found = ((curr != NULL) && (curr->get_val(curr) == val)); + } END_TRANSACTION; return found; } @@ -183,38 +162,40 @@ return min; } +// write to the head of the list +void LinkedList::touch_head() +{ + BEGIN_TRANSACTION { + wr_ptr curr(sentinel); + curr->set_next(curr->get_next(curr), curr); + } END_TRANSACTION; +} + // remove a node if its value == val void LinkedList::remove(int val) { - 
BEGIN_TRANSACTION; - - // find the node whose val matches the request - rd_ptr prev(sentinel); - rd_ptr curr(prev->get_next(prev)); - - while (curr != NULL) - { - // if we find the node, disconnect it and end the search - if (curr->get_val(curr) == val) - { - wr_ptr mod_point(prev); - mod_point->set_next(curr->get_next(curr), mod_point); - - // delete curr... - tx_delete(curr); - break; - } - else if (curr->get_val(curr) > val) - { - // this means the search failed - break; - } - - prev = curr; - curr = prev->get_next(prev); - } - - END_TRANSACTION; + BEGIN_TRANSACTION { + // find the node whose val matches the request + rd_ptr prev(sentinel); + rd_ptr curr(prev->get_next(prev)); + while (curr != NULL) { + // if we find the node, disconnect it and end the search + if (curr->get_val(curr) == val) { + wr_ptr mod_point(prev); + mod_point->set_next(curr->get_next(curr), mod_point); + + // delete curr... + tx_delete(curr); + break; + } + else if (curr->get_val(curr) > val) { + // this means the search failed + break; + } + prev = curr; + curr = prev->get_next(prev); + } + } END_TRANSACTION; } @@ -224,14 +205,11 @@ BEGIN_TRANSACTION { rd_ptr curr(sentinel); curr = curr->get_next(curr); - std::cout << "list :: "; - while (curr != NULL) { std::cout << curr->get_val(curr) << "->"; curr = curr->get_next(curr); } - std::cout << "NULL" << std::endl; } END_TRANSACTION; } diff -Naur bench/LinkedList.hpp bench/LinkedList.hpp --- bench/LinkedList.hpp 2008-05-02 23:36:52.000000000 -0400 +++ bench/LinkedList.hpp 2008-05-02 23:37:06.000000000 -0400 @@ -70,8 +70,6 @@ // Set of LLNodes represented as a linked list in sorted order class LinkedList : public IntSet { - private: - stm::sh_ptr sentinel; public: @@ -98,6 +96,13 @@ virtual bool extendedSanityCheck(verifier v, unsigned long param) const; + // bogus transaction that writes to the head node of the list, in order + // to wake up retriers + // + // NB: the write does not change anything, and a good compiler would + // notice that it can be elided, but for now this is acceptable + virtual void touch_head(); + // find max and min virtual int findmax() const; virtual int findmin() const; diff -Naur bench/Makefile bench/Makefile --- bench/Makefile 2008-05-02 23:36:52.000000000 -0400 +++ bench/Makefile 2008-05-02 23:37:06.000000000 -0400 @@ -98,10 +98,6 @@ Benchmark.hpp $(CXX) $(CXXFLAGS) $(INCLUDEPATH) -o $@ -c $< -$(OBJDIR)/SkipList.o: SkipList.cpp SkipList.hpp IntSet.hpp $(STM_LIB) \ - Benchmark.hpp - $(CXX) $(CXXFLAGS) $(INCLUDEPATH) -o $@ -c $< - # this is the executable $(OBJROOT)/Bench_$(STM_VERSION): $(OBJFILES) $(STM_LIB) $(CXX) $(CXXFLAGS) $(INCLUDEPATH) -o $@ $^ $(LDFLAGS) diff -Naur Makefile Makefile --- Makefile 2008-05-02 23:36:52.000000000 -0400 +++ Makefile 2008-05-02 23:37:06.000000000 -0400 @@ -54,7 +54,7 @@ ./$(STMCONFIG) -D $(STMCONFIG): stmconfig.cpp - g++ stmconfig.cpp -o $(STMCONFIG) -ggdb + g++ stmconfig.cpp -o $(STMCONFIG) -ggdb -Wall -Wextra -O3 benchmarks: lib @cd bench && $(MAKE) $(MFLAGS) @@ -67,6 +67,7 @@ realclean: @cd stm && $(MAKE) realclean @cd bench && $(MAKE) realclean + @cd retry_tests && $(MAKE) clean rm -f TAGS* config.h Makefile.inc $(STMCONFIG) # the following are hacks; need to be cleaned up to capture dependences diff -Naur retry_tests/BarrierTest.cpp retry_tests/BarrierTest.cpp --- retry_tests/BarrierTest.cpp 1969-12-31 19:00:00.000000000 -0500 +++ retry_tests/BarrierTest.cpp 2008-05-02 23:37:06.000000000 -0400 @@ -0,0 +1,361 @@ +/////////////////////////////////////////////////////////////////////////////// +// +// 
Copyright (c) 2008 +// University of Rochester +// Department of Computer Science +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// * Neither the name of the University of Rochester nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#include +#include +#include +#include +#include + +#include +#include "Buffer.hpp" + +using std::string; +using std::cout; +using std::endl; +using std::vector; + +/***************************************************************************** + * + * This benchmark is a sense-reversing barrier; we see how many times we can + * get N threads through it in a fixed time period + * + *****************************************************************************/ + +/** + * Sense-reversing barrier. This code does not compose! Do not call + * WaitForAll from a transaction! 
+ * + * NB: we use a counter instead of a bool for the sense, so that we can do a + * sanity check at the end + */ +class Barrier +{ + class INT : public stm::Object + { + GENERATE_FIELD(long, val); + INT() : m_val(0) { } + }; + + /*** number of threads at the barrier right now */ + stm::sh_ptr count; + + /*** current sense of the barrier */ + stm::sh_ptr sense; + + /*** each thread's private sense */ + stm::sh_ptr thread_sense[256]; + + public: + /** + * Construct by allocating all of the INTs + */ + Barrier() + : count(new INT()), sense(new INT()) + { + for (int i = 0; i < 256; i++) { + thread_sense[i] = stm::sh_ptr(new INT()); + } + } + + /*** this is the transactional sense-reversing barrier */ + void WaitForAll(int id, int nthreads) + { + // track if this thread is the last one to reach the barrier + bool is_last = false; + + BEGIN_TRANSACTION { + // bump thread's sense + stm::wr_ptr s(thread_sense[id]); + s->set_val(1 + s->get_val(s), s); + + // increment the counter + stm::wr_ptr c(count); + c->set_val(1 + c->get_val(c), c); + + is_last = c->get_val(c) == nthreads; + } END_TRANSACTION; + + BEGIN_TRANSACTION { + if (is_last) { + // reset the counter + stm::wr_ptr c(count); + c->set_val(0, c); + // bump the barrier sense + stm::wr_ptr s(sense); + s->set_val(1 + s->get_val(s), s); + } + else { + stm::rd_ptr my_s(thread_sense[id]); + stm::rd_ptr s(sense); + if (my_s->get_val(my_s) != s->get_val(s)) + stm::retry(); + } + } END_TRANSACTION; + } + + /*** Sanity Check */ + bool sanity(int numthreads) + { + stm::un_ptr s(sense); + int barrier_sense = s->get_val(s); + + // each active thread's sense should equal the barrier sense + for (int i = 0; i < numthreads; i++) { + stm::un_ptr ss(thread_sense[i]); + if (barrier_sense != ss->get_val(ss)) { + return false; + } + } + + // each inactive thread's sense should be zero + for (int i = numthreads; i < 256; i++) { + stm::un_ptr ss(thread_sense[i]); + if (ss->get_val(ss) != 0) { + return false; + } + } + + return true; + } +}; + +/*** Struct to hold all configuration information */ +struct config_t +{ + int threads; + int iterations; + string validation; + string cm; + + /*** default is to run a 1000 iteration test with 2 threads */ + config_t() + : threads(2), iterations(1000), validation("invis-eager"), cm("Polka") + { } + + /*** Display the config parameters */ + void display() + { + cout << "BarrierTest: p=" << threads << ", i=" << iterations + << ", V=" << validation << ", C=" << cm << endl; + } +}; + +/*** Global config options */ +config_t CONFIG; + +/*** The barrier */ +Barrier* BARRIER; + +/*** global timing information */ +unsigned long long STARTTIME = 0; +unsigned long long ENDTIME = 0; + +/** + * barrier to synchronize timing + */ +void barrier(int id, unsigned long nthreads) +{ + static struct + { + volatile unsigned long count; + volatile unsigned long sense; + volatile unsigned long thread_sense[256]; // hard-coded max threads + } __attribute__ ((aligned(64))) bar = {0}; + + bar.thread_sense[id] = !bar.thread_sense[id]; + if (fai(&bar.count) == nthreads - 1) { + bar.count = 0; + bar.sense = !bar.sense; + } + else + while (bar.sense != bar.thread_sense[id]) { } // spin +} + +/** + * This is the packet of information that describes what a thread ought to do + * and what it has done + */ +struct thread_args_t +{ + /*** my thread id */ + int id; + + /** total number of threads (needed for barriers) */ + int threads; + + /*** count of all barriers that have been reached */ + unsigned long long barriers; +}; + +/** + * new threads will 
all begin in this code + */ +void* barrier_test(void* arg) +{ + thread_args_t* args = (thread_args_t*)arg; + + // threads other than thread0 must create a transactional context here + if (args->id != 0) + stm::init(CONFIG.cm, CONFIG.validation, true); + + // all thread configuration before we start timing + barrier(args->id, args->threads); + + // get the start time of the benchmark run + if (args->id == 0) + STARTTIME = getElapsedTime(); + + // now do the appropriate transactions + for (int i = 0; i < CONFIG.iterations; i++) { + BARRIER->WaitForAll(args->id, args->threads); + args->barriers++; + } + + // wait for everyone to ack that the experiment is over + barrier(args->id, args->threads); + + // everyone is done, so we can get the endtime here + if (args->id == 0) + ENDTIME = getElapsedTime(); + + // Shut down STM for all threads other than thread 0 + if (args->id != 0) + stm::shutdown(args->id); + + return 0; +} + +/** + * Print usage + */ +void usage() +{ + cout << "Valid options are:" << endl; + cout << " -p : threads" << endl; + cout << " -i : iterations (default 1000)" << endl; + cout << " -V : validation strategy" << endl; + cout << " -C : contention manager" << endl; + cout << " -h : this message" << endl; +} + +/** + * Main driver for the benchmark + */ +int main(int argc, char** argv) +{ + // parse the command-line options + int opt; + while ((opt = getopt(argc, argv, "p:i:C:V:h")) != -1) { + switch (opt) { + case 'p': + CONFIG.threads = atoi(optarg); + assert(CONFIG.threads > 0); + break; + case 'i': + CONFIG.iterations = atoi(optarg); + assert(CONFIG.iterations > 0); + break; + case 'V': + CONFIG.validation = string(optarg); + break; + case 'C': + CONFIG.cm = string(optarg); + break; + case 'h': + usage(); + return 0; + default: + usage(); + return -1; + } + } + + // print this experiment's information + CONFIG.display(); + + // get a transactional context + stm::init(CONFIG.cm, CONFIG.validation, true); + + // construct the buffer + BARRIER = new Barrier(); + + // create enough config packets for all threads + vector args; + args.resize(CONFIG.threads); + + // create enough pthread handles for all threads + vector tid; + tid.resize(CONFIG.threads); + + // create pthread scope to support multithreading + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM); + + // set up configuration structs for the threads we'll create + for (int i = 0; i < CONFIG.threads; i++) { + // thread id, number of threads + args[i].id = i; + args[i].threads = CONFIG.threads; + + // set counts to zero + args[i].barriers = 0; + } + + // actually create the threads + for (int j = 1; j < CONFIG.threads; j++) + pthread_create(&tid[j], &attr, &barrier_test, &args[j]); + + // all of the other threads should be queued up, waiting to run the + // benchmark, but they can't until this thread starts the benchmark too... + barrier_test((void*)(&args[0])); + + // everyone should be done. 
Join all threads so we don't leave anything + // hanging around + for (int k = 1; k < CONFIG.threads; k++) + pthread_join(tid[k], NULL); + + // sanity check + assert(BARRIER->sanity(CONFIG.threads)); + + // output total performance + cout << "total barriers = " << args[0].barriers << endl; + cout << "total time = " << ENDTIME - STARTTIME << endl; + cout << (1000000000LL * args[0].barriers)/(ENDTIME-STARTTIME) + << " barriers per second" << endl; + + // shut off transactions in thread 0 + stm::shutdown(0); +} diff -Naur retry_tests/Buffer.hpp retry_tests/Buffer.hpp --- retry_tests/Buffer.hpp 1969-12-31 19:00:00.000000000 -0500 +++ retry_tests/Buffer.hpp 2008-05-02 23:37:06.000000000 -0400 @@ -0,0 +1,209 @@ +/////////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2008 +// University of Rochester +// Department of Computer Science +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// * Neither the name of the University of Rochester nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef BUFFER_HPP__ +#define BUFFER_HPP__ + +#include + +/** + * A Bounded Buffer data structure. Producers and Consumers synchronize via + * retry, and there is no single shared variable to serve as a point of + * contention. + */ +class BoundedBuffer +{ + /** + * Transaction-safe indirected integer for RSTM... we'll use these to hold + * the next insert and remove points + */ + class INT : public stm::Object + { + GENERATE_FIELD(long, val); + INT() : m_val(0) { } + }; + + /** + * BufferEntry consists of a random int and a bool indicating whether the + * entry is initialized or not. The buffer will be a bunch of these. 
+ */ + class BufferEntry : public stm::Object + { + GENERATE_FIELD(long, val); + GENERATE_FIELD(bool, init); + + // construct by setting to uninitialized + BufferEntry() : m_val(0), m_init(false) { } + }; + + /*** the size of the buffer */ + const int buffer_size; + + /*** the buffer */ + stm::sh_ptr* buffer; + + /*** the next insertion point */ + stm::sh_ptr next_insert; + + /*** the next removal point */ + stm::sh_ptr next_remove; + + public: + + /** + * Constructor is nothing fancy, just set the size and make sure all + * pointers are non-null + */ + BoundedBuffer(unsigned int size) + : buffer_size(size), buffer(new stm::sh_ptr[buffer_size]), + next_insert(new INT()), next_remove(new INT()) + { + for (int i = 0; i < buffer_size; i++) + buffer[i] = stm::sh_ptr(new BufferEntry()); + } + + /** + * Leak like a sieve for now... + */ + ~BoundedBuffer() { } + + /** + * Produce into the bounded buffer. We don't care what the /val/ is, but + * the benchmark might want to use something meaningful. Also, the + * benchmark is responsible for counting the activity... + */ + void produce(long val) + { + // get next insertion point + stm::rd_ptr ins(next_insert); + int i = ins->get_val(ins); + + // ok, now jump to that entry in the buffer and see if it is + // available + stm::rd_ptr next(buffer[i]); + + // if the entry is initialized, then we are waiting for it to be + // consumed, so we need to retry + if (next->get_init(next) == true) + stm::retry(); + + // the entry is not initialized, so we can add our entry here: + + // first move the next pointer + stm::wr_ptr wr_i(ins); + wr_i->set_val((wr_i->get_val(wr_i) + 1) % buffer_size, wr_i); + + // now set the value and init fields of 'next' + stm::wr_ptr w_next(next); + w_next->set_val(val, w_next); + w_next->set_init(true, w_next); + } + + /** + * Consume something from the bounded buffer. This code doesn't process + * the result, it just takest out of the buffer so that the caller can use + * it + */ + long consume() + { + // get the next removal point + stm::rd_ptr rmv(next_remove); + int r = rmv->get_val(rmv); + + // ok, now jump to that entry in the buffer and see if it is + // available + stm::rd_ptr next(buffer[r]); + + // if the entry is uninitialized, then we are waiting for it to be + // produced, so we need to retry + if (next->get_init(next) == false) + stm::retry(); + + // the entry is initialized, so we can consume it here: + + // first move the next pointer + stm::wr_ptr wr_r(rmv); + wr_r->set_val((wr_r->get_val(wr_r) + 1) % buffer_size, wr_r); + + // now consume the value and update the init fields + stm::wr_ptr w_next(next); + w_next->set_init(false, w_next); + return w_next->get_val(w_next); + } + + /** + * The benchmark won't exit until all threads have finished their last + * transaction. However, if a thread is in the middle of retrying, then + * we're going to have a problem because it might be sleeping on a + * semaphore. + * + * If we don't require the last operation to succeed, then there is an + * easy fix: the retriers will all be waiting for something to change. + * That "something" must include either the next_remove or next_insert + * integer. If we just do stores to update both fields, everyone will + * wake up. 
+ */ + void wake_all() + { + // get ptrs to the next insert and remove points + stm::wr_ptr rmv_w(next_remove); + stm::wr_ptr ins_w(next_insert); + + // read corresponding ints to locals + int r = rmv_w->get_val(rmv_w); + int i = ins_w->get_val(ins_w); + + // dumb writes: write same value we read + rmv_w->set_val(r, rmv_w); + ins_w->set_val(i, ins_w); + } + + /** + * If the callers were counting the total amount produced and consumed, + * then the difference between those two sums ought to be the total amount + * left in the buffer. + */ + bool sanity(long long produced, long long consumed) + { + long long tally = 0; + for (int i = 0; i < buffer_size; i++) { + stm::un_ptr ube(buffer[i]); + if (ube->get_init(ube)) + tally += ube->get_val(ube); + } + return (produced == (consumed + tally)); + } +}; + +#endif // BUFFER_HPP__ diff -Naur retry_tests/BufferTest.cpp retry_tests/BufferTest.cpp --- retry_tests/BufferTest.cpp 1969-12-31 19:00:00.000000000 -0500 +++ retry_tests/BufferTest.cpp 2008-05-02 23:37:06.000000000 -0400 @@ -0,0 +1,407 @@ +/////////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2008 +// University of Rochester +// Department of Computer Science +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// * Neither the name of the University of Rochester nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#include +#include +#include +#include +#include +#include + +#include +#include "Buffer.hpp" + +using std::string; +using std::cout; +using std::endl; +using std::vector; + +/***************************************************************************** + * + * This benchmark is a bounded buffer with P producers, C consumers, and R + * threads that randomly produce or consume in batches of B. 
+ * + *****************************************************************************/ + +/** + * Struct to hold all configuration information + */ +struct config_t +{ + int producers; + int consumers; + int rand_threads; + int rand_batch_size; + int buffer_size; + int seconds; + string validation; + string cm; + + /** + * default is to run a 5-second test with 1 producer, 1 consumer, and 64 + * buckets in the buffer + */ + config_t() + : producers(1), consumers(1), rand_threads(0), rand_batch_size(1), + buffer_size(64), seconds(5), validation("invis-eager"), cm("Polka") + { } + + /** + * Display the config parameters + */ + void display() + { + cout << "BufferTest: p=" << producers << ", c=" << consumers + << ", r=" << rand_threads << ", b=" << rand_batch_size + << ", m=" << buffer_size << ", t=" << seconds << ", V=" + << validation << ", C=" << cm << endl; + } + +}; + +/*** Global config options */ +config_t CONFIG; + +/*** The bounded buffer */ +BoundedBuffer* BUFFER; + +/*** global timing information */ +unsigned long long STARTTIME = 0; +unsigned long long ENDTIME = 0; + +/*** interrupt an experiment */ +volatile bool ExperimentInProgress __attribute__ ((aligned(64))) = true; + +/*** signal handler to end the test */ +static void catch_SIGALRM(int sig_num) { ExperimentInProgress = false; } + +/** + * barrier to synchronize timing + */ +void barrier(int id, unsigned long nthreads) +{ + static struct + { + volatile unsigned long count; + volatile unsigned long sense; + volatile unsigned long thread_sense[256]; // hard-coded max threads + } __attribute__ ((aligned(64))) bar = {0}; + + bar.thread_sense[id] = !bar.thread_sense[id]; + if (fai(&bar.count) == nthreads - 1) { + bar.count = 0; + bar.sense = !bar.sense; + } + else { + while (bar.sense != bar.thread_sense[id]) { } // spin + } +} + +/** + * This is the packet of information that describes what a thread ought to do + * and what it has done + */ +struct thread_args_t +{ + /*** my thread id */ + int id; + + /** total number of threads (needed for barriers) */ + int threads; + + /*** role of this thread */ + enum role_t { PROD, CONS, RAND }; + role_t role; + + /*** count of all that has been consumed */ + unsigned long long consumed; + + /*** count of all that has been produced */ + unsigned long long produced; +}; + +/** + * Wrapper function for the transaction that produces something, adds it to + * the buffer, and counts the successful production + */ +void producer_job(thread_args_t* args) +{ + bool q = false; + BEGIN_TRANSACTION { + q = false; + if (ExperimentInProgress) { + q = true; + BUFFER->produce(1); + } + } END_TRANSACTION; + if (q) + args->produced++; +} + +/** + * Wrapper function for the transaction that consumes something, adds it to + * the buffer, and counts the successful consumption + */ +void consumer_job(thread_args_t* args) +{ + unsigned q = 0; + BEGIN_TRANSACTION { + q = 0; + if (ExperimentInProgress) + q = BUFFER->consume(); + } END_TRANSACTION; + args->consumed += q; +} + +/** + * new threads will all begin in this code + */ +void* buffer_test(void* arg) +{ + thread_args_t* args = (thread_args_t*)arg; + unsigned int seed = args->id; + + // threads other than thread0 must create a transactional context here + if (args->id != 0) + stm::init(CONFIG.cm, CONFIG.validation, true); + + // everyone waits... 
+ barrier(args->id, args->threads); + + if (args->id == 0) { + // start timing by initializing the alarm signalhandler + signal(SIGALRM, catch_SIGALRM); + alarm(CONFIG.seconds); + // get the start time of the benchmark run + STARTTIME = getElapsedTime(); + } + + // if this thread is RAND, this is how we track what to do + thread_args_t::role_t batch_type = thread_args_t::RAND; + int batch_remaining = 0; + + // now do the appropriate transactions + do { + if (args->role == thread_args_t::PROD) { + producer_job(args); + } + else if (args->role == thread_args_t::CONS) { + consumer_job(args); + } + else { + // choose action for next batch of jobs + if (batch_remaining == 0) { + int i = rand_r(&seed); + batch_type = + (i & 1) ? thread_args_t::PROD : thread_args_t::CONS; + batch_remaining = CONFIG.rand_batch_size; + } + // do a job from this batch + if (batch_type == thread_args_t::PROD) { + producer_job(args); + } + else { + consumer_job(args); + } + batch_remaining--; + } + } while (ExperimentInProgress); + + // someone has to force all threads to wake here, since some are waiting on + // the OS to signal them. Use a guard (mtx) to ensure that only one thread + // does the wakeup call + static volatile unsigned long mtx = 0; + if (bool_cas(&mtx, 0, 1)) { + BEGIN_TRANSACTION { + BUFFER->wake_all(); + } END_TRANSACTION; + } + + // wait for everyone to ack that the experiment is over + barrier(args->id, args->threads); + + // everyone is done, so we can get the endtime here + ENDTIME = getElapsedTime(); + + // Shut down STM for all threads other than thread 0 + if (args->id != 0) + stm::shutdown(args->id); + + return 0; +} + +/** + * Print usage + */ +void usage() +{ + cout << "Valid options are:" << endl; + cout << " -p : dedicated producers" << endl; + cout << " -c : dedicated consumers" << endl; + cout << " -r : threads who choose actions based on rand_r()" << endl; + cout << " -b : number of tasks per call to rand_r()" << endl; + cout << " -m : size of buffer" << endl; + cout << " -t : time (in seconds) to run tests" << endl; + cout << " -V : validation strategy" << endl; + cout << " -C : contention manager" << endl; + cout << " -h : this message" << endl; +} + +/** + * Main driver for the benchmark + */ +int main(int argc, char** argv) +{ + // parse the command-line options + int opt; + while ((opt = getopt(argc, argv, "p:c:r:b:m:t:C:V:h")) != -1) { + switch (opt) { + case 'p': + CONFIG.producers = atoi(optarg); + assert(CONFIG.producers > 0); + break; + case 'c': + CONFIG.consumers = atoi(optarg); + assert(CONFIG.consumers > 0); + break; + case 'r': + CONFIG.rand_threads = atoi(optarg); + assert(CONFIG.rand_threads >= 0); + break; + case 'b': + CONFIG.rand_batch_size = atoi(optarg); + assert(CONFIG.rand_batch_size >= 0); + break; + case 'm': + CONFIG.buffer_size = atoi(optarg); + assert(CONFIG.buffer_size > 0); + break; + case 't': + CONFIG.seconds = atoi(optarg); + assert(CONFIG.seconds > 0); + break; + case 'V': + CONFIG.validation = string(optarg); + break; + case 'C': + CONFIG.cm = string(optarg); + break; + case 'h': + usage(); + return 0; + default: + usage(); + return -1; + } + } + + // print this experiment's information + CONFIG.display(); + + // get a transactional context + stm::init(CONFIG.cm, CONFIG.validation, true); + + // construct the buffer + BUFFER = new BoundedBuffer(CONFIG.buffer_size); + + // get the total number of threads + int threads = CONFIG.producers + CONFIG.consumers + CONFIG.rand_threads; + + // create enough config packets for all threads + vector args; + 
args.resize(threads); + + // create enough pthread handles for all threads + vector tid; + tid.resize(threads); + + // create pthread scope to support multithreading + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM); + + // set up configuration structs for the threads we'll create + for (int i = 0; i < threads; i++) { + // thread id, number of threads + args[i].id = i; + args[i].threads = threads; + + // what is the job of this thread (prod, cons, rand) + if (i < CONFIG.producers) + args[i].role = thread_args_t::PROD; + else if (i < CONFIG.producers + CONFIG.consumers) + args[i].role = thread_args_t::CONS; + else + args[i].role = thread_args_t::RAND; + + // set counts to zero + args[i].consumed = 0; + args[i].produced = 0; + } + + // actually create the threads + for (int j = 1; j < threads; j++) + pthread_create(&tid[j], &attr, &buffer_test, &args[j]); + + // all of the other threads should be queued up, waiting to run the + // benchmark, but they can't until this thread starts the benchmark too... + buffer_test((void*)(&args[0])); + + // everyone should be done. Join all threads so we don't leave anything + // hanging around + for (int k = 1; k < threads; k++) + pthread_join(tid[k], NULL); + + // figure out how much was produced and consumed + long long total_consumed = 0; + long long total_produced = 0; + for (int i = 0; i < threads; i++) { + total_produced += args[i].produced; + total_consumed += args[i].consumed; + } + + // Run the sanity check + if (BUFFER->sanity(total_produced, total_consumed)) + cout << "Completed sanity check" << endl; + else + cout << "Sanity check failed" << endl; + + // output total performance + cout << "Produced = " << total_produced << endl; + cout << "Consumed = " << total_consumed << endl; + cout << "total time = " << ENDTIME - STARTTIME << endl; + + // shut off transactions in thread 0 + stm::shutdown(0); +} diff -Naur retry_tests/Makefile retry_tests/Makefile --- retry_tests/Makefile 1969-12-31 19:00:00.000000000 -0500 +++ retry_tests/Makefile 2008-05-02 23:37:06.000000000 -0400 @@ -0,0 +1,57 @@ +############################################################################### +# +# Copyright (c) 2008 +# University of Rochester +# Department of Computer Science +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of the University of Rochester nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# Pull in the standard RSTM makefile +include ../Makefile.inc + +CXXFLAGS += -I../ +LDFLAGS += -L../stm/obj -lstm + +EXES = BufferTest BarrierTest RoundRobin + +.PHONY: all clean + +all: $(EXES) + +BufferTest: BufferTest.cpp Buffer.hpp + $(CXX) $< $(LDFLAGS) $(CXXFLAGS) -o $@ + +BarrierTest: BarrierTest.cpp + $(CXX) $< $(LDFLAGS) $(CXXFLAGS) -o $@ + +RoundRobin: RoundRobin.cpp + $(CXX) $< $(LDFLAGS) $(CXXFLAGS) -o $@ + +clean: + -rm -f $(EXES) diff -Naur retry_tests/RoundRobin.cpp retry_tests/RoundRobin.cpp --- retry_tests/RoundRobin.cpp 1969-12-31 19:00:00.000000000 -0500 +++ retry_tests/RoundRobin.cpp 2008-05-02 23:37:06.000000000 -0400 @@ -0,0 +1,413 @@ +/////////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2008 +// University of Rochester +// Department of Computer Science +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// * Neither the name of the University of Rochester nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#include +#include +#include +#include +#include +#include + +#include +#include "Buffer.hpp" + +using std::string; +using std::cout; +using std::endl; +using std::vector; + +/***************************************************************************** + * + * This benchmark has lots of waiting. Essentially everyone is waiting for a + * token to be passed to their bucket, at which point they wake and pass it + * on. There can be multiple tokens moving around, and tokens can move in + * order or randomly. 
+ * + *****************************************************************************/ + +/** + * ring of ints to support passing a token around + */ +class TokenRing +{ + class INT : public stm::Object + { + GENERATE_FIELD(long, val); + INT(long v) : m_val(v) { } + }; + + /*** number of INTs in the ring */ + const int ring_size; + + /*** the actual ring of INTs */ + stm::sh_ptr* ring; + + /*** flag for rapid termination */ + stm::sh_ptr termination_flag; + + public: + /** + * Construct by allocating all of the INTs and then setting a few active + */ + TokenRing(int size, int active_elements) + : ring_size(size), ring(new stm::sh_ptr[ring_size]), + termination_flag(new INT(0)) + { + // allocate the ring entries + for (int i = 0; i < ring_size; i++) { + ring[i] = stm::sh_ptr(new INT(0)); + } + + // set /active_elements/ random entries to 1 + unsigned seed = 7; + int initialized = 0; + while (initialized < active_elements) { + unsigned position = rand_r(&seed) % ring_size; + stm::un_ptr u(ring[position]); + if (u->get_val(u) == 0) { + u->set_val(1, u); + initialized++; + } + } + } + + /*** shut down all threads */ + void halt() + { + BEGIN_TRANSACTION { + stm::wr_ptr f(termination_flag); + f->set_val(1, f); + } END_TRANSACTION; + } + + /*** + * Simple transaction waits until the token reaches the /from/ bucket and + * then waits until the /to/ bucket is empty, at which time it atomically + * moves the token. + */ + void PassToken(int from, int to) + { + BEGIN_TRANSACTION { + stm::rd_ptr flag(termination_flag); + if (!flag->get_val(flag)) { + // make sure that /from/ is full + stm::rd_ptr f_r(ring[from]); + if (f_r->get_val(f_r) == 0) + stm::retry(); + // make sure that /to/ is empty + stm::rd_ptr t_r(ring[to]); + if (t_r->get_val(t_r) != 0) + stm::retry(); + // now do the transfer + stm::wr_ptr f_w(f_r); + stm::wr_ptr t_w(t_r); + f_w->set_val(0, f_w); + t_w->set_val(1, t_w); + } + } END_TRANSACTION; + } + + /*** Sanity Check */ + bool sanity(int expected) + { + int count = 0; + for (int i = 0; i < ring_size; i++) { + stm::un_ptr r(ring[i]); + count += r->get_val(r); + } + return count == expected; + } +}; + +/*** Struct to hold all configuration information */ +struct config_t +{ + int threads; + bool random; + int tokens; + int seconds; + string validation; + string cm; + + /*** default is to run a 2-thread test with no randoms and 1 token */ + config_t() + : threads(2), random(false), tokens(1), seconds(5), + validation("invis-eager"), cm("Polka") + { } + + /*** Display the config parameters */ + void display() + { + cout << "RoundRobin: p=" << threads << ", r=" << random + << ", k=" << tokens << ", t=" << seconds + << ", V=" << validation << ", C=" << cm << endl; + } +}; + +/*** Global config options */ +config_t CONFIG; + +/*** The token ring */ +TokenRing* RING; + +/*** global timing information */ +unsigned long long STARTTIME = 0; +unsigned long long ENDTIME = 0; + +/*** interrupt an experiment */ +volatile bool ExperimentInProgress __attribute__ ((aligned(64))) = true; + +/*** signal handler to end the test */ +static void catch_SIGALRM(int sig_num) { ExperimentInProgress = false; } + +/** + * barrier to synchronize timing + */ +void barrier(int id, unsigned long nthreads) +{ + static struct + { + volatile unsigned long count; + volatile unsigned long sense; + volatile unsigned long thread_sense[256]; // hard-coded max threads + } __attribute__ ((aligned(64))) bar = {0}; + + bar.thread_sense[id] = !bar.thread_sense[id]; + if (fai(&bar.count) == nthreads - 1) { + bar.count = 0; + 
bar.sense = !bar.sense; + } + else { + while (bar.sense != bar.thread_sense[id]) { } // spin + } +} + +/** + * This is the packet of information that describes what a thread ought to do + * and what it has done + */ +struct thread_args_t +{ + /*** my thread id */ + int id; + + /** total number of threads (needed for barriers) */ + int threads; + + /*** seed for random computation */ + unsigned seed; + + /*** count of all transactions to commit */ + unsigned long long commits; +}; + +/** + * new threads will all begin in this code + */ +void* roundrobin_test(void* arg) +{ + thread_args_t* args = (thread_args_t*)arg; + + // threads other than thread0 must create a transactional context here + if (args->id != 0) + stm::init(CONFIG.cm, CONFIG.validation, true); + + // everyone waits... + barrier(args->id, args->threads); + + if (args->id == 0) { + // start timing by initializing the alarm sighandler + signal(SIGALRM, catch_SIGALRM); + alarm(CONFIG.seconds); + // get the start time of the benchmark run + STARTTIME = getElapsedTime(); + } + + // now do the appropriate transactions + do { + // where should this thread send the token? default is next slot + int to = (args->id + 1) % CONFIG.threads; + if (CONFIG.random) { + // get random values until we get a random value that isn't my id + while (true) { + int nxt = rand_r(&args->seed) % CONFIG.threads; + if (nxt != args->id) { + to = nxt; + break; + } + } + } + + RING->PassToken(args->id, to); + args->commits++; + } while (ExperimentInProgress); + + // halt all transactions + static volatile unsigned long mtx = 0; + if (bool_cas(&mtx, 0, 1)) { + RING->halt(); + } + + // wait for everyone to ack that the experiment is over + barrier(args->id, args->threads); + + // everyone is done, so we can get the endtime here + if (args->id == 0) + ENDTIME = getElapsedTime(); + + // Shut down STM for all threads other than thread 0 + if (args->id != 0) + stm::shutdown(args->id); + + return 0; +} + +/** + * Print usage + */ +void usage() +{ + cout << "Valid options are:" << endl; + cout << " -p : threads (default 2)" << endl; + cout << " -r : pass token randomly (default false)" << endl; + cout << " -k : number of tokens (default 1)" << endl; + cout << " -t : time (in seconds) to run experiment (default 5)" << endl; + cout << " -V : validation strategy" << endl; + cout << " -C : contention manager" << endl; + cout << " -h : this message" << endl; +} + +/** + * Main driver for the benchmark + */ +int main(int argc, char** argv) +{ + // parse the command-line options + int opt; + while ((opt = getopt(argc, argv, "p:rk:t:C:V:h")) != -1) { + switch (opt) { + case 'p': + CONFIG.threads = atoi(optarg); + assert(CONFIG.threads > 1); + break; + case 'r': + CONFIG.random = true; + break; + case 'k': + CONFIG.tokens = atoi(optarg); + assert(CONFIG.tokens > 0); + break; + case 't': + CONFIG.seconds = atoi(optarg); + assert(CONFIG.seconds > 0); + break; + case 'V': + CONFIG.validation = string(optarg); + break; + case 'C': + CONFIG.cm = string(optarg); + break; + case 'h': + usage(); + return 0; + default: + usage(); + return -1; + } + } + + // print this experiment's information + CONFIG.display(); + + // get a transactional context + stm::init(CONFIG.cm, CONFIG.validation, true); + + // construct the buffer + RING = new TokenRing(CONFIG.threads, CONFIG.tokens); + + // create enough config packets for all threads + vector args; + args.resize(CONFIG.threads); + + // create enough pthread handles for all threads + vector tid; + tid.resize(CONFIG.threads); + + // create 
pthread scope to support multithreading + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM); + + // set up configuration structs for the threads we'll create + for (int i = 0; i < CONFIG.threads; i++) { + // thread id, number of threads + args[i].id = i; + args[i].threads = CONFIG.threads; + args[i].seed = i; + + // set counts to zero + args[i].commits = 0; + } + + // actually create the threads + for (int j = 1; j < CONFIG.threads; j++) + pthread_create(&tid[j], &attr, &roundrobin_test, &args[j]); + + // all of the other threads should be queued up, waiting to run the + // benchmark, but they can't until this thread starts the benchmark too... + roundrobin_test((void*)(&args[0])); + + // everyone should be done. Join all threads so we don't leave anything + // hanging around + for (int k = 1; k < CONFIG.threads; k++) + pthread_join(tid[k], NULL); + + // figure out how many transactions there were + long total = 0; + for (int i = 0; i < CONFIG.threads; i++) + total += args[i].commits; + + // sanity check + assert(RING->sanity(CONFIG.tokens)); + cout << "Completed sanity check" << endl; + + // output total performance + cout << "total commits = " << total << endl; + cout << "total time = " << ENDTIME - STARTTIME << endl; + cout << (1000000000LL * total)/(ENDTIME-STARTTIME) + << " transactions per second" << endl; + + // shut off transactions in thread 0 + stm::shutdown(0); +} diff -Naur stm/api/llt_api.hpp stm/api/llt_api.hpp --- stm/api/llt_api.hpp 2008-05-02 23:36:51.000000000 -0400 +++ stm/api/llt_api.hpp 2008-05-02 23:37:05.000000000 -0400 @@ -55,7 +55,7 @@ /** - * Macro for ending a transaction. [mfs - hacked. (...) is missing] + * Macro for ending a transaction. * * NB: catch (...) needs to actually release the objects owned by this tx and * then it needs to reset the descriptor. Otherwise, if there is a catch diff -Naur stm/api/rstm_api.hpp stm/api/rstm_api.hpp --- stm/api/rstm_api.hpp 2008-05-02 23:36:51.000000000 -0400 +++ stm/api/rstm_api.hpp 2008-05-02 23:37:05.000000000 -0400 @@ -64,9 +64,8 @@ if (tx.rollback()) \ throw; \ } catch (stm::Retry&) { \ - if (tx.rollback()) \ + if (tx.retry()) \ throw; \ - sleep_ms(1); \ } catch (...) { \ tx.abort(false); \ tx.rollback(); \ diff -Naur stm/Makefile stm/Makefile --- stm/Makefile 2008-05-02 23:36:51.000000000 -0400 +++ stm/Makefile 2008-05-02 23:37:05.000000000 -0400 @@ -44,7 +44,8 @@ support/ConflictDetector.hpp support/Inevitability.hpp \ support/MiniVector.hpp support/MMPolicy.hpp \ support/defs.hpp support/ThreadLocalPointer.hpp \ - support/TokenManager.hpp ../config.h + support/TokenManager.hpp support/Retry.hpp \ + ../config.h LIBRSTM = obj/librstm.a ######################################## @@ -68,7 +69,7 @@ LIBCGL = obj/libcgl.a ######################################## -# .o and .h dependencies for building libealv.a +# .o and .h dependencies for building libllt.a LLT_OBJS = obj/llt.o obj/GCHeap.o LLT_HEADERS = llt.hpp \ support/atomic_ops.h support/MiniVector.hpp \ diff -Naur stm/rstm.cpp stm/rstm.cpp --- stm/rstm.cpp 2008-05-02 23:36:50.000000000 -0400 +++ stm/rstm.cpp 2008-05-02 23:37:04.000000000 -0400 @@ -110,6 +110,7 @@ cout << "Thread:" << i << "; Commits: " << currentDescriptor->getCommits() << "; Aborts: " << currentDescriptor->getAborts() + << "; Retrys: " << currentDescriptor->getRetrys() << endl; // we should merge this thread's reclaimer into a global reclaimer to @@ -569,7 +570,230 @@ // we cannot be inevitable and call this! 
assert(!currentDescriptor->inev.isInevitable()); - // set state to aborted, throw Retry - currentDescriptor->abort(false); + // throw Retry throw stm::Retry(); } + +stm::RetryMechanism rstm::Descriptor::retryImpl; + +#if defined(STM_RETRY_SLEEP) +/*** Implementation of retry that uses calls to usleep() */ +bool rstm::Descriptor::retry() +{ + // if not outermost transaction, return true. this will cause the api + // file to re-throw the exception and back us out another level + if (--nesting_depth != 0) + return true; + + // set state to aborted. If this fails, we've been aborted, so we + // shouldn't sleep later + bool sleep_at_end = bool_cas(&tx_state, stm::ACTIVE, stm::ABORTED); + + // un-acquire headers: note that LAZY won't acquire anything yet, so we + // can cleanup and just free the list + cleanupEagerWrites(stm::ABORTED); + lazyWrites.reset(); + + // uninstall visible readers, zero read sets + cleanupVisReads(); + invisibleReads.reset(); + + // commit memory changes and reset memory logging + mm.onTxEnd(stm::ABORTED); + + // exit inevitability + inev.onEndTx(); + + // sleep only if we didn't take a remote abort + if (sleep_at_end) { + num_retrys++; + retryImpl.endRetry(retryHandle); + } + else { + num_aborts++; + cm.onTxAborted(); + } + + // we are fully unwound, and ready to start a new transaction + return false; +} + +#elif defined(STM_RETRY_BLOOM) +/*** Bloom-filter based retry */ +bool rstm::Descriptor::retry() +{ + // if not outermost transaction, return true. this will cause the api + // file to re-throw the exception and back us out another level + if (--nesting_depth != 0) + return true; + + // create the bloom filter + retryHandle->reset(); + for (LazyWriteLog::iterator i = lazyWrites.begin(); + i != lazyWrites.end(); i++) + { + retryHandle->insert(i->shared); + } + for (EagerWriteLog::iterator i = eagerWrites.begin(); + i != eagerWrites.end(); i++) + { + retryHandle->insert(i->shared); + } + for (VisReadLog::iterator i = visibleReads.begin(); + i != visibleReads.end(); i++) + { + retryHandle->insert(*i); + } + for (InvisReadLog::iterator i = invisibleReads.begin(); + i != invisibleReads.end(); i++) + { + retryHandle->insert(i->shared); + } + + // put the filter into the global list + retryImpl.beginRetry(retryHandle); + + // validate + for (LazyWriteLog::iterator i = lazyWrites.begin(); + i != lazyWrites.end() && tx_state == stm::ACTIVE; i++) + { + if (!isCurrent(i->shared, i->read_version)) + tx_state = stm::ABORTED; + } + + for (InvisReadLog::iterator i = invisibleReads.begin(); + i != invisibleReads.end() && tx_state == stm::ACTIVE; i++) + { + if (!isCurrent(i->shared, i->read_version)) + tx_state = stm::ABORTED; + } + + // now we can uninstall self as eager owner / vis reader + for (EagerWriteLog::iterator i = eagerWrites.begin(); + i != eagerWrites.end(); i++) + { + CleanOnAbort(i->shared, i->write_version, i->read_version); + } + + for (VisReadLog::iterator i = visibleReads.begin(); + i != visibleReads.end(); i++) + { + removeVisibleReader(*i); + } + + // It is important that we do this (1) as a CAS and not a store, (2) + // AFTER beginRetry(), and (3) after we've uninstalled ourselves on our + // eager/vis sets + bool sleep_at_end = bool_cas(&tx_state, stm::ACTIVE, stm::ABORTED); + + // now we can reset all lists + lazyWrites.reset(); + eagerWrites.reset(); + visibleReads.reset(); + invisibleReads.reset(); + + // we're on the brink of sleeping... 
exit the inev epoch so that a GRL + // transaction can start inevitably + inev.onEndTx(); + + // we can also exit our MM epoch now, since we don't have any references + // hanging around to shared data + mm.onTxEnd(stm::ABORTED); + + if (!sleep_at_end) { + retryImpl.cancelRetry(retryHandle); + cm.onTxAborted(); + num_aborts++; + } + else { + // validation was OK. + retryImpl.endRetry(retryHandle); + num_retrys++; + } + + return false; +} + +#elif defined(STM_RETRY_VISREAD) +/*** retry via retry bits set in the object header of every object */ +bool rstm::Descriptor::retry() +{ + // if not outermost transaction, return true. this will cause the api + // file to re-throw the exception and back us out another level + if (--nesting_depth != 0) + return true; + + // must make this call early, because once we call insert() even once, + // someone could wake us + retryImpl.beginRetry(retryHandle); + + // We are simultaneously going to roll back and populate the + // RetryHandle's notion of read/write sets. + + // eager writes: install as retry on each, then clean on abort each + for (EagerWriteLog::iterator i = eagerWrites.begin(); + i != eagerWrites.end(); i++) + { + retryHandle->insert(i->shared); + CleanOnAbort(i->shared, i->write_version, i->read_version); + } + + // visible reads: install as retry on each, then unset vis read bit + for (VisReadLog::iterator i = visibleReads.begin(); + i != visibleReads.end(); i++) + { + retryHandle->insert(*i); + removeVisibleReader(*i); + } + + // lazy writes and invis reads: install retry, then test if current + for (LazyWriteLog::iterator i = lazyWrites.begin(); + i != lazyWrites.end() && tx_state == stm::ACTIVE; i++) + { + retryHandle->insert(i->shared); + if (!isCurrent(i->shared, i->read_version)) + tx_state = stm::ABORTED; + } + + for (InvisReadLog::iterator i = invisibleReads.begin(); + i != invisibleReads.end() && tx_state == stm::ACTIVE; i++) + { + retryHandle->insert(i->shared); + if (!isCurrent(i->shared, i->read_version)) + tx_state = stm::ABORTED; + } + + // It is important that we do this (1) as a CAS and not a store, (2) + // AFTER beginRetry(), and (3) after we've uninstalled ourselves on our + // eager/vis sets + bool sleep_at_end = bool_cas(&tx_state, stm::ACTIVE, stm::ABORTED); + + // now we can reset all lists + lazyWrites.reset(); + eagerWrites.reset(); + visibleReads.reset(); + invisibleReads.reset(); + + // we're on the brink of sleeping... exit the inev epoch so that a GRL + // transaction can start inevitably + inev.onEndTx(); + + if (!sleep_at_end) { + retryImpl.cancelRetry(retryHandle); + cm.onTxAborted(); + num_aborts++; + } + else { + // validation was OK. + retryImpl.endRetry(retryHandle); + num_retrys++; + } + + mm.onTxEnd(stm::ABORTED); + + return false; +} +#else +#error "No STM_RETRY_ option specified" + +#endif // STM_RETRY_* diff -Naur stm/rstm.hpp stm/rstm.hpp --- stm/rstm.hpp 2008-05-02 23:36:51.000000000 -0400 +++ stm/rstm.hpp 2008-05-02 23:37:04.000000000 -0400 @@ -46,6 +46,7 @@ #include "cm/CMPolicies.hpp" #include "support/TokenManager.hpp" #include "support/atomic_ops.h" +#include "support/Retry.hpp" namespace rstm { @@ -191,6 +192,11 @@ volatile unsigned long m_readers; /** + * Similar to above, but for retry() (depending on retry implementation) + */ + volatile stm::RetryMechanism::PerObjectRetryMetadata m_retryers; + + /** * Constructor for shared objects. All this constructor does is * initialize a SharedHandle to wrap the object for transactional * use. 
@@ -198,7 +204,7 @@ * @param t - The Object that this shared manages. t should not be * NULL */ - SharedHandle(Object* t) : m_payload(t), m_readers(0) { } + SharedHandle(Object* t) : m_payload(t), m_readers(0), m_retryers(0) { } }; // class SharedHandle @@ -281,6 +287,12 @@ stm::HybridCMPolicy cm; /** + * Retry support + */ + static stm::RetryMechanism retryImpl; + stm::RetryMechanism::RetryHandle* retryHandle; + + /** * For privatization: check privatizer clock which will trigger * validation if necessary. */ @@ -359,6 +371,11 @@ */ bool rollback(); + /** + * Called by the catch block in END_TRANSACTION to implement retry(). + */ + bool retry(); + private: /** * The current depth of nested transactions, where 0 is no @@ -437,11 +454,13 @@ */ void addValidateInvisRead(SharedHandle* shared, Object* version); + public: /** * Validate the invisible read and lazy write sets */ void validate(); + private: /** * Validate the invisible read set and insert a new entry if it * isn't already in the set. @@ -483,11 +502,12 @@ // for tracking statistics unsigned num_commits; unsigned num_aborts; + unsigned num_retrys; public: unsigned getCommits() { return num_commits; } - - unsigned getAborts() { return num_aborts; } + unsigned getAborts() { return num_aborts; } + unsigned getRetrys() { return num_retrys; } public: @@ -496,11 +516,11 @@ * * For now, with stm::Object caching a SharedHandle* back-pointer * (m_st), we can easily guarantee that any Object will have one and - * only one header wrapping it. If we remove the back pointer ([mfs] - * we should!), we can no longer provide this guarantee from within - * the runtime. Instead, it will become the programmer's - * responsibility to ensure that an object is never passed to this - * method more than once during its lifetime. + * only one header wrapping it. If we remove the back pointer, we can + * no longer provide this guarantee from within the runtime. Instead, + * it will become the programmer's responsibility to ensure that an + * object is never passed to this method more than once during its + * lifetime. * * @param t - an object that needs a shared header * @@ -613,10 +633,8 @@ mm.deleteOnCommit.insert(sh); mm.deleteOnCommit.insert(const_cast(openReadOnly(sh))); } - }; // class Descriptor - /** * wrapper to allocate memory. 
*/ @@ -728,11 +746,12 @@ : id_mask(0), cm(_use_static_cm, dynamic_cm), // set up CM + retryHandle(new stm::RetryMechanism::RetryHandle()), mm(), // set up the DeferredReclamationMMPolicy conflicts(), // construct bookkeeping invisibleReads(64), visibleReads(64), // fields that depend on eagerWrites(64), lazyWrites(64), // the heap - num_commits(0), num_aborts(0) + num_commits(0), num_aborts(0), num_retrys(0) { // the state is stm::COMMITTED, in tx #0 tx_state = stm::COMMITTED; @@ -756,6 +775,9 @@ else isLazy = true; + // set up retry handle + retryImpl.init_thread(retryHandle); + // initialize nesting depth nesting_depth = 0; } @@ -823,6 +845,7 @@ tx_state = stm::COMMITTED; cm.onTxCommitted(); inev.onInevCommit(); + retryImpl.onCommit(eagerWrites, lazyWrites); } else { // acquire objects that were open_RW'd lazily @@ -841,6 +864,7 @@ abort(); cm.onTxCommitted(); + retryImpl.onCommit(eagerWrites, lazyWrites); // at the end of a transaction, we are supposed to restore the // headers of any objects that we acquired (regardless of diff -Naur stm/RSTM.vcproj stm/RSTM.vcproj --- stm/RSTM.vcproj 2008-05-02 23:36:51.000000000 -0400 +++ stm/RSTM.vcproj 2008-05-02 23:37:05.000000000 -0400 @@ -856,6 +856,10 @@ UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}" > + + diff -Naur stm/support/Bloom.hpp stm/support/Bloom.hpp --- stm/support/Bloom.hpp 2008-05-02 23:36:51.000000000 -0400 +++ stm/support/Bloom.hpp 2008-05-02 23:37:04.000000000 -0400 @@ -69,6 +69,41 @@ return (filter[index] & mask); } + void insert2(unsigned val) + { + // use contiguous bit ranges + unsigned key[2]; + key[0] = (val >> 3) % SIZE; + key[1] = ((val >> 3) / SIZE) % SIZE; + + // set the bits that correspond to the keys + for (int k = 0; k < 2; k++) { + unsigned index = key[k] / (8*sizeof(bucket_t)); + unsigned bit = key[k] % (8*sizeof(bucket_t)); + bucket_t mask = 1 << bit; + filter[index] |= mask; + } + } + + bool lookup2(unsigned val) + { + // use contiguous bit ranges + unsigned key[2]; + key[0] = (val >> 3) % SIZE; + key[1] = ((val >> 3) / SIZE) % SIZE; + + // test the bits that correspond to the keys; if any fail, return 0 + int k; + for (k = 0; k < 2; k++) { + unsigned index = key[k] / (8*sizeof(bucket_t)); + unsigned bit = key[k] % (8*sizeof(bucket_t)); + bucket_t mask = 1 << bit; + if (!(filter[index] & mask)) + return false; + } + return true; + } + void insert3(unsigned val) { // we're going to use 3 keys: Key 1 will be the value itself. We'll @@ -123,7 +158,7 @@ */ Bloom() { - assert((HASHES == 1) || (HASHES == 3)); + assert((HASHES == 1) || (HASHES == 2) || (HASHES == 3)); assert(SIZE % (8*sizeof(bucket_t)) == 0); reset(); } @@ -145,6 +180,8 @@ { if (HASHES == 1) insert1(val); + else if (HASHES == 2) + insert2(val); else if (HASHES == 3) insert3(val); } @@ -157,6 +194,8 @@ { if (HASHES == 1) return lookup1(val); + else if (HASHES == 2) + return lookup2(val); else if (HASHES == 3) return lookup3(val); else diff -Naur stm/support/Retry.hpp stm/support/Retry.hpp --- stm/support/Retry.hpp 1969-12-31 19:00:00.000000000 -0500 +++ stm/support/Retry.hpp 2008-05-02 23:37:05.000000000 -0400 @@ -0,0 +1,564 @@ +/////////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2008 +// University of Rochester +// Department of Computer Science +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// * Neither the name of the University of Rochester nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef RETRY_HPP__ +#define RETRY_HPP__ + +#include +#include "config.h" +#include "Bloom.hpp" +#include "TokenManager.hpp" + +#ifdef _MSC_VER +#include +#pragma comment(lib, "winmm.lib") // for Sleep +#else +#include // for usleep +#endif + +/*** forward declaration for VisReadRetry */ +namespace rstm { class SharedHandle; } + +namespace stm +{ + /** + * The usleep and semaphore interfaces differ between *nix and Win32. This + * class encapsulates the differences, so that we don't have to re-implement + * the code in each of our retry mechanisms. + */ + struct AbstractSchedulerInterface + { + /*** The semaphore that the owner of this entry is sleeping on */ +#ifdef _MSC_VER + HANDLE sem; +#else + sem_t sem; +#endif + + /*** Wake up someone sleeping on the semaphore */ + void post() + { +#ifdef _MSC_VER + ReleaseSemaphore(sem, 1, NULL); +#else + sem_post(&sem); +#endif + } + + /*** Block this thread (via OS) until someone posts on the semaphore */ + void wait() + { +#ifdef _MSC_VER + WaitForSingleObject(sem, INFINITE); +#else + sem_wait(&sem); +#endif + } + + /*** reset the semaphore if it had multiple posts */ + void reset_sem() + { +#ifdef _MSC_VER + while (WaitForSingleObject(sem, 0) != WAIT_OBJECT_0); +#else + while (!sem_trywait(&sem)); +#endif + } + + /*** Simple constructor: initialize the semaphore */ + AbstractSchedulerInterface() + { +#ifdef _MSC_VER + // lower the granularity of the sleep() method to 1 ms, which is the + // best we can get easily in Win32 + timeBeginPeriod(1); + // set up the semaphore + sem = CreateSemaphore(NULL, 0, 128, NULL); +#else + sem_init(&sem, 0, 0); +#endif + } + + /*** destructor destroys the semaphore */ + ~AbstractSchedulerInterface() + { +#ifdef _MSC_VER + CloseHandle(sem); +#else + sem_destroy(&sem); +#endif + } + + /*** sleep for a number of microseconds */ + void sleep(unsigned usecs) + { +#ifdef _MSC_VER + unsigned time = (usecs < 1000) ? 
1 : usecs / 1000; + Sleep(time); +#else + usleep(usecs); +#endif + } + }; + + /** + * Singleton storing the list of sleeping bloom filters, and implementing + * the code for interacting with that list. + */ + class BloomRetry + { + public: + /*** Each retrying transaction is represented by one of these. */ + class RetryHandle : public AbstractSchedulerInterface + { + friend class BloomRetry; + + /** + * Filter representing all addresses (or objects) read + * + * for now we'll use 1Kbit filters with 3 hash functions, but this + * should become a compile-time parameter eventually + */ + Bloom<1024, 3> filter; + + /*** status counter. even means this filter is inactive */ + volatile unsigned long status; + + public: + /*** reset the filter */ + void reset() { filter.reset(); } + + /*** Add something to our filter. */ + void insert(void* ptr) { filter.insert((unsigned long)ptr); } + + /*** Simple constructor: initialize the semaphore */ + RetryHandle() : AbstractSchedulerInterface(), filter(), status(0) { } + + /*** destructor destroys the semaphore */ + ~RetryHandle() { } + }; + + /** + * For uniformity, expose an empty class that can be added to the header + * of shared objects + */ + struct PerObjectRetryMetadata + { + /*** constructor must take an unsigned long long */ + PerObjectRetryMetadata(unsigned long long i) { } + }; + + /*** hard-coded max threads (for now) */ + static const unsigned long MAX_THREADS = 64; + + /** + * TokenManager to store up to 64 thread RetryHandle objects + * + * note that we could make do with a simple list if returning entries + * isn't an issue (we don't return them right now anyway) + */ + stm::common::TokenManager handles; + + /*** fast-path test if any retryers in the list */ + volatile unsigned long thread_count; + + /*** Initialize by zeroing the list of retrying transactions */ + BloomRetry() : handles(MAX_THREADS), thread_count(0) { } + + /** + * When a transaction commits, it needs to check if there are waiting + * transactions, and if so it needs to wake them up if any entry in its + * write set hits in the waiting transaction's filter + * + * NB: there is a bit of a hack here. We assume that the WS collection + * stores things that have a field called 'shared', and that that field + * is the address we care about. + */ + template + void onCommit(WS1 ws1, WS2 ws2) + { + // read the head, and if it is null we can just return + if (thread_count == 0) + return; + + // check each entry in the filter list and see if we should wake it + for (int i = 0; i < handles.get_max(); i++) { + // skip this slot in the array if it doesn't hold a valid filter + RetryHandle* h = handles.lookup(i); + if (!h) + continue; + + // skip this filter if it isn't active + unsigned long filter_seq = h->status; + if ((filter_seq & 1) != 1) + continue; + + bool activated = false; // track if filter was activated + // go through the first write set + for (typename WS1::iterator i1 = ws1.begin(); + i1 != ws1.end(); ++i1) + { + // stop if filter changed + if (h->status != filter_seq) { + activated = true; + break; + } + // should I wake this? 
+ if (h->filter.lookup((unsigned long)i1->shared)) { + h->post(); + activated = true; + break; + } + } + + // if this filter was activated, move to next filter + if (activated) + continue; + + // go through second write set + for (typename WS2::iterator i2 = ws2.begin(); + i2 != ws2.end(); ++i2) + { + // stop if filter changed + if (h->status != filter_seq) + break; + // should I wake this + if (h->filter.lookup((unsigned long)i2->shared)) { + h->post(); + break; + } + } + } + } + + /** + * On Transaction Retry, reset the semaphore and mark this filter as + * active. The filter must be configured before calling this + */ + void beginRetry(RetryHandle* handle) + { + // reset the semaphore + handle->reset_sem(); + + // mark that there is a retryer to test against + fai(&thread_count); + + // mark handle as even + handle->status++; + } + + /*** clean up without yielding the CPU */ + void cancelRetry(RetryHandle* handle) + { + // decrement count of retryers + faa(&thread_count, -1); + + // uninit my filter + handle->status++; + } + + /*** Yield the CPU, then clean up */ + void endRetry(RetryHandle* handle) + { + // sleep + handle->wait(); + + // unset filter + cancelRetry(handle); + } + + /** + * At Descriptor construction time, pass a handle to the manager and get + * an ID, which will then be used for the rest of time + */ + void init_thread(RetryHandle* handle) + { + int id = handles.get_token(handle); + assert((id < 64) && (id >= 0)); + } + }; + + /** + * Singleton for sleep-based retry doesn't actually have any global + * variables, but we implement retry in this class to keep the interface + * consistent with other mechanisms. + */ + class SleepRetry + { + /*** this should be a compiler option eventually. usecs to sleep */ + static const unsigned long SLEEP_AMOUNT = 50; + + public: + /*** Each retrying transaction is represented by one of these. */ + class RetryHandle : public AbstractSchedulerInterface + { + friend class SleepRetry; + + public: + /*** initialize all fields */ + RetryHandle() : AbstractSchedulerInterface() { } + + /*** destructor forwards to parent class to destroy the semaphore */ + ~RetryHandle() { } + }; + + /** + * For uniformity, expose an empty class that can be added to the header + * of shared objects + */ + struct PerObjectRetryMetadata + { + /*** constructor must take an unsigned long long */ + PerObjectRetryMetadata(unsigned long long i) { } + }; + + /*** Initialize is a nop since there are no fields */ + SleepRetry() { } + + /*** On Transaction Commit, call this for uniformity (but do nothing) */ + template + void onCommit(WS1 ws1, WS2 ws2) { } + + /*** On Transaction Retry, do nothing */ + void beginRetry(RetryHandle* handle) { } + + /*** clean up without yielding the CPU */ + void cancelRetry(RetryHandle* handle) { } + + /*** Yield the CPU, then clean up */ + void endRetry(RetryHandle* handle) { handle->sleep(SLEEP_AMOUNT); } + + /*** no initialization required */ + void init_thread(RetryHandle* handle) { } + }; + + /** + * Singleton for managing retry bits, and for implementing the code for + * vis-read retry. + * + * The template type should always be rstm::SharedHandle + */ + template + class VisReadRetry + { + public: + /*** Each retrying transaction is represented by one of these. 
*/ + class RetryHandle : public AbstractSchedulerInterface + { + friend class VisReadRetry; + + /*** refers to the bit associated with this handle */ + int id; + + /*** store all of the objects whose retry bits we set here */ + stm::common::MiniVector markedObjects; + + /*** install into retry bit */ + template + void installRetry(T* shared) + { + unsigned long long flag = (1ULL << id); + unsigned long long oldval, newval; + do { + oldval = shared->m_retryers; + // casX takes ull* for its old and new, so we need + newval = oldval | flag; + } while (!casX(&shared->m_retryers, &oldval, &newval)); + } + + /*** uninstall retry bit */ + template + bool removeRetry(T* shared) + { + unsigned long long flag = (1ULL << id); + unsigned long long expected; + unsigned long long n; + + // exit immediately if bit not set + if (!(shared->m_retryers & flag)) + return true; + do { + expected = shared->m_retryers; + n = expected & ~flag; + } while (!casX(&shared->m_retryers, &expected, &n)); + return false; + } + + /*** simple getter and setter for the id */ + void set_id(int i) { id = i; assert((i < 64) && (i >= 0)); } + public: + + /*** unset all retry bits and reset the list */ + void reset() + { + typename stm::common::MiniVector::iterator i; + for (i = markedObjects.begin(); i != markedObjects.end(); ++i) + removeRetry(*i); + markedObjects.reset(); + } + + /** + * set a retry bit and log it for later + * + * NB: we use void* to remain compatible with other mechanisms + */ + void insert(SHAREDHANDLE* ptr) + { + installRetry(ptr); + markedObjects.insert(ptr); + } + + /*** Simple constructor: initialize the semaphore */ + RetryHandle() + : AbstractSchedulerInterface(), id(-1), markedObjects(128) + { } + + /*** destructor destroys the semaphore */ + ~RetryHandle() { } + }; + + /** + * Inject this type into shared handles to provide space for marking + * retryer transactions. + */ + typedef unsigned long long PerObjectRetryMetadata; + + /*** hard-coded max threads (for now) */ + static const unsigned long MAX_THREADS = 64; + + /** + * TokenManager to map up to 64 thread RetryHandle objects to the 64 + * retry bits. we need to return bit reservations eventually + */ + stm::common::TokenManager handles; + + /*** Initialize the global RetryManager by setting its tokenmanager */ + VisReadRetry() : handles(MAX_THREADS) { } + + /** + * When a transaction commits, it needs to check if any retry bits are + * set for any object in its write set + * + * NB: we assume the type of m_retryers, and we assume that the WS + * collection stores things that have a field called 'shared' + */ + template + void onCommit(WS1 ws1, WS2 ws2) + { + unsigned long long bmp = 0; + // get all retry bitmaps from eager writes and 'or' them into bmp + for (typename WS1::iterator i = ws1.begin(); i != ws1.end(); ++i) + bmp |= i->shared->m_retryers; + + // do the same for lazy writes + for (typename WS2::iterator i = ws2.begin(); i != ws2.end(); ++i) + bmp |= i->shared->m_retryers; + + // if bmp is 0, we're done + if (!bmp) + return; + + // time to wake everyone in bmp + unsigned long long flag = 1; + for (unsigned int idx = 0; idx < (sizeof(bmp) * 8); ++idx) { + if (bmp & flag) + handles.lookup(idx)->post(); + flag <<= 1; + } + } + + /** + * On Transaction Retry, just reset the semaphore. No bits should be + * set before calling this + * + * Eventually we will want to get a new ID here and return it later, but + * that's a lot of overhead since TokenManager has linear overhead to + * reserve a bit. 
To avoid that overhead, we'll just save the ID, but + * there's a mess if we ever want more than 64 retrying threads. + */ + void beginRetry(RetryHandle* handle) + { + handle->reset_sem(); + } + + /*** clean up without yielding the CPU */ + void cancelRetry(RetryHandle* handle) + { + // unset all read bits and clear the read bit list + handle->reset(); + + // eventually need to return the ID here + } + + /*** Yield the CPU, then clean up */ + void endRetry(RetryHandle* handle) + { + // Wait on semaphore. + handle->wait(); + + // now unset mark bits + cancelRetry(handle); + } + + /** + * At Descriptor construction time, pass a handle to the manager and get + * an ID, which will then be used for the rest of time. + */ + void init_thread(RetryHandle* handle) + { + int id = handles.get_token(handle); + handle->set_id(id); + } + }; + + /** + * typedef one of these mechanisms into the "RetryMechanism" that RSTM will + * use, based on compile-time defines from config.h + */ +#if defined(STM_RETRY_SLEEP) + typedef SleepRetry RetryMechanism; + +#elif defined(STM_RETRY_BLOOM) + typedef BloomRetry RetryMechanism; + +#elif defined(STM_RETRY_VISREAD) + typedef VisReadRetry RetryMechanism; + +#else +#error "No STM_RETRY_ option specified" + +#endif +} + +#endif // RETRY_HPP__ diff -Naur stm/support/TokenManager.hpp stm/support/TokenManager.hpp --- stm/support/TokenManager.hpp 2008-05-02 23:36:51.000000000 -0400 +++ stm/support/TokenManager.hpp 2008-05-02 23:37:04.000000000 -0400 @@ -54,7 +54,7 @@ /** * Max number of tokens that can be given out / sizeof the array below */ - const int max_tokens; + const int max_tokens; /** * Array of volatile pointers to nonvolatile Descriptors diff -Naur stmconfig.cpp stmconfig.cpp --- stmconfig.cpp 2008-05-02 23:36:50.000000000 -0400 +++ stmconfig.cpp 2008-05-02 23:37:04.000000000 -0400 @@ -50,15 +50,13 @@ using std::vector; using std::ofstream; -// shorthand for an iterator over a vector of strings -typedef vector::iterator VSI; - // enums to make it easy to track our platform / build environment enum os_t { LINUX, SOLARIS, MAC, WINDOWS, OPENBSD, FREEBSD }; enum cpu_t { X86, SPARC }; enum cc_t { GCC, MSC }; // globals + // for determining platform os_t OS; cpu_t CPU; @@ -76,7 +74,7 @@ // strings that we output for various options vector libs, tlocals, profiles, allocators, cmprompt, cms, valheur, - priv, lwpriv, inev, lwinev, locks; + priv, lwpriv, inev, lwinev, locks, retrys; // the gcc we use at UR doesn't emit correct code on sparc/solaris when -ggdb // is given and __thread is used. 
Set this global if __thread is selected @@ -105,6 +103,14 @@ } } +// utility to print the contents of a vector +void print_vector_string(vector& txt) +{ + // shorthand for an iterator over a vector of strings + for (vector::iterator i = txt.begin(); i != txt.end(); ++i) + cout << *i << endl; +} + // Use compiler defines to figure out the cpu, compiler, and os; update globals // accordingly // @@ -255,6 +261,12 @@ locks.push_back(" [2] pthread_mutex_lock"); locks.push_back(" [3] ticket lock"); locks.push_back(" [4] MCS lock"); + + // retry + retrys.push_back("How would you like to implement retry?"); + retrys.push_back(" [1] sleep briefly (50 usec) and then restart"); + retrys.push_back(" [2] wait using visible read bits"); + retrys.push_back(" [3] wait using bloom filters"); } // Query the user for the library to build; This must come first, because other @@ -352,7 +364,7 @@ // privatization query when using LLT, which only has TFence and Program Logic void lw_privatization(int choice) { - string opts[5] = { "", "TFENCE", "LOGIC" }; + string opts[3] = { "", "TFENCE", "LOGIC" }; CONFIGH.push_back("#define STM_PRIV_" + opts[choice]); } @@ -378,6 +390,13 @@ CONFIGH.push_back("#define STM_LOCK_" + opts[choice]); } +// which type of retry should be used? +void retry(int choice) +{ + string opts[4] = { "", "SLEEP", "VISREAD", "BLOOM" }; + CONFIGH.push_back("#define STM_RETRY_" + opts[choice]); +} + // interactive mode: ask the user how to configure everything void interactive_config() { @@ -387,69 +406,64 @@ init_strings(); // now figure out what library we're building - for (VSI i = libs.begin(); i != libs.end(); ++i) - cout << *i << endl; + print_vector_string(libs); lib(prompt(1, 4)); // now figure out the tlocal, because the build profile depends on it if (need_tlocal()) { - for (VSI i = tlocals.begin(); i != tlocals.end(); ++i) - cout << *i << endl; + print_vector_string(tlocals); tlocal(prompt(1, 2)); } // get the build profile if (OS != WINDOWS) { - for (VSI i = profiles.begin(); i != profiles.end(); ++i) - cout << *i << endl; + print_vector_string(profiles); profile(prompt(1, 5)); } // and set the allocator - for (VSI i = allocators.begin(); i != allocators.end(); ++i) - cout << *i << endl; + print_vector_string(allocators); allocator(prompt(1, (OS == SOLARIS) ? 
3 : 2)); // lib-specific questions if ((LIB == "RSTM") || (LIB == "REDO_LOCK")) { // contention management - for (VSI i = cmprompt.begin(); i != cmprompt.end(); ++i) - cout << *i << endl; + print_vector_string(cmprompt); int c = 1; - for (VSI i = cms.begin(); i != cms.end(); ++i) + for (vector::iterator i = cms.begin(); i != cms.end(); ++i) cout << " [" << (c++) << "] " << *i << endl; cm(prompt(9, 16)); // global commit counter validation heuristic - for (VSI i = valheur.begin(); i != valheur.end(); ++i) - cout << *i << endl; + print_vector_string(valheur); validation(prompt(1, 2)); // privatization (full support) - for (VSI i = priv.begin(); i != priv.end(); ++i) - cout << *i << endl; + print_vector_string(priv); privatization(prompt(1, 4)); // inevitability (limited support) - for (VSI i = lwinev.begin(); i != lwinev.end(); ++i) - cout << *i << endl; + print_vector_string(lwinev); lw_inevitability(prompt(1, 2)); + + // RSTM supports lots of retry mechanisms + if (LIB == "RSTM") { + print_vector_string(retrys); + retry(prompt(1,3)); + } } else if (LIB == "LLT") { // privatization (limited support) - for (VSI i = lwpriv.begin(); i != lwpriv.end(); ++i) - cout << *i << endl; + print_vector_string(lwpriv); lw_privatization(prompt(1, 2)); // inevitability (full support) - for (VSI i = inev.begin(); i != inev.end(); ++i) - cout << *i << endl; + print_vector_string(inev); inevitability(prompt(1, 9)); } else if (LIB == "CGL") { // type of lock to use - for (VSI i = locks.begin(); i != locks.end(); ++i) - cout << *i << endl; + print_vector_string(locks); lock(prompt(1, 4)); } } @@ -487,6 +501,8 @@ privatization(1); // inevitability via GRL lw_inevitability(1); + // retry via sleep + retry(1); } else if (LIB == "LLT") { // privatization via TFENCE @@ -528,7 +544,7 @@ ofstream cfgfile; cfgfile.open("config.h"); cfgfile << "/* This file was automatically generated */" << endl; - for (VSI i = CONFIGH.begin(); i != CONFIGH.end(); i++) + for (vector::iterator i = CONFIGH.begin(); i != CONFIGH.end(); i++) cfgfile << *i << endl; cfgfile.close();
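
The BLOOM and VISREAD retry() bodies above stress the same ordering in their comments: publish the wait metadata first (beginRetry), then validate, and only then CAS tx_state from ACTIVE to ABORTED, so that a remote abort is never overwritten and a conflicting commit cannot slip in between validation and sleeping. The skeleton below restates that ordering in isolation; it is a simplified sketch, not RSTM code, and every identifier in it (Tx, publish_wait_filter, reads_still_valid, sleep_until_posted, cancel_wait) is a hypothetical stand-in.

#include <atomic>

enum TxState { ACTIVE, ABORTED };

struct Tx {
    std::atomic<int> tx_state;       // remote aborts write ABORTED here
    // read/write logs, retry handle, etc. elided
};

static void publish_wait_filter(Tx&) { /* beginRetry(): make us visible */ }
static bool reads_still_valid(Tx&)   { return true; /* validation elided */ }
static void sleep_until_posted(Tx&)  { /* endRetry(): wait on semaphore */ }
static void cancel_wait(Tx&)         { /* cancelRetry(): withdraw filter */ }

static void retry_outermost(Tx& tx)
{
    publish_wait_filter(tx);              // 1. committers can now see us
    if (!reads_still_valid(tx))           // 2. re-check after publishing: a
        tx.tx_state.store(ABORTED);       //    commit that raced ahead of
                                          //    step 1 is caught right here
    int expected = ACTIVE;                // 3. CAS, not a store: a remote
    bool sleep_at_end =                   //    abort that already set ABORTED
        tx.tx_state.compare_exchange_strong(expected, ABORTED); // must win
    if (sleep_at_end)
        sleep_until_posted(tx);           // any later conflicting commit sees
                                          // the published filter and posts us
    else
        cancel_wait(tx);                  // aborted or invalid: do not sleep
}

int main()
{
    Tx tx;
    tx.tx_state = ACTIVE;
    retry_outermost(tx);
    return 0;
}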
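
The new insert2()/lookup2() pair in Bloom.hpp derives both keys from contiguous bit ranges of the shifted value. The stand-alone sketch below mirrors that indexing so the mapping from key to bucket and bit is easier to follow; the filter size, main() driver, and names are illustrative only, and a positive lookup can of course still be a false positive.

#include <cstdio>

static const unsigned SIZE = 1024;                 // bits in the filter
typedef unsigned long bucket_t;
static const unsigned BITS = 8 * sizeof(bucket_t); // bits per bucket
static bucket_t filter[SIZE / BITS];               // zero-initialized

static void keys_for(unsigned val, unsigned key[2])
{
    key[0] = (val >> 3) % SIZE;                    // low-order word index
    key[1] = ((val >> 3) / SIZE) % SIZE;           // next contiguous range
}

static void insert(unsigned val)
{
    unsigned key[2]; keys_for(val, key);
    for (int k = 0; k < 2; k++)
        filter[key[k] / BITS] |= (bucket_t)1 << (key[k] % BITS);
}

static bool lookup(unsigned val)
{
    unsigned key[2]; keys_for(val, key);
    for (int k = 0; k < 2; k++)
        if (!(filter[key[k] / BITS] & ((bucket_t)1 << (key[k] % BITS))))
            return false;                          // definitely not inserted
    return true;                                   // inserted, or false positive
}

int main()
{
    insert(0x1000);
    std::printf("%d %d\n", lookup(0x1000), lookup(0x2000));
    return 0;
}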
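
Retry.hpp hides the Win32/POSIX semaphore differences behind AbstractSchedulerInterface. The POSIX half of that pattern, including the drain-the-semaphore idiom used by reset_sem(), looks roughly like the sketch below; this is a hypothetical stand-alone program (compile with -lpthread), with the Win32 branch omitted.

#include <semaphore.h>
#include <pthread.h>
#include <cstdio>

static sem_t sem;

static void* waiter(void*)
{
    sem_wait(&sem);                  // block until another thread posts
    std::printf("woken\n");
    return 0;
}

int main()
{
    sem_init(&sem, 0, 0);            // process-private, initial count 0
    while (!sem_trywait(&sem)) { }   // drain stale posts (the reset_sem() idiom)

    pthread_t t;
    pthread_create(&t, 0, waiter, 0);
    sem_post(&sem);                  // wake the waiter exactly once
    pthread_join(t, 0);

    sem_destroy(&sem);
    return 0;
}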
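
For STM_RETRY_VISREAD, each SharedHandle carries a 64-bit m_retryers word: a retrying thread CASes its bit into the header of every object in its read/write sets, and a committing writer ORs together the m_retryers words of the objects it wrote and posts the semaphore for every bit that is set. The sketch below is a simplified stand-in that uses std::atomic fetch_or/fetch_and in place of casX and prints instead of posting; the names and the main() driver are hypothetical.

#include <atomic>
#include <cstdio>
#include <vector>

struct ObjHeader {
    std::atomic<unsigned long long> retryers{0};   // bit i set => thread i waits
};

static void mark_retry(ObjHeader& o, int id)       // waiter side (installRetry)
{
    o.retryers.fetch_or(1ULL << id);
}

static void clear_retry(ObjHeader& o, int id)      // waiter cleanup (removeRetry)
{
    o.retryers.fetch_and(~(1ULL << id));
}

static void wake_conflicting(const std::vector<ObjHeader*>& write_set)
{
    unsigned long long bmp = 0;                    // committer side (onCommit)
    for (size_t i = 0; i < write_set.size(); ++i)
        bmp |= write_set[i]->retryers.load();      // union of all waiters
    for (int id = 0; id < 64; ++id)
        if (bmp & (1ULL << id))
            std::printf("wake thread %d\n", id);   // real code posts a semaphore
}

int main()
{
    ObjHeader a, b;
    mark_retry(a, 3);                              // thread 3 waits on object a
    std::vector<ObjHeader*> ws;
    ws.push_back(&a);
    ws.push_back(&b);
    wake_conflicting(ws);                          // prints: wake thread 3
    clear_retry(a, 3);
    return 0;
}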
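
Finally, the stmconfig changes only emit one extra line into config.h; the #if chain at the bottom of Retry.hpp turns that define into the stm::RetryMechanism typedef that rstm.hpp and rstm.cpp compile against, and the patch's default configuration path picks SLEEP via retry(1). An illustrative config.h fragment, assuming the Bloom option was chosen interactively:

/* config.h (illustrative fragment; written by stmconfig) */
#define STM_RETRY_BLOOM

/* Retry.hpp then resolves, at compile time, to           */
/*     typedef BloomRetry RetryMechanism;                  */
/* and Descriptor::retry() builds its STM_RETRY_BLOOM body */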