Skip to content

Commit

Permalink
Preempt (almost) everywhere (if desired).
Browse files Browse the repository at this point in the history
  • Loading branch information
bblum committed Jun 7, 2016
1 parent 3c71ca3 commit 94cf177
Show file tree
Hide file tree
Showing 9 changed files with 103 additions and 5 deletions.
15 changes: 14 additions & 1 deletion id/job.c
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ static void *run_job(void *arg)
/* write config file */

XWRITE(&j->config_static, "TEST_CASE=%s\n", test_name);
XWRITE(&j->config_static, "VERBOSE=%d\n", verbose ? 1 : 0);
XWRITE(&j->config_static, "VERBOSE=%d\n", preempt_everywhere ? 0 : verbose ? 1 : 0);
XWRITE(&j->config_static, "ICB=%d\n", use_icb ? 1 : 0);
XWRITE(&j->config_static, "PREEMPT_EVERYWHERE=%d\n", preempt_everywhere ? 1 : 0);

Expand Down Expand Up @@ -171,6 +171,19 @@ static void *run_job(void *arg)
XWRITE(&j->config_dynamic, "%s critical_section\n", without);
}

if (preempt_everywhere) {
XWRITE(&j->config_static, "DR_PPS_RESPECT_WITHIN_FUNCTIONS=1\n");
if (pintos) {
/* Manually approved shm accesses. */
XWRITE(&j->config_dynamic, "%s intr_get_level\n", without);
XWRITE(&j->config_dynamic, "%s intr_context\n", without);
} else {
/* Known offender to our ">=ebp+0x10" heuristic.
* See work/modules/landslide/pp.c. */
XWRITE(&j->config_dynamic, "%s _doprnt\n", without);
}
}

messaging_init(&mess, &j->config_static, &j->config_dynamic, j->id);

// XXX: Need to do this here so the parent can have the path into pebsim
Expand Down
8 changes: 6 additions & 2 deletions work/modules/landslide/arbiter.c
Original file line number Diff line number Diff line change
Expand Up @@ -108,8 +108,12 @@ bool arbiter_interested(struct ls_state *ls, bool just_finished_reschedule,
/* if xchg-blocked, need NOT set DR PP. other case below. */
&& !XCHG_BLOCKED(&ls->sched.cur_agent->user_yield)
#ifdef DR_PPS_RESPECT_WITHIN_FUNCTIONS
&& ((testing_userspace() && user_within_functions(ls)) ||
(!testing_userspace() && kern_within_functions(ls)))
// NB. The use of KERNEL_MEMORY here used to be !testing_userspace.
// I needed to change it to implement preempt-everywhere mode,
// to handle the case of userspace shms in deschedule() syscall.
// Not entirely sure of all implications of this change.
&& ((!KERNEL_MEMORY(ls->eip) && user_within_functions(ls)) ||
(KERNEL_MEMORY(ls->eip) && kern_within_functions(ls)))
#endif
) {
*data_race = true;
Expand Down
4 changes: 4 additions & 0 deletions work/modules/landslide/landslide.c
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,11 @@ static void check_exception(struct ls_state *ls, int number)
#define PROGRESS_TRIGGER_FACTOR 4000
#define PROGRESS_AGGRESSIVE_TRIGGER_FACTOR 2000

#ifdef PREEMPT_EVERYWHERE
#define TOO_DEEP_0TH_BRANCH (1<<20)
#else
#define TOO_DEEP_0TH_BRANCH 4000
#endif

/* Avoid getting owned by DR PPs on e.g. memset which hose the average. */
#define PROGRESS_MIN_TRIGGER_AVERAGE 100 /* idk really */
Expand Down
14 changes: 14 additions & 0 deletions work/modules/landslide/memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -996,12 +996,24 @@ void mem_check_shared_access(struct ls_state *ls, unsigned int phys_addr,
} else if (do_add_shm) {
add_shm(ls, m, c, addr, write, in_kernel);
}
#ifdef PREEMPT_EVERYWHERE
if (testing_userspace() != in_kernel &&
!(testing_userspace() && KERNEL_MEMORY(addr))) {
maybe_preempt_here(ls, addr);
}
#endif
} else if ((in_kernel && kern_address_global(addr)) ||
(!in_kernel /* && user_address_global(addr) */
&& do_add_shm)) {
/* Record shm accesses for user threads even on their own
* stacks, to deal with potential WISE IDEA yield loops. */
add_shm(ls, m, NULL, addr, write, in_kernel);
#ifdef PREEMPT_EVERYWHERE
if (testing_userspace() != in_kernel &&
!(testing_userspace() && KERNEL_MEMORY(addr))) {
maybe_preempt_here(ls, addr);
}
#endif
}
}

Expand Down Expand Up @@ -1385,8 +1397,10 @@ bool mem_shm_intersect(struct ls_state *ls, struct hax *h0, struct hax *h1,
conflicts++;
ma0->conflict = true;
ma1->conflict = true;
#ifndef PREEMPT_EVERYWHERE
// FIXME: make this not interleave horribly with conflicts
check_locksets(ls, h0, h1, ma0, ma1, c0, c1, in_kernel);
#endif
}
ma0 = MEM_ENTRY(rb_next(&ma0->nobe));
ma1 = MEM_ENTRY(rb_next(&ma1->nobe));
Expand Down
42 changes: 41 additions & 1 deletion work/modules/landslide/pp.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
#include "pp.h"
#include "stack.h"
#include "student_specifics.h"
#include "x86.h"

void pps_init(struct pp_config *p)
{
Expand Down Expand Up @@ -139,10 +140,12 @@ bool load_dynamic_pps(struct ls_state *ls, const char *filename)

static bool check_withins(struct ls_state *ls, pp_within_list_t *pps)
{
#ifndef PREEMPT_EVERYWHERE
/* If there are no within_functions, the default answer is yes.
* Otherwise the default answer is no. Later ones take precedence, so
* all of them have to be compared. */
bool any_withins = false;
#endif
bool answer = true;
unsigned int i;
struct pp_within *pp;
Expand All @@ -152,11 +155,13 @@ static bool check_withins(struct ls_state *ls, pp_within_list_t *pps)
ARRAY_LIST_FOREACH(pps, i, pp) {
bool in = within_function_st(st, pp->func_start, pp->func_end);
if (pp->within) {
#ifndef PREEMPT_EVERYWHERE
/* Switch to whitelist mode. */
if (!any_withins) {
any_withins = true;
answer = false;
}
#endif
/* Must be within this function to allow. */
if (in) {
answer = true;
Expand All @@ -183,6 +188,41 @@ bool user_within_functions(struct ls_state *ls)
return check_withins(ls, &ls->pps.user_withins);
}

#ifdef PREEMPT_EVERYWHERE
#define EBP_OFFSET_HEURISTIC 0x10 /* for judging stack frame accesses */
/* Decide whether the memory access at addr should be considered for a
 * preemption point in preempt-everywhere mode. Accesses that land in the
 * current stack frame (plus a small window of pushed args) are ignored;
 * anything else marks the current agent as wanting a shm preemption. */
void maybe_preempt_here(struct ls_state *ls, unsigned int addr)
{
#ifndef TESTING_MUTEXES
	/* Skip accesses performed inside mutex lock/unlock paths. */
	bool mutex_activity =
		ls->sched.cur_agent->action.user_mutex_locking ||
		ls->sched.cur_agent->action.user_mutex_unlocking ||
		ls->sched.cur_agent->action.kern_mutex_locking ||
		ls->sched.cur_agent->action.kern_mutex_trylocking ||
		ls->sched.cur_agent->action.kern_mutex_unlocking;
	if (mutex_activity) {
		return;
	}
#endif
	/* The "current frame" spans from one word below esp up through ebp
	 * plus up to 4 pushed args (EBP_OFFSET_HEURISTIC). False positives
	 * on this shared-memory judgement are tolerable as long as they're
	 * uncommon -- DPOR will just prove the extra PPs independent -- but
	 * false negatives (not preempting on true shms) would miss bugs. */
	unsigned int frame_lo = GET_CPU_ATTR(ls->cpu0, esp) - WORD_SIZE;
	unsigned int frame_hi = GET_CPU_ATTR(ls->cpu0, ebp) + EBP_OFFSET_HEURISTIC;
	if (addr >= frame_lo && addr < frame_hi) {
		/* On-frame access; treat as thread-private. */
		return;
	}
	ls->sched.cur_agent->preempt_for_shm_here = true;
}

bool suspected_data_race(struct ls_state *ls)
{
#ifndef DR_PPS_RESPECT_WITHIN_FUNCTIONS
assert(0 && "PREEMPT_EVERYWHERE requires DR_PPS_RESPECT_WITHIN_FUNCTIONS");
#endif
return ls->sched.cur_agent->preempt_for_shm_here;
}

#else

bool suspected_data_race(struct ls_state *ls)
{
struct pp_data_race *pp;
Expand Down Expand Up @@ -216,4 +256,4 @@ bool suspected_data_race(struct ls_state *ls)
}
return false;
}

#endif
4 changes: 4 additions & 0 deletions work/modules/landslide/pp.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include <simics/api.h>

#include "array_list.h"
#include "student_specifics.h"

struct ls_state;

Expand Down Expand Up @@ -46,5 +47,8 @@ bool load_dynamic_pps(struct ls_state *ls, const char *filename);
bool kern_within_functions(struct ls_state *ls);
bool user_within_functions(struct ls_state *ls);
bool suspected_data_race(struct ls_state *ls);
#ifdef PREEMPT_EVERYWHERE
void maybe_preempt_here(struct ls_state *ls, unsigned int addr);
#endif

#endif
9 changes: 8 additions & 1 deletion work/modules/landslide/save.c
Original file line number Diff line number Diff line change
Expand Up @@ -188,6 +188,9 @@ static struct agent *copy_agent(struct agent *a_src)
COPY_FIELD(last_pf_cr2);
COPY_FIELD(just_delayed_for_data_race);
COPY_FIELD(delayed_data_race_eip);
#ifdef PREEMPT_EVERYWHERE
COPY_FIELD(preempt_for_shm_here);
#endif
COPY_FIELD(just_delayed_for_vr_exit);
COPY_FIELD(delayed_vr_exit_eip);
COPY_FIELD(most_recent_syscall);
Expand Down Expand Up @@ -797,9 +800,13 @@ void save_setjmp(struct save_state *ss, struct ls_state *ls,
Q_INIT_HEAD(&h->children);
h->all_explored = end_of_test;

h->is_preemption_point = is_preemption_point;
h->data_race_eip = data_race_eip;
#ifdef PREEMPT_EVERYWHERE
h->is_preemption_point = true;
#else
h->is_preemption_point = is_preemption_point;
if (is_preemption_point) { assert(data_race_eip == -1); }
#endif

h->marked_children = 0;
h->proportion = 0.0L;
Expand Down
9 changes: 9 additions & 0 deletions work/modules/landslide/schedule.c
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,9 @@ static void agent_fork(struct sched_state *s, unsigned int tid, bool on_runqueue
a->last_pf_cr2 = 0x15410de0u;
a->just_delayed_for_data_race = false;
a->delayed_data_race_eip = -1;
#ifdef PREEMPT_EVERYWHERE
a->preempt_for_shm_here = false;
#endif
a->just_delayed_for_vr_exit = false;
a->delayed_vr_exit_eip = -1;
a->most_recent_syscall = 0;
Expand Down Expand Up @@ -1577,6 +1580,9 @@ void sched_update(struct ls_state *ls)
* Allow arbiter to insert new PPs again. */
CURRENT(s, just_delayed_for_data_race) = false;
CURRENT(s, delayed_data_race_eip) = -1;
#ifdef PREEMPT_EVERYWHERE
CURRENT(s, preempt_for_shm_here) = false;
#endif
}
return;
}
Expand Down Expand Up @@ -1684,6 +1690,9 @@ void sched_update(struct ls_state *ls)
lsprintf(CHOICE, "just delayed DR (tricky disco)\n");
CURRENT(s, just_delayed_for_data_race) = false;
CURRENT(s, delayed_data_race_eip) = -1;
#ifdef PREEMPT_EVERYWHERE
CURRENT(s, preempt_for_shm_here) = false;
#endif
}
#endif
}
Expand Down
3 changes: 3 additions & 0 deletions work/modules/landslide/schedule.h
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,9 @@ struct agent {
* race instruction (to delay its access until after the save point). */
bool just_delayed_for_data_race;
unsigned int delayed_data_race_eip; /* ...and if so, where was it */
#ifdef PREEMPT_EVERYWHERE
bool preempt_for_shm_here;
#endif
/* Same as above but used when exiting a VR yield to a new thread. */
bool just_delayed_for_vr_exit;
unsigned int delayed_vr_exit_eip; /* ...and if so, where was it */
Expand Down

0 comments on commit 94cf177

Please sign in to comment.