These are different models of the interpretation of “Black lives matter”. Under what conditions does the exhaustivity inference arise that prompts people to respond with “All lives matter”? What does this suggest about where one should intervene?

## Model components

### State space

We make the vastly oversimplifying assumption that there are only black lives and white lives whose mattering we want to talk about.

A state consists of two Booleans: one indicates whether black lives matter and one whether white lives matter, spanning a four-alternative state space.

### Utterance alternatives

These vary between models. One of the goals is to investigate to what extent it is enough to assume one set of alternatives (“Black lives matter”, “White lives matter”, “All lives matter”, “No lives matter”) and have the QUD drive interpretation effects, or whether one needs to assume different sets of alternatives in the first place (maybe generated by the QUDs themselves). The first model assumes two different sets of alternatives. The first is the one just listed. The second one is: “Black lives matter”, “Black lives don’t matter”. The second and third model assume just the first set of alternatives and derive interpretation effects via the QUD. Uniform prior on utterances.

### QUDs

“Which lives matter?” or “Do black lives matter?” – Model 1 hard-codes these via the available alternatives. Model 2 represents them explicitly. Model 3 does joint state/QUD inference.

ToDo: include the “imperative QUD” that came up in lab meeting? What else?

## Model 1: different sets of alternatives

The following model is basic RSA with different sets of alternatives. If the set of alternatives contains just “BLM” & “negBLM”, the probability of white lives not mattering is lower than if it contains “BLM”, “WLM”, “ALM”, “NLM”. That is, if you explicitly assume these two different sets of alternatives, you get a stronger exhaustivity inference when “ALM” is an alternative.

``````var alpha = 1

var statePrior = function() {
  // Four-alternative state space: each Boolean records whether that
  // group's lives matter. Toggle the ps lines to skew the prior.
  return Categorical({
    // ps: [.9,.04,.04,.02], // a priori likely that all lives matter
    ps: [.25, .25, .25, .25], // uniform prior
    // ps: [.04,.04,.9,.02], // a priori likely that only white lives matter
    vs: [{black: true,  white: true},
         {black: true,  white: false},
         {black: false, white: true},
         {black: false, white: false}]
  })
};

// Possible utterances: Model 1 hard-codes the restricted alternative set
// {"blm", "nblm"}; swap in the second line to use the full four-way set.
var utterancePrior = function() {
  return uniformDraw(['blm', 'nblm'])
  // return uniformDraw(['blm', 'wlm', 'alm', 'nlm'])
};

// Meaning function: maps each utterance to its truth conditions over states.
var literalMeanings = {
  blm:  function(state) { return state.black },
  nblm: function(state) { return !state.black },
  wlm:  function(state) { return state.white },
  alm:  function(state) { return state.black && state.white },
  nlm:  function(state) { return !state.black && !state.white }
};

// Literal listener: conditions the state prior on the utterance being true.
var literalListener = cache(function(utt) {
  return Infer({method: "enumerate"}, function() {
    var s = sample(statePrior())
    condition(literalMeanings[utt](s))
    return s
  })
});

// Pragmatic speaker: soft-max choice of utterance, weighted by how well the
// literal listener's posterior concentrates on the true state.
var speaker = cache(function(state) {
  return Infer({method: "enumerate"}, function() {
    var u = utterancePrior()
    factor(alpha * literalListener(u).score(state))
    return u
  })
});

// Pragmatic listener: inverts the speaker model to infer the state.
var pragmaticListener = cache(function(utt) {
  return Infer({method: "enumerate"}, function() {
    var s = sample(statePrior())
    observe(speaker(s), utt)
    return s
  })
});

// Interpret "Black lives matter" under the alternative set chosen above.
pragmaticListener("blm")
``````

## Model 2: one set of alternatives, different QUDs

You get the exact same effect as in model 1 if you assume just one set of alternatives (“BLM”, “WLM”, “ALM”, “NLM”) but two different QUDs (“Do black lives matter?”, “Which lives matter?”).

``````var alpha = 1

var statePrior = function() {
  // Same four-state space as Model 1; toggle the ps lines to skew the prior.
  return Categorical({
    // ps: [.9,.04,.04,.02],
    ps: [.25, .25, .25, .25],
    // ps: [.04,.04,.9,.02],
    vs: [{black: true,  white: true},
         {black: true,  white: false},
         {black: false, white: true},
         {black: false, white: false}]
  })
};

var qudPrior = function() {
  // Two QUDs: "Do black lives matter?" vs. "Which lives matter?".
  // Toggle the ps lines to favor one question a priori.
  return Categorical({
    ps: [.5, .5],
    // ps: [.9,.1],
    // ps: [.1,.9],
    vs: ["do_blm", "which_lm"]
  });
};

// Possible utterances: Model 2 uses the single full set of alternatives;
// interpretation differences come from the QUD, not the alternative set.
var utterancePrior = function() {
  // return uniformDraw(['blm', 'nblm'])
  // FIX: the original body was missing `return`, so this function returned
  // undefined and every downstream literalMeanings[utt] lookup failed.
  return uniformDraw(['blm', 'wlm', 'alm', 'nlm'])
};

// QUD projection functions: map a full state onto the part of the state
// the question under discussion actually asks about.
var qudFns = {
  do_blm:   function(black, white) { return {black: black} },
  which_lm: function(black, white) { return {black: black, white: white} }
};

// Meaning function: maps each utterance to its truth conditions over states.
var literalMeanings = {
  blm:  function(state) { return state.black },
  nblm: function(state) { return !state.black },
  wlm:  function(state) { return state.white },
  alm:  function(state) { return state.black && state.white },
  nlm:  function(state) { return !state.black && !state.white }
};

// Literal listener: conditions on the utterance being true, then reports
// only the QUD-relevant projection of the state.
var literalListener = cache(function(utt, qud) {
  return Infer({method: "enumerate"}, function() {
    var s = sample(statePrior())
    condition(literalMeanings[utt](s))
    var project = qudFns[qud]
    return project(s.black, s.white)
  })
});

// Pragmatic speaker: informativity is measured with respect to the
// QUD-projected state (here `state` is already a projected QUD value).
var speaker = cache(function(state, qud) {
  return Infer({method: "enumerate"}, function() {
    var u = utterancePrior()
    factor(alpha * literalListener(u, qud).score(state))
    return u
  })
});

// Pragmatic listener: infers the full state given the utterance and a
// fixed, known QUD, by observing the speaker on the projected state.
var pragmaticListener = cache(function(utt, qud) {
  return Infer({method: "enumerate"}, function() {
    var s = sample(statePrior())
    var qValue = qudFns[qud](s.black, s.white)
    observe(speaker(qValue, qud), utt)
    return s
  })
});

// Interpret "blm" under the maximal QUD; switch to "do_blm" to compare.
//pragmaticListener("blm","do_blm")
pragmaticListener("blm","which_lm")
``````

## Model 3: QUD inference

In this model, rather than assuming a particular QUD, we let the model infer the QUD jointly with the state. Weird: “which_lm” is generally inferred to be the more likely QUD unless the QUD prior is skewed, and even then white lives are inferred to be more likely not to matter than to matter. White lives are only more likely to matter than not if, a priori, both black and white lives mattering is the most likely state.

``````var alpha = 1

// State prior as a fixed distribution object (sampled directly below,
// unlike Models 1 and 2 where it is a thunk).
var statePrior = Categorical({
  // ps: [.9,.04,.04,.02],
  ps: [.25, .25, .25, .25],
  // ps: [.04,.04,.9,.02],
  vs: [{black: true,  white: true},
       {black: true,  white: false},
       {black: false, white: true},
       {black: false, white: false}]
})

// QUD prior as a fixed distribution; toggle ps to favor one question.
var qudPrior = Categorical({
  ps: [.5, .5],
  // ps: [.9,.1],
  // ps: [.1,.9],
  vs: ["do_blm", "which_lm"]
});

// possible utterances: full four-way alternative set, uniform prior
var utterancePrior = function() {
return uniformDraw(['blm', 'wlm', 'alm', 'nlm'])
};

// QUD projections: take the whole state and keep only the dimension(s)
// the question asks about.
var qudFns = {
  do_blm:   function(state) { return {black: state.black} },
  which_lm: function(state) { return state }
};

// Meaning function: maps each utterance to its truth conditions over states.
var literalMeanings = {
  blm:  function(state) { return state.black },
  nblm: function(state) { return !state.black },
  wlm:  function(state) { return state.white },
  alm:  function(state) { return state.black && state.white },
  nlm:  function(state) { return !state.black && !state.white }
};

// Literal listener: posterior over full states given the literal meaning.
var literalListener = cache(function(utt) {
  return Infer({method: "enumerate"}, function() {
    var s = sample(statePrior)
    condition(literalMeanings[utt](s))
    return s;
  })
});

// Pragmatic speaker: scores utterances by how much probability the
// QUD-projected literal listener assigns to the QUD-projected true state.
var speaker = cache(function(state, qud) {
  var project = qudFns[qud];
  return Infer({method: "enumerate"}, function() {
    var u = utterancePrior()
    // Push the literal listener's state posterior through the QUD projection.
    var listenerOnQud = Infer({method: 'enumerate'}, function() {
      return project(sample(literalListener(u)))
    });
    factor(alpha * listenerOnQud.score(project(state)))
    return u
  })
});

// Pragmatic listener: joint inference over state AND QUD.
var pragmaticListener = cache(function(utt) {
  return Infer({method: "enumerate"}, function() {
    var s = sample(statePrior)
    var q = sample(qudPrior)
    observe(speaker(s, q), utt)
    return {state: s, qud: q}
  })
});

// Run the joint model once and reuse the result for both marginals.
// (The original recomputed pragmaticListener("blm") for each marginal and
// left `joint` unused; `cache` hid the cost but the variable was dead.)
var joint = pragmaticListener("blm")
var marginal_qud = marginalize(joint, 'qud')
var marginal_state = marginalize(joint, 'state')

viz(marginal_qud)
viz(marginal_state)
``````