@dominikpeters/

# ACE model

## No description

Files
• main.py
main.py
```1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
```
```# CHARITY EVALUATION MODEL
# Peters & Sittler, April/May 2017

####### MODEL PARAMETERS ###########
## In each case, we give (5%,95%) confidence intervals
## (fed to from_conf_interval, which fits a lognormal through them)

#### distribution of true impact ####
# measured in chicken years of suffering prevented / $
# this is ACE's estimate for VEBU and Vegan Outreach, respectively
impact_distribution_low  = (0.15,0.93)
# this is ACE's estimates for MFA and the Humane League, respectively
impact_distribution_high = (4.9,18)

#### distribution of evidence around true impact ####
# eyeballing the variance of the distribution in the ACE estimates for MFA and the Humane League
evidence_distribution_sd = (10,20)

#### number of charities in pool
# ACE has 13 top and standout charities
initial_number_of_charities = (10,15)

# which round are we currently in?
number_of_rounds = (4,6)

# how much money is donated to the recommended charity? (in $)
# interval around ACE money moved in 2016 https://animalcharityevaluators.org/wp-content/uploads/2017/02/ace-2016-year-in-review-rgb.pdf
money_moved = (2000000,5000000)

# how much does a round cost? (in $)
# interval around the values from https://animalcharityevaluators.org/wp-content/uploads/2017/02/ace-2016-year-in-review-rgb.pdf
cost_per_round = (200000,400000)

# Compared to how good it is to live as a human in full health for one year, how bad is it to live as a human in a factory farm for one year?
# e.g. 1/100 = would accept 1 year on a factory farm for 100 years of additional healthy life
good_life_vs_factory_farm = (1/100,1/10)

# Compared to how bad it is to live as a chicken in a factory farm for one year, how bad is it to live as a human in a factory farm for one year?
# e.g. 100 = would be indifferent between saving 100 chickens or 1 human from a factory farm
humans_vs_chickens = (10,1000)

###### END MODEL PARAMETERS ########

import numpy
import math

def from_conf_interval(interval):
    """Draw one sample from a lognormal fitted through a 90% confidence interval.

    The (low, high) pair is interpreted as the 5th and 95th percentiles of
    the distribution; both endpoints must be positive.
    """
    # see https://github.com/getguesstimate/guesstimate-app/blob/master/src/lib/guesstimator/samplers/DistributionLognormal.js
    low, high = interval
    assert high >= low > 0
    log_low = math.log(low)
    log_high = math.log(high)
    # the midpoint of the log-endpoints is the lognormal's mu; the half-width
    # divided by the 95th-percentile z-score (1.645) is its sigma
    mu = (log_low + log_high) / 2
    sigma = (log_high - log_low) / 2 / 1.645
    return numpy.random.lognormal(mu, sigma)

def from_mean_sd(mean, sd):
    """Draw one sample from a lognormal with the given mean and standard deviation.

    mean, sd: desired arithmetic mean and standard deviation of the sampled
    distribution; both must be positive.
    """
    # see https://en.wikipedia.org/wiki/Log-normal_distribution#Notation
    # moment matching: for X ~ Lognormal(mu, sigma),
    #   E[X]   = exp(mu + sigma^2/2)
    #   Var[X] = (exp(sigma^2) - 1) * exp(2*mu + sigma^2)
    # solving for (mu, sigma) gives the formulas below
    assert mean > 0 and sd > 0
    variance_ratio = 1 + sd**2 / mean**2
    mu = math.log(mean / math.sqrt(variance_ratio))
    # BUG FIX: the original used sqrt(1 + sd^2/mean^2) without the log, which
    # yields a distribution whose mean and spread do not match the arguments
    sigma = math.sqrt(math.log(variance_ratio))
    return numpy.random.lognormal(mu, sigma)

def simulate(verbose=False):
    """Run one simulated history of ACE-style charity evaluation.

    Samples all model parameters from their confidence intervals, creates a
    pool of charities with random true impacts, then plays out several
    evaluation rounds: each round gathers one noisy piece of evidence per
    charity and recommends the charity with the highest average evidence.

    Returns a dict of outcome metrics (impact of the last recommendation
    change per dollar of research cost, multi-round averages, money moved,
    cost, etc.).
    """
    # sample distribution parameters
    sampled_impact_low  = from_conf_interval(impact_distribution_low)
    sampled_impact_high = from_conf_interval(impact_distribution_high)
    sampled_evidence_sd = from_conf_interval(evidence_distribution_sd)

    def sample_impact():
        "sample a random true mu for an organisation"
        return from_conf_interval((sampled_impact_low, sampled_impact_high))

    def sample_evidence(true_impact):
        "for an org with true impact mu, sample a piece of evidence"
        # NOTE(review): the lognormal evidence model is disabled; evidence is
        # normal around the true impact and can therefore be negative
        #return from_mean_sd(true_impact, sampled_evidence_sd)
        return numpy.random.normal(true_impact, sampled_evidence_sd)

    def impact_estimate(evidence_list):
        "given a list of sampled evidence impacts, what is our overall impact estimate? we use average"
        return sum(evidence_list) / len(evidence_list)

    # decide true impacts
    true_mu = {}
    sampled_initial_number_of_charities = round(from_conf_interval(initial_number_of_charities))
    orgs = range(sampled_initial_number_of_charities)
    for org in orgs:
        true_mu[org] = sample_impact()

    # best charity in the pool (what a perfect evaluator would find)
    max_impact = max([true_mu[org] for org in orgs])
    arg_max_impact = [org for org in orgs if true_mu[org] == max_impact][0]

    if verbose:
        print("Best achievable impact:", round(max_impact), "("+str(arg_max_impact)+")")

    # store all evidence that we have sampled so far
    gathered_evidence = {}
    for org in orgs:
        # we don't have any evidence yet
        gathered_evidence[org] = []

    # number of evaluation rounds to simulate
    # NOTE(review): if this samples below 3, impact_changes[-3] below raises
    # IndexError; the (4,6) interval makes that rare but possible — confirm
    rounds = round(from_conf_interval(number_of_rounds))

    # row headers of the pretty-printed table shown when verbose=True
    round_str      = "Round:      "
    org_str        = "Recom. Org: "
    estimate_str   = "Estimate:   "
    true_str       = "True Impact:"
    difference_str = "Difference: "
    change_str     = "Change:     "

    last_round_true_impact = 0.0  # true impact of the previous round's recommendation
    two_back_true_impact = 0.0    # true impact of the recommendation two rounds ago
    width = 5 # table column width
    impact_changes = [] # keep a record of impact changes
    for i in range(1,rounds+1):
        round_str += str(i).rjust(width)
        ### do research ###
        # for each org, obtain a sample
        for org in orgs:
            gathered_evidence[org].append(sample_evidence(true_mu[org]))
        # which org do we recommend after this round?
        max_impact_estimate = max([impact_estimate(gathered_evidence[org]) for org in orgs])
        ### pretty-print results ###
        # find the (first) org achieving the maximal estimate — the recommendation
        for org in orgs:
            if impact_estimate(gathered_evidence[org]) == max_impact_estimate:
                org_str += str(org).rjust(width)
                estimate_str += str(round(max_impact_estimate)).rjust(width)
                # how much better (in true impact) is this round's pick than last round's?
                impact_change = true_mu[org] - last_round_true_impact
                true_str += str(round(true_mu[org])).rjust(width)
                difference_str += '{0:+d}'.format(round(true_mu[org] - max_impact_estimate)).rjust(width)
                if impact_change != 0:
                    change_str += '{0:+d}'.format(round(impact_change)).rjust(width)
                else:
                    # recommendation unchanged: leave the table cell blank
                    change_str += "".rjust(width)
                # remember data from this round
                impact_changes.append(impact_change)
                two_back_true_impact = last_round_true_impact
                last_round_true_impact = true_mu[org]
                break

    if verbose:
        print(round_str)
        print(org_str)
        print(estimate_str)
        print(true_str)
        print(difference_str)
        print(change_str)
        print("")

    # sample the economic parameters and convert chicken-years/$ into
    # human-year equivalents per research dollar
    sampled_money_moved = from_conf_interval(money_moved)
    sampled_cost        = from_conf_interval(cost_per_round)
    sampled_good_life_vs_factory_farm = from_conf_interval(good_life_vs_factory_farm)
    sampled_humans_vs_chickens = from_conf_interval(humans_vs_chickens)
    result = {}
    # NOTE(review): impact_change here is whatever the FINAL loop iteration
    # left behind, i.e. the change caused by the last round only
    result["impact"]        = impact_change * sampled_money_moved / sampled_good_life_vs_factory_farm / sampled_humans_vs_chickens / sampled_cost
    # averages over the last 2 and 3 rounds' changes
    result["2-round impact"]= (impact_changes[-1] + impact_changes[-2]) * sampled_money_moved / sampled_good_life_vs_factory_farm / sampled_humans_vs_chickens / sampled_cost / 2
    result["3-round impact"]= (impact_changes[-1] + impact_changes[-2] + impact_changes[-3]) * sampled_money_moved / sampled_good_life_vs_factory_farm / sampled_humans_vs_chickens / sampled_cost / 3
    result["impact change"] = impact_change
    # true impact of the second-to-last recommendation, in human-year terms
    result["rec impact"]    = two_back_true_impact / sampled_good_life_vs_factory_farm / sampled_humans_vs_chickens
    result["cost"]          = sampled_cost
    result["money moved"]   = sampled_money_moved
    return result

# two illustrative runs with the round-by-round table printed
simulate(verbose=True)
simulate(verbose=True)

# Monte Carlo batch of silent simulations
n = 5000
results = [simulate() for _ in range(n)]

# report mean and standard deviation of every metric across the batch
average = {}
std = {}
for metric in results[0]:
    values = [r[metric] for r in results]
    average[metric] = sum(values) / n
    std[metric] = round(numpy.std(values), 1)
    print("average " + metric.ljust(15) + str(round(average[metric],1)).rjust(10) + " ±" + str(std[metric]))

#for r in results:
#  print(round(r["3-round impact"],2))```