module Estimators
using ..DGP: Data, globals
using Optim
using Statistics
using LinearAlgebra
using Base.Iterators
using ForwardDiff: gradient, hessian
using Intervals
using Pipe: @pipe
using DecisionTree
using ScikitLearn
using UUIDs
# Nonparametric regression via a random forest, returning the in-sample
# predictions (used for the first-stage fit of the ACF procedure).
fit_np(X, y) = fit_predict!(
    RandomForestRegressor(impurity_importance=false, n_trees=100, max_depth=10,
        partial_sampling=0.5, n_subfeatures=size(X, 2)),
    X, y)
# Linear regression of y on X with an intercept, returning the fitted values.
fit_lm(X, y) =
    let X = [ones(size(X, 1)) X]
        X * (X \ y)
    end
export acfObjective, estimateACF, estimateOLS
# Cache the most recently built objective, keyed by the dataset's UUID, so
# that estimateACF and estimateOLS reuse the same function (and hence the
# same weighting matrix) for a given dataset.
const cache = Dict{UUID,Function}()
"""
Given a data struct, create the objective function that we will minimize as part of the ACF procedure.
This is separate from the estimateACF function so that a Newton step can be taken in the estimateOLS function.
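
A minimal usage sketch (illustrative; `data` is a `Data` instance from the
companion DGP module):

```julia
f = acfObjective(data)
f([0.5, 0.5])                              # objective value at a candidate (βk, βl)
Optim.minimizer(optimize(f, [0.5, 0.5]))   # what estimateACF does
```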
"""
function acfObjective(data::Data)
    if haskey(cache, data.id)
        return cache[data.id]
    end
    # First stage: recover expected output ϕ by nonparametrically regressing
    # log output on log capital, intermediate input, and labor.
    ϕ = let X = [data.lnKapital[:] data.lnIntermedInput[:] data.lnLabor[:]]
        reshape(fit_np(X, data.lnOutput[:]), globals.nperiodsKeep, globals.nfirms)
    end
    lag(x::Matrix{T}) where {T<:Real} = x[1:(end-1), :][:]
    lead(x::Matrix{T}) where {T<:Real} = x[2:end, :][:]
    lag(x::Symbol) = getfield(data, x)[1:(end-1), :][:]
    lead(x::Symbol) = getfield(data, x)[2:end, :][:]
    ϕ₋₁ = lag(ϕ)
    ϕ₀ = lead(ϕ)
    k₋₁ = lag(:lnKapital)
    k₀ = lead(:lnKapital)
    l₋₁ = lag(:lnLabor)
    l₀ = lead(:lnLabor)
    # Fix the GMM weighting matrix W at the first evaluation point instead of
    # recomputing it at every θ: a continuously-updated (CUE) estimator is apt
    # to run off to infinity here.
    out = let
        W = nothing
        function (θ)
            # Innovation ξ in the productivity process: the residual from a
            # linear regression of ω₀ on ω₋₁, where ω = ϕ - θ[1]·k - θ[2]·l.
            ξ = let
                ω₋₁ = ϕ₋₁ - θ[1] * k₋₁ - θ[2] * l₋₁
                ω₀ = ϕ₀ - θ[1] * k₀ - θ[2] * l₀
                ω₀ - fit_lm(ω₋₁, ω₀)
            end
            # Moment conditions: ξ is orthogonal to current capital and lagged labor.
            moments = ξ .* [k₀ l₋₁]
            if isnothing(W)
                W = cov(moments)
            end
            μ = mean(moments, dims=1)
            n = size(moments, 1)
            return n * only(μ * (W \ μ'))
        end
    end
    # Keep only the most recent objective so stale datasets don't accumulate.
    empty!(cache)
    cache[data.id] = out
    out
end
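"""
Estimate the production-function coefficients (βk, βl) by minimizing the ACF
GMM objective, starting from (0.5, 0.5).
"""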
estimateACF(data::Data) = @pipe data |> acfObjective |> optimize(_, [0.5, 0.5]) |> Optim.minimizer(_)
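"""
Build the tensor-product polynomial basis of the given vectors: one column per
combination of per-variable powers in `0:order`, including the constant column.
For example, `makePolynomial(2, x, y)` returns the 9 columns `x.^i .* y.^j`
for `i, j ∈ 0:2`.
"""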
function makePolynomial(order::Integer, vecs::Vararg{Vector{T}})::Matrix{T} where {T<:Real}
    powers = fill(0:order, length(vecs))
    vector_multiply(vs::Vararg{Vector{T}}) = broadcast(*, vs...)
    # Each term is a tuple of the powers to which we raise the corresponding
    # vectors; the resulting columns are hcat'ed into the design matrix.
    mapreduce(hcat, product(powers...)) do term
        mapreduce(vector_multiply, enumerate(term)) do (i, power)
            vecs[i] .^ power
        end
    end
end
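"""
Estimate (βk, βl) by OLS of log output on current capital and labor plus a
second-order polynomial in lagged capital and intermediate input, then
optionally refine with up to `newtonSteps` Newton steps on the ACF objective.
Refinement stops early (returning the last good estimate) if the capital
coefficient leaves [0, 1]. Set `includeKNoShock=true` to also control for the
no-shock capital series.
"""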
function estimateOLS(data::Data; includeKNoShock=false, newtonSteps=0)
    lag(x::Symbol) = getfield(data, x)[1:(end-1), :][:]
    lead(x::Symbol) = getfield(data, x)[2:end, :][:]
    y₀ = lead(:lnOutput)
    l₀ = lead(:lnLabor)
    k₋₁ = lag(:lnKapital)
    k₀ = lead(:lnKapital)
    m₋₁ = lag(:lnIntermedInput)
    kNoShock = lead(:lnKapitalNoShock)
    X = [k₀ l₀ makePolynomial(2, k₋₁, m₋₁)]
    if includeKNoShock
        X = [X kNoShock]
    end
    # Store the initial OLS estimate of (βk, βl) in case the optimization goes awry.
    β = (X \ y₀)[1:2]
    step = let f = acfObjective(data)
        α -> α - hessian(f, α) \ gradient(f, α)
    end
    # Take the prescribed number of Newton steps on the ACF objective, but
    # stop early if the capital coefficient leaves [0, 1], a sign that we
    # are heading toward a minimum in the negative limit.
    for _ in 1:newtonSteps
        βʼ = step(β)
        if !(βʼ[1] ∈ 0 .. 1)
            return β
        else
            β = βʼ
        end
    end
    return β
end
end
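# Usage sketch (illustrative: `makeData` is a hypothetical constructor; the
# actual entry point lives in the sibling DGP module):
#
#   data = DGP.makeData()
#   estimateACF(data)                 # GMM estimate of (βk, βl)
#   estimateOLS(data; newtonSteps=5)  # OLS refined by Newton steps on the ACF objective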