1 change: 1 addition & 0 deletions .gitignore
@@ -14,3 +14,4 @@ attic/
/full/Project.toml
.CondaPkg/
LocalPreferences.toml
/.claude/settings.local.json
10 changes: 8 additions & 2 deletions Project.toml
@@ -7,6 +7,7 @@ version = "0.4.0"
Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
AutoHashEquals = "15f4f7f2-30c1-5605-9d31-71845cf9641f"
ConstructionBase = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
DataAPI = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
@@ -23,6 +24,8 @@ LogarithmicNumbers = "aa2f6b4e-9042-5d33-9679-40d3a6b85899"
MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
Mmap = "a63ad114-7e13-5084-954f-fe012c677804"
PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
PrettyPrinting = "54e16d92-306c-5ea0-a30b-337be88ac337"
PrettyTables = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d"
PsychometricsBazaarBase = "b0d9cada-d963-45e9-a4c6-4746243987f1"
QuickHeaps = "30b38841-0f52-47f8-a5f8-18d5d4064379"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
@@ -43,13 +46,14 @@ TestExt = "Test"
Accessors = "^0.1.12"
AutoHashEquals = "2"
ConstructionBase = "^1.2"
DataAPI = "1.16.0"
DataFrames = "1.6.1"
Distributions = "^0.25.88"
DocStringExtensions = " ^0.9"
EffectSizes = "^1.0.1"
ElasticArrays = "1.2.12"
FillArrays = "0.13, 1.5.0"
FittedItemBanks = "^0.7.2"
FittedItemBanks = "^0.7.3"
ForwardDiff = "1"
HypothesisTests = "^0.10.12, ^0.11.0"
Interpolations = "^0.14, ^0.15"
@@ -59,7 +63,9 @@ LogarithmicNumbers = "1"
MacroTools = "^0.5.6"
Mmap = "^1.11"
PrecompileTools = "1.2.1"
PsychometricsBazaarBase = "^0.8.4"
PrettyPrinting = "0.4.2"
PrettyTables = "3"
PsychometricsBazaarBase = "^0.8.7"
QuickHeaps = "0.2.2"
Random = "^1.11"
Reexport = "1"
10 changes: 8 additions & 2 deletions src/Aggregators/Aggregators.jl
@@ -20,6 +20,7 @@ using FittedItemBanks: AbstractItemBank, ContinuousDomain,
using ..Responses
using ..Responses: concrete_response_type, function_xs, function_ys, Responses
using ..ConfigBase
import PsychometricsBazaarBase: power_summary
using PsychometricsBazaarBase.ConfigTools: @requiresome, @returnsome,
find1_instance, find1_type,
find1_type_sloppy
@@ -52,6 +53,7 @@ export FunctionOptimizer, FunctionIntegrator
export DistributionAbilityEstimator
export variance, variance_given_mean, mean_1d
export RiemannEnumerationIntegrator
export get_integrator
# export EnumerationOptimizer

# Basic types
@@ -200,6 +202,10 @@ struct FunctionIntegrator{IntegratorT <: Integrator} <: AbilityIntegrator
integrator::IntegratorT
end

function get_integrator(integrator::FunctionIntegrator)
return integrator.integrator
end

function (integrator::FunctionIntegrator{IntegratorT})(f::F,
ncomp,
lh_function::LHF) where {F, LHF, IntegratorT}
@@ -210,8 +216,8 @@ function (integrator::FunctionIntegrator{IntegratorT})(f::F,
integrator.integrator(FunctionProduct(f, lh_function), ncomp)
end

function show(io::IO, ::MIME"text/plain", responses::FunctionIntegrator)
show(io, MIME("text/plain"), responses.integrator)
function power_summary(io::IO, responses::FunctionIntegrator)
power_summary(io, responses.integrator)
end

# Defaults
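The new get_integrator accessor (a matching method is added for TrackedLikelihoodIntegrator in tracked.jl below) lets downstream code unwrap an ability integrator and inspect the integrator it delegates to, for example to reuse its grid as sampling points in the new DerivedMeasures.DistributionSampler. A minimal self-contained sketch of that pattern; GridIntegrator, QuadIntegrator and sample_points are illustrative stand-ins, not package API:

abstract type AbstractIntegrator end
struct GridIntegrator <: AbstractIntegrator
    grid::Vector{Float64}
end
struct QuadIntegrator <: AbstractIntegrator end

struct FunctionIntegrator{T <: AbstractIntegrator}
    integrator::T
end

# The accessor added in this PR simply exposes the wrapped integrator.
get_integrator(fi::FunctionIntegrator) = fi.integrator

# Downstream code can then branch on the concrete integrator type.
sample_points(fi::FunctionIntegrator) = sample_points(get_integrator(fi))
sample_points(g::GridIntegrator) = g.grid
sample_points(::AbstractIntegrator) = nothing  # no natural point set

fi = FunctionIntegrator(GridIntegrator(collect(-3.0:0.5:3.0)))
@assert sample_points(fi) == collect(-3.0:0.5:3.0)
@assert sample_points(FunctionIntegrator(QuadIntegrator())) === nothing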
18 changes: 9 additions & 9 deletions src/Aggregators/ability_estimator.jl
@@ -26,7 +26,7 @@ function pdf(::LikelihoodAbilityEstimator,
AbilityLikelihood(tracked_responses)
end

function show(io::IO, ::MIME"text/plain", ability_estimator::LikelihoodAbilityEstimator)
function power_summary(io::IO, ::LikelihoodAbilityEstimator)
println(io, "Ability likelihood distribution")
end

@@ -61,11 +61,11 @@ function multiple_response_types_guard(tracked_responses)
return false
end

function show(io::IO, ::MIME"text/plain", ability_estimator::PosteriorAbilityEstimator)
function power_summary(io::IO, ability_estimator::PosteriorAbilityEstimator)
println(io, "Ability posterior distribution")
indent_io = indent(io, 2)
print(indent_io, "Prior: ")
show(indent_io, MIME("text/plain"), ability_estimator.prior)
power_summary(indent_io, ability_estimator.prior)
println(io)
end

@@ -224,11 +224,11 @@ function ModeAbilityEstimator(bits...)
ModeAbilityEstimator(dist_est, optimizer)
end

function show(io::IO, ::MIME"text/plain", ability_estimator::ModeAbilityEstimator)
function power_summary(io::IO, ability_estimator::ModeAbilityEstimator)
println(io, "Estimate ability using its mode")
indent_io = indent(io, 2)
show(indent_io, MIME("text/plain"), ability_estimator.dist_est)
show(indent_io, MIME("text/plain"), ability_estimator.optim)
power_summary(indent_io, ability_estimator.dist_est)
power_summary(indent_io, ability_estimator.optim)
end

struct MeanAbilityEstimator{
@@ -246,12 +246,12 @@ function MeanAbilityEstimator(bits...)
MeanAbilityEstimator(dist_est, integrator)
end

function show(io::IO, ::MIME"text/plain", ability_estimator::MeanAbilityEstimator)
function power_summary(io::IO, ability_estimator::MeanAbilityEstimator)
println(io, "Estimate ability using its mean")
indent_io = indent(io, 2)
show(indent_io, MIME("text/plain"), ability_estimator.dist_est)
power_summary(indent_io, ability_estimator.dist_est)
print(indent_io, "Integrator: ")
show(indent_io, MIME("text/plain"), ability_estimator.integrator)
power_summary(indent_io, ability_estimator.integrator)
end

function distribution_estimator(dist_est::DistributionAbilityEstimator)::DistributionAbilityEstimator
2 changes: 1 addition & 1 deletion src/Aggregators/optimizers.jl
@@ -10,7 +10,7 @@ function (optim::FunctionOptimizer)(f::F,
optim.optim(comp_f)
end

function show(io::IO, ::MIME"text/plain", optim::FunctionOptimizer)
function power_summary(io::IO, optim::FunctionOptimizer)
indent_io = indent(io, 2)
if optim.optim isa Optimizers.OneDimOptimOptimizer || optim.optim isa Optimizers.MultiDimOptimOptimizer || optim.optim isa Optimizers.NativeOneDimOptimOptimizer
inner = optim.optim
4 changes: 4 additions & 0 deletions src/Aggregators/tracked.jl
@@ -20,6 +20,10 @@ struct TrackedLikelihoodIntegrator{IntegratorT <: Integrator} <: AbilityIntegrat
tracker::GriddedAbilityTracker
end

function get_integrator(integrator::TrackedLikelihoodIntegrator)
return integrator.integrator
end

function (integrator::TrackedLikelihoodIntegrator{IntegratorT})(f::F,
ncomp) where {F, IntegratorT}
integrator.integrator(FunctionArgProduct(f), integrator.tracker.cur_ability, ncomp)
3 changes: 2 additions & 1 deletion src/ComputerAdaptiveTesting.jl
@@ -30,6 +30,7 @@ include("./NextItemRules/NextItemRules.jl")
include("./TerminationConditions.jl")

# Combining / running
include("./DerivedMeasures.jl")
include("./Rules.jl")
include("./Sim/Sim.jl")
include("./DecisionTree/DecisionTree.jl")
@@ -53,4 +54,4 @@ function require_testext()
return TestExt
end

end
end
3 changes: 3 additions & 0 deletions src/ConfigBase.jl
@@ -2,6 +2,7 @@ module ConfigBase

using Accessors: PropertyLens, opcompose
using DocStringExtensions: TYPEDEF
using PsychometricsBazaarBase: power_summary

export CatConfigBase, walk

@@ -10,6 +11,8 @@
"""
abstract type CatConfigBase end

show(io::IO, ::MIME"text/plain", obj::CatConfigBase) = power_summary(io, obj)

function walk(f, x::CatConfigBase, lens = identity)
f(x, lens)
for fieldname in fieldnames(typeof(x))
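The one-line show fallback above is what ties the new power_summary methods into Julia's display machinery: a CatConfigBase subtype only needs a power_summary method, and nested parts are indented via indent. A minimal self-contained sketch of the pattern, using toy stand-ins for power_summary and indent (the real ones come from PsychometricsBazaarBase):

# Toy stand-ins: "indent" just records an indentation width in the IOContext.
indent(io::IO, n::Int) = IOContext(io, :indent => get(io, :indent, 0) + n)
prefix(io::IO) = " "^get(io, :indent, 0)

abstract type CatConfigBase end

struct Prior
    desc::String
end
power_summary(io::IO, prior::Prior) = println(io, prefix(io), "Prior: ", prior.desc)

struct PosteriorAbilityEstimator <: CatConfigBase
    prior::Prior
end

# One power_summary method per config type; nested parts are indented by two spaces.
function power_summary(io::IO, est::PosteriorAbilityEstimator)
    println(io, prefix(io), "Ability posterior distribution")
    power_summary(indent(io, 2), est.prior)
end

# The fallback added in this PR: any CatConfigBase displays via its power_summary.
Base.show(io::IO, ::MIME"text/plain", obj::CatConfigBase) = power_summary(io, obj)

show(stdout, MIME("text/plain"), PosteriorAbilityEstimator(Prior("Normal(0, 1)")))
# Ability posterior distribution
#   Prior: Normal(0, 1)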
165 changes: 165 additions & 0 deletions src/DerivedMeasures.jl
@@ -0,0 +1,165 @@
module DerivedMeasures

using Distributions: pdf

import PsychometricsBazaarBase: power_summary, GridSummary
using ..Aggregators: TrackedResponses,
Aggregators,
AbilityIntegrator,
AbilityOptimizer,
AbilityEstimator,
ModeAbilityEstimator,
MeanAbilityEstimator,
LikelihoodAbilityEstimator,
DistributionAbilityEstimator,
get_integrator,
expectation
using FittedItemBanks: domdims
using ..NextItemRules: AbilityVariance, compute_criteria, compute_criterion, best_item
using PsychometricsBazaarBase.Integrators: AnyGridIntegrator, get_grid, normdenom
using PsychometricsBazaarBase.IndentWrappers: indent
using PsychometricsBazaarBase: IntegralCoeffs
using PsychometricsBazaarBase: Differentiation

export PointAndSpreadEstimator, MeanAndStdDevEstimator, LaplaceApproxEstimator, SpreadEstimator

abstract type PointAndSpreadEstimator end

# TODO: These all recalculate everything at the moment, but they should reuse the results generated during the CAT

struct MeanAndStdDevEstimator{
DistEstT <: DistributionAbilityEstimator,
IntegratorT <: AbilityIntegrator
} <: PointAndSpreadEstimator
dist_est::DistEstT
integrator::IntegratorT
end

MeanAndStdDevEstimator(ability_estimator::MeanAbilityEstimator) = MeanAndStdDevEstimator(ability_estimator.dist_est, ability_estimator.integrator)
MeanAndStdDevEstimator(ability_variance::AbilityVariance) = MeanAndStdDevEstimator(ability_variance.dist_est, ability_variance.integrator)

function (est::MeanAndStdDevEstimator)(tracked_responses::TrackedResponses)
denom = normdenom(est.integrator,
est.dist_est,
tracked_responses)
mean = expectation(IntegralCoeffs.id,
domdims(tracked_responses.item_bank),
est.integrator,
est.dist_est,
tracked_responses,
denom)
return (
mean,
sqrt(expectation(IntegralCoeffs.SqDev(mean),
domdims(tracked_responses.item_bank),
est.integrator,
est.dist_est,
tracked_responses,
denom))
)
end

function power_summary(io::IO, est::MeanAndStdDevEstimator)
println(io, "Mean and standard deviation estimator")
indent_io = indent(io, 2)
power_summary(indent_io, est.dist_est)
power_summary(indent_io, est.integrator)
end

Base.show(io::IO, ::MIME"text/plain", est::MeanAndStdDevEstimator) = power_summary(io, est)
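
# --- Illustrative sketch (not part of this file) ------------------------------
# A toy grid version of the quantities MeanAndStdDevEstimator computes above:
# `denom` plays the role of normdenom, and the two sums correspond to the
# expectation of the identity and of the squared deviation. All names here are
# local stand-ins, not package API.
function grid_mean_and_stddev(theta::Vector{Float64}, weight::Vector{Float64})
    denom = sum(weight)                                # cf. normdenom
    mean = sum(theta .* weight) / denom                # cf. expectation(IntegralCoeffs.id, ...)
    var = sum((theta .- mean) .^ 2 .* weight) / denom  # cf. expectation(IntegralCoeffs.SqDev(mean), ...)
    return mean, sqrt(var)
end

# An unnormalised standard normal evaluated on a grid recovers mean ~0, std. dev. ~1.
let theta = collect(-6.0:0.01:6.0), weight = exp.(-theta .^ 2 ./ 2)
    m, s = grid_mean_and_stddev(theta, weight)
    @assert abs(m) < 1e-6 && abs(s - 1) < 1e-3
end
# -------------------------------------------------------------------------------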

struct LaplaceApproxEstimator{
DistEstT <: DistributionAbilityEstimator,
OptimizerT <: AbilityOptimizer
} <: PointAndSpreadEstimator
dist_est::DistEstT
optimizer::OptimizerT
end

LaplaceApproxEstimator(ability_estimator::ModeAbilityEstimator) = LaplaceApproxEstimator(ability_estimator.dist_est, ability_estimator.optim)

function (est::LaplaceApproxEstimator)(tracked_responses::TrackedResponses)
# TODO: Numerical stability: should access the log-pdf directly here
mode = est.optimizer(IntegralCoeffs.one, est.dist_est, tracked_responses)
# Laplace approximation: the spread is the std. dev. of the Gaussian fitted at the
# mode, i.e. the inverse square root of the negated curvature of the log-density.
curvature = Differentiation.double_derivative(
ability -> log(Aggregators.pdf(est.dist_est, tracked_responses, ability)), mode)
return (
mode,
sqrt(-1 / curvature)
)
end

function power_summary(io::IO, est::LaplaceApproxEstimator)
println(io, "Laplace approximation based mean and standard deviation estimator")
indent_io = indent(io, 2)
power_summary(indent_io, est.dist_est)
power_summary(indent_io, est.optimizer)
end
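
# --- Illustrative sketch (not part of this file) ------------------------------
# LaplaceApproxEstimator fits a Gaussian at the posterior mode, so the spread is
# sigma ~= 1 / sqrt(-d^2/dtheta^2 log p(theta)) evaluated at the mode. A
# finite-difference stand-in for Differentiation.double_derivative keeps this
# self-contained.
fd_second_derivative(f, x; h = 1e-4) = (f(x + h) - 2 * f(x) + f(x - h)) / h^2

laplace_spread(log_density, mode) = sqrt(-1 / fd_second_derivative(log_density, mode))

# An unnormalised N(0.3, 0.8^2) log-density recovers sigma = 0.8 at its mode.
let log_density = theta -> -(theta - 0.3)^2 / (2 * 0.8^2)
    @assert isapprox(laplace_spread(log_density, 0.3), 0.8; atol = 1e-4)
end
# -------------------------------------------------------------------------------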

struct SpreadEstimator{InnerT <: PointAndSpreadEstimator}
inner::InnerT
end

function (est::SpreadEstimator)(tracked_responses::TrackedResponses)
_, stddev = est.inner(tracked_responses)
return stddev
end

struct DistributionSampler{
DistEst <: DistributionAbilityEstimator,
IntegratorT <: AbilityIntegrator,
ContainerT <: Union{Vector{Float64}, Vector{Vector{Float64}}}
}
dist_est::DistEst
integrator::IntegratorT
points::ContainerT
end

_get_estimator_and_integrator(ability_estimator::MeanAbilityEstimator) = (ability_estimator.dist_est, ability_estimator.integrator)
_get_estimator_and_integrator(ability_variance::AbilityVariance) = (ability_variance.dist_est, ability_variance.integrator)

function DistributionSampler(composite::Union{MeanAbilityEstimator, AbilityVariance}, points=nothing)
dist_est, integrator = _get_estimator_and_integrator(composite)
return DistributionSampler(dist_est, integrator, points)
end

function DistributionSampler(dist_est::DistributionAbilityEstimator, integrator::Union{AbilityIntegrator, Nothing}=nothing, points::Nothing=nothing)
@info "DistributionSampler" dist_est integrator points
if isnothing(integrator)
return nothing
end
inner_integrator = get_integrator(integrator)
if !isnothing(points)
return DistributionSampler(dist_est, integrator, points)
elseif inner_integrator isa AnyGridIntegrator
return DistributionSampler(dist_est, integrator, get_grid(inner_integrator))
else
return nothing
end
end

function eachmatcol(xs::AbstractMatrix)
eachcol(xs)
end

function eachmatcol(xs::AbstractVector)
xs
end
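
# --- Illustrative sketch (not part of this file) ------------------------------
# eachmatcol (just above) accepts the two point layouts DistributionSampler may
# hold: a plain vector of one-dimensional ability points, or a matrix with one
# multidimensional point per column. The example data here is made up.
let pts_1d = [-1.0, 0.0, 1.0],
    pts_2d = [-1.0 0.0 1.0;
               0.5 0.5 0.5]
    @assert collect(eachmatcol(pts_1d)) == [-1.0, 0.0, 1.0]                       # elements as-is
    @assert collect(eachmatcol(pts_2d)) == [[-1.0, 0.5], [0.0, 0.5], [1.0, 0.5]]  # one column per point
end
# -------------------------------------------------------------------------------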

function (est::DistributionSampler)(tracked_responses::TrackedResponses)
num = Aggregators.pdf.(
est.dist_est,
tracked_responses,
eachmatcol(est.points)
)
denom = normdenom(est.integrator, est.dist_est, tracked_responses)
return num ./ denom
end

function power_summary(io::IO, est::DistributionSampler)
println(io, "Distribution sampler")
indent_io = indent(io, 2)
power_summary(indent_io, est.dist_est)
power_summary(indent_io, est.integrator)
power_summary(indent_io, GridSummary(est.points))
end

end
7 changes: 6 additions & 1 deletion src/NextItemRules/NextItemRules.jl
@@ -17,10 +17,12 @@ using Random: AbstractRNG, Xoshiro

using ..Responses: BareResponses
using ..ConfigBase
import PsychometricsBazaarBase: power_summary
using PsychometricsBazaarBase.ConfigTools: @requiresome, @returnsome,
find1_instance, find1_type
using PsychometricsBazaarBase.Integrators: Integrator, intval
using PsychometricsBazaarBase: Integrators
using PsychometricsBazaarBase: Differentiation
using PsychometricsBazaarBase.IndentWrappers: indent
import PsychometricsBazaarBase.IntegralCoeffs
using FittedItemBanks: AbstractItemBank, DiscreteDomain, DomainType,
@@ -56,8 +58,11 @@ export PointResponseExpectation, DistributionResponseExpectation
export MatrixScalarizer, DeterminantScalarizer, TraceScalarizer
export AbilityCovarianceStateMultiCriterion, StateMultiCriterion, ItemMultiCriterion
export InformationMatrixCriteria
export ScalarizedStateCriteron, ScalarizedItemCriteron
export ScalarizedStateCriterion, ScalarizedItemCriterion
export DRuleItemCriterion, TRuleItemCriterion
export ObservedInformationPointwiseItemCriterion
export RawEmpiricalInformationPointwiseItemCriterion
export EmpiricalInformationPointwiseItemCriterion

# Prelude
include("./prelude/abstract.jl")