Refactor implementation of path-following and QRE.
This is a substantial refactoring and cleanup of path-following and the
interface to QRE tracing.

The only new functionality is the ability to stop at an interior local
maximiser when fitting QRE.

All other changes are internal: they simplify calling the various
QRE-related routines and separate concerns (e.g. formatting/printing is no
longer part of the QRE tracer itself but is delegated to a caller-provided
observer function if desired).
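For illustration, a minimal sketch of how the new stopping option surfaces in pygambit, following the fit_strategy_fixedpoint changes in qre.py below. The game file name and observation counts are placeholders, and passing counts through Game.mixed_strategy_profile is an assumption about the data constructor:

import pygambit as gbt

g = gbt.Game.read_game("game.nfg")  # placeholder game file
# Observed play expressed as counts of each strategy (placeholder values)
data = g.mixed_strategy_profile([[4, 6], [7, 3]])

# Default: find the global maximiser along the principal branch
fit_global = gbt.qre.fit_strategy_fixedpoint(data)
# New in this commit: stop at the first interior local maximiser, if one exists
fit_local = gbt.qre.fit_strategy_fixedpoint(data, local_max=True)
print(fit_local.lam, fit_local.log_like)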
tturocy committed May 24, 2024
1 parent 8c56f2e commit 5708d2a
Showing 18 changed files with 812 additions and 1,281 deletions.
15 changes: 15 additions & 0 deletions .devcontainer/devcontainer.json
@@ -0,0 +1,15 @@
{
"image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04",
"features": {
"ghcr.io/devcontainers/features/python:1": {
"installTools": true,
"version": "3.11"
},
"ghcr.io/devcontainers-contrib/features/gdbgui:2": {
"version": "latest"
},
"ghcr.io/rocker-org/devcontainer-features/apt-packages:1": {
"packages": "automake,autoconf,gdb"
}
}
}
3 changes: 2 additions & 1 deletion ChangeLog
@@ -5,7 +5,8 @@
### Added
- Implemented maximum-likelihood estimation for agent logit QRE, to parallel existing support
for strategic logit QRE. Strategic logit QRE function names have been modified to provide
parallel naming.
parallel naming. Estimation now supports an option to stop at the first interior local
maximiser found (if one exists).


## [16.2.0] - unreleased
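A hedged sketch of the parallel entry point for extensive games, matching the fit_behavior_fixedpoint signature in qre.py below; the game file and the data= form of the behavior-profile constructor are assumptions:

import pygambit as gbt

g = gbt.Game.read_game("centipede.efg")  # placeholder extensive-form game
# Observed play as counts of each action at each information set;
# the nesting (player -> infoset -> action) is an assumption
data = g.mixed_behavior_profile(data=[[[10, 5]], [[8, 7]]])

fit = gbt.qre.fit_behavior_fixedpoint(data, local_max=True)
print(fit.lam, fit.log_like)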
3 changes: 1 addition & 2 deletions Makefile.am
@@ -519,9 +519,8 @@ gambit_logit_SOURCES = \
src/solvers/logit/logbehav.imp \
src/solvers/logit/path.cc \
src/solvers/logit/path.h \
src/solvers/logit/efglogit.h \
src/solvers/logit/logit.h \
src/solvers/logit/efglogit.cc \
src/solvers/logit/nfglogit.h \
src/solvers/logit/nfglogit.cc \
src/tools/logit/logit.cc

46 changes: 19 additions & 27 deletions src/pygambit/gambit.pxd
@@ -455,58 +455,50 @@ cdef extern from "solvers/gnm/gnm.h":
int p_localNewtonInterval, int p_localNewtonMaxits
) except +RuntimeError

cdef extern from "solvers/logit/efglogit.h":
cdef extern from "solvers/logit/logit.h":
cdef cppclass c_LogitQREMixedBehaviorProfile "LogitQREMixedBehaviorProfile":
c_LogitQREMixedBehaviorProfile(c_Game) except +
c_LogitQREMixedBehaviorProfile(c_LogitQREMixedBehaviorProfile) except +
c_Game GetGame() except +
c_MixedBehaviorProfileDouble GetProfile() # except + doesn't compile
double GetLambda() except +
double GetLogLike() except +
int BehaviorProfileLength() except +
int size() except +
double getitem "operator[]"(int) except +IndexError

c_List[c_MixedBehaviorProfileDouble] LogitBehaviorSolve(c_Game,
double,
double,
double) except +RuntimeError

cdef extern from "solvers/logit/nfglogit.h":
cdef cppclass c_LogitQREMixedStrategyProfile "LogitQREMixedStrategyProfile":
c_LogitQREMixedStrategyProfile(c_Game) except +
c_LogitQREMixedStrategyProfile(c_LogitQREMixedStrategyProfile) except +
c_Game GetGame() except +
c_MixedStrategyProfileDouble GetProfile() # except + doesn't compile
double GetLambda() except +
double GetLogLike() except +
int MixedProfileLength() except +
int size() except +
double getitem "operator[]"(int) except +IndexError

cdef cppclass c_StrategicQREEstimator "StrategicQREEstimator":
c_StrategicQREEstimator() except +
c_LogitQREMixedStrategyProfile Estimate(c_LogitQREMixedStrategyProfile,
c_MixedStrategyProfileDouble,
double, double, double) except +RuntimeError

c_List[c_MixedStrategyProfileDouble] LogitStrategySolve(c_Game,
double,
double,
double) except +RuntimeError


cdef extern from "nash.h":
shared_ptr[c_LogitQREMixedBehaviorProfile] LogitBehaviorEstimateHelper(
shared_ptr[c_MixedBehaviorProfileDouble], double, double
c_List[c_MixedBehaviorProfileDouble] LogitBehaviorSolveWrapper(
c_Game, double, double, double
) except +
shared_ptr[c_LogitQREMixedBehaviorProfile] LogitBehaviorAtLambdaHelper(
c_List[c_LogitQREMixedBehaviorProfile] LogitBehaviorPrincipalBranchWrapper(
c_Game, double, double, double
) except +
shared_ptr[c_LogitQREMixedStrategyProfile] LogitStrategyEstimateHelper(
shared_ptr[c_MixedStrategyProfileDouble], double, double
shared_ptr[c_LogitQREMixedBehaviorProfile] LogitBehaviorAtLambdaWrapper(
c_Game, double, double, double
) except +
shared_ptr[c_LogitQREMixedBehaviorProfile] LogitBehaviorEstimateWrapper(
shared_ptr[c_MixedBehaviorProfileDouble], bool, double, double
) except +
shared_ptr[c_LogitQREMixedStrategyProfile] LogitStrategyAtLambdaHelper(
c_List[c_MixedStrategyProfileDouble] LogitStrategySolveWrapper(
c_Game, double, double, double
) except +
c_List[c_LogitQREMixedStrategyProfile] _logit_principal_branch "logit_principal_branch"(
c_List[c_LogitQREMixedStrategyProfile] LogitStrategyPrincipalBranchWrapper(
c_Game, double, double, double
) except +
shared_ptr[c_LogitQREMixedStrategyProfile] LogitStrategyAtLambdaWrapper(
c_Game, double, double, double
) except +
shared_ptr[c_LogitQREMixedStrategyProfile] LogitStrategyEstimateWrapper(
shared_ptr[c_MixedStrategyProfileDouble], bool, double, double
) except +
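The Python-level surface corresponding to these declarations can be sketched as follows; the import path of logit_strategy_atlambda from the compiled module and the payoff matrices are assumptions:

import pygambit as gbt
from pygambit.gambit import logit_strategy_atlambda  # assumed exposure path

g = gbt.Game.from_arrays([[2, 0], [0, 1]], [[1, 0], [0, 2]])  # placeholder game
qre = logit_strategy_atlambda(g, lam=2.0)
print(qre.lam)                            # GetLambda()
print(len(qre))                           # size(): number of probability entries
print([qre[i] for i in range(len(qre))])  # operator[], shifted to 1-based internally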
100 changes: 56 additions & 44 deletions src/pygambit/nash.h
@@ -21,71 +21,83 @@
//

#include "gambit.h"
#include "solvers/logit/efglogit.h"
#include "solvers/logit/nfglogit.h"
#include "solvers/logit/logit.h"

using namespace std;
using namespace Gambit;

class NullBuffer : public std::streambuf {
public:
int overflow(int c) { return c; }
};
List<MixedBehaviorProfile<double>> LogitBehaviorSolveWrapper(const Game &p_game, double p_regret,
double p_firstStep, double p_maxAccel)
{
List<MixedBehaviorProfile<double>> ret;
ret.push_back(LogitBehaviorSolve(LogitQREMixedBehaviorProfile(p_game), p_regret, 1.0,
p_firstStep, p_maxAccel)
.back()
.GetProfile());
return ret;
}

inline List<LogitQREMixedBehaviorProfile> LogitBehaviorPrincipalBranchWrapper(const Game &p_game,
double p_regret,
double p_firstStep,
double p_maxAccel)
{
return LogitBehaviorSolve(LogitQREMixedBehaviorProfile(p_game), p_regret, 1.0, p_firstStep,
p_maxAccel);
}

std::shared_ptr<LogitQREMixedBehaviorProfile>
LogitBehaviorEstimateHelper(std::shared_ptr<MixedBehaviorProfile<double>> p_frequencies,
double p_firstStep, double p_maxAccel)
LogitBehaviorEstimateWrapper(std::shared_ptr<MixedBehaviorProfile<double>> p_frequencies,
bool p_stopAtLocal, double p_firstStep, double p_maxAccel)
{
return make_shared<LogitQREMixedBehaviorProfile>(
LogitBehaviorEstimate(*p_frequencies, p_firstStep, p_maxAccel));
return make_shared<LogitQREMixedBehaviorProfile>(LogitBehaviorEstimate(
*p_frequencies, 1000000.0, 1.0, p_stopAtLocal, p_firstStep, p_maxAccel));
}

std::shared_ptr<LogitQREMixedBehaviorProfile> LogitBehaviorAtLambdaHelper(const Game &p_game,
double p_lambda,
double p_firstStep,
double p_maxAccel)
std::shared_ptr<LogitQREMixedBehaviorProfile> LogitBehaviorAtLambdaWrapper(const Game &p_game,
double p_lambda,
double p_firstStep,
double p_maxAccel)
{
LogitQREMixedBehaviorProfile start(p_game);
AgentQREPathTracer alg;
alg.SetMaxDecel(p_maxAccel);
alg.SetStepsize(p_firstStep);
NullBuffer null_buffer;
std::ostream null_stream(&null_buffer);
return make_shared<LogitQREMixedBehaviorProfile>(
alg.SolveAtLambda(start, null_stream, p_lambda, 1.0));
LogitBehaviorSolveLambda(start, p_lambda, 1.0, p_firstStep, p_maxAccel));
}

std::shared_ptr<LogitQREMixedStrategyProfile>
LogitStrategyEstimateHelper(std::shared_ptr<MixedStrategyProfile<double>> p_frequencies,
double p_firstStep, double p_maxAccel)
List<MixedStrategyProfile<double>> LogitStrategySolveWrapper(const Game &p_game, double p_regret,
double p_firstStep, double p_maxAccel)
{
return make_shared<LogitQREMixedStrategyProfile>(
LogitStrategyEstimate(*p_frequencies, p_firstStep, p_maxAccel));
List<MixedStrategyProfile<double>> ret;
ret.push_back(LogitStrategySolve(LogitQREMixedStrategyProfile(p_game), p_regret, 1.0,
p_firstStep, p_maxAccel)
.back()
.GetProfile());
return ret;
}

inline List<LogitQREMixedStrategyProfile> LogitStrategyPrincipalBranchWrapper(const Game &p_game,
double p_regret,
double p_firstStep,
double p_maxAccel)
{
return LogitStrategySolve(LogitQREMixedStrategyProfile(p_game), p_regret, 1.0, p_firstStep,
p_maxAccel);
}

std::shared_ptr<LogitQREMixedStrategyProfile> LogitStrategyAtLambdaHelper(const Game &p_game,
double p_lambda,
double p_firstStep,
double p_maxAccel)
std::shared_ptr<LogitQREMixedStrategyProfile> LogitStrategyAtLambdaWrapper(const Game &p_game,
double p_lambda,
double p_firstStep,
double p_maxAccel)
{
LogitQREMixedStrategyProfile start(p_game);
StrategicQREPathTracer alg;
alg.SetMaxDecel(p_maxAccel);
alg.SetStepsize(p_firstStep);
NullBuffer null_buffer;
std::ostream null_stream(&null_buffer);
return make_shared<LogitQREMixedStrategyProfile>(
alg.SolveAtLambda(start, null_stream, p_lambda, 1.0));
LogitStrategySolveLambda(start, p_lambda, 1.0, p_firstStep, p_maxAccel));
}

List<LogitQREMixedStrategyProfile> logit_principal_branch(const Game &p_game, double p_maxregret,
double p_firstStep, double p_maxAccel)
std::shared_ptr<LogitQREMixedStrategyProfile>
LogitStrategyEstimateWrapper(std::shared_ptr<MixedStrategyProfile<double>> p_frequencies,
bool p_stopAtLocal, double p_firstStep, double p_maxAccel)
{
LogitQREMixedStrategyProfile start(p_game);
StrategicQREPathTracer alg;
alg.SetMaxDecel(p_maxAccel);
alg.SetStepsize(p_firstStep);
NullBuffer null_buffer;
std::ostream null_stream(&null_buffer);
return alg.TraceStrategicPath(start, null_stream, p_maxregret, 1.0);
return make_shared<LogitQREMixedStrategyProfile>(LogitStrategyEstimate(
*p_frequencies, 1000000.0, 1.0, p_stopAtLocal, p_firstStep, p_maxAccel));
}
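The relationship between the two strategy wrappers can be seen from Python: LogitStrategySolveWrapper keeps only the profile at the end of the trace, while LogitStrategyPrincipalBranchWrapper returns every computed point. A sketch, assuming logit_principal_branch is importable from the compiled module as defined in nash.pxi:

import pygambit as gbt
from pygambit.gambit import logit_principal_branch  # assumed exposure path

g = gbt.Game.from_arrays([[1, -1], [-1, 1]], [[-1, 1], [1, -1]])  # matching pennies
branch = logit_principal_branch(g)  # list of points along the principal branch
# The solve wrapper's result corresponds to the endpoint of this trace:
endpoint = branch[-1]
print(endpoint.lam, endpoint.profile)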
20 changes: 11 additions & 9 deletions src/pygambit/nash.pxi
@@ -185,13 +185,13 @@ def _gnm_strategy_solve(
def _logit_strategy_solve(
game: Game, maxregret: float, first_step: float, max_accel: float,
) -> typing.List[MixedStrategyProfileDouble]:
return _convert_mspd(LogitStrategySolve(game.game, maxregret, first_step, max_accel))
return _convert_mspd(LogitStrategySolveWrapper(game.game, maxregret, first_step, max_accel))


def _logit_behavior_solve(
game: Game, maxregret: float, first_step: float, max_accel: float,
) -> typing.List[MixedBehaviorProfileDouble]:
return _convert_mbpd(LogitBehaviorSolve(game.game, maxregret, first_step, max_accel))
return _convert_mbpd(LogitBehaviorSolveWrapper(game.game, maxregret, first_step, max_accel))


@cython.cclass
@@ -208,7 +208,7 @@ class LogitQREMixedStrategyProfile:
return "LogitQREMixedStrategyProfile(lam=%f,profile=%s)" % (self.lam, self.profile)

def __len__(self):
return deref(self.thisptr).MixedProfileLength()
return deref(self.thisptr).size()

def __getitem__(self, int i):
return deref(self.thisptr).getitem(i+1)
@@ -241,13 +241,14 @@ class LogitQREMixedStrategyProfile:


def _logit_strategy_estimate(profile: MixedStrategyProfileDouble,
local_max: bool = False,
first_step: float = .03,
max_accel: float = 1.1) -> LogitQREMixedStrategyProfile:
"""Estimate QRE corresponding to mixed strategy profile using
maximum likelihood along the principal branch.
"""
ret = LogitQREMixedStrategyProfile(profile.game)
ret.thisptr = LogitStrategyEstimateHelper(profile.profile, first_step, max_accel)
ret.thisptr = LogitStrategyEstimateWrapper(profile.profile, local_max, first_step, max_accel)
return ret


@@ -259,12 +260,12 @@ def logit_strategy_atlambda(game: Game,
game corresponding to lambda value `lam`.
"""
ret = LogitQREMixedStrategyProfile()
ret.thisptr = LogitStrategyAtLambdaHelper(game.game, lam, first_step, max_accel)
ret.thisptr = LogitStrategyAtLambdaWrapper(game.game, lam, first_step, max_accel)
return ret


def logit_principal_branch(game: Game, first_step: float = .03, max_accel: float = 1.1):
solns = _logit_principal_branch(game.game, 1.0e-8, first_step, max_accel)
solns = LogitStrategyPrincipalBranchWrapper(game.game, 1.0e-8, first_step, max_accel)
ret = []
for i in range(solns.Length()):
p = LogitQREMixedStrategyProfile()
@@ -287,7 +288,7 @@ class LogitQREMixedBehaviorProfile:
return f"LogitQREMixedBehaviorProfile(lam={self.lam},profile={self.profile})"

def __len__(self):
return deref(self.thisptr).BehaviorProfileLength()
return deref(self.thisptr).size()

def __getitem__(self, int i):
return deref(self.thisptr).getitem(i+1)
@@ -320,13 +321,14 @@ class LogitQREMixedBehaviorProfile:


def _logit_behavior_estimate(profile: MixedBehaviorProfileDouble,
local_max: bool = False,
first_step: float = .03,
max_accel: float = 1.1) -> LogitQREMixedBehaviorProfile:
"""Estimate QRE corresponding to mixed behavior profile using
maximum likelihood along the principal branch.
"""
ret = LogitQREMixedBehaviorProfile(profile.game)
ret.thisptr = LogitBehaviorEstimateHelper(profile.profile, first_step, max_accel)
ret.thisptr = LogitBehaviorEstimateWrapper(profile.profile, local_max, first_step, max_accel)
return ret


@@ -338,5 +340,5 @@ def logit_behavior_atlambda(game: Game,
game corresponding to lambda value `lam`.
"""
ret = LogitQREMixedBehaviorProfile()
ret.thisptr = LogitBehaviorAtLambdaHelper(game.game, lam, first_step, max_accel)
ret.thisptr = LogitBehaviorAtLambdaWrapper(game.game, lam, first_step, max_accel)
return ret
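A short usage sketch of the behavior-profile counterpart defined above; the game file and the import path are assumptions:

import pygambit as gbt
from pygambit.gambit import logit_behavior_atlambda  # assumed exposure path

g = gbt.Game.read_game("poker.efg")  # placeholder extensive-form game
qre = logit_behavior_atlambda(g, lam=1.5)
print(qre.lam)      # the lambda value at which the branch point was computed
print(qre.profile)  # the MixedBehaviorProfileDouble at that point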
20 changes: 17 additions & 3 deletions src/pygambit/qre.py
@@ -344,7 +344,8 @@ def __repr__(self) -> str:


def fit_strategy_fixedpoint(
data: libgbt.MixedStrategyProfileDouble
data: libgbt.MixedStrategyProfileDouble,
local_max: bool = False
) -> LogitQREMixedStrategyFitResult:
"""Use maximum likelihood estimation to find the logit quantal
response equilibrium on the principal branch for a strategic game
@@ -362,6 +363,13 @@
be expressed as total counts of observations of each strategy
rather than probabilities.
local_max : bool, default False
The default behavior is to find the global maximiser along
the principal branch. If this parameter is set to True,
tracing stops at the first interior local maximiser found.
.. versionadded:: 16.2.0
Returns
-------
LogitQREMixedStrategyFitResult
Expand All @@ -379,7 +387,7 @@ def fit_strategy_fixedpoint(
as a structural model for estimation: The missing manual.
SSRN working paper 4425515.
"""
res = libgbt._logit_strategy_estimate(data)
res = libgbt._logit_strategy_estimate(data, local_max=local_max)
return LogitQREMixedStrategyFitResult(
data, "fixedpoint", res.lam, res.profile, res.log_like
)
@@ -485,7 +493,8 @@ def __repr__(self) -> str:


def fit_behavior_fixedpoint(
data: libgbt.MixedBehaviorProfileDouble
data: libgbt.MixedBehaviorProfileDouble,
local_max: bool = False
) -> LogitQREMixedBehaviorFitResult:
"""Use maximum likelihood estimation to find the logit quantal
response equilibrium on the principal branch for an extensive game
@@ -501,6 +510,11 @@
be expressed as total counts of observations of each action
rather than probabilities.
local_max : bool, default False
The default behavior is to find the global maximiser along
the principal branch. If this parameter is set to True,
tracing stops at the first interior local maximiser found.
Returns
-------
LogitQREMixedBehaviorFitResult
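Finally, a sketch comparing the two stopping rules documented above; the game and counts are placeholders as in the earlier sketches:

import pygambit as gbt

g = gbt.Game.read_game("game.nfg")                 # placeholder game file
data = g.mixed_strategy_profile([[4, 6], [7, 3]])  # placeholder counts

fit_global = gbt.qre.fit_strategy_fixedpoint(data)
fit_local = gbt.qre.fit_strategy_fixedpoint(data, local_max=True)
# With several interior local maximisers along the branch the estimates can
# differ; local_max=True stops at the first one encountered.
if fit_local.lam != fit_global.lam:
    print("first interior local maximiser is not the global maximiser")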
