futurize_notes_round_2 - npalmer-professional/HARK-1 GitHub Wiki

Use these two commands to futurize each file; `futurize` just prints the proposed diff to stdout, so the output is redirected into this page:

```bash
# First command: review the suggested changes; if they look reasonable, run the second command
futurize filename.py >> ~/workspace/HARK-1.wiki/futurize_notes_round_2.md
# Second command: same as above, but with "-w" (write) so the changes are actually applied to the file
futurize -w filename.py >> ~/workspace/HARK-1.wiki/futurize_notes_round_2.md
```

Note that there will often be two entries in this document for each file, because both commands above are run. (Occasionally a minor manual change makes the automated change from the second command unnecessary, leading to only one entry.)

Note: something new which I did not encounter previously is the need for relative vs absolute imports. See the first examples below.
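
For example, here is what that change looks like in miniature. This is a generic sketch of a module living inside a package (laid out like `HARK/`, with `core.py` and `interpolation.py` as siblings), not a copy of the actual HARK files:

```python
# Body of a module inside the package, e.g. HARK/interpolation.py in this sketch.
from __future__ import absolute_import   # Python 2: bare imports become absolute, as in Python 3

# Python 2 only (implicit relative import of the sibling module core.py):
#     from core import HARKobject        # fails on Python 3: "No module named 'core'"
# What futurize writes instead, valid on both interpreters:
from .core import HARKobject              # explicit relative import of the sibling module

# An absolute import of the same name also works once the package is importable:
#     from HARK.core import HARKobject
```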

I think the way to test this is to run futurize over everything and then check whether the scripts can still be run (and the package imported) from an arbitrary directory.
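
One crude way to do that check (a hypothetical helper, not part of HARK; it assumes the package has been installed, e.g. with `pip install -e .`, so it is importable from anywhere):

```python
# check_imports.py -- throwaway script; run it from any directory outside the repo.
import importlib
import traceback

# A few of the modules touched in this futurize round; extend the list as needed.
modules = [
    'HARK.core',
    'HARK.interpolation',
    'HARK.parallel',
    'HARK.ConsumptionSaving.ConsIndShockModel',
]

for name in modules:
    try:
        importlib.import_module(name)
        print('OK      ' + name)
    except Exception:
        print('FAILED  ' + name)
        traceback.print_exc()
```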

Important reading:

Output from futurize:

RefactoringTool: No changes to ./HARK/estimation.py

```diff
--- ./HARK/interpolation.py (original)
+++ ./HARK/interpolation.py (refactored)
@@ -7,9 +7,10 @@
 distance method from HARKobject.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
 from builtins import range
 import numpy as np
-from core import HARKobject
+from .core import HARKobject
 from copy import deepcopy
 
 def _isscalar(x):
```

```diff
--- ./HARK/interpolation.py (original)
+++ ./HARK/interpolation.py (refactored)
@@ -7,9 +7,10 @@
 distance method from HARKobject.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
 from builtins import range
 import numpy as np
-from core import HARKobject
+from .core import HARKobject
 from copy import deepcopy
 
 def _isscalar(x):
```

```diff
--- ./HARK/__init__.py (original)
+++ ./HARK/__init__.py (refactored)
@@ -1 +1,2 @@
-from core import *
+from __future__ import absolute_import
+from .core import *
```

```diff
--- ./HARK/__init__.py (original)
+++ ./HARK/__init__.py (refactored)
@@ -1 +1,2 @@
-from core import *
+from __future__ import absolute_import
+from .core import *
```

```diff
--- ./HARK/core.py (original)
+++ ./HARK/core.py (refactored)
@@ -6,12 +6,17 @@
 model adds an additional layer, endogenizing some of the inputs to the micro
 problem by finding a general equilibrium dynamic rule.
 '''
-from utilities import getArgNames, NullFunc
+from __future__ import print_function
+from __future__ import absolute_import
+
+from builtins import str
+from builtins import range
+from builtins import object
+from .utilities import getArgNames, NullFunc
 from copy import copy, deepcopy
 import numpy as np
 from time import clock
-from parallel import multiThreadCommands
+from .parallel import multiThreadCommands
 
 def distanceMetric(thing_A,thing_B):
     '''
```

```diff
--- ./HARK/core.py (original)
+++ ./HARK/core.py (refactored)
@@ -6,12 +6,17 @@
 model adds an additional layer, endogenizing some of the inputs to the micro
 problem by finding a general equilibrium dynamic rule.
 '''
-from utilities import getArgNames, NullFunc
+from __future__ import print_function
+from __future__ import absolute_import
+
+from builtins import str
+from builtins import range
+from builtins import object
+from .utilities import getArgNames, NullFunc
 from copy import copy, deepcopy
 import numpy as np
 from time import clock
-from parallel import multiThreadCommands
+from .parallel import multiThreadCommands
 
 def distanceMetric(thing_A,thing_B):
     '''
```

```diff
--- ./HARK/utilities.py (original)
+++ ./HARK/utilities.py (refactored)
@@ -5,6 +5,10 @@
 '''
 from __future__ import division     # Import Python 3.x division function
+from __future__ import print_function
+from builtins import str
+from builtins import range
+from builtins import object
 import functools
 import warnings
 import numpy as np                  # Python's numeric library, abbreviated "np"
@@ -75,7 +79,7 @@
     return argNames
 
-class NullFunc():
+class NullFunc(object):
     '''
     A trivial class that acts as a placeholder "do nothing" function.
     '''
```
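
The `class NullFunc():` to `class NullFunc(object):` change above is the old-style versus new-style class fix. A minimal illustration in plain Python (not HARK code) of why the explicit base matters on Python 2:

```python
class OldStyle:            # Python 2: an "old-style" class; Python 3: already new-style
    pass

class NewStyle(object):    # new-style on both interpreters; this is the form futurize writes
    pass

# On Python 2, old-style instances all share the generic type <type 'instance'>, so
# descriptors, super(), and __mro__ behave differently; the explicit object base
# gives the modern type machinery on both interpreters.
print(type(OldStyle()))    # Py2: <type 'instance'>   Py3: <class '__main__.OldStyle'>
print(type(NewStyle()))    # Py2 and Py3: the NewStyle class itself
print(NewStyle.__mro__)    # (NewStyle, object) on both
```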

```diff
--- ./HARK/parallel.py (original)
+++ ./HARK/parallel.py (refactored)
@@ -4,6 +4,9 @@
 a command prompt.
 '''
 from __future__ import print_function, division
+from builtins import zip
+from builtins import str
+from builtins import range
 import multiprocessing
 import numpy as np
 from time import clock
@@ -19,9 +22,9 @@
 # such that they will raise useful errors if called.
 def raiseImportError(moduleStr):
     def defineImportError(*args, **kwargs):
-        raise ImportError,moduleStr + ' could not be imported, and is required for this'+\
+        raise ImportError(moduleStr + ' could not be imported, and is required for this'+\
             ' function.  See HARK documentation for more information on how to install the ' \
-            + moduleStr + ' module.'
+            + moduleStr + ' module.')
     return defineImportError
 
 Parallel = raiseImportError('joblib')
@@ -228,7 +231,7 @@
     print('Resuming search after ' + str(iters) + ' iterations and ' + str(evals) + ' function evaluations.')
 
     # Initialize some inputs for the multithreader
-    j_list = range(N-P,N)
+    j_list = list(range(N-P,N))
     opt_params= [r_param,c_param,e_param]
 
     # Run the Nelder-Mead algorithm until a terminal condition is met
@@ -361,15 +364,15 @@
     '''
     f = open(name + '.txt','rb')
     my_reader = csv.reader(f,delimiter=' ')
-    my_shape_txt = my_reader.next()
+    my_shape_txt = next(my_reader)
     shape0 = int(my_shape_txt[0])
     shape1 = int(my_shape_txt[1])
-    my_nums_txt = my_reader.next()
+    my_nums_txt = next(my_reader)
     iters = int(my_nums_txt[0])
     evals = int(my_nums_txt[1])
-    simplex_flat = np.array(my_reader.next(),dtype=float)
+    simplex_flat = np.array(next(my_reader),dtype=float)
     simplex = np.reshape(simplex_flat,(shape0,shape1))
-    fvals = np.array(my_reader.next(),dtype=float)
+    fvals = np.array(next(my_reader),dtype=float)
     f.close()
 
     return simplex, fvals, iters, evals
```

```diff
--- ./HARK/parallel.py (original)
+++ ./HARK/parallel.py (refactored)
@@ -4,6 +4,9 @@
 a command prompt.
 '''
 from __future__ import print_function, division
+from builtins import zip
+from builtins import str
+from builtins import range
 import multiprocessing
 import numpy as np
 from time import clock
@@ -19,9 +22,9 @@
 # such that they will raise useful errors if called.
 def raiseImportError(moduleStr):
     def defineImportError(*args, **kwargs):
-        raise ImportError,moduleStr + ' could not be imported, and is required for this'+\
+        raise ImportError(moduleStr + ' could not be imported, and is required for this'+\
             ' function.  See HARK documentation for more information on how to install the ' \
-            + moduleStr + ' module.'
+            + moduleStr + ' module.')
     return defineImportError
 
 Parallel = raiseImportError('joblib')
@@ -228,7 +231,7 @@
     print('Resuming search after ' + str(iters) + ' iterations and ' + str(evals) + ' function evaluations.')
 
     # Initialize some inputs for the multithreader
-    j_list = range(N-P,N)
+    j_list = list(range(N-P,N))
     opt_params= [r_param,c_param,e_param]
 
     # Run the Nelder-Mead algorithm until a terminal condition is met
@@ -361,15 +364,15 @@
     '''
     f = open(name + '.txt','rb')
     my_reader = csv.reader(f,delimiter=' ')
-    my_shape_txt = my_reader.next()
+    my_shape_txt = next(my_reader)
     shape0 = int(my_shape_txt[0])
     shape1 = int(my_shape_txt[1])
-    my_nums_txt = my_reader.next()
+    my_nums_txt = next(my_reader)
     iters = int(my_nums_txt[0])
     evals = int(my_nums_txt[1])
-    simplex_flat = np.array(my_reader.next(),dtype=float)
+    simplex_flat = np.array(next(my_reader),dtype=float)
     simplex = np.reshape(simplex_flat,(shape0,shape1))
-    fvals = np.array(my_reader.next(),dtype=float)
+    fvals = np.array(next(my_reader),dtype=float)
     f.close()
 
     return simplex, fvals, iters, evals
```
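
The parallel.py changes above bundle three Python-2-only idioms: the comma form of `raise`, the `.next()` iterator method, and `range()` returning a list. A self-contained sketch of the before/after behaviour (toy names, not HARK's):

```python
import csv
import io

def require_module(module_name):
    # Python 2:  raise ImportError, module_name + ' could not be imported'
    # Both interpreters (what futurize writes):
    raise ImportError(module_name + ' could not be imported, and is required for this function.')

reader = csv.reader(io.StringIO(u"3 2\n1 2 3 4 5 6\n"), delimiter=' ')
shape_row = next(reader)        # replaces reader.next(), which no longer exists in Python 3
print(shape_row)                # ['3', '2']

j_list = list(range(4, 7))      # range() is lazy in Python 3; wrap it when a real list is needed
print(j_list)                   # [4, 5, 6]

try:
    require_module('joblib')
except ImportError as err:      # 'except ImportError, err:' is likewise Python-2-only syntax
    print(err)
```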

```diff
--- ./HARK/ConsumptionSaving/ConsIndShockModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsIndShockModel.py (refactored)
@@ -13,6 +13,11 @@
 See HARK documentation for mathematical descriptions of the models being solved.
 '''
 from __future__ import division
+from __future__ import print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
+from builtins import object
 from copy import copy, deepcopy
 import numpy as np
 from scipy.optimize import newton
@@ -1693,29 +1698,29 @@
         #Evaluate and report on the return impatience condition
         RIC=(self.LivPrb[0]*(self.Rfree*self.DiscFac)**(1/self.CRRA))/self.Rfree
         if RIC<1:
-            print 'The return impatiance factor value for the supplied parameter values satisfies the return impatiance condition.'
+            print('The return impatiance factor value for the supplied parameter values satisfies the return impatiance condition.')
         else:
-            print 'The given type violates the return impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.'
+            print('The given type violates the return impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.')
         if verbose:
-            print 'The return impatiance factor value for the supplied parameter values is ' + str(RIC)
+            print('The return impatiance factor value for the supplied parameter values is ' + str(RIC))
 
         #Evaluate and report on the absolute impatience condition
         AIC=self.LivPrb[0]*(self.Rfree*self.DiscFac)**(1/self.CRRA)
         if AIC<1:
-            print 'The absolute impatiance factor value for the supplied parameter values satisfies the absolute impatiance condition.'
+            print('The absolute impatiance factor value for the supplied parameter values satisfies the absolute impatiance condition.')
         else:
-            print 'The given type violates the absolute impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.'
+            print('The given type violates the absolute impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.')
         if verbose:
-            print 'The absolute impatiance factor value for the supplied parameter values is ' + str(AIC)
+            print('The absolute impatiance factor value for the supplied parameter values is ' + str(AIC))
 
         #Evaluate and report on the finite human wealth condition
         FHWC=self.PermGroFac[0]/self.Rfree
         if FHWC<1:
-            print 'The finite human wealth factor value for the supplied parameter values satisfies the finite human wealth condition.'
+            print('The finite human wealth factor value for the supplied parameter values satisfies the finite human wealth condition.')
         else:
-            print 'The given type violates the finite human wealth condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.'
+            print('The given type violates the finite human wealth condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.')
         if verbose:
-            print 'The finite human wealth factor value for the supplied parameter values is ' + str(FHWC)
+            print('The finite human wealth factor value for the supplied parameter values is ' + str(FHWC))
 
 
 class IndShockConsumerType(PerfForesightConsumerType):
@@ -2021,29 +2026,29 @@
         #Evaluate and report on the growth impatience condition
         GIC=(self.LivPrb[0]*exp_psi_inv*(self.Rfree*self.DiscFac)**(1/self.CRRA))/self.PermGroFac[0]
         if GIC<1:
-            print 'The growth impatiance factor value for the supplied parameter values satisfies the growth impatiance condition.'
+            print('The growth impatiance factor value for the supplied parameter values satisfies the growth impatiance condition.')
         else:
-            print 'The given type violates the growth impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.'
+            print('The given type violates the growth impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.')
         if verbose:
-            print 'The growth impatiance factor value for the supplied parameter values is ' + str(GIC)
+            print('The growth impatiance factor value for the supplied parameter values is ' + str(GIC))
 
         #Evaluate and report on the weak return impatience condition
         WRIC=(self.LivPrb[0]*(self.UnempPrb**(1/self.CRRA))*(self.Rfree*self.DiscFac)**(1/self.CRRA))/self.Rfree
         if WRIC<1:
-            print 'The weak return impatiance factor value for the supplied parameter values satisfies the weak return impatiance condition.'
+            print('The weak return impatiance factor value for the supplied parameter values satisfies the weak return impatiance condition.')
         else:
-            print 'The given type violates the weak return impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.'
+            print('The given type violates the weak return impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.')
         if verbose:
-            print 'The weak return impatiance factor value for the supplied parameter values is ' + str(WRIC)
+            print('The weak return impatiance factor value for the supplied parameter values is ' + str(WRIC))
 
         #Evaluate and report on the finite value of autarky condition
         FVAC=self.LivPrb[0]*self.DiscFac*exp_psi_to_one_minus_rho*(self.PermGroFac[0]**(1-self.CRRA))
         if FVAC<1:
-            print 'The finite value of autarky factor value for the supplied parameter values satisfies the finite value of autarky condition.'
+            print('The finite value of autarky factor value for the supplied parameter values satisfies the finite value of autarky condition.')
         else:
-            print 'The given type violates the finite value of autarky condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.'
+            print('The given type violates the finite value of autarky condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.')
         if verbose:
-            print 'The finite value of autarky factor value for the supplied parameter values is ' + str(FVAC)
+            print('The finite value of autarky factor value for the supplied parameter values is ' + str(FVAC))
 
 
 class KinkedRconsumerType(IndShockConsumerType):
     '''
@@ -2382,8 +2387,8 @@
     elif grid_type == "exp_mult":
         aXtraGrid = makeGridExpMult(ming=aXtraMin, maxg=aXtraMax, ng=aXtraCount, timestonest=exp_nest)
     else:
-        raise Exception, "grid_type not recognized in __init__." + \
-                         "Please ensure grid_type is 'linear' or 'exp_mult'"
+        raise Exception("grid_type not recognized in __init__." + \
+                         "Please ensure grid_type is 'linear' or 'exp_mult'")
 
     # Add in additional points for the grid:
     for a in aXtraExtra:
@@ -2397,7 +2402,7 @@
 ####################################################################################################
 
 if __name__ == '__main__':
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
     from HARK.utilities import plotFuncsDer, plotFuncs
     from time import clock
     mystr = lambda number : "{:.4f}".format(number)
```

```diff
--- ./HARK/ConsumptionSaving/ConsIndShockModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsIndShockModel.py (refactored)
@@ -13,6 +13,11 @@
 See HARK documentation for mathematical descriptions of the models being solved.
 '''
 from __future__ import division
+from __future__ import print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
+from builtins import object
 from copy import copy, deepcopy
 import numpy as np
 from scipy.optimize import newton
@@ -1693,29 +1698,29 @@
         #Evaluate and report on the return impatience condition
         RIC=(self.LivPrb[0]*(self.Rfree*self.DiscFac)**(1/self.CRRA))/self.Rfree
         if RIC<1:
-            print 'The return impatiance factor value for the supplied parameter values satisfies the return impatiance condition.'
+            print('The return impatiance factor value for the supplied parameter values satisfies the return impatiance condition.')
         else:
-            print 'The given type violates the return impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.'
+            print('The given type violates the return impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.')
         if verbose:
-            print 'The return impatiance factor value for the supplied parameter values is ' + str(RIC)
+            print('The return impatiance factor value for the supplied parameter values is ' + str(RIC))
 
         #Evaluate and report on the absolute impatience condition
         AIC=self.LivPrb[0]*(self.Rfree*self.DiscFac)**(1/self.CRRA)
         if AIC<1:
-            print 'The absolute impatiance factor value for the supplied parameter values satisfies the absolute impatiance condition.'
+            print('The absolute impatiance factor value for the supplied parameter values satisfies the absolute impatiance condition.')
         else:
-            print 'The given type violates the absolute impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.'
+            print('The given type violates the absolute impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.')
         if verbose:
-            print 'The absolute impatiance factor value for the supplied parameter values is ' + str(AIC)
+            print('The absolute impatiance factor value for the supplied parameter values is ' + str(AIC))
 
         #Evaluate and report on the finite human wealth condition
         FHWC=self.PermGroFac[0]/self.Rfree
         if FHWC<1:
-            print 'The finite human wealth factor value for the supplied parameter values satisfies the finite human wealth condition.'
+            print('The finite human wealth factor value for the supplied parameter values satisfies the finite human wealth condition.')
         else:
-            print 'The given type violates the finite human wealth condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.'
+            print('The given type violates the finite human wealth condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.')
         if verbose:
-            print 'The finite human wealth factor value for the supplied parameter values is ' + str(FHWC)
+            print('The finite human wealth factor value for the supplied parameter values is ' + str(FHWC))
 
 
 class IndShockConsumerType(PerfForesightConsumerType):
@@ -2021,29 +2026,29 @@
         #Evaluate and report on the growth impatience condition
         GIC=(self.LivPrb[0]*exp_psi_inv*(self.Rfree*self.DiscFac)**(1/self.CRRA))/self.PermGroFac[0]
         if GIC<1:
-            print 'The growth impatiance factor value for the supplied parameter values satisfies the growth impatiance condition.'
+            print('The growth impatiance factor value for the supplied parameter values satisfies the growth impatiance condition.')
         else:
-            print 'The given type violates the growth impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.'
+            print('The given type violates the growth impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.')
         if verbose:
-            print 'The growth impatiance factor value for the supplied parameter values is ' + str(GIC)
+            print('The growth impatiance factor value for the supplied parameter values is ' + str(GIC))
 
         #Evaluate and report on the weak return impatience condition
         WRIC=(self.LivPrb[0]*(self.UnempPrb**(1/self.CRRA))*(self.Rfree*self.DiscFac)**(1/self.CRRA))/self.Rfree
         if WRIC<1:
-            print 'The weak return impatiance factor value for the supplied parameter values satisfies the weak return impatiance condition.'
+            print('The weak return impatiance factor value for the supplied parameter values satisfies the weak return impatiance condition.')
         else:
-            print 'The given type violates the weak return impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.'
+            print('The given type violates the weak return impatience condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.')
         if verbose:
-            print 'The weak return impatiance factor value for the supplied parameter values is ' + str(WRIC)
+            print('The weak return impatiance factor value for the supplied parameter values is ' + str(WRIC))
 
         #Evaluate and report on the finite value of autarky condition
         FVAC=self.LivPrb[0]*self.DiscFac*exp_psi_to_one_minus_rho*(self.PermGroFac[0]**(1-self.CRRA))
         if FVAC<1:
-            print 'The finite value of autarky factor value for the supplied parameter values satisfies the finite value of autarky condition.'
+            print('The finite value of autarky factor value for the supplied parameter values satisfies the finite value of autarky condition.')
         else:
-            print 'The given type violates the finite value of autarky condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.'
+            print('The given type violates the finite value of autarky condition with the supplied parameter values. Therefore, a nondegenerate solution may not be available. See Table 3 in "Theoretical Foundations of Buffer Stock Saving" (Carroll, 2011) to check which conditions are sufficient for a nondegenerate solution.')
         if verbose:
-            print 'The finite value of autarky factor value for the supplied parameter values is ' + str(FVAC)
+            print('The finite value of autarky factor value for the supplied parameter values is ' + str(FVAC))
 
 
 class KinkedRconsumerType(IndShockConsumerType):
     '''
@@ -2382,8 +2387,8 @@
     elif grid_type == "exp_mult":
         aXtraGrid = makeGridExpMult(ming=aXtraMin, maxg=aXtraMax, ng=aXtraCount, timestonest=exp_nest)
     else:
-        raise Exception, "grid_type not recognized in __init__." + \
-                         "Please ensure grid_type is 'linear' or 'exp_mult'"
+        raise Exception("grid_type not recognized in __init__." + \
+                         "Please ensure grid_type is 'linear' or 'exp_mult'")
 
     # Add in additional points for the grid:
     for a in aXtraExtra:
@@ -2397,7 +2402,7 @@
 ####################################################################################################
 
 if __name__ == '__main__':
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
    from HARK.utilities import plotFuncsDer, plotFuncs
     from time import clock
     mystr = lambda number : "{:.4f}".format(number)
```
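
Nearly every hunk above is the print-statement fixer. As a compact reminder of what it does and why the `__future__` line matters on Python 2 (a generic sketch with made-up values, not HARK code):

```python
from __future__ import print_function   # makes print a function on Python 2 as well

RIC = 0.98
verbose = True

# Python 2 statement form (the '-' lines above):
#     print 'The return impatience factor value ... is ' + str(RIC)
# Function form (the '+' lines), identical output on both interpreters:
if RIC < 1:
    print('The supplied parameter values satisfy the return impatience condition.')
else:
    print('The supplied parameter values violate the return impatience condition.')
if verbose:
    print('The return impatience factor value is ' + str(RIC))
```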

```diff
--- ./HARK/ConsumptionSaving/ConsAggShockModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsAggShockModel.py (refactored)
@@ -5,6 +5,9 @@
 used for solving "macroeconomic" models with aggregate shocks.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
 import numpy as np
 import scipy.stats as stats
 from HARK.interpolation import LinearInterp, LinearInterpOnInterp1D, ConstantFunction, IdentityFunction,\
@@ -13,7 +16,7 @@
                            CRRAutility_invP, CRRAutility_inv, combineIndepDstns,\
                            approxMeanOneLognormal
 from HARK.simulation import drawDiscrete, drawUniform
-from ConsIndShockModel import ConsumerSolution, IndShockConsumerType
+from .ConsIndShockModel import ConsumerSolution, IndShockConsumerType
 from HARK import HARKobject, Market, AgentType
 from copy import deepcopy
 import matplotlib.pyplot as plt
@@ -1750,7 +1753,7 @@
 ###############################################################################
 
 def demo():
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
     from time import clock
     from HARK.utilities import plotFuncs
     mystr = lambda number : "{:.4f}".format(number)
```

```diff
--- ./HARK/ConsumptionSaving/ConsAggShockModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsAggShockModel.py (refactored)
@@ -5,6 +5,9 @@
 used for solving "macroeconomic" models with aggregate shocks.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
 import numpy as np
 import scipy.stats as stats
 from HARK.interpolation import LinearInterp, LinearInterpOnInterp1D, ConstantFunction, IdentityFunction,\
@@ -13,7 +16,7 @@
                            CRRAutility_invP, CRRAutility_inv, combineIndepDstns,\
                            approxMeanOneLognormal
 from HARK.simulation import drawDiscrete, drawUniform
-from ConsIndShockModel import ConsumerSolution, IndShockConsumerType
+from .ConsIndShockModel import ConsumerSolution, IndShockConsumerType
 from HARK import HARKobject, Market, AgentType
 from copy import deepcopy
 import matplotlib.pyplot as plt
@@ -1750,7 +1753,7 @@
 ###############################################################################
 
 def demo():
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
     from time import clock
     from HARK.utilities import plotFuncs
     mystr = lambda number : "{:.4f}".format(number)
```

```diff
--- ./HARK/ConsumptionSaving/TractableBufferStockModel.py (original)
+++ ./HARK/ConsumptionSaving/TractableBufferStockModel.py (refactored)
@@ -19,6 +19,8 @@
 in the HARK framework, as shown below.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
 import numpy as np
 
 # Import the HARK library.  The assumption is that this code is in a folder
@@ -467,7 +469,7 @@
     # contained in the HARK folder.  Also import the ConsumptionSavingModel
     import numpy as np                   # numeric Python
     from HARK.utilities import plotFuncs # basic plotting tools
-    from ConsMarkovModel import MarkovConsumerType # An alternative, much longer way to solve the TBS model
+    from .ConsMarkovModel import MarkovConsumerType # An alternative, much longer way to solve the TBS model
     from time import clock               # timing utility
 
     do_simulation = True
```

```diff
--- ./HARK/ConsumptionSaving/TractableBufferStockModel.py (original)
+++ ./HARK/ConsumptionSaving/TractableBufferStockModel.py (refactored)
@@ -19,6 +19,8 @@
 in the HARK framework, as shown below.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
 import numpy as np
 
 # Import the HARK library.  The assumption is that this code is in a folder
@@ -467,7 +469,7 @@
     # contained in the HARK folder.  Also import the ConsumptionSavingModel
     import numpy as np                   # numeric Python
     from HARK.utilities import plotFuncs # basic plotting tools
-    from ConsMarkovModel import MarkovConsumerType # An alternative, much longer way to solve the TBS model
+    from .ConsMarkovModel import MarkovConsumerType # An alternative, much longer way to solve the TBS model
     from time import clock               # timing utility
 
     do_simulation = True
```

```diff
--- ./HARK/ConsumptionSaving/ConsMarkovModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsMarkovModel.py (refactored)
@@ -5,10 +5,12 @@
 distribution can vary with the discrete state.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import range
 from copy import deepcopy
 import numpy as np
-from ConsIndShockModel import ConsIndShockSolver, ValueFunc, MargValueFunc, ConsumerSolution, IndShockConsumerType
-from ConsAggShockModel import AggShockConsumerType
+from .ConsIndShockModel import ConsIndShockSolver, ValueFunc, MargValueFunc, ConsumerSolution, IndShockConsumerType
+from .ConsAggShockModel import AggShockConsumerType
 from HARK.utilities import combineIndepDstns, warnings  # Because of "patch" to warnings modules
 from HARK import Market, HARKobject
 from HARK.simulation import drawDiscrete, drawUniform
@@ -1308,7 +1310,7 @@
 
 if __name__ == '__main__':
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
     from HARK.utilities import plotFuncs
     from time import clock
     from copy import copy
```

```diff
--- ./HARK/ConsumptionSaving/ConsMarkovModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsMarkovModel.py (refactored)
@@ -5,10 +5,12 @@
 distribution can vary with the discrete state.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import range
 from copy import deepcopy
 import numpy as np
-from ConsIndShockModel import ConsIndShockSolver, ValueFunc, MargValueFunc, ConsumerSolution, IndShockConsumerType
-from ConsAggShockModel import AggShockConsumerType
+from .ConsIndShockModel import ConsIndShockSolver, ValueFunc, MargValueFunc, ConsumerSolution, IndShockConsumerType
+from .ConsAggShockModel import AggShockConsumerType
 from HARK.utilities import combineIndepDstns, warnings  # Because of "patch" to warnings modules
 from HARK import Market, HARKobject
 from HARK.simulation import drawDiscrete, drawUniform
@@ -1308,7 +1310,7 @@
 
 if __name__ == '__main__':
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
     from HARK.utilities import plotFuncs
     from time import clock
     from copy import copy
```

```diff
--- ./HARK/ConsumptionSaving/ConsRepAgentModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsRepAgentModel.py (refactored)
@@ -5,10 +5,13 @@
 time invariant or exist on a short cycle; models must be infinite horizon.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
 import numpy as np
 from HARK.interpolation import LinearInterp
 from HARK.simulation import drawUniform, drawDiscrete
-from ConsIndShockModel import IndShockConsumerType, ConsumerSolution, MargValueFunc
+from .ConsIndShockModel import IndShockConsumerType, ConsumerSolution, MargValueFunc
 
 def solveConsRepAgent(solution_next,DiscFac,CRRA,IncomeDstn,CapShare,DeprFac,PermGroFac,aXtraGrid):
     '''
@@ -328,7 +331,7 @@
     from copy import deepcopy
     from time import clock
     from HARK.utilities import plotFuncs
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
 
     # Make a quick example dictionary
     RA_params = deepcopy(Params.init_idiosyncratic_shocks)
```

```diff
--- ./HARK/ConsumptionSaving/ConsRepAgentModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsRepAgentModel.py (refactored)
@@ -5,10 +5,13 @@
 time invariant or exist on a short cycle; models must be infinite horizon.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
 import numpy as np
 from HARK.interpolation import LinearInterp
 from HARK.simulation import drawUniform, drawDiscrete
-from ConsIndShockModel import IndShockConsumerType, ConsumerSolution, MargValueFunc
+from .ConsIndShockModel import IndShockConsumerType, ConsumerSolution, MargValueFunc
 
 def solveConsRepAgent(solution_next,DiscFac,CRRA,IncomeDstn,CapShare,DeprFac,PermGroFac,aXtraGrid):
     '''
@@ -328,7 +331,7 @@
     from copy import deepcopy
     from time import clock
     from HARK.utilities import plotFuncs
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
 
     # Make a quick example dictionary
     RA_params = deepcopy(Params.init_idiosyncratic_shocks)
```

```diff
--- ./HARK/ConsumptionSaving/ConsGenIncProcessModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsGenIncProcessModel.py (refactored)
@@ -5,6 +5,9 @@
 and allows (log) persistent income to follow an AR1 process rather than random walk.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
 from copy import deepcopy
 import numpy as np
 from HARK import HARKobject
@@ -14,7 +17,7 @@
                            CRRAutility_invP, CRRAutility_inv, CRRAutilityP_invP,\
                            getPercentiles
 from HARK.simulation import drawLognormal, drawDiscrete, drawUniform
-from ConsIndShockModel import ConsIndShockSetup, ConsumerSolution, IndShockConsumerType
+from .ConsIndShockModel import ConsIndShockSetup, ConsumerSolution, IndShockConsumerType
 
 utility       = CRRAutility
 utilityP      = CRRAutilityP
@@ -1273,7 +1276,7 @@
 ###############################################################################
 
 if __name__ == '__main__':
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
     from HARK.utilities import plotFuncs
     from time import clock
     import matplotlib.pyplot as plt
```

```diff
--- ./HARK/ConsumptionSaving/ConsGenIncProcessModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsGenIncProcessModel.py (refactored)
@@ -5,6 +5,9 @@
 and allows (log) persistent income to follow an AR1 process rather than random walk.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
 from copy import deepcopy
 import numpy as np
 from HARK import HARKobject
@@ -14,7 +17,7 @@
                            CRRAutility_invP, CRRAutility_inv, CRRAutilityP_invP,\
                            getPercentiles
 from HARK.simulation import drawLognormal, drawDiscrete, drawUniform
-from ConsIndShockModel import ConsIndShockSetup, ConsumerSolution, IndShockConsumerType
+from .ConsIndShockModel import ConsIndShockSetup, ConsumerSolution, IndShockConsumerType
 
 utility       = CRRAutility
 utilityP      = CRRAutilityP
@@ -1273,7 +1276,7 @@
 ###############################################################################
 
 if __name__ == '__main__':
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
     from HARK.utilities import plotFuncs
     from time import clock
     import matplotlib.pyplot as plt
```

```diff
--- ./HARK/ConsumptionSaving/RepAgentModel.py (original)
+++ ./HARK/ConsumptionSaving/RepAgentModel.py (refactored)
@@ -5,10 +5,13 @@
 time invariant or exist on a short cycle.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
 import numpy as np
 from HARK.interpolation import LinearInterp
 from HARK.simulation import drawUniform, drawDiscrete
-from ConsIndShockModel import IndShockConsumerType, ConsumerSolution, MargValueFunc
+from .ConsIndShockModel import IndShockConsumerType, ConsumerSolution, MargValueFunc
 
 def solveConsRepAgent(solution_next,DiscFac,CRRA,IncomeDstn,CapShare,DeprFac,PermGroFac,aXtraGrid):
     '''
@@ -328,7 +331,7 @@
     from copy import deepcopy
     from time import clock
     from HARK.utilities import plotFuncs
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
 
     # Make a quick example dictionary
     RA_params = deepcopy(Params.init_idiosyncratic_shocks)
```

```diff
--- ./HARK/ConsumptionSaving/RepAgentModel.py (original)
+++ ./HARK/ConsumptionSaving/RepAgentModel.py (refactored)
@@ -5,10 +5,13 @@
 time invariant or exist on a short cycle.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
 import numpy as np
 from HARK.interpolation import LinearInterp
 from HARK.simulation import drawUniform, drawDiscrete
-from ConsIndShockModel import IndShockConsumerType, ConsumerSolution, MargValueFunc
+from .ConsIndShockModel import IndShockConsumerType, ConsumerSolution, MargValueFunc
 
 def solveConsRepAgent(solution_next,DiscFac,CRRA,IncomeDstn,CapShare,DeprFac,PermGroFac,aXtraGrid):
     '''
@@ -328,7 +331,7 @@
     from copy import deepcopy
     from time import clock
     from HARK.utilities import plotFuncs
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
 
     # Make a quick example dictionary
     RA_params = deepcopy(Params.init_idiosyncratic_shocks)
```

```diff
--- ./HARK/ConsumptionSaving/ConsPrefShockModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsPrefShockModel.py (refactored)
@@ -7,9 +7,12 @@
 by inheriting from multiple classes.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
 import numpy as np
 from HARK.utilities import approxMeanOneLognormal
-from ConsIndShockModel import IndShockConsumerType, ConsumerSolution, ConsIndShockSolver,\
+from .ConsIndShockModel import IndShockConsumerType, ConsumerSolution, ConsIndShockSolver,\
                               ValueFunc, MargValueFunc, KinkedRconsumerType, ConsKinkedRsolver
 from HARK.interpolation import LinearInterpOnInterp1D, LinearInterp, CubicInterp, LowerEnvelope
 
@@ -591,7 +594,7 @@
 ###############################################################################
 
 if __name__ == '__main__':
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
     import matplotlib.pyplot as plt
     from HARK.utilities import plotFuncs
     from time import clock
```

```diff
--- ./HARK/ConsumptionSaving/ConsPrefShockModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsPrefShockModel.py (refactored)
@@ -7,9 +7,12 @@
 by inheriting from multiple classes.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
 import numpy as np
 from HARK.utilities import approxMeanOneLognormal
-from ConsIndShockModel import IndShockConsumerType, ConsumerSolution, ConsIndShockSolver,\
+from .ConsIndShockModel import IndShockConsumerType, ConsumerSolution, ConsIndShockSolver,\
                               ValueFunc, MargValueFunc, KinkedRconsumerType, ConsKinkedRsolver
 from HARK.interpolation import LinearInterpOnInterp1D, LinearInterp, CubicInterp, LowerEnvelope
 
@@ -591,7 +594,7 @@
 ###############################################################################
 
 if __name__ == '__main__':
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
     import matplotlib.pyplot as plt
     from HARK.utilities import plotFuncs
     from time import clock
```

RefactoringTool: No changes to ./HARK/ConsumptionSaving/ConsumerParameters.py

```text
futurize ./HARK/ConsumptionSaving/__init__.py >> ~/workspace/HARK-1.wiki/futurize_notes_round_2.md
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No files need to be modified.
```

```diff
--- ./HARK/ConsumptionSaving/ConsMedModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsMedModel.py (refactored)
@@ -1,7 +1,13 @@
 '''
 Consumption-saving models that also include medical spending.
 '''
+from __future__ import division
+from __future__ import print_function
+from __future__ import absolute_import
+
+from builtins import str
+from builtins import range
+from past.utils import old_div
 import numpy as np
 from scipy.optimize import brentq
 from HARK import HARKobject
@@ -9,11 +15,11 @@
                            CRRAutility, CRRAutility_inv, CRRAutility_invP, CRRAutilityPP,\
                            makeGridExpMult, NullFunc
 from HARK.simulation import drawLognormal
-from ConsIndShockModel import ConsumerSolution
+from .ConsIndShockModel import ConsumerSolution
 from HARK.interpolation import BilinearInterpOnInterp1D, TrilinearInterp, BilinearInterp, CubicInterp,\
                                LinearInterp, LowerEnvelope3D, UpperEnvelope, LinearInterpOnInterp1D,\
                                VariableLowerBoundFunc3D
-from ConsGenIncProcessModel import ConsGenIncProcessSolver, PersistentShockConsumerType,\
+from .ConsGenIncProcessModel import ConsGenIncProcessSolver, PersistentShockConsumerType,\
                                    ValueFunc2D, MargValueFunc2D, MargMargValueFunc2D,\
                                    VariableLowerBoundFunc2D
 from copy import deepcopy
@@ -79,23 +85,23 @@
             elif MedShk == 0: # All consumption when MedShk = 0
                 cLvl = xLvl
             else:
-                optMedZeroFunc = lambda c : (MedShk/MedPrice)**(-1.0/CRRAcon)*\
-                                 ((xLvl-c)/MedPrice)**(CRRAmed/CRRAcon) - c
+                optMedZeroFunc = lambda c : (old_div(MedShk,MedPrice))**(old_div(-1.0,CRRAcon))*\
+                                 (old_div((xLvl-c),MedPrice))**(old_div(CRRAmed,CRRAcon)) - c
                 cLvl = brentq(optMedZeroFunc,0.0,xLvl) # Find solution to FOC
             cLvlGrid[i,j] = cLvl
 
     # Construct the consumption function and medical care function
     if xLvlCubicBool:
         if MedShkCubicBool:
-            raise NotImplementedError(), 'Bicubic interpolation not yet implemented'
+            raise NotImplementedError()('Bicubic interpolation not yet implemented')
         else:
             xLvlGrid_tiled   = np.tile(np.reshape(xLvlGrid,(xLvlGrid.size,1)),
                                        (1,MedShkGrid.size))
             MedShkGrid_tiled = np.tile(np.reshape(MedShkGrid,(1,MedShkGrid.size)),
                                        (xLvlGrid.size,1))
-            dfdx = (CRRAmed/(CRRAcon*MedPrice))*(MedShkGrid_tiled/MedPrice)**(-1.0/CRRAcon)*\
-                   ((xLvlGrid_tiled - cLvlGrid)/MedPrice)**(CRRAmed/CRRAcon - 1.0)
-            dcdx = dfdx/(dfdx + 1.0)
+            dfdx = (old_div(CRRAmed,(CRRAcon*MedPrice)))*(old_div(MedShkGrid_tiled,MedPrice))**(old_div(-1.0,CRRAcon))*\
+                   (old_div((xLvlGrid_tiled - cLvlGrid),MedPrice))**(old_div(CRRAmed,CRRAcon) - 1.0)
+            dcdx = old_div(dfdx,(dfdx + 1.0))
             dcdx[0,:] = dcdx[1,:] # approximation; function goes crazy otherwise
             dcdx[:,0] = 1.0 # no Med when MedShk=0, so all x is c
             cFromxFunc_by_MedShk = []
@@ -129,7 +135,7 @@
         '''
         xLvl = self.xFunc(mLvl,pLvl,MedShk)
         cLvl = self.cFunc(xLvl,MedShk)
-        Med  = (xLvl-cLvl)/self.MedPrice
+        Med  = old_div((xLvl-cLvl),self.MedPrice)
         return cLvl,Med
 
     def derivativeX(self,mLvl,pLvl,MedShk):
@@ -160,7 +166,7 @@
         dxdm = self.xFunc.derivativeX(mLvl,pLvl,MedShk)
         dcdx = self.cFunc.derivativeX(xLvl,MedShk)
         dcdm = dxdm*dcdx
-        dMeddm = (dxdm - dcdm)/self.MedPrice
+        dMeddm = old_div((dxdm - dcdm),self.MedPrice)
         return dcdm,dMeddm
 
     def derivativeY(self,mLvl,pLvl,MedShk):
@@ -191,7 +197,7 @@
         dxdp = self.xFunc.derivativeY(mLvl,pLvl,MedShk)
         dcdx = self.cFunc.derivativeX(xLvl,MedShk)
         dcdp = dxdp*dcdx
-        dMeddp = (dxdp - dcdp)/self.MedPrice
+        dMeddp = old_div((dxdp - dcdp),self.MedPrice)
         return dcdp,dMeddp
 
     def derivativeZ(self,mLvl,pLvl,MedShk):
@@ -222,7 +228,7 @@
         dxdShk = self.xFunc.derivativeZ(mLvl,pLvl,MedShk)
         dcdx = self.cFunc.derivativeX(xLvl,MedShk)
         dcdShk = dxdShk*dcdx + self.cFunc.derivativeY(xLvl,MedShk)
-        dMeddShk = (dxdShk - dcdShk)/self.MedPrice
+        dMeddShk = old_div((dxdShk - dcdShk),self.MedPrice)
         return dcdShk,dMeddShk
 
@@ -407,7 +413,7 @@
         Optimal medical care for each point in (xLvl,MedShk).
         '''
         xLvl = self.xFunc(mLvl,pLvl,MedShk)
-        Med  = (xLvl-self.cFunc(xLvl,MedShk))/self.MedPrice
+        Med  = old_div((xLvl-self.cFunc(xLvl,MedShk)),self.MedPrice)
         return Med
 
     def derivativeX(self,mLvl,pLvl,MedShk):
@@ -438,7 +444,7 @@
         dxdm = self.xFunc.derivativeX(mLvl,pLvl,MedShk)
         dcdx = self.cFunc.derivativeX(xLvl,MedShk)
         dcdm = dxdm*dcdx
-        dMeddm = (dxdm - dcdm)/self.MedPrice
+        dMeddm = old_div((dxdm - dcdm),self.MedPrice)
         return dcdm,dMeddm
 
     def derivativeY(self,mLvl,pLvl,MedShk):
@@ -464,7 +470,7 @@
         '''
         xLvl = self.xFunc(mLvl,pLvl,MedShk)
         dxdp = self.xFunc.derivativeY(mLvl,pLvl,MedShk)
-        dMeddp = (dxdp - dxdp*self.cFunc.derivativeX(xLvl,MedShk))/self.MedPrice
+        dMeddp = old_div((dxdp - dxdp*self.cFunc.derivativeX(xLvl,MedShk)),self.MedPrice)
         return dMeddp
 
     def derivativeZ(self,mLvl,pLvl,MedShk):
@@ -492,7 +498,7 @@
         dxdShk = self.xFunc.derivativeZ(mLvl,pLvl,MedShk)
         dcdx = self.cFunc.derivativeX(xLvl,MedShk)
         dcdShk = dxdShk*dcdx + self.cFunc.derivativeY(xLvl,MedShk)
-        dMeddShk = (dxdShk - dcdShk)/self.MedPrice
+        dMeddShk = old_div((dxdShk - dcdShk),self.MedPrice)
         return dMeddShk
 
@@ -629,7 +635,7 @@
     vP_expected = np.sum(vPgrid*PrbGrid,axis=1)
 
     # Construct the marginal (marginal) value function for the terminal period
-    vPnvrs = vP_expected**(-1.0/self.CRRA)
+    vPnvrs = vP_expected**(old_div(-1.0,self.CRRA))
     vPnvrs[0] = 0.0
     vPnvrsFunc = BilinearInterp(np.tile(np.reshape(vPnvrs,(vPnvrs.size,1)),
                  (1,trivial_grid.size)),mLvlGrid,trivial_grid)
@@ -892,7 +898,7 @@
     # Find minimum allowable end-of-period assets at each permanent income level
     PermIncMinNext = self.PermShkMinNext*self.pLvlNextFunc(self.pLvlGrid)
     IncLvlMinNext  = PermIncMinNext*self.TranShkMinNext
-    aLvlMin = (self.solution_next.mLvlMin(PermIncMinNext) - IncLvlMinNext)/self.Rfree
+    aLvlMin = old_div((self.solution_next.mLvlMin(PermIncMinNext) - IncLvlMinNext),self.Rfree)
 
     # Make a function for the natural borrowing constraint by permanent income
     BoroCnstNat = LinearInterp(np.insert(self.pLvlGrid,0,0.0),np.insert(aLvlMin,0,0.0))
@@ -944,7 +950,7 @@
     cLvlNow = np.tile(np.reshape(self.uPinv(EndOfPrdvP),(1,pCount,mCount)),(MedCount,1,1))
     MedBaseNow = np.tile(np.reshape(self.uMedPinv(self.MedPrice*EndOfPrdvP),(1,pCount,mCount)),
                          (MedCount,1,1))
-    MedShkVals_tiled = np.tile(np.reshape(self.MedShkVals**(1.0/self.CRRAmed),(MedCount,1,1)),
+    MedShkVals_tiled = np.tile(np.reshape(self.MedShkVals**(old_div(1.0,self.CRRAmed)),(MedCount,1,1)),
                                (1,pCount,mCount))
     MedLvlNow = MedShkVals_tiled*MedBaseNow
     aLvlNow_tiled = np.tile(np.reshape(aLvlNow,(1,pCount,mCount)),(MedCount,1,1))
@@ -1175,11 +1181,11 @@
     EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree*np.sum(self.vPPfuncNext(self.mLvlNext,
                   self.pLvlNext)*self.ShkPrbs_temp,axis=0)
     EndOfPrdvPP = np.tile(np.reshape(EndOfPrdvPP,(1,pCount,EndOfPrdvPP.shape[1])),(MedCount,1,1))
-    dcda        = EndOfPrdvPP/self.uPP(np.array(self.cLvlNow))
-    dMedda      = EndOfPrdvPP/(self.MedShkVals_tiled*self.uMedPP(self.MedLvlNow))
+    dcda        = old_div(EndOfPrdvPP,self.uPP(np.array(self.cLvlNow)))
+    dMedda      = old_div(EndOfPrdvPP,(self.MedShkVals_tiled*self.uMedPP(self.MedLvlNow)))
     dMedda[0,:,:] = 0.0 # dMedda goes crazy when MedShk=0
-    MPC         = dcda/(1.0 + dcda + self.MedPrice*dMedda)
-    MPM         = dMedda/(1.0 + dcda + self.MedPrice*dMedda)
+    MPC         = old_div(dcda,(1.0 + dcda + self.MedPrice*dMedda))
+    MPM         = old_div(dMedda,(1.0 + dcda + self.MedPrice*dMedda))
 
     # Convert to marginal propensity to spend
     MPX = MPC + self.MedPrice*MPM
@@ -1356,7 +1362,7 @@
 ###############################################################################
 
 if __name__ == '__main__':
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
     from HARK.utilities import CRRAutility_inv
     from time import clock
     import matplotlib.pyplot as plt
@@ -1407,7 +1413,7 @@
         pLvl = MedicalExample.pLvlGrid[0][p]
         M_temp = pLvl*M + MedicalExample.solution[0].mLvlMin(pLvl)
         P = pLvl*np.ones_like(M)
-        vP = MedicalExample.solution[0].vPfunc(M_temp,P)**(-1.0/MedicalExample.CRRA)
+        vP = MedicalExample.solution[0].vPfunc(M_temp,P)**(old_div(-1.0,MedicalExample.CRRA))
         plt.plot(M_temp,vP)
     print('Marginal value function (pseudo inverse)')
     plt.show()
```

```diff
--- ./HARK/ConsumptionSaving/ConsMedModel.py (original)
+++ ./HARK/ConsumptionSaving/ConsMedModel.py (refactored)
@@ -2,6 +2,9 @@
 Consumption-saving models that also include medical spending.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
 import numpy as np
 from scipy.optimize import brentq
 from HARK import HARKobject
@@ -9,11 +12,11 @@
                            CRRAutility, CRRAutility_inv, CRRAutility_invP, CRRAutilityPP,\
                            makeGridExpMult, NullFunc
 from HARK.simulation import drawLognormal
-from ConsIndShockModel import ConsumerSolution
+from .ConsIndShockModel import ConsumerSolution
 from HARK.interpolation import BilinearInterpOnInterp1D, TrilinearInterp, BilinearInterp, CubicInterp,\
                                LinearInterp, LowerEnvelope3D, UpperEnvelope, LinearInterpOnInterp1D,\
                                VariableLowerBoundFunc3D
-from ConsGenIncProcessModel import ConsGenIncProcessSolver, PersistentShockConsumerType,\
+from .ConsGenIncProcessModel import ConsGenIncProcessSolver, PersistentShockConsumerType,\
                                    ValueFunc2D, MargValueFunc2D, MargMargValueFunc2D,\
                                    VariableLowerBoundFunc2D
 from copy import deepcopy
@@ -87,7 +90,7 @@
     # Construct the consumption function and medical care function
     if xLvlCubicBool:
         if MedShkCubicBool:
-            raise NotImplementedError(), 'Bicubic interpolation not yet implemented'
+            raise NotImplementedError()('Bicubic interpolation not yet implemented')
         else:
             xLvlGrid_tiled   = np.tile(np.reshape(xLvlGrid,(xLvlGrid.size,1)),
                                        (1,MedShkGrid.size))
@@ -1356,7 +1359,7 @@
 ###############################################################################
 
 if __name__ == '__main__':
-    import ConsumerParameters as Params
+    from . import ConsumerParameters as Params
     from HARK.utilities import CRRAutility_inv
     from time import clock
     import matplotlib.pyplot as plt
```
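
Two things stand out in the ConsMedModel output. First, the `old_div` calls appear only in the first run: the original file had no `from __future__ import division`, so futurize wrapped every `/` it could not prove safe; once that import is present (as in the second run) the wrapping is unnecessary. Second, the fixer's `raise NotImplementedError()('Bicubic interpolation not yet implemented')` looks wrong, since it calls the exception instance and would raise a `TypeError` instead; the intended form is presumably `raise NotImplementedError('Bicubic interpolation not yet implemented')`. A small sketch of the division point, assuming the `future` package (which provides `past.utils`) is installed:

```python
from __future__ import division      # '/' is true division on both interpreters
from past.utils import old_div

# Without the __future__ import, Python 2 evaluates 1/2 as 0 (floor division for ints).
# futurize inserts old_div wherever it cannot prove the old semantics are not relied on.
print(old_div(1, 2))    # 0   -- preserves the Python 2 integer-division result
print(1 / 2)            # 0.5 -- true division, which is what the ratios in this model want
print(1 // 2)           # 0   -- explicit floor division, the usual manual replacement
```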

```diff
--- ./HARK/ConsumptionSaving/Demos/Fagereng_demo.py (original)
+++ ./HARK/ConsumptionSaving/Demos/Fagereng_demo.py (refactored)
@@ -33,6 +33,8 @@
 because this target tends to push the estimate around a bit.
 '''
 from __future__ import division, print_function
+from builtins import str
+from builtins import range
 import numpy as np
 from copy import deepcopy
```

```diff
--- ./HARK/ConsumptionSaving/Demos/Fagereng_demo.py (original)
+++ ./HARK/ConsumptionSaving/Demos/Fagereng_demo.py (refactored)
@@ -33,6 +33,8 @@
 because this target tends to push the estimate around a bit.
 '''
 from __future__ import division, print_function
+from builtins import str
+from builtins import range
 import numpy as np
 from copy import deepcopy
```
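
The demo files above only pick up the `builtins` shims. On Python 3 `from builtins import str, range` is effectively a no-op (the names alias the real built-ins); on Python 2 they come from the `future` package and backport Python 3 semantics. A quick sketch, assuming the `future` package is installed when run under Python 2:

```python
from builtins import str, range   # stdlib module on Python 3; backports on Python 2

s = str(42)
print(type(s))         # Python 3: <class 'str'>; Python 2: a unicode-compatible 'newstr'
print(s + ' units')    # text semantics match Python 3 on both interpreters

r = range(5)
print(r[2])            # indexing works; the object is lazy, like Python 3's range
print(list(r))         # [0, 1, 2, 3, 4]
```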

RefactoringTool: No changes to ./HARK/ConsumptionSaving/Demos/MPC_credit_vs_MPC_income.py

```diff
--- ./HARK/ConsumptionSaving/Demos/Chinese_Growth.py (original)
+++ ./HARK/ConsumptionSaving/Demos/Chinese_Growth.py (refactored)
@@ -34,6 +34,8 @@
 # First bring in default parameter values from cstwPMC.  We will change these as necessary.
 
 # Now, bring in what we need from the cstwMPC parameters
+from builtins import str
+from builtins import range
 import HARK.cstwMPC.SetupParamsCSTW as cstwParams
 
 # Initialize the cstwMPC parameters
```

```diff
--- ./HARK/ConsumptionSaving/Demos/Chinese_Growth.py (original)
+++ ./HARK/ConsumptionSaving/Demos/Chinese_Growth.py (refactored)
@@ -34,6 +34,8 @@
 # First bring in default parameter values from cstwPMC.  We will change these as necessary.
 
 # Now, bring in what we need from the cstwMPC parameters
+from builtins import str
+from builtins import range
 import HARK.cstwMPC.SetupParamsCSTW as cstwParams
 
 # Initialize the cstwMPC parameters
```

```diff
--- ./HARK/ConsumptionSaving/Demos/NonDurables_During_Great_Recession.py (original)
+++ ./HARK/ConsumptionSaving/Demos/NonDurables_During_Great_Recession.py (refactored)
@@ -23,6 +23,8 @@
 # Import some things from cstwMPC
 
 from __future__ import division, print_function
+from builtins import str
+from builtins import range
 import numpy as np
 from copy import deepcopy
```

```diff
--- ./HARK/ConsumptionSaving/Demos/NonDurables_During_Great_Recession.py (original)
+++ ./HARK/ConsumptionSaving/Demos/NonDurables_During_Great_Recession.py (refactored)
@@ -23,6 +23,8 @@
 # Import some things from cstwMPC
 
 from __future__ import division, print_function
+from builtins import str
+from builtins import range
 import numpy as np
 from copy import deepcopy
```

```diff
--- ./HARK/ConsumptionSaving/ConsIndShockModelDemos/Try-Alternative-Parameter-Values.py (original)
+++ ./HARK/ConsumptionSaving/ConsIndShockModelDemos/Try-Alternative-Parameter-Values.py (refactored)
@@ -5,6 +5,8 @@
 @author: [email protected]
 """
 from __future__ import division, print_statement
+from builtins import str
+from builtins import range
 import matplotlib.pyplot    # the plotting tools
 
 xPoints=100 # number of points at which to sample a function when plotting it using pylab
```
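Side note on the diff above, unrelated to futurize: the file's existing `from __future__ import division, print_statement` looks wrong, since `print_statement` is not a `__future__` feature and should make the module fail to compile on either Python version. Presumably it was meant to be:

```python
# Likely intended import; 'print_statement' is not a valid __future__ feature and
# raises "SyntaxError: future feature print_statement is not defined" on import.
from __future__ import division, print_function
```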

```diff
--- ./HARK/FashionVictim/FashionVictimParams.py (original)
+++ ./HARK/FashionVictim/FashionVictimParams.py (refactored)
@@ -1,6 +1,7 @@
 '''
 Defines some default parameters for the fashion victim model.
 '''
+from __future__ import print_function
 
 DiscFac = 0.95   # Intertemporal discount factor
 uParamA = 1.0    # Parameter A in the utility function (pdf of the beta distribution)
```

```diff
--- ./HARK/FashionVictim/FashionVictimModel.py (original)
+++ ./HARK/FashionVictim/FashionVictimModel.py (refactored)
@@ -5,13 +5,17 @@
 preferences each style), and pay switching costs if they change.
 '''
 from __future__ import print_function, division
+from __future__ import absolute_import
+
+from builtins import str
+from builtins import range
+from builtins import object
 from HARK import AgentType, Solution, NullFunc
 from HARK.interpolation import LinearInterp
 from HARK.utilities import approxUniform, plotFuncs
 import numpy as np
 import scipy.stats as stats
-import FashionVictimParams as Params
+from . import FashionVictimParams as Params
 from copy import copy
 
 class FashionSolution(Solution):
@@ -86,7 +90,7 @@
         self.distance_criteria = ['pNextSlope','pNextWidth','pNextIntercept']
 
-class FashionMarketInfo():
+class FashionMarketInfo(object):
     '''
     A class for representing the current distribution of styles in the population.
     '''
```

-class FashionMarketInfo(): +class FashionMarketInfo(object): ''' A class for representing the current distribution of styles in the population. ''' --- ./HARK/SolvingMicroDSOPs/SetupSCFdata.py (original) +++ ./HARK/SolvingMicroDSOPs/SetupSCFdata.py (refactored) @@ -2,12 +2,14 @@ Sets up the SCF data for use in the SolvingMicroDSOPs estimation. ''' from future import division # Use new division function +from future import print_function +from future import absolute_import import os

The following libraries are part of the standard python distribution

import numpy as np # Numerical Python import csv # Comma-separated variable reader -from EstimationParameters import initial_age, empirical_cohort_age_groups +from .EstimationParameters import initial_age, empirical_cohort_age_groups

Libraries below are part of HARK's module system and must be in this directory

from HARK.utilities import warnings @@ -18,7 +20,7 @@

Open the file handle and create a reader object and a csv header

infile = open(scf_data_path + '/SCFdata.csv', 'rb') csv_reader = csv.reader(infile) -data_csv_header = csv_reader.next() +data_csv_header = next(csv_reader)

Pull the column index from the data_csv_header

data_column_index = data_csv_header.index('wealth_income_ratio') # scf_w_col --- ./HARK/SolvingMicroDSOPs/SetupSCFdata.py (original) +++ ./HARK/SolvingMicroDSOPs/SetupSCFdata.py (refactored) @@ -2,12 +2,14 @@ Sets up the SCF data for use in the SolvingMicroDSOPs estimation. ''' from future import division # Use new division function +from future import print_function +from future import absolute_import import os

The following libraries are part of the standard python distribution

import numpy as np # Numerical Python import csv # Comma-separated variable reader -from EstimationParameters import initial_age, empirical_cohort_age_groups +from .EstimationParameters import initial_age, empirical_cohort_age_groups

Libraries below are part of HARK's module system and must be in this directory

from HARK.utilities import warnings @@ -18,7 +20,7 @@

Open the file handle and create a reader object and a csv header

infile = open(scf_data_path + '/SCFdata.csv', 'rb') csv_reader = csv.reader(infile) -data_csv_header = csv_reader.next() +data_csv_header = next(csv_reader)

Pull the column index from the data_csv_header
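The `csv_reader.next()` → `next(csv_reader)` change is fine, but futurize leaves the `open(..., 'rb')` alone, and on Python 3 `csv.reader` wants a text-mode file (binary mode raises "iterator should return strings, not bytes"). A sketch of the hand fix, branching on the interpreter while we still care about 2.7 (`scf_data_path` is the path variable already defined in SetupSCFdata.py; the placeholder here is just to make the snippet stand alone):

```python
import csv
import sys

scf_data_path = '.'   # placeholder; SetupSCFdata.py defines this earlier in the module

# Python 3: csv.reader needs text mode (newline='' per the csv docs);
# Python 2: keep the old 'rb' idiom.
if sys.version_info[0] >= 3:
    infile = open(scf_data_path + '/SCFdata.csv', 'r', newline='')
else:
    infile = open(scf_data_path + '/SCFdata.csv', 'rb')
csv_reader = csv.reader(infile)
data_csv_header = next(csv_reader)
```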

```diff
--- ./HARK/SolvingMicroDSOPs/EstimationParameters.py (original)
+++ ./HARK/SolvingMicroDSOPs/EstimationParameters.py (refactored)
@@ -2,6 +2,7 @@
 Specifies the full set of calibrated values required to estimate the SolvingMicroDSOPs
 model.  The empirical data is stored in a separate csv file and is loaded in SetupSCFdata.
 '''
+from __future__ import print_function
 
 # ---------------------------------------------------------------------------------
 # - Define all of the model parameters for SolvingMicroDSOPs and ConsumerExamples -
```

```diff
--- ./HARK/SolvingMicroDSOPs/StructEstimation.py (original)
+++ ./HARK/SolvingMicroDSOPs/StructEstimation.py (refactored)
@@ -8,10 +8,14 @@
 consumption-saving model with idiosyncratic shocks to permanent and transitory
 income as defined in ConsIndShockModel.
 '''
 
-import EstimationParameters as Params           # Parameters for the consumer type and the estimation
+from __future__ import print_function
+from __future__ import absolute_import
+
+from builtins import str
+from builtins import range
+from . import EstimationParameters as Params    # Parameters for the consumer type and the estimation
 import HARK.ConsumptionSaving.ConsIndShockModel as Model # The consumption-saving micro model
-import SetupSCFdata as Data                     # SCF 2004 data on household wealth
+from . import SetupSCFdata as Data              # SCF 2004 data on household wealth
 from HARK.simulation import drawDiscrete        # Method for sampling from a discrete distribution
 from HARK.estimation import minimizeNelderMead, bootstrapSampleFromData # Estimation methods
 import numpy as np                              # Numeric Python
```
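The consequence of all these new explicit relative imports (`from . import EstimationParameters as Params`, `from .ConsIndShockModel import ...`, etc.) that matters for testing: a module with relative imports can no longer be run as a plain script from inside its own directory, because it executes as `__main__` with no parent package. It has to be imported, or run with `-m` from the repository root. Roughly (module path assumed from the directory layout above):

```python
# Works: the package context gives the relative imports something to resolve against.
#   $ python -m HARK.SolvingMicroDSOPs.StructEstimation      (run from the repo root)
# or, from an interpreter where HARK is on sys.path / installed:
import HARK.SolvingMicroDSOPs.StructEstimation

# Fails: run as a plain script ("cd HARK/SolvingMicroDSOPs && python StructEstimation.py")
# the "from . import EstimationParameters as Params" line raises an import error,
# since there is no parent package in that case.
```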

```diff
--- ./HARK/cstwMPC/cstwMPC.py (original)
+++ ./HARK/cstwMPC/cstwMPC.py (refactored)
@@ -4,6 +4,8 @@
 from __future__ import division, print_function
 from __future__ import absolute_import
 
+from builtins import str
+from builtins import range
 import numpy as np
 from copy import copy, deepcopy
 from time import clock
```

```diff
--- ./HARK/cstwMPC/MakeCSTWfigs.py (original)
+++ ./HARK/cstwMPC/MakeCSTWfigs.py (refactored)
@@ -3,6 +3,7 @@
 of the model have been estimated, with the results stored in ./Results.
 '''
 
+from builtins import range
 import matplotlib.pyplot as plt
 import csv
 import numpy as np
@@ -74,7 +75,7 @@
 kappa_mean_age = np.array(kappa_mean_age)
 kappa_lo_beta_age = np.array(kappa_lo_beta_age)
 kappa_hi_beta_age = np.array(kappa_hi_beta_age)
-age_list = np.array(range(len(kappa_mean_age)),dtype=float)*0.25 + 24.0
+age_list = np.array(list(range(len(kappa_mean_age))),dtype=float)*0.25 + 24.0
 f.close()
 
 f = open('./Results/LC_KYbyBeta.txt','r')
```
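Minor cleanup opportunity in MakeCSTWfigs.py rather than a correctness problem: the `list(range(...))` wrapper is needed because `range` is lazy in Python 3, but since the result goes straight into `np.array`, `np.arange` does the same job without building the intermediate list. A possible hand edit (the placeholder array is just to make the snippet self-contained):

```python
import numpy as np

kappa_mean_age = np.zeros(160)   # placeholder; in MakeCSTWfigs.py this is read from ./Results

# Same result as np.array(list(range(len(kappa_mean_age))), dtype=float)*0.25 + 24.0,
# but without the temporary Python list:
age_list = np.arange(len(kappa_mean_age), dtype=float)*0.25 + 24.0
```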

```diff
--- ./HARK/cstwMPC/SetupParamsCSTW.py (original)
+++ ./HARK/cstwMPC/SetupParamsCSTW.py (refactored)
@@ -4,6 +4,7 @@
 from __future__ import division, print_function
 
+from builtins import range
 import numpy as np
 import csv
 from copy import deepcopy
```

```diff
--- ./HARK/cstwMPC/MakeCSTWfigsForSlides.py (original)
+++ ./HARK/cstwMPC/MakeCSTWfigsForSlides.py (refactored)
@@ -4,8 +4,11 @@
 imports cstwMPC; there's no need to actually do anything but load the model.
 '''
 from __future__ import division, print_function
+from __future__ import absolute_import
 
-from cstwMPC import *
+from builtins import str
+from builtins import range
+from .cstwMPC import *
 import matplotlib.pyplot as plt
 
 plot_range = (0.0,30.0)
```

```diff
--- ./HARK/cAndCwithStickyE/StickyEmodel.py (original)
+++ ./HARK/cAndCwithStickyE/StickyEmodel.py (refactored)
@@ -17,6 +17,7 @@
 Calibrated parameters for each type are found in StickyEparams.
 '''
 from __future__ import division
+from builtins import range
 import numpy as np
 from ConsAggShockModel import AggShockConsumerType, AggShockMarkovConsumerType, CobbDouglasEconomy, CobbDouglasMarkovEconomy
 from RepAgentModel import RepAgentConsumerType, RepAgentMarkovConsumerType
```

```diff
--- ./HARK/cAndCwithStickyE/StickyE_NO_MARKOV.py (original)
+++ ./HARK/cAndCwithStickyE/StickyE_NO_MARKOV.py (refactored)
@@ -10,16 +10,20 @@
 the ./Tables directory.  See StickyEparams for calibrated model parameters.
 '''
 from __future__ import division
+from __future__ import print_function
+from __future__ import absolute_import
+
+from builtins import str
+from builtins import range
 import numpy as np
 from time import clock
 from copy import deepcopy
-from StickyEmodel import StickyEconsumerType, StickyErepAgent, StickyCobbDouglasEconomy
+from .StickyEmodel import StickyEconsumerType, StickyErepAgent, StickyCobbDouglasEconomy
 from HARK.ConsumptionSaving.ConsAggShockModel import SmallOpenEconomy
 from HARK.utilities import plotFuncs
 import matplotlib.pyplot as plt
-import StickyEparams as Params
-from StickyEtools import makeStickyEdataFile, runStickyEregressions, makeResultsTable, \
+from . import StickyEparams as Params
+from .StickyEtools import makeStickyEdataFile, runStickyEregressions, makeResultsTable, \
                          runStickyEregressionsInStata, makeParameterTable, makeEquilibriumTable, \
                          makeMicroRegressionTable, extractSampleMicroData, makeuCostVsPiFig
```

```diff
--- ./HARK/cAndCwithStickyE/StickyE_MAIN.py (original)
+++ ./HARK/cAndCwithStickyE/StickyE_MAIN.py (refactored)
@@ -6,17 +6,21 @@
 the tables directory.  See StickyEparams for calibrated model parameters.
 '''
 from __future__ import division
+from __future__ import print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
 import os
 import numpy as np
 import csv
 from time import clock
 from copy import deepcopy
-from StickyEmodel import StickyEmarkovConsumerType, StickyEmarkovRepAgent, StickyCobbDouglasMarkovEconomy
+from .StickyEmodel import StickyEmarkovConsumerType, StickyEmarkovRepAgent, StickyCobbDouglasMarkovEconomy
 from HARK.ConsumptionSaving.ConsAggShockModel import SmallOpenMarkovEconomy
 from HARK.utilities import plotFuncs
 import matplotlib.pyplot as plt
-import StickyEparams as Params
-from StickyEtools import makeStickyEdataFile, runStickyEregressions, makeResultsTable, \
+from . import StickyEparams as Params
+from .StickyEtools import makeStickyEdataFile, runStickyEregressions, makeResultsTable, \
                          runStickyEregressionsInStata, makeParameterTable, makeEquilibriumTable, \
                          makeMicroRegressionTable, extractSampleMicroData, makeuCostVsPiFig, \
                          makeValueVsAggShkVarFig, makeValueVsPiFig
```
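Not a futurize issue, but worth noting while touching these files: several of them (cstwMPC.py, the StickyE scripts, ConsMedModel's `__main__` block) rely on `from time import clock`, and `time.clock()` is deprecated as of Python 3.3 (and slated for removal), so futurize leaves a time bomb there. Eventually the timing calls will need to move to something like:

```python
# Sketch of a replacement for the time.clock() timing pattern used in these scripts;
# time() gives wall-clock time, and time.process_time() (Python 3.3+) is the CPU-time analogue.
from time import time

t_start = time()
# ... solve / simulate the model ...
t_end = time()
print('That took ' + str(t_end - t_start) + ' seconds.')
```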

```diff
--- ./HARK/cAndCwithStickyE/StickyEtools.py (original)
+++ ./HARK/cAndCwithStickyE/StickyEtools.py (refactored)
@@ -2,7 +2,10 @@
 This module holds some data tools used in the cAndCwithStickyE project.
 """
 from __future__ import division
+from __future__ import absolute_import
+
+from builtins import str
+from builtins import range
 import os
 import csv
 import numpy as np
@@ -14,7 +17,7 @@
 import subprocess
 from HARK.utilities import CRRAutility
 from HARK.interpolation import LinearInterp
-from StickyEparams import results_dir, tables_dir, figures_dir, UpdatePrb, PermShkAggVar
+from .StickyEparams import results_dir, tables_dir, figures_dir, UpdatePrb, PermShkAggVar
 UpdatePrbBase = UpdatePrb
 PermShkAggVarBase = PermShkAggVar
```

```diff
--- ./HARK/cAndCwithStickyE/StickyEparams.py (original)
+++ ./HARK/cAndCwithStickyE/StickyEparams.py (refactored)
@@ -13,6 +13,7 @@
 the Market instance as well as the consumers themselves.  All parameters are quarterly.
 '''
 from __future__ import division
+from builtins import range
 import numpy as np
 from copy import copy
 from HARK.utilities import approxUniform
```

RefactoringTool: No changes to ./Testing/MultithreadDemo.py

```diff
--- ./Testing/ModelTesting.py (original)
+++ ./Testing/ModelTesting.py (refactored)
@@ -74,7 +74,7 @@
         dict_of_min_max_and_N = {key:(value-self._multiplier*value,    # the min
                                       value+self._multiplier*value,    # the max
                                       self.N_param_values_in_range)    # number of param values to try
-                                     for key,value in self._base_primitives.iteritems()}
+                                     for key,value in self._base_primitives.items()}
 
         N_combinations = self.N_param_values_in_range**len(self._base_primitives)
 
@@ -103,7 +103,7 @@
         parameterLists = []
         keyOrder = []
         parametersToTest = []
-        for key,value in self.dict_of_min_max_and_N.items():
+        for key,value in list(self.dict_of_min_max_and_N.items()):
            parameterRange = np.linspace(*value)
            parameterLists.append(parameterRange)
            keyOrder.append(key)
```

RefactoringTool: No files need to be modified.

RefactoringTool: No changes to ./Testing/Comparison_UnitTests.py

RefactoringTool: No changes to ./Testing/TractableBufferStockModel_UnitTests.py
