/*
* Created on Aug 16, 2004
*
* $Log: MarkovDecisionProcess.java,v $
* Revision 1.4 2004/08/25 02:19:35 bh
* Added a few getters.
* Added transit(), getRandomState(), getCoincideState().
*
* Revision 1.3 2004/08/24 01:12:26 bh
* Changed getNextState and getStartState. Because the serialization has
* been done in compileStates, those two functions simply return the right
* states in the vector.
*
* Source clean.
*
* Revision 1.2 2004/08/23 15:47:00 bh
* Major redesign of transition model.
*
* Revision 1.1 2004/08/19 03:19:19 bh
* Tested against the textbook 3x4 world. The transition model is still
* not ideal.
*
*/
package cs.decision;
import java.util.*;
/**
* A model of a 2D Markov Decision Process.
*
* @author bh
*/
public class MarkovDecisionProcess {
int rows, cols;
State[][] grid;
Vector reachableStates;
int numReachableStates = 0;
static final int ACTION_UP = 0;
static final int ACTION_RIGHT = 1;
static final int ACTION_DOWN = 2;
static final int ACTION_LEFT = 3;
static final int ACTION_STAY = 4; //stay is not an action per se.
static final int numActions = 4;
/**
* For each action there is one resulting state, plus one more because an
* action may be disallowed at a boundary, in which case the resulting
* state is the originating state itself.
*/
static final int numResultingStates = numActions+1;
/**
* The actions array contains all valid movements in the environment and
* provides a level of abstraction, so the client
* of this class doesn't have to worry about specific kinds of actions.
*/
Action[] actions;
/**
* Translate two absolute coordinates (r,c) and (r',c') to a relative
* position (UP,RIGHT,DOWN,LEFT). Note that the order of the relative
* positions is the same as in the actions[] array.
*
* dr dc | dr+1 dc+1 | direction | code
* -1 -1 | 0 0 | x | -1
* -1 0 | 0 1 | DOWN | 2
* -1 +1 | 0 2 | x | -1
* 0 -1 | 1 0 | LEFT | 3
* 0 0 | 1 1 | STAY | 4
* 0 +1 | 1 2 | RIGHT | 1
* +1 -1 | 2 0 | x | -1
* +1 0 | 2 1 | UP | 0
* +1 +1 | 2 2 | x | -1
*
*/
int[][] abs2relative = {{-1,2,-1},{3,4,1},{-1,0,-1}}; // [2][2] is -1: diagonal moves are invalid (see table above)
int[][] rel2absolute = {{1,0}, {0,1}, {-1,0},{0,-1}, {0, 0}};
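// Example: moving from (r,c)=(1,1) to (r',c')=(2,1) gives (dr,dc)=(+1,0),
// so abs2relative[dr+1][dc+1] == abs2relative[2][1] == ACTION_UP; conversely
// rel2absolute[ACTION_UP] is {+1,0}, recovering the offset.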
/**
* The full transition function T(s,a,s') would be a big (4*N^2) sparse
* matrix. Since an action can only lead to one of the four neighbours or
* back to the originating state, the last dimension is reduced to
* numResultingStates (number of actions + 1 for stay = 5).
*/
double[][][] transitionModel;
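// Indexing example: T(s,a,s') is read as
//   transitionModel[s.index][a.action][abs2relative[sp.row-s.row+1][sp.col-s.col+1]]
// so only the five reachable relative cells are stored per (state, action).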
Vector transitions;
/**
* Cursor variables for the iterator methods: getStartState(),
* getNextState() and getNextAction().
*/
int currentStateIndex;
int currentAction;
/**
* discount factor
*/
double gamma=1.0;
public MarkovDecisionProcess(int rows, int cols){
this.rows = rows;
this.cols = cols;
grid = new State[rows][cols];
	for(int i=0; i<rows; ++i)
		for(int j=0; j<cols; ++j)
			grid[i][j] = new State(i, j); // assumes a State(row, col) constructor
	reachableStates = new Vector();
	transitions = new Vector();
	actions = new Action[numActions];
	for(int a=0; a<numActions; ++a)
		actions[a] = new Action(a); // assumes an Action(int) constructor
}
/**
 * Reset the state iterator and return the first reachable state.
 * @return The first state, or null if there is no reachable state.
 */
public State getStartState() {
	currentStateIndex = 0;
	if(numReachableStates == 0)
		return null;
	return (State)reachableStates.get(currentStateIndex);
}
/**
 * @return The next state, or null if there is no more state to visit.
 */
public State getNextState() {
currentStateIndex ++;
if(currentStateIndex == numReachableStates)
return null;
else
return (State)reachableStates.get(currentStateIndex);
}
/**
* Because (in theory) only this class knows the details of Action, this
* method belongs inside the MDP class.
*
* @return A random valid action.
*/
public Action getRandomAction() {
// Don't need a fancy RNG here; Math.random() is uniform enough.
int a = (int)(Math.random()*numActions);
return actions[a];
}
/**
* Generate a proper policy, i.e. one that is guaranteed to eventually
* reach a terminal state, which of course isn't necessarily optimal.
* Essentially a multi-path maze-tracing problem.
*/
public void generateProperPolicy(){
Stack stack = new Stack();
for(State s=getStartState(); s!=null; s=getNextState()){
if(s.action != null)
continue;
stack.push(s);
while(!stack.empty()) {
State currentState = (State)stack.peek();
currentState.visited = true;
int a;
			if(currentState.terminate){
				stack.pop(); // terminal states need no action
				continue;
			}
			// Assumes transit(State, Action) (added in rev 1.4, not shown in
			// this section) returns the deterministic successor of a state.
			for(a=0; a<numActions; ++a){
				State next = transit(currentState, actions[a]);
				if(next == null || next == currentState)
					continue; // move blocked by a boundary or a hole
				if(next.terminate || next.action != null){
					// next already leads to a terminal state, so routing
					// currentState through it keeps the policy proper.
					currentState.action = actions[a];
					stack.pop();
					break;
				}
				if(!next.visited){
					stack.push(next); // unexplored: search deeper first
					break;
				}
			}
			if(a == numActions)
				stack.pop(); // dead end for now; backtrack
		}
	}
}
/**
 * @return The next action, or null if no more actions are possible.
 */
public Action getNextAction() {
currentAction ++;
if(currentAction == numActions)
return null;
else
return actions[currentAction];
}
/**
* Mark a state as a terminate (terminal) state; its utility is fixed to its reward.
* @param row Row number of the state.
* @param col Column number of the state.
*/
public void setTerminateState(int row, int col) {
grid[row-1][col-1].setTerminate();
grid[row-1][col-1].utility = grid[row-1][col-1].reward;
}
/**
* This function is only intended for setting up the process. An
* algorithm (a user of this class) should use the State-based overload
* below.
* @param row Row number, starting from 1, not 0.
* @param col Column number, starting from 1.
* @param r Reward.
*/
public void setReward(int row, int col, double r) {
State s = grid[row-1][col-1];
// If s is null, s is a hole. So no warning is issued.
if(s != null){
s.reward = r;
// For terminate states, the (initial) reward value is also the
// utility value. Remember this function is used for
// initializing the MDP.
if(s.terminate)
s.utility = r;
}
}
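// Illustrative setup (a sketch; mdp is a hypothetical name): the textbook
// 3x4 world has terminal states at (3,4) with reward +1 and (2,4) with
// reward -1, and a small living reward elsewhere.
//   MarkovDecisionProcess mdp = new MarkovDecisionProcess(3, 4);
//   mdp.setTerminateState(3, 4); mdp.setReward(3, 4,  1.0);
//   mdp.setTerminateState(2, 4); mdp.setReward(2, 4, -1.0);
//   mdp.setReward(1, 1, -0.04);  // ...likewise for the other non-terminal cells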
/**
* Set reward value.
* @param s The state to be set value to.
* @param r The reward value.
*/
public void setReward(State s, double r) {
s.reward = r;
}
/**
*
* @param s The state.
* @return The reward value of the state.
*/
public double getReward(State s) {
return s.reward;
}
/**
* Set the utility of a state. If the state is a terminate state, its
* utility stays fixed at its reward value.
* @param s The state.
* @param u The utility.
*/
public void setUtility(State s, double u){
if(s.terminate){
//TODO the assignment is redundant
s.utility = s.reward;
}else
s.utility = u;
}
/**
* Used (solely) by policy evaluation. Sets the utility value of the state at the given index.
* @param index The index of the state.
* @param u The utility value.
*/
public void setUtility(int index, double u) {
((State)reachableStates.get(index)).utility = u;
}
/**
* @param s The state.
* @return The utility value of the state.
*/
public double getUtility(State s) {
return s.utility;
}
/**
* @return Returns the gamma.
*/
public double getGamma() {
return gamma;
}
/**
* @param gamma The gamma to set.
*/
public void setGamma(double gamma) {
this.gamma = gamma;
}
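/**
 * Like setTransitionProbability below, but adds prob to the existing
 * entry instead of overwriting it. Useful when two different moves from
 * (r,c) collapse onto the same resulting state, e.g. an action that
 * bounces off a boundary back onto the originating square.
 */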
public void accumTransitionProbability(int r, int c, Action a, int rp, int cp, double prob) {
State s = grid[r-1][c-1];
if(s.terminate)
return;
int nextStateIndex = abs2relative[rp-r+1][cp-c+1];
transitionModel[s.index][a.action][nextStateIndex] += prob;
}
public void accumTransitionProbability(State s, Action a, State sp, double p) {
accumTransitionProbability(s.row+1, s.col+1, a, sp.row+1, sp.col+1, p);
}
/**
* This function is for setting up the transition model.
* @param r Row number (starts from 1) of current state.
* @param c Column number (starts from 1) of current state.
* @param a Action to be taken.
* @param rp Row number of next state, after the action is performed.
* @param cp Column number of next state.
* @param prob The transition probability T(s,a,s').
*/
public void setTransitionProbability(int r, int c, Action a, int rp, int cp, double prob) {
State s = grid[r-1][c-1];
if(s.terminate)
return;
int nextStateIndex = abs2relative[rp-r+1][cp-c+1];
transitionModel[s.index][a.action][nextStateIndex] = prob;
}
/**
* Set T(s,a,s').
* @param s The source state.
* @param a The action to be taken.
* @param sp The destination state.
* @param p The probability.
*/
public void setTransitionProbability(State s, Action a, State sp, double p) {
setTransitionProbability(s.row+1, s.col+1, a, sp.row+1, sp.col+1, p);
}
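// Illustrative sketch (mdp and up are hypothetical names): in the textbook
// world an agent moves in the intended direction with probability 0.8 and
// slips to either side with probability 0.1. For the bottom-left state
// (1,1), where LEFT hits a wall:
//   Action up = mdp.actions[ACTION_UP];                // package-visible field
//   mdp.setTransitionProbability(1,1, up, 2,1, 0.8);   // intended move
//   mdp.setTransitionProbability(1,1, up, 1,2, 0.1);   // slip right
//   mdp.accumTransitionProbability(1,1, up, 1,1, 0.1); // slip left: bounce back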
/**
* This is the transition function, T(s,a,s') in the textbook.
*
* @param s The current state.
* @param a Action to be taken.
* @return A list of (probability,next-state) pairs.
*/
public Vector getTransition(State s, Action a) {
transitions.clear();
// If s is a terminate state, no transition function for it.
// Return an empty vector.
if(s.terminate)
return transitions;
double p;
State nextState;
	for(int i=0; i<numResultingStates; ++i){
		p = transitionModel[s.index][a.action][i];
		if(p > 0){
			// Map the relative position back to absolute coordinates.
			int r = s.row + rel2absolute[i][0];
			int c = s.col + rel2absolute[i][1];
			nextState = grid[r][c];
			// Assumes a (probability, next-state) pair class; see the
			// Transition sketch at the end of this class.
			transitions.add(new Transition(p, nextState));
		}
	}
	return transitions;
}
/**
 * Dump the grid to standard output as an HTML table, top row first.
 * Each cell shows a state's utility and, for non-terminal states, its
 * policy action; holes are printed as " N ". (The method signature is
 * an assumption; the original was lost in this damaged region.)
 * @param showHeader Whether to wrap the table in a full HTML page.
 */
public void print(boolean showHeader) {
	if(showHeader)
		System.out.println("<html><body>");
	System.out.println("<table border=\"1\">");
	for(int i=rows-1; i>=0; --i){
		System.out.println("<tr>");
		for(int j=0; j<cols; ++j){
			System.out.print("<td>");
			if(grid[i][j] != null){
				if(grid[i][j].terminate)
					System.out.print(grid[i][j].utility);
				else
					System.out.print(grid[i][j].utility+":"+grid[i][j].action);
			}
			else
				System.out.print(" N ");
			System.out.println("</td>");
		}
		System.out.println("</tr>");
	}
	System.out.println("</table>");
	if(showHeader)
		System.out.println("</body></html>");
}