ArrayIndexOutOfBoundsException: -1
I'm writing some RL behavior for a Pac-Man bot, and I've messed something up with one of my lists in either arg_allmax or chooseAction.
Here is my class:
package rl;

import java.util.ArrayList;
import java.util.Hashtable;

public class Qlearn {
    private double epsilon = 0.1; // epsilon parameter for the epsilon-greedy strategy
    private double alpha = 0.2;   // alpha parameter: learning rate used in the update of Q
    private double gamma = 0.9;   // discount factor: weight of the feedback from the next action; if 0 -> no feedback
    private int actions[];
    private Hashtable<Tuple<Integer,Integer>, Double> q; // Q(s,a): hashtable <state,action> -> Q-value

    public Qlearn(int[] actions) {
        this.actions = actions;
        q = new Hashtable<Tuple<Integer,Integer>, Double>();
    }

    public Qlearn(int[] actions, double epsilon, double alpha, double gamma) {
        this.actions = actions;
        this.epsilon = epsilon;
        this.alpha = alpha;
        this.gamma = gamma;
        q = new Hashtable<Tuple<Integer,Integer>, Double>();
    }

    public Double getQ(int id_state, int id_action) {
        // get the value of Q for the state id_state and the action id_action (return 0 if the value is not in the hashtable)
        Tuple<Integer,Integer> t = new Tuple<Integer,Integer>(id_state, id_action); // we create a new Tuple object with the values of id_state and id_action
        Double v = q.get(t);
        if (v != null) return v;
        else return 0.0;
    }

    // get the argmax of a list
    public int argmax(double[] list) {
        int arg = -1;
        double max = 0;
        for (int i = 0; i < list.length; i++) {
            if (list[i] > max) {
                max = list[i];
                arg = i;
            }
        }
        return arg;
    }

    // get all the argmax indices if the max is reached several times
    public ArrayList<Integer> arg_allmax(double[] list) {
        ArrayList<Integer> args = new ArrayList<Integer>();
        int a = argmax(list);
        for (int i = 0; i < list.length; i++) {
            if (list[i] == list[a]) {
                args.add(i);
            }
        }
        return args;
    }

    // get the max of the list
    public double max(double[] list) {
        double max_ = -1e20;
        int a = argmax(list);
        max_ = list[a];
        return max_;
    }

    /*
     * Function that updates the hashtable
     * for the action id_action and the state id_state.
     * If Q(s,a) had an old value, we assign it old_value + alpha*(value - old_value);
     * if Q(s,a) had no old value, we assign reward.
     */
    public void learnQ(int id_state, int id_action, double reward, double value) {
        Tuple<Integer,Integer> t = new Tuple<Integer,Integer>(id_state, id_action);
        Double oldv = q.get(t);
        if (oldv == null) {
            q.put(t, reward);
        } else {
            q.put(t, oldv + alpha * (value - oldv));
        }
    }

    /*
     * Here is the epsilon-greedy strategy:
     * with probability epsilon we choose a random action;
     * with probability 1-epsilon we choose the most favorable action according to Q(s,a).
     */
    public int chooseAction(int id_state) {
        int action = -1;
        if (Math.random() < epsilon) {
            int i = (int) (Math.random() * actions.length);
            action = actions[i];
        } else {
            double[] tab = new double[actions.length];
            ArrayList<Integer> argmaxarray = new ArrayList<Integer>();
            for (int i = 0; i > actions.length; i++) {
                tab[i] = actions[i];
            }
            argmaxarray = arg_allmax(tab);
            int i = (int) (Math.random() * argmaxarray.size());
            action = argmaxarray.get(i);
        }
        return action;
    }

    /*
     * Learning after a move occurs:
     * 1) get the most profitable potential action from Q(s',a)
     * 2) call learnQ
     */
    public void learn(int id_state1, int id_action1, double reward, int id_state2) {
        int futureAction = 0;
        futureAction = chooseAction(id_state2);
        double maxqnew = 0; // TO FILL IN
        maxqnew = getQ(futureAction, id_state2);
        learnQ(id_state1, id_action1, reward, reward + gamma * maxqnew);
    }

    // Prints Q(s,a)
    private void printQvalue(int id_state) {
        for (int action : actions) {
            Tuple<Integer,Integer> t = new Tuple<Integer,Integer>(id_state, action);
            Double v = q.get(t);
            System.out.print(v + " ");
        }
        System.out.println();
    }
}
Here is what Eclipse tells me:
Exception in thread "AWT-EventQueue-0" java.lang.ArrayIndexOutOfBoundsException: -1
at rl.Qlearn.arg_allmax(Qlearn.java:54)
at rl.Qlearn.chooseAction(Qlearn.java:108)
at rl.Qlearn.learn(Qlearn.java:138)
I think it happens somewhere in the chooseAction method that uses the all_argmax function, but I can't find the exact bug!
Here are the two methods involved (so they're more readable for you):
arg_allmax:
public ArrayList<Integer> arg_allmax(double[] list) {
    ArrayList<Integer> args = new ArrayList<Integer>();
    int a = argmax(list);
    for (int i = 0; i < list.length; i++) {
        if (list[i] == list[a]) {
            args.add(i);
        }
    }
    return args;
}
chooseAction:
public int chooseAction(int id_state) {
    int action = -1;
    if (Math.random() < epsilon) {
        int i = (int) (Math.random() * actions.length);
        action = actions[i];
    } else {
        double[] tab = new double[actions.length];
        ArrayList<Integer> argmaxarray = new ArrayList<Integer>();
        for (int i = 0; i > actions.length; i++) {
            tab[i] = actions[i];
        }
        argmaxarray = arg_allmax(tab);
        int i = (int) (Math.random() * argmaxarray.size());
        action = argmaxarray.get(i);
    }
    return action;
}
Your IndexOutOfBoundsException comes from your argmax method: either the array is empty, or no value in it is greater than zero (max starts at 0 and the comparison is strict, so an all-zero or all-negative array never updates it). In both cases the variable int arg = -1 is never set to anything other than -1, and that is out of bounds in every case, since -1 is not a valid array position.
The best thing to do is to check that the array is not empty before passing it to argmax, or to check that the return value is valid (not -1) before doing anything with it. Also change double max = 0 to double max = Double.NEGATIVE_INFINITY.
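A minimal sketch of that fix, keeping the rest of the class as posted: max starts at Double.NEGATIVE_INFINITY so argmax can only return -1 for an empty array, and arg_allmax checks the index before using it.

// argmax now returns -1 only when the array is empty,
// because max starts below any representable double.
public int argmax(double[] list) {
    int arg = -1;
    double max = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < list.length; i++) {
        if (list[i] > max) {
            max = list[i];
            arg = i;
        }
    }
    return arg;
}

// Guard the returned index before using it.
public ArrayList<Integer> arg_allmax(double[] list) {
    ArrayList<Integer> args = new ArrayList<Integer>();
    int a = argmax(list);
    if (a == -1) {
        return args; // empty input: return an empty list instead of reading list[-1]
    }
    for (int i = 0; i < list.length; i++) {
        if (list[i] == list[a]) {
            args.add(i);
        }
    }
    return args;
}

With that guard in place, chooseAction should also check that the list returned by arg_allmax is non-empty before drawing a random index from it.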