[{"title":"(30个子文件3.63MB)MarkovDecisionProcesses:运行策略迭代,值迭代和Q学习算法来解决MDP问题-源码","children":[{"title":"MarkovDecisionProcesses-master","children":[{"title":"Book1.xlsx <span style='color:#111;'>17.44KB</span>","children":null,"spread":false},{"title":"~$ohan8-analysis.docx <span style='color:#111;'>162B</span>","children":null,"spread":false},{"title":"results","children":[{"title":"Picture7.png <span style='color:#111;'>28.76KB</span>","children":null,"spread":false},{"title":"Picture4.png <span style='color:#111;'>28.87KB</span>","children":null,"spread":false},{"title":"Picture2.png <span style='color:#111;'>5.14KB</span>","children":null,"spread":false},{"title":"Picture1.png <span style='color:#111;'>1.02KB</span>","children":null,"spread":false},{"title":"Picture6.png <span style='color:#111;'>96.65KB</span>","children":null,"spread":false},{"title":"Picture8.png <span style='color:#111;'>98.76KB</span>","children":null,"spread":false},{"title":"Picture3.png <span style='color:#111;'>29.29KB</span>","children":null,"spread":false},{"title":"Picture5.png <span style='color:#111;'>96.57KB</span>","children":null,"spread":false}],"spread":true},{"title":"experiments","children":[{"title":"value_iteration.py <span style='color:#111;'>3.37KB</span>","children":null,"spread":false},{"title":"policy_iteration.py <span style='color:#111;'>3.67KB</span>","children":null,"spread":false},{"title":"plotting.py <span style='color:#111;'>16.27KB</span>","children":null,"spread":false},{"title":"__init__.py <span style='color:#111;'>198B</span>","children":null,"spread":false},{"title":"q_learner.py <span style='color:#111;'>7.32KB</span>","children":null,"spread":false},{"title":"base.py <span style='color:#111;'>8.44KB</span>","children":null,"spread":false}],"spread":true},{"title":"~WRL2301.tmp <span style='color:#111;'>1.62MB</span>","children":null,"spread":false},{"title":"requirements.txt <span style='color:#111;'>161B</span>","children":null,"spread":false},{"title":".gitignore <span style='color:#111;'>1.76KB</span>","children":null,"spread":false},{"title":"Analysis.pdf <span style='color:#111;'>1.88MB</span>","children":null,"spread":false},{"title":"run_experiment.py <span style='color:#111;'>5.76KB</span>","children":null,"spread":false},{"title":"README.md <span style='color:#111;'>940B</span>","children":null,"spread":false},{"title":"environments","children":[{"title":"__init__.py <span style='color:#111;'>2.02KB</span>","children":null,"spread":false},{"title":"frozen_lake.py <span style='color:#111;'>6.88KB</span>","children":null,"spread":false},{"title":"cliff_walking.py <span style='color:#111;'>5.30KB</span>","children":null,"spread":false}],"spread":true},{"title":"solvers","children":[{"title":"value_iteration.py <span style='color:#111;'>2.53KB</span>","children":null,"spread":false},{"title":"policy_iteration.py <span style='color:#111;'>2.86KB</span>","children":null,"spread":false},{"title":"__init__.py <span style='color:#111;'>185B</span>","children":null,"spread":false},{"title":"q_learning.py <span style='color:#111;'>6.17KB</span>","children":null,"spread":false},{"title":"base.py <span style='color:#111;'>5.79KB</span>","children":null,"spread":false}],"spread":true}],"spread":false}],"spread":true}]