repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
---|---|---|---|---|---|---|
cowrie | cowrie-master/src/backend_pool/__init__.py | 0 | 0 | 0 | py |
cowrie | cowrie-master/src/backend_pool/libvirt/backend_service.py | # Copyright (c) 2019 Guilherme Borges <[email protected]>
# See the COPYRIGHT file for more information
from __future__ import annotations
import os
import random
import sys
import uuid
from twisted.python import log
import backend_pool.libvirt.guest_handler
import backend_pool.libvirt.network_handler
import backend_pool.util
from cowrie.core.config import CowrieConfig
LIBVIRT_URI = "qemu:///system"
class LibvirtError(Exception):
pass
class LibvirtBackendService:
def __init__(self):
# lazy import to avoid exception if not using the backend_pool and libvirt not installed (#1185)
import libvirt
# open connection to libvirt
self.conn = libvirt.open(LIBVIRT_URI)
if self.conn is None:
log.msg(
eventid="cowrie.backend_pool.qemu",
format="Failed to open connection to %(uri)s",
uri=LIBVIRT_URI,
)
raise LibvirtError()
self.filter = None
self.network = None
# signals backend is ready to be operated
self.ready: bool = False
# table to associate IPs and MACs
seed: int = random.randint(0, sys.maxsize)
self.network_table = backend_pool.util.generate_network_table(seed)
log.msg(
eventid="cowrie.backend_pool.qemu", format="Connection to QEMU established"
)
def start_backend(self):
"""
Initialises the QEMU/libvirt environment needed to run guests: starts the required networks and network filters.
"""
# create a network filter
self.filter = backend_pool.libvirt.network_handler.create_filter(self.conn)
# create a network for the guests (as a NAT)
self.network = backend_pool.libvirt.network_handler.create_network(
self.conn, self.network_table
)
# service is ready to be used (create guests and use them)
self.ready = True
def stop_backend(self):
log.msg(
eventid="cowrie.backend_pool.qemu", format="Doing QEMU clean shutdown..."
)
self.ready = False
self.destroy_all_cowrie()
def shutdown_backend(self):
self.conn.close() # close libvirt connection
log.msg(
eventid="cowrie.backend_pool.qemu",
format="Connection to QEMU closed successfully",
)
def get_mac_ip(self, ip_tester):
"""
Get a MAC and IP that are not being used by any guest.
"""
# Try to find a free pair 500 times.
retries = 0
while retries < 500:
mac = random.choice(list(self.network_table.keys()))
ip = self.network_table[mac]
if ip_tester(ip):
return mac, ip
retries += 1
raise LibvirtError()
def create_guest(self, ip_tester):
"""
Returns an unready domain and its snapshot information.
Guarantee that the IP is free with the ip_tester function.
"""
if not self.ready:
return
# create a single guest
guest_unique_id = uuid.uuid4().hex
guest_mac, guest_ip = self.get_mac_ip(ip_tester)
dom, snapshot = backend_pool.libvirt.guest_handler.create_guest(
self.conn, guest_mac, guest_unique_id
)
if dom is None:
log.msg(eventid="cowrie.backend_pool.qemu", format="Failed to create guest")
return None
return dom, snapshot, guest_ip
def destroy_guest(self, domain, snapshot):
if not self.ready:
return
try:
# destroy the domain in qemu
domain.destroy()
# we want to remove the snapshot if either:
# - save_snapshots was explicitly set to False
# - no snapshot dir was defined (using cowrie's root dir) - should not happen but prevent it
if (
(
not CowrieConfig.getboolean(
"backend_pool", "save_snapshots", fallback=True
)
or CowrieConfig.get("backend_pool", "snapshot_path", fallback=None)
is None
)
and os.path.exists(snapshot)
and os.path.isfile(snapshot)
):
os.remove(snapshot) # destroy its disk snapshot
except Exception as error:
log.err(
eventid="cowrie.backend_pool.qemu",
format="Error destroying guest: %(error)s",
error=error,
)
def __destroy_all_guests(self):
domains = self.conn.listDomainsID()
if not domains:
log.msg(
eventid="cowrie.backend_pool.qemu", format="Could not get domain list"
)
for domain_id in domains:
d = self.conn.lookupByID(domain_id)
if d.name().startswith("cowrie"):
try:
d.destroy()
except KeyboardInterrupt:
pass
def __destroy_all_networks(self):
networks = self.conn.listNetworks()
if not networks:
log.msg(
eventid="cowrie.backend_pool.qemu", format="Could not get network list"
)
for network in networks:
if network.startswith("cowrie"):
n = self.conn.networkLookupByName(network)
n.destroy()
def __destroy_all_network_filters(self):
network_filters = self.conn.listNWFilters()
if not network_filters:
log.msg(
eventid="cowrie.backend_pool.qemu",
format="Could not get network filters list",
)
for nw_filter in network_filters:
if nw_filter.startswith("cowrie"):
n = self.conn.nwfilterLookupByName(nw_filter)
n.undefine()
def destroy_all_cowrie(self):
self.__destroy_all_guests()
self.__destroy_all_networks()
self.__destroy_all_network_filters()
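# Editor's illustrative sketch (not part of the original module): a minimal walk
# through the service lifecycle defined above, assuming a working libvirt/QEMU
# installation and a valid backend_pool configuration. The trivial lambda ip_tester
# (which accepts every candidate IP) is a stand-in for the pool's real reachability check.
def _example_backend_lifecycle():
    service = LibvirtBackendService()  # opens the qemu:///system connection
    service.start_backend()  # creates the guest network filter and NAT network
    guest = service.create_guest(ip_tester=lambda ip: True)
    if guest is not None:
        domain, snapshot, guest_ip = guest
        log.msg(f"guest {domain.name()} assigned IP {guest_ip}")
        service.destroy_guest(domain, snapshot)  # destroys the domain, may remove its snapshot file
    service.stop_backend()  # destroys all remaining cowrie guests, networks and filters
    service.shutdown_backend()  # closes the libvirt connection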
| 6,096 | 29.949239 | 110 | py |
cowrie | cowrie-master/src/backend_pool/libvirt/guest_handler.py | # Copyright (c) 2019 Guilherme Borges <[email protected]>
# See the COPYRIGHT file for more information
from __future__ import annotations
import os
import sys
from configparser import NoOptionError
from twisted.python import log
import backend_pool.libvirt.snapshot_handler
import backend_pool.util
from cowrie.core.config import CowrieConfig
class QemuGuestError(Exception):
pass
def create_guest(connection, mac_address, guest_unique_id):
# lazy import to avoid exception if not using the backend_pool and libvirt not installed (#1185)
import libvirt
# get guest configurations
configuration_file: str = os.path.join(
CowrieConfig.get(
"backend_pool", "config_files_path", fallback="share/pool_configs"
),
CowrieConfig.get("backend_pool", "guest_config", fallback="default_guest.xml"),
)
version_tag: str = CowrieConfig.get("backend_pool", "guest_tag", fallback="guest")
base_image: str = CowrieConfig.get("backend_pool", "guest_image_path")
hypervisor: str = CowrieConfig.get(
"backend_pool", "guest_hypervisor", fallback="qemu"
)
memory: int = CowrieConfig.getint("backend_pool", "guest_memory", fallback=128)
qemu_machine: str = CowrieConfig.get(
"backend_pool", "guest_qemu_machine", fallback="pc-q35-3.1"
)
# check if base image exists
if not os.path.isfile(base_image):
log.msg(
eventid="cowrie.backend_pool.guest_handler",
format="Base image provided was not found: %(base_image)s",
base_image=base_image,
)
os._exit(1)
# only needed in some cases, e.g. OpenWRT-style guests that boot from a separate kernel image
kernel_image: str = CowrieConfig.get(
"backend_pool", "guest_kernel_image", fallback=""
)
# get a directory to save snapshots, even if temporary
try:
# guest configuration, to be read by qemu, needs an absolute path
snapshot_path: str = backend_pool.util.to_absolute_path(
CowrieConfig.get("backend_pool", "snapshot_path")
)
except NoOptionError:
snapshot_path = os.getcwd()
# create a disk snapshot to be used by the guest
disk_img: str = os.path.join(
snapshot_path, f"snapshot-{version_tag}-{guest_unique_id}.qcow2"
)
if not backend_pool.libvirt.snapshot_handler.create_disk_snapshot(
base_image, disk_img
):
log.msg(
eventid="cowrie.backend_pool.guest_handler",
format="There was a problem creating the disk snapshot.",
)
raise QemuGuestError()
guest_xml = backend_pool.util.read_file(configuration_file)
guest_config = guest_xml.format(
guest_name="cowrie-" + version_tag + "_" + guest_unique_id,
disk_image=disk_img,
base_image=base_image,
kernel_image=kernel_image,
hypervisor=hypervisor,
memory=memory,
qemu_machine=qemu_machine,
mac_address=mac_address,
network_name="cowrie",
)
try:
dom = connection.createXML(guest_config, 0)
if dom is None:
log.err(
eventid="cowrie.backend_pool.guest_handler",
format="Failed to create a domain from an XML definition.",
)
sys.exit(1)
log.msg(
eventid="cowrie.backend_pool.guest_handler",
format="Guest %(name)s has booted",
name=dom.name(),
)
return dom, disk_img
except libvirt.libvirtError as e:
log.err(
eventid="cowrie.backend_pool.guest_handler",
format="Error booting guest: %(error)s",
error=e,
)
raise e
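# Editor's note (illustrative, not part of the original module): the guest XML used
# above comes from the configured `guest_config` file (default_guest.xml) and is
# filled in via str.format(). The skeleton below is a hypothetical, heavily
# simplified stand-in showing how the placeholders passed in above would appear;
# `base_image` and `kernel_image` are omitted for brevity, and the real shipped
# template contains a full libvirt domain definition.
_EXAMPLE_GUEST_TEMPLATE = """\
<domain type='{hypervisor}'>
  <name>{guest_name}</name>
  <memory unit='MiB'>{memory}</memory>
  <os><type machine='{qemu_machine}'>hvm</type></os>
  <devices>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2'/>
      <source file='{disk_image}'/>
      <target dev='hda'/>
    </disk>
    <interface type='network'>
      <mac address='{mac_address}'/>
      <source network='{network_name}'/>
    </interface>
  </devices>
</domain>"""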
| 3,677 | 31.548673 | 100 | py |
cowrie | cowrie-master/src/backend_pool/libvirt/snapshot_handler.py | # Copyright (c) 2019 Guilherme Borges <[email protected]>
# See the COPYRIGHT file for more information
from __future__ import annotations
import getpass
import shutil
import subprocess
def create_disk_snapshot(source_img, destination_img):
try:
shutil.chown(source_img, getpass.getuser())
except PermissionError:
# log.msg('Should have root to create snapshot')
pass
# `capture_output=True` (Python 3.7+) is used instead of passing explicit `stdout` and `stderr` args
out = subprocess.run(
[
"qemu-img",
"create",
"-f",
"qcow2",
"-F",
"qcow2",
"-b",
source_img,
destination_img,
],
capture_output=True,
)
return out.returncode == 0
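# Editor's illustrative sketch (not part of the original module): the subprocess
# call above is equivalent to running e.g.
#   qemu-img create -f qcow2 -F qcow2 -b base.qcow2 snapshot-guest-abc123.qcow2
# i.e. the guest disk is a qcow2 overlay backed by the (untouched) base image.
# The paths below are hypothetical.
def _example_create_snapshot():
    ok = create_disk_snapshot(
        "/opt/images/base-guest.qcow2", "/tmp/snapshot-guest-abc123.qcow2"
    )
    if not ok:
        raise RuntimeError("qemu-img create failed (non-zero exit code)")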
| 816 | 23.029412 | 89 | py |
cowrie | cowrie-master/src/backend_pool/libvirt/network_handler.py | # Copyright (c) 2019 Guilherme Borges <[email protected]>
# See the COPYRIGHT file for more information
from __future__ import annotations
import os
import sys
from twisted.python import log
import backend_pool.util
from cowrie.core.config import CowrieConfig
def create_filter(connection):
# lazy import to avoid exception if not using the backend_pool and libvirt not installed (#1185)
import libvirt
filter_file: str = os.path.join(
CowrieConfig.get(
"backend_pool", "config_files_path", fallback="share/pool_configs"
),
CowrieConfig.get(
"backend_pool", "nw_filter_config", fallback="default_filter.xml"
),
)
filter_xml = backend_pool.util.read_file(filter_file)
try:
return connection.nwfilterDefineXML(filter_xml)
except libvirt.libvirtError as e:
log.err(
eventid="cowrie.backend_pool.network_handler",
format="Filter already exists: %(error)s",
error=e,
)
return connection.nwfilterLookupByName("cowrie-default-filter")
def create_network(connection, network_table):
# lazy import to avoid exception if not using the backend_pool and libvirt not installed (#1185)
import libvirt
# TODO support more interfaces and therefore more IP space to allow > 253 guests
network_file: str = os.path.join(
CowrieConfig.get(
"backend_pool", "config_files_path", fallback="share/pool_configs"
),
CowrieConfig.get(
"backend_pool", "network_config", fallback="default_network.xml"
),
)
network_xml = backend_pool.util.read_file(network_file)
template_host: str = "<host mac='{mac_address}' name='{name}' ip='{ip_address}'/>\n"
hosts: str = ""
# generate a host entry for every possible guest in this network (253 entries)
it = iter(network_table)
for guest_id in range(0, 253):
vm_name = "vm" + str(guest_id)
key = next(it)
hosts += template_host.format(
name=vm_name, mac_address=key, ip_address=network_table[key]
)
network_config = network_xml.format(
network_name="cowrie",
iface_name="virbr2",
default_gateway="192.168.150.1",
dhcp_range_start="192.168.150.2",
dhcp_range_end="192.168.150.254",
hosts=hosts,
)
try:
# create a transient virtual network
net = connection.networkCreateXML(network_config)
if net is None:
log.msg(
eventid="cowrie.backend_pool.network_handler",
format="Failed to define a virtual network",
)
sys.exit(1)
# set the network active
# not needed since apparently transient networks are created as active; uncomment if persistent
# net.create()
return net
except libvirt.libvirtError as e:
log.err(
eventid="cowrie.backend_pool.network_handler",
format="Network already exists: %(error)s",
error=e,
)
return connection.networkLookupByName("cowrie")
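# Editor's illustrative sketch (not part of the original module): the network_table
# consumed by create_network() is assumed to map MAC addresses to IP addresses inside
# the 192.168.150.0/24 range configured above (the concrete MAC below is a made-up
# example). Each entry becomes one DHCP <host .../> line in the network XML:
def _example_host_entry():
    template_host = "<host mac='{mac_address}' name='{name}' ip='{ip_address}'/>\n"
    return template_host.format(
        name="vm0", mac_address="52:54:00:aa:bb:01", ip_address="192.168.150.2"
    )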
| 3,145 | 30.777778 | 103 | py |
cowrie | cowrie-master/src/backend_pool/libvirt/__init__.py | 0 | 0 | 0 | py |
cowrie | cowrie-master/bin/createdynamicprocess.py | #!/usr/bin/env python
import datetime
import json
import random
import psutil
command: dict = {}
command["command"] = {}
command["command"]["ps"] = []
randomStates = ["Ss", "S<", "D<", "Ss+"]
for proc in psutil.process_iter():
try:
info = proc.as_dict(
attrs=[
"pid",
"name",
"cmdline",
"username",
"cpu_percent",
"memory_percent",
"memory_info",
"create_time",
"terminal",
"status",
"cpu_times",
]
)
except psutil.NoSuchProcess:
pass
else:
object = {}
object["USER"] = info["username"]
object["PID"] = info["pid"]
if info["cmdline"]:
object["COMMAND"] = "/".join(info["cmdline"])
else:
object["COMMAND"] = "[ " + info["name"] + " ]"
object["CPU"] = info["cpu_percent"]
object["MEM"] = info["memory_percent"]
object["RSS"] = info["memory_info"].rss
object["VSZ"] = info["memory_info"].vms
object["START"] = datetime.datetime.fromtimestamp(info["create_time"]).strftime(
"%b%d"
)
if info["terminal"]:
object["TTY"] = str(info["terminal"]).replace("/dev/", "")
else:
object["TTY"] = "?"
object["STAT"] = random.choice(randomStates)
object["TIME"] = info["cpu_times"].user
command["command"]["ps"].append(object)
print(json.dumps(command, indent=4, sort_keys=True))
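# Editor's illustrative sketch (not part of the original script): the JSON printed
# above has the shape {"command": {"ps": [{"USER": ..., "PID": ..., "COMMAND": ..., ...}]}}.
# A hypothetical consumer could turn it back into ps-style rows like this:
def _example_print_ps_rows(snapshot: dict) -> None:
    for row in snapshot["command"]["ps"]:
        print(f'{str(row["USER"]):<12} {row["PID"]:>6} {row["STAT"]:<4} {row["TTY"]:<6} {row["COMMAND"]}')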
| 1,592 | 26.947368 | 88 | py |
cowrie | cowrie-master/docs/conf.py | #
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# The Makefile sets PYTHONPATH so this module is available
from cowrie import version as cowrie_version_object
# -- Project information -----------------------------------------------------
project = "cowrie"
copyright = "2014-2022, Michel Oosterhof"
author = "Michel Oosterhof"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "{major}.{minor}".format(
major=cowrie_version_object.major, minor=cowrie_version_object.minor
)
# The full version, including alpha/beta/rc tags.
release = cowrie_version_object.short()
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosectionlabel",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.githubpages",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_copybutton",
"sphinx_search.extension",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "cowriedoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "cowrie.tex", "cowrie Documentation", "Michel Oosterhof", "manual"),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "cowrie", "cowrie Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"cowrie",
"cowrie Documentation",
author,
"cowrie",
"One line description of project.",
"Miscellaneous",
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"twisted": ("https://docs.twistedmatrix.com/en/latest/api/", None),
}
# -- Options for sphinx copybutton extension ---------------------------------
copybutton_prompt_text = "$ "
| 6,383 | 30.141463 | 85 | py |
overcooked_ai | overcooked_ai-master/setup.py | #!/usr/bin/env python
from setuptools import find_packages, setup
with open("README.md", "r", encoding="UTF8") as fh:
long_description = fh.read()
setup(
name="overcooked_ai",
version="1.1.0",
description="Cooperative multi-agent environment based on Overcooked",
long_description=long_description,
long_description_content_type="text/markdown",
author="Micah Carroll",
author_email="[email protected]",
url="https://github.com/HumanCompatibleAI/overcooked_ai",
download_url="https://github.com/HumanCompatibleAI/overcooked_ai/archive/refs/tags/1.1.0.tar.gz",
packages=find_packages("src"),
keywords=["Overcooked", "AI", "Reinforcement Learning"],
package_dir={"": "src"},
package_data={
"overcooked_ai_py": [
"data/layouts/*.layout",
"data/planners/*.py",
"data/human_data/*.pickle",
"data/graphics/*.png",
"data/graphics/*.json",
"data/fonts/*.ttf",
],
"human_aware_rl": [
"static/**/*.pickle",
"static/**/*.csv",
"ppo/trained_example/*.pkl",
"ppo/trained_example/*.json",
"ppo/trained_example/*/.is_checkpoint",
"ppo/trained_example/*/.tune_metadata",
"ppo/trained_example/*/checkpoint-500",
],
},
install_requires=[
"dill",
"numpy",
"scipy",
"tqdm",
"gym",
"pettingzoo",
"ipython",
"pygame",
"ipywidgets",
"opencv-python",
],
# removed overlapping dependencies
extras_require={
"harl": [
"wandb",
"GitPython",
"memory_profiler",
"sacred",
"pymongo",
"matplotlib",
"requests",
"seaborn==0.9.0",
"ray[rllib]==2.0.0",
"protobuf",
"tensorflow==2.10",
]
},
entry_points={
"console_scripts": [
"overcooked-demo-up = overcooked_demo:start_server",
"overcooked-demo-move = overcooked_demo:move_agent",
]
},
)
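# Editor's note (illustrative, not part of the original setup.py): given the
# metadata above, typical usage would look roughly like the following shell
# commands (the package name, extra, and console script are taken from this file;
# whether a given version is published on PyPI is not verified here):
#   pip install overcooked_ai            # core environment only
#   pip install "overcooked_ai[harl]"    # additionally pulls the human_aware_rl training stack
#   overcooked-demo-up                   # console script -> overcooked_demo:start_server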
| 2,151 | 27.693333 | 101 | py |
overcooked_ai | overcooked_ai-master/testing/visualization_test.py | import copy
import json
import os
import unittest
import numpy as np
import pygame
from overcooked_ai_py.agents.agent import RandomAgent
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
from overcooked_ai_py.mdp.overcooked_mdp import (
OvercookedGridworld,
OvercookedState,
Recipe,
)
from overcooked_ai_py.static import TESTING_DATA_DIR
from overcooked_ai_py.utils import generate_temporary_file_path, load_from_json
from overcooked_ai_py.visualization.state_visualizer import StateVisualizer
def get_file_count(directory_path):
path, dirs, files = next(os.walk(directory_path))
return len(files)
state_visualizer_dir = os.path.join(TESTING_DATA_DIR, "test_state_visualizer")
example_img_path = generate_temporary_file_path(
prefix="overcooked_visualized_state_", extension=".png"
)
def test_render_state_from_dict(test_dict):
input_dict = copy.deepcopy(test_dict)
test_dict = copy.deepcopy(test_dict)
test_dict["kwargs"]["state"] = OvercookedState.from_dict(
test_dict["kwargs"]["state"]
)
# only checks that it does not raise an error; image fidelity is checked via render_state below
StateVisualizer(**test_dict["config"]).display_rendered_state(
img_path=example_img_path, **test_dict["kwargs"]
)
actual_result = pygame.surfarray.array3d(
StateVisualizer(**test_dict["config"]).render_state(
**test_dict["kwargs"]
)
)
expected_result = np.load(
os.path.join(state_visualizer_dir, test_dict["result_array_filename"])
)
if not actual_result.shape == expected_result.shape:
print("test with: ", input_dict["result_array_filename"], "is failed")
print(
"test not passed, wrong output shape",
actual_result.shape,
"!=",
expected_result.shape,
)
print(json.dumps(input_dict, indent=4, sort_keys=True))
return False
wrong_rows, wrong_columns, wrong_color_channels = np.where(
actual_result != expected_result
)
wrong_coordinates = set(
[(row, col) for row, col in zip(wrong_rows, wrong_columns)]
)
incorrect_pixels_num = len(wrong_coordinates)
all_pixels_num = int(expected_result.size / 3)
if incorrect_pixels_num:
wrong_coordinate_list = sorted(list(wrong_coordinates))
print("test with: ", input_dict["result_array_filename"], "is failed")
print(
"test not passed, wrong color of",
incorrect_pixels_num,
"pixels out of",
all_pixels_num,
)
print(
"first 100 wrong pixels coordinates", wrong_coordinate_list[:100]
)
print("coordinate\texpected\tactual")
for i in range(10):
(wrong_x, wrong_y) = wrong_coord = wrong_coordinate_list[i]
print(
"%s\t%s\t%s"
% (
str(wrong_coord),
str(expected_result[wrong_x, wrong_y]),
str(actual_result[wrong_x, wrong_y]),
)
)
print("test_dict", json.dumps(input_dict))
return False
print("test with: ", input_dict["result_array_filename"], "is ok")
return True
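# Editor's note (illustrative, not part of the original tests): each entry in the
# render_state_data_*.json files loaded by the test cases below is assumed to carry
# three keys, matching how test_render_state_from_dict unpacks them above:
#   "config"                 - kwargs forwarded to StateVisualizer(...)
#   "kwargs"                 - kwargs for render_state(); "kwargs"["state"] is an
#                              OvercookedState dict (converted via from_dict above)
#   "result_array_filename"  - a .npy file holding the expected pixel array, loaded
#                              with np.load() and compared element-wise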
class TestStateVisualizer(unittest.TestCase):
def setUp(self):
Recipe.configure({})
def test_setting_up_configs(self):
default_values = copy.deepcopy(StateVisualizer.DEFAULT_VALUES)
init_config = {"tile_size": 123}
configure_config = {"tile_size": 234}
configure_defaults_config = {"tile_size": 345}
assert (
default_values["tile_size"]
!= init_config["tile_size"]
!= configure_config["tile_size"]
!= configure_defaults_config["tile_size"]
)
visualizer = StateVisualizer(**init_config)
self.assertEqual(init_config["tile_size"], visualizer.tile_size)
visualizer.configure(**configure_config)
self.assertEqual(configure_config["tile_size"], visualizer.tile_size)
StateVisualizer.configure_defaults(**configure_defaults_config)
self.assertEqual(
configure_defaults_config["tile_size"],
StateVisualizer.DEFAULT_VALUES["tile_size"],
)
self.assertEqual(
configure_defaults_config["tile_size"], StateVisualizer().tile_size
)
invalid_kwargs = {"invalid_argument": 123}
self.assertRaises(AssertionError, StateVisualizer, **invalid_kwargs)
self.assertRaises(
AssertionError,
StateVisualizer.configure_defaults,
**invalid_kwargs
)
self.assertRaises(
AssertionError, visualizer.configure, **invalid_kwargs
)
def test_properties(self):
visualizer = StateVisualizer(
tile_size=30, hud_interline_size=7, hud_font_size=26
)
self.assertEqual(visualizer.scale_by_factor, 2)
self.assertEqual(visualizer.hud_line_height, 26 + 7)
def test_hud_display(self):
for d in load_from_json(
os.path.join(
state_visualizer_dir, "render_state_data_test_hud.json"
)
):
test_render_state_from_dict(d)
def test_different_sizes(self):
for d in load_from_json(
os.path.join(
state_visualizer_dir, "render_state_data_test_sizes.json"
)
):
test_render_state_from_dict(d)
def test_cooking_timer_display(self):
for d in load_from_json(
os.path.join(
state_visualizer_dir,
"render_state_data_test_cooking_display.json",
)
):
test_render_state_from_dict(d)
def test_various_states(self):
for d in load_from_json(
os.path.join(
state_visualizer_dir, "render_state_data_test_various.json"
)
):
test_render_state_from_dict(d)
def test_generated_layout_states(self):
for d in load_from_json(
os.path.join(
state_visualizer_dir,
"render_state_data_test_generated_layout.json",
)
):
test_render_state_from_dict(d)
def test_default_hud_data_from_trajectories(self):
traj_path = os.path.join(
TESTING_DATA_DIR, "test_state_visualizer", "test_trajectory.json"
) # NOTE: for test purposes a reward is added here even though there was no soup delivery in the trajectory
test_trajectory = AgentEvaluator.load_traj_from_json(traj_path)
hud_data_path = os.path.join(
TESTING_DATA_DIR,
"test_state_visualizer",
"expected_default_hud_data_from_trajectories.json",
)
expected_hud_data = load_from_json(hud_data_path)
result_hud_data = StateVisualizer().default_hud_data_from_trajectories(
test_trajectory
)
self.assertEqual(
json.dumps(result_hud_data, sort_keys=True),
json.dumps(expected_hud_data, sort_keys=True),
)
def test_action_probs_display(self):
for d in load_from_json(
os.path.join(
state_visualizer_dir,
"render_state_data_test_action_probs_display.json",
)
):
test_render_state_from_dict(d)
def test_trajectory_visualization(self):
# we don't have a good way to check the slider automatically, so this mostly tests basic things like the number of output images and whether using the method raises an error
traj_path = os.path.join(
TESTING_DATA_DIR, "test_state_visualizer", "test_trajectory.json"
)
test_trajectory = AgentEvaluator.load_traj_from_json(traj_path)
expected_images_num = len(test_trajectory["ep_states"][0])
assert expected_images_num == 10
action_probs = [
[RandomAgent(all_actions=True).action(state)[1]["action_probs"]]
* 2
for state in test_trajectory["ep_states"][0]
]
result_img_directory_path = (
StateVisualizer().display_rendered_trajectory(
test_trajectory,
action_probs=action_probs,
ipython_display=False,
)
)
self.assertEqual(
get_file_count(result_img_directory_path), expected_images_num
)
custom_img_directory_path = generate_temporary_file_path(
prefix="overcooked_visualized_trajectory", extension=""
)
self.assertNotEqual(
custom_img_directory_path, result_img_directory_path
)
result_img_directory_path = (
StateVisualizer().display_rendered_trajectory(
test_trajectory,
img_directory_path=custom_img_directory_path,
ipython_display=False,
)
)
self.assertEqual(custom_img_directory_path, result_img_directory_path)
self.assertEqual(
get_file_count(result_img_directory_path), expected_images_num
)
if __name__ == "__main__":
unittest.main()
| 9,206 | 33.743396 | 162 | py |
overcooked_ai | overcooked_ai-master/testing/overcooked_test.py | import copy
import glob
import json
import os
import shutil
import unittest
from math import factorial
import gym
import numpy as np
from overcooked_ai_py.agents.agent import (
AgentGroup,
AgentPair,
FixedPlanAgent,
GreedyHumanModel,
RandomAgent,
)
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
from overcooked_ai_py.mdp.actions import Action, Direction
from overcooked_ai_py.mdp.layout_generator import (
DISH_DISPENSER,
ONION_DISPENSER,
POT,
SERVING_LOC,
TOMATO_DISPENSER,
LayoutGenerator,
)
from overcooked_ai_py.mdp.overcooked_env import (
DEFAULT_ENV_PARAMS,
OvercookedEnv,
)
from overcooked_ai_py.mdp.overcooked_mdp import (
ObjectState,
OvercookedGridworld,
OvercookedState,
PlayerState,
Recipe,
SoupState,
)
from overcooked_ai_py.mdp.overcooked_trajectory import (
DEFAULT_TRAJ_KEYS,
EPISODE_TRAJ_KEYS,
TIMESTEP_TRAJ_KEYS,
append_trajectories,
)
from overcooked_ai_py.planning.planners import (
NO_COUNTERS_PARAMS,
MediumLevelActionManager,
MotionPlanner,
)
from overcooked_ai_py.static import TESTING_DATA_DIR
from overcooked_ai_py.utils import (
iterate_over_json_files_in_dir,
load_from_json,
load_pickle,
save_as_json,
save_pickle,
)
START_ORDER_LIST = ["any"]
n, s = Direction.NORTH, Direction.SOUTH
e, w = Direction.EAST, Direction.WEST
stay, interact = Action.STAY, Action.INTERACT
P, Obj = PlayerState, ObjectState
def comb(n, k):
return factorial(n) / (factorial(n - k) * factorial(k))
class TestRecipe(unittest.TestCase):
def setUp(self):
Recipe.configure({})
self.r1 = Recipe([Recipe.ONION, Recipe.ONION, Recipe.ONION])
self.r2 = Recipe([Recipe.ONION, Recipe.ONION, Recipe.ONION])
self.r3 = Recipe([Recipe.ONION, Recipe.TOMATO])
self.r4 = Recipe([Recipe.ONION, Recipe.TOMATO])
self.r5 = Recipe([Recipe.TOMATO, Recipe.ONION])
self.r6 = Recipe([Recipe.ONION, Recipe.ONION])
self.recipes = [self.r1, self.r2, self.r3, self.r4, self.r5, self.r6]
self.pickle_temp_dir = os.path.join(TESTING_DATA_DIR, "recipes")
if not os.path.exists(self.pickle_temp_dir):
os.makedirs(self.pickle_temp_dir)
def tearDown(self):
Recipe.configure({})
if os.path.exists(self.pickle_temp_dir):
shutil.rmtree(self.pickle_temp_dir)
def test_eq(self):
self.assertEqual(self.r1, self.r2, "Failed basic equality check")
self.assertNotEqual(self.r1, self.r3, "Failed Basic inequality check")
self.assertNotEqual(
self.r1, self.r6, "Failed inequality check with all one ingredient"
)
self.assertEqual(self.r3, self.r4, "Failed basic equality check")
self.assertEqual(self.r4, self.r5, "Failed ordered equality check")
def test_caching(self):
self.assertIs(self.r1, self.r2)
self.assertIs(self.r3, self.r4)
self.assertIs(self.r4, self.r5)
self.assertFalse(
self.r6 is self.r1, "different recipes cached to same value"
)
def test_serialization(self):
loaded_recipes = []
# Save and then load every recipe instance
for i, recipe in enumerate(self.recipes):
pickle_path = os.path.join(
self.pickle_temp_dir, "recipe_{}".format(i)
)
save_pickle(recipe, pickle_path)
loaded = load_pickle(pickle_path)
loaded_recipes.append(loaded)
# Ensure loaded recipes equal corresponding original recipe
for original, loaded in zip(self.recipes, loaded_recipes):
self.assertEqual(original, loaded)
def test_value(self):
# TODO
for recipe in self.recipes:
self.assertEqual(recipe.value, 20)
def test_time(self):
# TODO
for recipe in self.recipes:
self.assertEqual(recipe.time, 20)
def test_all_recipes(self):
for recipe in self.recipes:
self.assertTrue(recipe in Recipe.ALL_RECIPES)
self.assertEqual(
len(Recipe.ALL_RECIPES),
self._expected_num_recipes(
len(Recipe.ALL_INGREDIENTS), Recipe.MAX_NUM_INGREDIENTS
),
)
Recipe.configure({"max_num_ingredients": 4})
self.assertEqual(
len(Recipe.ALL_RECIPES),
self._expected_num_recipes(len(Recipe.ALL_INGREDIENTS), 4),
)
def test_invalid_input(self):
self.assertRaises(
ValueError, Recipe, [Recipe.ONION, Recipe.TOMATO, "carrot"]
)
self.assertRaises(ValueError, Recipe, [Recipe.ONION] * 4)
self.assertRaises(ValueError, Recipe, [])
self.assertRaises(ValueError, Recipe, "invalid argument")
def test_recipes_generation(self):
self.assertRaises(
AssertionError,
Recipe.generate_random_recipes,
max_size=Recipe.MAX_NUM_INGREDIENTS + 1,
)
self.assertRaises(
AssertionError, Recipe.generate_random_recipes, min_size=0
)
self.assertRaises(
AssertionError,
Recipe.generate_random_recipes,
min_size=3,
max_size=2,
)
self.assertRaises(
AssertionError,
Recipe.generate_random_recipes,
ingredients=["onion", "tomato", "fake_ingredient"],
)
self.assertRaises(
AssertionError, Recipe.generate_random_recipes, n=99999
)
self.assertEqual(len(Recipe.generate_random_recipes(n=3)), 3)
self.assertEqual(
len(Recipe.generate_random_recipes(n=99, unique=False)), 99
)
two_sized_recipes = [
Recipe(["onion", "onion"]),
Recipe(["onion", "tomato"]),
Recipe(["tomato", "tomato"]),
]
for _ in range(100):
self.assertCountEqual(
two_sized_recipes,
Recipe.generate_random_recipes(
n=3,
min_size=2,
max_size=2,
ingredients=["onion", "tomato"],
),
)
only_onions_recipes = [
Recipe(["onion", "onion"]),
Recipe(["onion", "onion", "onion"]),
]
for _ in range(100):
self.assertCountEqual(
only_onions_recipes,
Recipe.generate_random_recipes(
n=2, min_size=2, max_size=3, ingredients=["onion"]
),
)
self.assertCountEqual(
only_onions_recipes,
set(
[
Recipe.generate_random_recipes(
n=1, recipes=only_onions_recipes
)[0]
for _ in range(100)
]
),
) # false-positive rate for this test is 1/10^99
def _expected_num_recipes(self, num_ingredients, max_len):
return comb(num_ingredients + max_len, num_ingredients) - 1
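# Editor's note (illustrative, not part of the original tests): _expected_num_recipes
# counts the non-empty multisets of at most max_len ingredients drawn from
# num_ingredients ingredient types, i.e. C(num_ingredients + max_len, num_ingredients) - 1.
# Worked example for the default setup (2 ingredient types, up to 3 ingredients):
#   C(2 + 3, 2) - 1 = 10 - 1 = 9 recipes
#   (2 single-ingredient, 3 two-ingredient and 4 three-ingredient recipes).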
class TestSoupState(unittest.TestCase):
def setUp(self):
Recipe.configure({})
self.s1 = SoupState.get_soup((0, 0), num_onions=0, num_tomatoes=0)
self.s2 = SoupState.get_soup((0, 1), num_onions=2, num_tomatoes=1)
self.s3 = SoupState.get_soup(
(1, 1), num_onions=1, num_tomatoes=0, cooking_tick=1
)
self.s4 = SoupState.get_soup(
(1, 0), num_onions=0, num_tomatoes=2, finished=True
)
def test_position(self):
new_pos = (2, 0)
self.s4.position = new_pos
for ingredient in self.s4._ingredients:
self.assertEqual(new_pos, ingredient.position)
self.assertEqual(new_pos, self.s4.position)
def test_is_cooking(self):
self.assertFalse(self.s1.is_cooking)
self.assertFalse(self.s2.is_cooking)
self.assertTrue(self.s3.is_cooking)
self.assertFalse(self.s4.is_cooking)
def test_is_ready(self):
self.assertFalse(self.s1.is_ready)
self.assertFalse(self.s2.is_ready)
self.assertFalse(self.s3.is_ready)
self.assertTrue(self.s4.is_ready)
def test_is_idle(self):
self.assertTrue(self.s1.is_idle)
self.assertTrue(self.s2.is_idle)
self.assertFalse(self.s3.is_idle)
self.assertFalse(self.s4.is_idle)
def test_is_full(self):
self.assertFalse(self.s1.is_full)
self.assertTrue(self.s2.is_full)
self.assertTrue(self.s3.is_full)
self.assertTrue(self.s4.is_full)
def test_cooking(self):
self.s1.add_ingredient_from_str(Recipe.ONION)
self.s1.add_ingredient_from_str(Recipe.TOMATO)
self.assertTrue(self.s1.is_idle)
self.assertFalse(self.s1.is_cooking)
self.assertFalse(self.s1.is_full)
self.s1.begin_cooking()
self.assertFalse(self.s1.is_idle)
self.assertTrue(self.s1.is_full)
self.assertTrue(self.s1.is_cooking)
for _ in range(self.s1.cook_time):
self.s1.cook()
self.assertFalse(self.s1.is_cooking)
self.assertFalse(self.s1.is_idle)
self.assertTrue(self.s1.is_full)
self.assertTrue(self.s1.is_ready)
def test_attributes(self):
self.assertListEqual(self.s1.ingredients, [])
self.assertListEqual(
self.s2.ingredients, [Recipe.ONION, Recipe.ONION, Recipe.TOMATO]
)
self.assertListEqual(self.s3.ingredients, [Recipe.ONION])
self.assertListEqual(
self.s4.ingredients, [Recipe.TOMATO, Recipe.TOMATO]
)
try:
self.s1.recipe
self.fail("Expected ValueError to be raised")
except ValueError as e:
pass
except Exception as e:
self.fail(
"Expected ValueError to be raised, {} raised instead".format(e)
)
try:
self.s2.recipe
self.fail("Expected ValueError to be raised")
except ValueError as e:
pass
except Exception as e:
self.fail(
"Expected ValueError to be raised, {} raised instead".format(e)
)
self.assertEqual(self.s3.recipe, Recipe([Recipe.ONION]))
self.assertEqual(
self.s4.recipe, Recipe([Recipe.TOMATO, Recipe.TOMATO])
)
def test_invalid_ops(self):
# Cannot cook an empty soup
self.assertRaises(ValueError, self.s1.begin_cooking)
# Must call 'begin_cooking' before cooking a soup
self.assertRaises(ValueError, self.s2.cook)
# Cannot cook a done soup
self.assertRaises(ValueError, self.s4.cook)
# Cannot begin cooking a soup that is already cooking
self.assertRaises(ValueError, self.s3.begin_cooking)
# Cannot begin cooking a soup that is already done
self.assertRaises(ValueError, self.s4.begin_cooking)
# Cannot add ingredients to a soup that is cooking
self.assertRaises(
ValueError, self.s3.add_ingredient_from_str, Recipe.ONION
)
# Cannot add ingredients to a soup that is ready
self.assertRaises(
ValueError, self.s4.add_ingredient_from_str, Recipe.ONION
)
# Cannot remove an ingredient from a soup that is ready
self.assertRaises(ValueError, self.s4.pop_ingredient)
# Cannot remove an ingredient from a soup that is cooking
self.assertRaises(ValueError, self.s3.pop_ingredient)
# Cannot remove an ingredient from a soup that is empty
self.assertRaises(ValueError, self.s1.pop_ingredient)
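# Editor's illustrative sketch (not part of the original tests): the SoupState
# lifecycle exercised by the test cases above, in order, using only calls that
# appear in those tests. Never invoked during the test run.
def _example_soup_lifecycle():
    Recipe.configure({})  # default recipe configuration, as in setUp
    soup = SoupState.get_soup((0, 0), num_onions=0, num_tomatoes=0)
    soup.add_ingredient_from_str(Recipe.ONION)  # idle soups accept ingredients
    soup.add_ingredient_from_str(Recipe.TOMATO)
    soup.begin_cooking()  # no further ingredients may be added after this
    for _ in range(soup.cook_time):
        soup.cook()
    assert soup.is_ready
    return soup.recipe  # equals Recipe([Recipe.ONION, Recipe.TOMATO])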
class TestDirection(unittest.TestCase):
def test_direction_number_conversion(self):
all_directions = Direction.ALL_DIRECTIONS
all_numbers = []
for direction in Direction.ALL_DIRECTIONS:
number = Direction.DIRECTION_TO_INDEX[direction]
direction_again = Direction.INDEX_TO_DIRECTION[number]
self.assertEqual(direction, direction_again)
all_numbers.append(number)
# Check that all directions are distinct
num_directions = len(all_directions)
self.assertEqual(len(set(all_directions)), num_directions)
# Check that the numbers are 0, 1, ... num_directions - 1
self.assertEqual(set(all_numbers), set(range(num_directions)))
class TestGridworld(unittest.TestCase):
# TODO: write more small, targeted tests to be loaded from jsons
verbose = False
def setUp(self):
self.base_mdp = OvercookedGridworld.from_layout_name("mdp_test")
def test_constructor_invalid_inputs(self):
# Height and width must be at least 3.
with self.assertRaises(AssertionError):
mdp = OvercookedGridworld.from_grid(["X", "X", "X"])
with self.assertRaises(AssertionError):
mdp = OvercookedGridworld.from_grid([["X", "X", "X"]])
with self.assertRaises(AssertionError):
# Borders must be present.
mdp = OvercookedGridworld.from_grid(["XOSX", "P D", " 21 "])
with self.assertRaises(AssertionError):
# The grid can't be ragged.
mdp = OvercookedGridworld.from_grid(
["XXPXX", "O 2XX", "X1 3 X", "XDXSXX"]
)
with self.assertRaises(AssertionError):
# The agents must be numbered 1 and 2.
mdp = OvercookedGridworld.from_grid(
["XXPXX", "O 3O", "X1 X", "XDXSX"]
)
with self.assertRaises(AssertionError):
# The agents must be numbered 1 and 2.
mdp = OvercookedGridworld.from_grid(
["XXPXX", "O 1O", "X1 X", "XDXSX"]
)
with self.assertRaises(AssertionError):
# B is not a valid element.
mdp = OvercookedGridworld.from_grid(
["XBPXX", "O 2O", "X1 X", "XDXSX"]
)
def test_start_positions(self):
actual_start_state = self.base_mdp.get_standard_start_state()
expected_state_path = os.path.join(
TESTING_DATA_DIR, "test_start_positions", "expected.json"
)
# NOTE: Uncomment the following line if expected start state deliberately changed
# save_as_json(actual_start_state.to_dict(), expected_state_path)
expected_start_state = OvercookedState.from_dict(
load_from_json(expected_state_path)
)
self.assertEqual(
actual_start_state,
expected_start_state,
"\n" + str(actual_start_state) + "\n" + str(expected_start_state),
)
def test_file_constructor(self):
mdp = OvercookedGridworld.from_layout_name("corridor")
expected_start_state = OvercookedState(
[
PlayerState((3, 1), Direction.NORTH),
PlayerState((10, 1), Direction.NORTH),
],
{},
all_orders=[{"ingredients": ["onion", "onion", "onion"]}],
)
actual_start_state = mdp.get_standard_start_state()
self.assertEqual(
actual_start_state,
expected_start_state,
"\n" + str(actual_start_state) + "\n" + str(expected_start_state),
)
def test_actions(self):
bad_state = OvercookedState(
[
PlayerState((0, 0), Direction.NORTH),
PlayerState((3, 1), Direction.NORTH),
],
{},
)
with self.assertRaises(AssertionError):
self.base_mdp.get_actions(bad_state)
self.assertEqual(
self.base_mdp.get_actions(
self.base_mdp.get_standard_start_state()
),
[Action.ALL_ACTIONS, Action.ALL_ACTIONS],
)
def test_from_dict(self):
state_dict = {
"players": [
{
"position": [2, 1],
"orientation": [0, -1],
"held_object": None,
},
{
"position": [1, 1],
"orientation": [0, -1],
"held_object": None,
},
],
"objects": [{"name": "onion", "position": [1, 0], "state": None}],
"order_list": None,
}
state = OvercookedState.from_dict(state_dict)
def test_transitions_and_environment(self):
bad_state = OvercookedState([P((0, 0), s), P((3, 1), s)], {})
with self.assertRaises(AssertionError):
self.base_mdp.get_state_transition(bad_state, stay)
env = OvercookedEnv.from_mdp(self.base_mdp, info_level=0)
def check_transition(action, expected_path, recompute=False):
# Compute actual values
state = env.state
pred_state, _ = self.base_mdp.get_state_transition(state, action)
new_state, sparse_reward, _, _ = env.step(action)
self.assertEqual(
pred_state,
new_state,
"\n" + str(pred_state) + "\n" + str(new_state),
)
# Recompute expected values if desired
if recompute:
actual = {
"state": pred_state.to_dict(),
"reward": sparse_reward,
}
save_as_json(actual, expected_path)
# Compute expected values
expected = load_from_json(expected_path)
expected_state = OvercookedState.from_dict(expected["state"])
expected_reward = expected["reward"]
# Make sure everything lines up (note __eq__ is transitive)
self.assertTrue(
pred_state.time_independent_equal(expected_state),
"\n" + str(pred_state) + "\n" + str(expected_state),
)
self.assertEqual(sparse_reward, expected_reward)
expected_path = os.path.join(
TESTING_DATA_DIR,
"test_transitions_and_environments",
"expected.json",
)
# NOTE: set 'recompute=True' if deliberately updating state dynamics
check_transition([n, e], expected_path, recompute=False)
def test_mdp_dynamics(self):
traj_path = os.path.join(
TESTING_DATA_DIR, "test_mdp_dynamics", "expected.json"
)
# NOTE: uncomment the following line to recompute trajectories if MDP dynamics were deliberately updated
# generate_serialized_trajectory(self.base_mdp, traj_path)
test_trajectory = AgentEvaluator.load_traj_from_json(traj_path)
AgentEvaluator.check_trajectories(
test_trajectory, from_json=True, verbose=False
)
def test_mdp_old_cook_dynamics(self):
with self.assertRaises(AssertionError):
# shouldn't be able to create a game with recipes of less than 3 ingredients
OvercookedGridworld.from_layout_name(
layout_name="mdp_test", old_dynamics=True
)
old_mdp = OvercookedGridworld.from_layout_name(
layout_name="old_dynamics_cook_test", old_dynamics=True
)
new_mdp = OvercookedGridworld.from_layout_name(
layout_name="old_dynamics_cook_test", old_dynamics=False
)
# test interacting with a 1-ingredient soup starts cooking in the new dynamics
new_state_n, _ = new_mdp.get_state_transition(
new_mdp.start_state, [interact]
)
soup_new = new_state_n.get_object((2, 0))
self.assertTrue(soup_new.is_cooking)
# this should have no effects
new_state_o, _ = old_mdp.get_state_transition(
old_mdp.start_state, [interact]
)
soup_old = new_state_o.get_object((2, 0))
self.assertFalse(soup_old.is_cooking)
def test_mdp_old_put_dynamics(self):
old_mdp = OvercookedGridworld.from_layout_name(
layout_name="old_dynamics_put_test", old_dynamics=True
)
new_mdp = OvercookedGridworld.from_layout_name(
layout_name="old_dynamics_put_test", old_dynamics=False
)
# putting in the third ingredient will not start cooking in the new dynamics
new_state_n, _ = new_mdp.get_state_transition(
new_mdp.start_state, [interact]
)
soup_new = new_state_n.get_object((2, 0))
self.assertFalse(soup_new.is_cooking)
# cooking should start automatically in the old dynamics
new_state_o, _ = old_mdp.get_state_transition(
old_mdp.start_state, [interact]
)
soup_old = new_state_o.get_object((2, 0))
self.assertTrue(soup_old.is_cooking)
def test_mdp_serialization(self):
# Where to store serialized states -- will be overwritten each timestep
dummy_path = os.path.join(
TESTING_DATA_DIR, "test_mdp_serialization", "dummy.json"
)
# Get starting seed and random agent pair
seed = 47
random_pair = AgentPair(
RandomAgent(all_actions=True), RandomAgent(all_actions=True)
)
# Run rollouts with different seeds until sparse reward is achieved
sparse_reward = 0
while sparse_reward <= 0:
np.random.seed(seed)
state = self.base_mdp.get_standard_start_state()
for _ in range(1500):
# Ensure serialization and deserializations are inverses
reconstructed_state = OvercookedState.from_dict(
load_from_json(save_as_json(state.to_dict(), dummy_path))
)
self.assertEqual(
state,
reconstructed_state,
"\nState: \t\t\t{}\nReconstructed State: \t{}".format(
state, reconstructed_state
),
)
# Advance state
joint_action, _ = zip(*random_pair.joint_action(state))
state, infos = self.base_mdp.get_state_transition(
state, joint_action
)
sparse_reward += sum(infos["sparse_reward_by_agent"])
seed += 1
def test_four_player_mdp(self):
try:
OvercookedGridworld.from_layout_name("multiplayer_schelling")
except AssertionError as e:
print("Loading > 2 player map failed with error:", e)
def test_potential_function(self):
mp = MotionPlanner(self.base_mdp)
state = self.base_mdp.get_standard_start_state()
val0 = self.base_mdp.potential_function(state, mp)
# Pick up onion
if self.verbose:
print("pick up onion")
print(self.base_mdp.state_string(state))
print("potential: ", self.base_mdp.potential_function(state, mp))
actions = [Direction.EAST, Action.INTERACT]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [Action.STAY, action]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val1 = self.base_mdp.potential_function(state, mp)
# Pick up tomato
if self.verbose:
print("pick up tomtato")
actions = [Direction.WEST, Action.INTERACT]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val2 = self.base_mdp.potential_function(state, mp)
self.assertLess(
val0, val1, "Picking up onion should increase potential"
)
self.assertLess(
val1, val2, "Picking up tomato should increase potential"
)
# Pot tomato
if self.verbose:
print("pot tomato")
actions = [
Direction.EAST,
Direction.NORTH,
Action.INTERACT,
Direction.WEST,
]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val3 = self.base_mdp.potential_function(state, mp)
# Pot onion
if self.verbose:
print("pot onion")
actions = [Direction.WEST, Direction.NORTH, Action.INTERACT]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [Action.STAY, action]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val4 = self.base_mdp.potential_function(state, mp)
self.assertLess(val2, val3, "Potting tomato should increase potential")
self.assertLess(val3, val4, "Potting onion should increase potential")
## Repeat on second pot ##
# Pick up onion
if self.verbose:
print("pick up onion")
state, _ = self.base_mdp.get_state_transition(
state, [Action.INTERACT, Action.STAY]
)
val5 = self.base_mdp.potential_function(state, mp)
if self.verbose:
print(self.base_mdp.state_string(state))
print("potential: ", self.base_mdp.potential_function(state, mp))
# Pick up tomato
if self.verbose:
print("pick up tomato")
actions = [Direction.SOUTH, Direction.EAST, Action.INTERACT]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [Action.STAY, action]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val6 = self.base_mdp.potential_function(state, mp)
self.assertLess(
val4, val5, "Picking up onion should increase potential"
)
self.assertLess(
val5, val6, "Picking up tomato should increase potential"
)
# Pot onion
if self.verbose:
print("pot onion")
actions = [
Direction.SOUTH,
Direction.EAST,
Direction.SOUTH,
Action.INTERACT,
Direction.WEST,
]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val7 = self.base_mdp.potential_function(state, mp)
# Pot tomato
if self.verbose:
print("pot tomato")
actions = [Direction.WEST, Direction.SOUTH, Action.INTERACT]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [Action.STAY, action]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val8 = self.base_mdp.potential_function(state, mp)
self.assertLess(val6, val7, "Potting onion should increase potential")
self.assertLess(val7, val8, "Potting tomato should increase potential")
## Useless pickups ##
# pickup tomato
if self.verbose:
print("pickup tomato")
actions = [Action.INTERACT, Direction.NORTH]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val9 = self.base_mdp.potential_function(state, mp)
# pickup tomato
if self.verbose:
print("pickup tomato")
actions = [Direction.EAST, Action.INTERACT]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [Action.STAY, action]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val10 = self.base_mdp.potential_function(state, mp)
self.assertLessEqual(
val9, val8, "Extraneous pickup should not increase potential"
)
self.assertLessEqual(
val10, val8, "Extraneous pickup should not increase potential"
)
## Catastrophic soup failure ##
# pot tomato
if self.verbose:
print("pot catastrophic tomato")
actions = [Direction.WEST, Direction.SOUTH, Action.INTERACT]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [Action.STAY, action]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val11 = self.base_mdp.potential_function(state, mp)
self.assertLess(
val11, val10, "Catastrophic potting should decrease potential"
)
## Bonus soup creation
# pick up onion
if self.verbose:
print("pick up onion")
actions = [
Direction.NORTH,
Action.INTERACT,
Direction.WEST,
Action.INTERACT,
]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val12 = self.base_mdp.potential_function(state, mp)
# pot onion
if self.verbose:
print("pot onion")
actions = [Direction.EAST, Direction.NORTH, Action.INTERACT]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val13 = self.base_mdp.potential_function(state, mp)
# Cook soup
if self.verbose:
print("cook soup")
actions = [Action.INTERACT, Direction.WEST]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val14 = self.base_mdp.potential_function(state, mp)
self.assertLess(
val11, val12, "Useful onion pickup should increase potential"
)
self.assertLess(
val12, val13, "Potting useful onion should increase potential"
)
self.assertLess(
val13, val14, "Cooking optimal soup should increase potential"
)
## Soup pickup ##
# Pick up dish
if self.verbose:
print("pick up dish")
actions = [Direction.WEST, Direction.SOUTH, Action.INTERACT]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [Action.STAY, action]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val15 = self.base_mdp.potential_function(state, mp)
# Move towards pot
if self.verbose:
print("move towards pot")
actions = [Direction.EAST, Direction.NORTH]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [Action.STAY, action]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val16 = self.base_mdp.potential_function(state, mp)
# Pickup soup
if self.verbose:
print("pickup soup")
state, _ = self.base_mdp.get_state_transition(
state, [Action.STAY, Action.INTERACT]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print("potential: ", self.base_mdp.potential_function(state, mp))
val17 = self.base_mdp.potential_function(state, mp)
self.assertLess(
val14, val15, "Useful dish pickups should increase potential"
)
self.assertLess(
val15,
val16,
"Moving towards soup with dish should increase potential",
)
self.assertLess(
val16, val17, "Picking up soup should increase potential"
)
## Removing failed soup from pot
# move towards failed soup
if self.verbose:
print("move torwards failed soup")
actions = [Direction.SOUTH, Direction.EAST, Direction.SOUTH]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val18 = self.base_mdp.potential_function(state, mp)
# Cook failed soup
actions = [Action.INTERACT]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val19 = self.base_mdp.potential_function(state, mp)
# Pickup dish
if self.verbose:
print("pickup dish")
actions = [Direction.WEST, Direction.SOUTH, Action.INTERACT]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val20 = self.base_mdp.potential_function(state, mp)
# Move towards soup
if self.verbose:
print("move towards soup")
actions = [Direction.EAST, Direction.SOUTH]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val21 = self.base_mdp.potential_function(state, mp)
self.assertLess(
val17,
val18,
"Moving towards failed soup should increase potential",
)
self.assertLess(
val18, val19, "Cooking failed soup should increase potential"
)
self.assertLess(
val19, val20, "Dish pickup for failed soup is still useful"
)
self.assertLess(
val20,
val21,
"Moving towars pertinant pot with dish should increase potential",
)
## Deliver failed soup ##
# Pickup failed soup
actions = [Action.INTERACT]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val22 = self.base_mdp.potential_function(state, mp)
# Move towards serving area
if self.verbose:
print("move towards servering area")
actions = [Direction.EAST, Direction.SOUTH]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [action, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val23 = self.base_mdp.potential_function(state, mp)
# Move away from serving area
if self.verbose:
print("move away from serving area")
state, _ = self.base_mdp.get_state_transition(
state, [Direction.NORTH, Action.STAY]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print("potential: ", self.base_mdp.potential_function(state, mp))
val24 = self.base_mdp.potential_function(state, mp)
self.assertLess(
val21, val22, "Picking up failed soup should increase potential"
)
self.assertAlmostEqual(
val23,
val22,
delta=0.2,
msg="Moving to serve failed soup doesn't change potential much",
)
self.assertAlmostEqual(
val23,
val24,
delta=0.2,
msg="Moving away from serving area with failed soup doesn't change much",
)
## Deliver successful soup ##
# Move towards serving area
if self.verbose:
print("move towards serving area")
actions = [Direction.SOUTH, Direction.EAST, Direction.SOUTH]
for action in actions:
state, _ = self.base_mdp.get_state_transition(
state, [Action.STAY, action]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print(
"potential: ", self.base_mdp.potential_function(state, mp)
)
val25 = self.base_mdp.potential_function(state, mp)
# Deliver soup
if self.verbose:
print("deliver successful soup")
state, rewards = self.base_mdp.get_state_transition(
state, [Action.STAY, Action.INTERACT]
)
if self.verbose:
print(self.base_mdp.state_string(state))
print("potential: ", self.base_mdp.potential_function(state, mp))
self.assertLess(
val24,
val25,
"Moving towards serving area with valid soup increases potential",
)
self.assertEqual(
sum(rewards["sparse_reward_by_agent"]),
50,
"Soup was not properly devivered, probably an error with MDP logic",
)
def random_joint_action():
num_actions = len(Action.ALL_ACTIONS)
a_idx0, a_idx1 = np.random.randint(low=0, high=num_actions, size=2)
return (Action.INDEX_TO_ACTION[a_idx0], Action.INDEX_TO_ACTION[a_idx1])
class TestFeaturizations(unittest.TestCase):
def setUp(self):
self.base_mdp = OvercookedGridworld.from_layout_name("cramped_room")
self.mlam = MediumLevelActionManager.from_pickle_or_compute(
self.base_mdp, NO_COUNTERS_PARAMS, force_compute=True
)
self.env = OvercookedEnv.from_mdp(
self.base_mdp, **DEFAULT_ENV_PARAMS, info_level=0
)
self.greedy_human_model_pair = AgentPair(
GreedyHumanModel(self.mlam), GreedyHumanModel(self.mlam)
)
np.random.seed(0)
def test_lossless_state_featurization_shape(self):
s = self.base_mdp.get_standard_start_state()
obs = self.base_mdp.lossless_state_encoding(s)[0]
self.assertTrue(
np.array_equal(
obs.shape, self.base_mdp.get_lossless_state_encoding_shape()
),
"{} vs {}".format(
obs.shape, self.base_mdp.get_lossless_state_encoding_shape()
),
)
def test_state_featurization_shape(self):
s = self.base_mdp.get_standard_start_state()
for num_pots in range(3):
obs_0, obs_1 = self.base_mdp.featurize_state(
s, self.mlam, num_pots=num_pots
)
expected_shape = self.base_mdp.get_featurize_state_shape(
num_pots=num_pots
)
self.assertTrue(
np.array_equal(obs_0.shape, expected_shape),
"{} vs {}".format(obs_0.shape, expected_shape),
)
self.assertTrue(
np.array_equal(obs_1.shape, expected_shape),
"{} vs {}".format(obs_1.shape, expected_shape),
)
def test_lossless_state_featurization(self):
trajs = self.env.get_rollouts(
self.greedy_human_model_pair, num_games=5, info=False
)
featurized_observations = [
[
self.base_mdp.lossless_state_encoding(state)
for state in ep_states
]
for ep_states in trajs["ep_states"]
]
pickle_path = os.path.join(
TESTING_DATA_DIR, "test_lossless_state_featurization", "expected"
)
# NOTE: If the featurizations are updated intentionally, you can overwrite the expected
# featurizations by uncommenting the following line:
# save_pickle(featurized_observations, pickle_path)
expected_featurization = load_pickle(pickle_path)
self.assertTrue(
np.array_equal(expected_featurization, featurized_observations)
)
def test_state_featurization(self):
trajs = self.env.get_rollouts(
self.greedy_human_model_pair, num_games=5, info=False
)
for num_pots in range(3):
featurized_observations = [
[
self.base_mdp.featurize_state(
state, self.mlam, num_pots=num_pots
)
for state in ep_states
]
for ep_states in trajs["ep_states"]
]
pickle_path = os.path.join(
TESTING_DATA_DIR,
"test_state_featurization",
"expected_{}".format(num_pots),
)
# NOTE: If the featurizations are updated intentionally, you can overwrite the expected
# featurizations by uncommenting the following line:
# save_pickle(featurized_observations, pickle_path)
expected_featurization = load_pickle(pickle_path)
self.assertTrue(
np.array_equal(expected_featurization, featurized_observations)
)
def test_state_featurization_symmetry(self):
trajs = self.env.get_rollouts(
self.greedy_human_model_pair, num_games=5, info=False
)
states = [
state for ep_states in trajs["ep_states"] for state in ep_states
]
for state in states:
p0_obs, p1_obs = self.base_mdp.featurize_state(
state, self.mlam, num_pots=2
)
state = state.reverse_players()
p0_obs_swapped, p1_obs_swapped = self.base_mdp.featurize_state(
state, self.mlam, num_pots=2
)
state = state.reverse_players()
self.assertTrue(np.array_equal(p0_obs, p1_obs_swapped))
self.assertTrue(np.array_equal(p1_obs, p0_obs_swapped))
def test_lossless_state_featurization_symmetry(self):
trajs = self.env.get_rollouts(
self.greedy_human_model_pair, num_games=5, info=False
)
states = [
state for ep_states in trajs["ep_states"] for state in ep_states
]
for state in states:
p0_obs, p1_obs = self.base_mdp.lossless_state_encoding(state)
state = state.reverse_players()
(
p0_obs_swapped,
p1_obs_swapped,
) = self.base_mdp.lossless_state_encoding(state)
state = state.reverse_players()
self.assertTrue(np.array_equal(p0_obs, p1_obs_swapped))
self.assertTrue(np.array_equal(p1_obs, p0_obs_swapped))
class TestOvercookedEnvironment(unittest.TestCase):
dummy_dir = "overcooked_test_temp"
def setUp(self):
if not os.path.exists(self.dummy_dir):
os.makedirs(self.dummy_dir)
self.base_mdp = OvercookedGridworld.from_layout_name("cramped_room")
self.env = OvercookedEnv.from_mdp(
self.base_mdp, info_level=0, **DEFAULT_ENV_PARAMS
)
self.rnd_agent_pair = AgentPair(
FixedPlanAgent([stay, w, w]), FixedPlanAgent([stay, e, e])
)
np.random.seed(0)
def tearDown(self):
shutil.rmtree(self.dummy_dir)
def _assert_files_equal(self, file_1, file_2):
with open(file_1, "r") as f:
lines_1 = f.readlines()
with open(file_2, "r") as f:
lines_2 = f.readlines()
for line_1, line_2 in zip(lines_1, lines_2):
self.assertEqual(line_1, line_2)
def test_constructor(self):
try:
OvercookedEnv.from_mdp(self.base_mdp, horizon=10, info_level=0)
except Exception as e:
self.fail("Failed to instantiate OvercookedEnv:\n{}".format(e))
with self.assertRaises(TypeError):
OvercookedEnv.from_mdp(
self.base_mdp, **{"invalid_env_param": None}
)
def test_step_fn(self):
for _ in range(10):
joint_action = random_joint_action()
self.env.step(joint_action)
def test_execute_plan(self):
action_plan = [random_joint_action() for _ in range(10)]
self.env.execute_plan(
self.base_mdp.get_standard_start_state(), action_plan
)
def test_run_agents(self):
start_state = self.env.state
self.env.run_agents(self.rnd_agent_pair)
self.assertNotEqual(self.env.state, start_state)
def test_rollouts(self):
try:
self.env.get_rollouts(self.rnd_agent_pair, 3, info=False)
except Exception as e:
            print(e)
self.fail("Failed to get rollouts from environment:\n{}".format(e))
def test_one_player_env(self):
mdp = OvercookedGridworld.from_layout_name("cramped_room_single")
env = OvercookedEnv.from_mdp(mdp, horizon=12, info_level=0)
a0 = FixedPlanAgent([stay, w, w, e, e, n, e, interact, w, n, interact])
ag = AgentGroup(a0)
env.run_agents(ag, display=False)
self.assertEqual(env.state.players_pos_and_or, (((2, 1), (0, -1)),))
def test_four_player_env_fixed(self):
mdp = OvercookedGridworld.from_layout_name("multiplayer_schelling")
assert mdp.num_players == 4
env = OvercookedEnv.from_mdp(mdp, horizon=16, info_level=0)
a0 = FixedPlanAgent([stay, w, w])
a1 = FixedPlanAgent(
[
stay,
stay,
e,
e,
n,
n,
n,
e,
interact,
n,
n,
w,
w,
w,
n,
interact,
e,
]
)
a2 = FixedPlanAgent(
[stay, w, interact, n, n, e, e, e, n, e, n, interact, w]
)
a3 = FixedPlanAgent([e, interact, n, n, w, w, w, n, interact, e, s])
ag = AgentGroup(a0, a1, a2, a3)
env.run_agents(ag, display=False)
self.assertEqual(
env.state.players_pos_and_or,
(
((1, 1), (-1, 0)),
((3, 1), (0, -1)),
((2, 1), (-1, 0)),
((4, 2), (0, 1)),
),
)
def test_display(self):
mdp0 = OvercookedGridworld.from_layout_name("cramped_room")
mdp_fn = lambda _ignored: mdp0
env = OvercookedEnv(mdp_fn, horizon=20)
env.get_rollouts(
self.rnd_agent_pair,
1,
display=True,
info=False,
dir=self.dummy_dir,
)
expected_display_file = os.path.join(
TESTING_DATA_DIR, "test_display", "expected.txt"
)
actual_display_file = glob.glob(os.path.join(self.dummy_dir, "*.txt"))[
0
]
# If display intentionally updated, uncomment the line below to update expected values
        # shutil.copy(actual_display_file, expected_display_file)
self._assert_files_equal(expected_display_file, actual_display_file)
def test_display_phi(self):
mdp0 = OvercookedGridworld.from_layout_name("cramped_room")
mdp_fn = lambda _ignored: mdp0
env = OvercookedEnv(mdp_fn, horizon=20)
env.get_rollouts(
self.rnd_agent_pair,
1,
display=True,
display_phi=True,
info=False,
dir=self.dummy_dir,
)
expected_display_file = os.path.join(
TESTING_DATA_DIR, "test_display_phi", "expected.txt"
)
actual_display_file = glob.glob(os.path.join(self.dummy_dir, "*.txt"))[
0
]
# If display intentionally updated, uncomment the line below to update expected values
        # shutil.copy(actual_display_file, expected_display_file)
self._assert_files_equal(expected_display_file, actual_display_file)
def test_multiple_mdp_env(self):
mdp0 = OvercookedGridworld.from_layout_name("cramped_room")
mdp1 = OvercookedGridworld.from_layout_name("counter_circuit")
mdp_fn = lambda _ignored: np.random.choice([mdp0, mdp1])
# Default env
env = OvercookedEnv(mdp_fn, horizon=100)
env.get_rollouts(self.rnd_agent_pair, 5, info=False)
def test_starting_position_randomization(self):
self.base_mdp = OvercookedGridworld.from_layout_name("cramped_room")
start_state_fn = self.base_mdp.get_random_start_state_fn(
random_start_pos=True, rnd_obj_prob_thresh=0.0
)
env = OvercookedEnv.from_mdp(
self.base_mdp, start_state_fn, info_level=0
)
start_state = env.state.players_pos_and_or
for _ in range(3):
env.reset()
curr_terrain = env.state.players_pos_and_or
self.assertFalse(np.array_equal(start_state, curr_terrain))
def test_starting_obj_randomization(self):
self.base_mdp = OvercookedGridworld.from_layout_name("cramped_room")
start_state_fn = self.base_mdp.get_random_start_state_fn(
random_start_pos=False, rnd_obj_prob_thresh=0.8
)
env = OvercookedEnv.from_mdp(
self.base_mdp, start_state_fn, info_level=0
)
start_state = env.state.all_objects_list
for _ in range(3):
env.reset()
curr_terrain = env.state.all_objects_list
self.assertFalse(np.array_equal(start_state, curr_terrain))
def test_failing_rnd_layout(self):
with self.assertRaises(TypeError):
mdp_gen_params = {"None": None}
mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(**mdp_gen_params)
OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
def test_random_layout(self):
mdp_gen_params = {
"inner_shape": (5, 4),
"prop_empty": 0.8,
"prop_feats": 0.2,
"start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
"recipe_values": [20],
"recipe_times": [20],
"display": False,
}
mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(
mdp_gen_params, outer_shape=(5, 4)
)
env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
start_terrain = env.mdp.terrain_mtx
for _ in range(3):
env.reset()
curr_terrain = env.mdp.terrain_mtx
self.assertFalse(np.array_equal(start_terrain, curr_terrain))
mdp_gen_params = {"layout_name": "cramped_room"}
mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(mdp_gen_params)
env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
layouts_seen = []
for _ in range(5):
layouts_seen.append(env.mdp.terrain_mtx)
env.reset()
all_same_layout = all(
[
np.array_equal(env.mdp.terrain_mtx, terrain)
for terrain in layouts_seen
]
)
self.assertTrue(all_same_layout)
mdp_gen_params = {"layout_name": "asymmetric_advantages"}
mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(mdp_gen_params)
env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
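        # layouts_seen still holds the cramped_room terrains collected above, so
        # mixing in asymmetric_advantages layouts below should leave at least one
        # mismatch and make all_same_layout False.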
for _ in range(5):
layouts_seen.append(env.mdp.terrain_mtx)
env.reset()
all_same_layout = all(
[
np.array_equal(env.mdp.terrain_mtx, terrain)
for terrain in layouts_seen
]
)
self.assertFalse(all_same_layout)
def test_random_layout_feature_types(self):
mandatory_features = {POT, DISH_DISPENSER, SERVING_LOC}
optional_features = {ONION_DISPENSER, TOMATO_DISPENSER}
optional_features_combinations = [
{ONION_DISPENSER, TOMATO_DISPENSER},
{ONION_DISPENSER},
{TOMATO_DISPENSER},
]
for optional_features_combo in optional_features_combinations:
left_out_optional_features = (
optional_features - optional_features_combo
)
used_features = list(optional_features_combo | mandatory_features)
mdp_gen_params = {
"prop_feats": 0.9,
"feature_types": used_features,
"prop_empty": 0.1,
"inner_shape": (6, 5),
"display": False,
"start_all_orders": [
{"ingredients": ["onion", "onion", "onion"]}
],
}
mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(
mdp_gen_params, outer_shape=(6, 5)
)
env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
for _ in range(10):
env.reset()
curr_terrain = env.mdp.terrain_mtx
terrain_features = set.union(
*(set(line) for line in curr_terrain)
)
self.assertTrue(
all(elem in terrain_features for elem in used_features)
) # all used_features are actually used
if left_out_optional_features:
self.assertFalse(
any(
elem in terrain_features
for elem in left_out_optional_features
)
                    )  # none of the left-out optional_features should be used
def test_random_layout_generated_recipes(self):
only_onions_recipes = [
Recipe(["onion", "onion"]),
Recipe(["onion", "onion", "onion"]),
]
only_onions_dict_recipes = [r.to_dict() for r in only_onions_recipes]
# checking if recipes are generated from mdp_params
mdp_gen_params = {
"generate_all_orders": {
"n": 2,
"ingredients": ["onion"],
"min_size": 2,
"max_size": 3,
},
"prop_feats": 0.9,
"prop_empty": 0.1,
"inner_shape": (6, 5),
"display": False,
}
mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(
mdp_gen_params, outer_shape=(6, 5)
)
env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
for _ in range(10):
env.reset()
self.assertCountEqual(
env.mdp.start_all_orders, only_onions_dict_recipes
)
self.assertEqual(len(env.mdp.start_bonus_orders), 0)
# checking if bonus_orders is subset of all_orders even if not specified
mdp_gen_params = {
"generate_all_orders": {
"n": 2,
"ingredients": ["onion"],
"min_size": 2,
"max_size": 3,
},
"generate_bonus_orders": {"n": 1, "min_size": 2, "max_size": 3},
"prop_feats": 0.9,
"prop_empty": 0.1,
"inner_shape": (6, 5),
"display": False,
}
mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(
mdp_gen_params, outer_shape=(6, 5)
)
env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
for _ in range(10):
env.reset()
self.assertCountEqual(
env.mdp.start_all_orders, only_onions_dict_recipes
)
self.assertEqual(len(env.mdp.start_bonus_orders), 1)
self.assertTrue(
env.mdp.start_bonus_orders[0] in only_onions_dict_recipes
)
# checking if after reset there are new recipes generated
mdp_gen_params = {
"generate_all_orders": {"n": 3, "min_size": 2, "max_size": 3},
"prop_feats": 0.9,
"prop_empty": 0.1,
"inner_shape": (6, 5),
"display": False,
"feature_types": [
POT,
DISH_DISPENSER,
SERVING_LOC,
ONION_DISPENSER,
TOMATO_DISPENSER,
],
}
mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(
mdp_gen_params, outer_shape=(6, 5)
)
env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
generated_recipes_strings = set()
for _ in range(20):
env.reset()
generated_recipes_strings |= {
json.dumps(o, sort_keys=True) for o in env.mdp.start_all_orders
}
self.assertTrue(len(generated_recipes_strings) > 3)
class TestGymEnvironment(unittest.TestCase):
def setUp(self):
self.base_mdp = OvercookedGridworld.from_layout_name("cramped_room")
self.env = OvercookedEnv.from_mdp(self.base_mdp, **DEFAULT_ENV_PARAMS)
self.rnd_agent_pair = AgentPair(FixedPlanAgent([]), FixedPlanAgent([]))
np.random.seed(0)
def test_creation(self):
env = gym.make(
"Overcooked-v0",
base_env=self.env,
featurize_fn=self.env.featurize_state_mdp,
)
        # verify that the action_space and obs_space are initialized correctly
self.assertEqual(env.action_space, gym.spaces.Discrete(6))
self.assertEqual(
env.observation_space.shape,
self.base_mdp.get_featurize_state_shape(),
)
# TODO: write more tests here
class TestPettingZooEnvironment(unittest.TestCase):
def test_api(self):
from pettingzoo.test import parallel_api_test
from human_aware_rl.rllib.rllib import load_agent_pair
base_mdp = OvercookedGridworld.from_layout_name("cramped_room")
# get the current directory of the file
current_dir = os.path.dirname(os.path.realpath(__file__))
agent_dir = os.path.join(
current_dir,
"../src/overcooked_demo/server/static/assets/agents/RllibCrampedRoomSP/agent",
)
ap = load_agent_pair(agent_dir, "ppo", "ppo")
env = OvercookedEnv.from_mdp(base_mdp, info_level=0, horizon=1000)
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnvPettingZoo
wrapped_env = OvercookedEnvPettingZoo(env, ap)
parallel_api_test(wrapped_env, num_cycles=1000)
class TestTrajectories(unittest.TestCase):
def setUp(self):
self.base_mdp = OvercookedGridworld.from_layout_name("cramped_room")
self.mlam = MediumLevelActionManager.from_pickle_or_compute(
self.base_mdp, NO_COUNTERS_PARAMS, force_compute=True
)
self.env = OvercookedEnv.from_mdp(
self.base_mdp, **DEFAULT_ENV_PARAMS, info_level=0
)
self.greedy_human_model_pair = AgentPair(
GreedyHumanModel(self.mlam), GreedyHumanModel(self.mlam)
)
np.random.seed(0)
def test_append(self):
traj_one = self.env.get_rollouts(
self.greedy_human_model_pair, num_games=3, info=False
)
traj_two = self.env.get_rollouts(
self.greedy_human_model_pair, num_games=3, info=False
)
combined = append_trajectories(traj_one, traj_two)
# Ensure proper keys
self.assertEqual(set(combined.keys()), DEFAULT_TRAJ_KEYS)
# Ensure proper shapes
for key in TIMESTEP_TRAJ_KEYS:
shape_one = traj_one[key].shape
shape_two = traj_two[key].shape
shape_combined = combined[key].shape
self.assertEqual(shape_combined[0], shape_one[0] + shape_two[0])
self.assertEqual(shape_combined[1], shape_one[1])
self.assertEqual(shape_combined[1], shape_two[1])
for key in EPISODE_TRAJ_KEYS:
shape_one = traj_one[key].shape
shape_two = traj_two[key].shape
shape_combined = combined[key].shape
self.assertEqual(shape_combined[0], shape_one[0] + shape_two[0])
if __name__ == "__main__":
unittest.main()
| 63,066 | 34.391134 | 112 | py |
overcooked_ai | overcooked_ai-master/testing/utils.py | import numpy as np
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
# The point of this function is to generate serialized trajectories for MDP dynamics consistency testing
# NOTE: If intentionally updating MDP dynamics, this function should be used
def generate_serialized_trajectory(mdp, save_path):
# Saving trajectory for dynamics consistency test
seed = 0
sparse_reward = 0
while sparse_reward <= 0:
np.random.seed(seed)
ae = AgentEvaluator.from_mdp(mdp, env_params={"horizon": 1500})
test_trajs = ae.evaluate_random_pair(all_actions=True, num_games=1)
sparse_reward = np.mean(test_trajs["ep_returns"])
seed += 1
AgentEvaluator.save_traj_as_json(test_trajs, save_path)
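# Example usage (illustrative sketch, not called anywhere): how this helper would
# be used to regenerate the serialized trajectory after an intentional dynamics
# change. The layout name and output path below are assumptions for the example.
def example_regenerate_trajectory():
    from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld

    # Build the MDP whose dynamics changed, then overwrite the stored trajectory
    mdp = OvercookedGridworld.from_layout_name("cramped_room")
    generate_serialized_trajectory(mdp, "path/to/expected_traj.json")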
| 754 | 34.952381 | 104 | py |
overcooked_ai | overcooked_ai-master/testing/planners_test.py | import unittest
from overcooked_ai_py.agents.agent import AgentPair, GreedyHumanModel
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
from overcooked_ai_py.mdp.actions import Action, Direction
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.mdp.overcooked_mdp import (
ObjectState,
OvercookedGridworld,
OvercookedState,
PlayerState,
SoupState,
)
from overcooked_ai_py.planning.planners import MediumLevelActionManager
large_mdp_tests = False
force_compute = True
force_compute_large = False
n, s = Direction.NORTH, Direction.SOUTH
e, w = Direction.EAST, Direction.WEST
stay, interact = Action.STAY, Action.INTERACT
P, Obj = PlayerState, ObjectState
# Simple MDP Setup
simple_mdp = OvercookedGridworld.from_layout_name("simple_o")
base_params = {
"start_orientations": False,
"wait_allowed": False,
"counter_goals": simple_mdp.terrain_pos_dict["X"],
"counter_drop": simple_mdp.terrain_pos_dict["X"][1:2],
"counter_pickup": simple_mdp.terrain_pos_dict["X"],
"same_motion_goals": True,
}
action_manger_filename = "simple_1_am.pkl"
ml_action_manager_simple = MediumLevelActionManager.from_pickle_or_compute(
simple_mdp,
mlam_params=base_params,
custom_filename=action_manger_filename,
force_compute=force_compute,
)
ml_action_manager_simple.env = OvercookedEnv.from_mdp(simple_mdp)
base_params_start_or = {
"start_orientations": True,
"wait_allowed": False,
"counter_goals": simple_mdp.terrain_pos_dict["X"],
"counter_drop": [],
"counter_pickup": simple_mdp.terrain_pos_dict["X"],
"same_motion_goals": False,
}
action_manger_filename = "simple_2_am.pkl"
or_ml_action_manager_simple = MediumLevelActionManager.from_pickle_or_compute(
simple_mdp,
mlam_params=base_params_start_or,
custom_filename=action_manger_filename,
force_compute=force_compute,
)
if large_mdp_tests:
# Not testing by default
# Large MDP Setup
large_mdp = OvercookedGridworld.from_layout_name("corridor", cook_time=5)
no_counters_params = {
"start_orientations": False,
"wait_allowed": False,
"counter_goals": [],
"counter_drop": [],
"counter_pickup": [],
"same_motion_goals": False,
}
action_manger_filename = "corridor_no_shared_motion_goals_am.pkl"
ml_planner_large_no_shared = (
MediumLevelActionManager.from_pickle_or_compute(
large_mdp,
no_counters_params,
custom_filename=action_manger_filename,
force_compute=force_compute_large,
)
)
same_goals_params = {
"start_orientations": False,
"wait_allowed": False,
"counter_goals": [],
"counter_drop": [],
"counter_pickup": [],
"same_motion_goals": True,
}
action_manger_filename = "corridor_am.pkl"
ml_planner_large = MediumLevelActionManager.from_pickle_or_compute(
large_mdp,
same_goals_params,
custom_filename=action_manger_filename,
force_compute=force_compute_large,
)
# Deprecated.
# hlam = HighLevelActionManager(ml_planner_large)
# hlp = HighLevelPlanner(hlam)
def done_soup_obj(soup_loc, num_onion_inside=3):
return soup_obj(soup_loc, num_onion_inside, 20)
def idle_soup_obj(soup_loc, num_onion_inside):
return soup_obj(soup_loc, num_onion_inside, -1)
def cooking_soup_obj(soup_loc, num_onion_inside=3, cooking_tick=0):
assert cooking_tick >= 0
assert num_onion_inside >= 0
return soup_obj(soup_loc, num_onion_inside, cooking_tick)
def soup_obj(soup_loc, num_onion_inside, cooking_tick):
ingredient_obj_lst = [Obj("onion", soup_loc)] * num_onion_inside
return SoupState(soup_loc, ingredient_obj_lst, cooking_tick)
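# Illustrative sketch of the helpers above (not used by the tests): cooking_tick
# encodes soup status -- -1 for an idle pot, 20 for a finished soup, and any
# smaller non-negative tick for a soup still cooking. For example, a pot at
# (2, 0) holding three onions that has cooked for 5 ticks would be built as:
#
#   soup = cooking_soup_obj((2, 0), num_onion_inside=3, cooking_tick=5)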
class TestMotionPlanner(unittest.TestCase):
def test_gridworld_distance(self):
planner = ml_action_manager_simple.joint_motion_planner.motion_planner
start = ((2, 1), e)
end = ((1, 1), w)
dist = planner.get_gridworld_distance(start, end)
self.assertEqual(dist, 1)
start = ((2, 1), e)
end = ((1, 1), n)
dist = planner.get_gridworld_distance(start, end)
self.assertEqual(dist, 2)
start = (2, 1)
end = (1, 1)
dist = planner.get_gridworld_pos_distance(start, end)
self.assertEqual(dist, 1)
start = (1, 1)
end = (3, 2)
dist = planner.get_gridworld_pos_distance(start, end)
self.assertEqual(dist, 3)
def test_simple_mdp(self):
planner = ml_action_manager_simple.joint_motion_planner.motion_planner
self.simple_mdp_already_at_goal(planner)
self.simple_mdp_orientation_change(planner)
self.simple_mdp_basic_plan(planner)
self.simple_mdp_orientation_optimization_dependent_plans(planner)
def simple_mdp_already_at_goal(self, planner):
start_status = goal_status = ((1, 1), n)
self.check_single_motion_plan(
planner, start_status, goal_status, expected_length=1
)
def simple_mdp_orientation_change(self, planner):
start_status = ((1, 1), n)
goal_status = ((1, 1), w)
self.check_single_motion_plan(
planner, start_status, goal_status, expected_length=2
)
def simple_mdp_basic_plan(self, planner):
start_status = ((1, 1), n)
goal_status = ((3, 1), n)
self.check_single_motion_plan(
planner, start_status, goal_status, expected_length=4
)
def simple_mdp_orientation_optimization_dependent_plans(self, planner):
start_status = ((2, 1), n)
goal_status = ((1, 2), w)
self.check_single_motion_plan(
planner, start_status, goal_status, expected_length=3
)
goal_status = ((1, 2), s)
self.check_single_motion_plan(
planner, start_status, goal_status, expected_length=3
)
def test_larger_mdp(self):
if large_mdp_tests:
planner = (
ml_planner_large.ml_action_manager.joint_motion_planner.motion_planner
)
self.large_mdp_basic_plan(planner)
def large_mdp_basic_plan(self, planner):
start_status = ((1, 2), n)
goal_status = ((8, 1), n)
self.check_single_motion_plan(planner, start_status, goal_status)
def check_single_motion_plan(
self,
motion_planner,
start_pos_and_or,
goal_pos_and_or,
expected_length=None,
):
dummy_agent = P((3, 2), n)
start_state = OvercookedState(
[P(*start_pos_and_or), dummy_agent],
{},
all_orders=simple_mdp.start_all_orders,
)
action_plan, pos_and_or_plan, plan_cost = motion_planner.get_plan(
start_pos_and_or, goal_pos_and_or
)
# Checking that last state obtained matches goal position
self.assertEqual(pos_and_or_plan[-1], goal_pos_and_or)
# In single motion plans the graph cost should be equal to
# the plan cost (= plan length) as agents should never STAY
graph_plan_cost = sum(
[motion_planner._graph_action_cost(a) for a in action_plan]
)
self.assertEqual(plan_cost, graph_plan_cost)
joint_action_plan = [(a, stay) for a in action_plan]
env = OvercookedEnv.from_mdp(motion_planner.mdp, horizon=1000)
resulting_state, _ = env.execute_plan(start_state, joint_action_plan)
self.assertEqual(
resulting_state.players_pos_and_or[0], goal_pos_and_or
)
if expected_length is not None:
self.assertEqual(len(action_plan), expected_length)
class TestJointMotionPlanner(unittest.TestCase):
def test_same_start_and_end_pos_with_no_start_orientations(self):
jm_planner = ml_action_manager_simple.joint_motion_planner
start = (((1, 1), w), ((1, 2), s))
goal = (((1, 1), n), ((2, 1), n))
(
joint_action_plan,
end_jm_state,
            finishing_times,
        ) = jm_planner.get_low_level_action_plan(start, goal)
        optimal_plan = [(n, e), (interact, n)]
        self.assertEqual(joint_action_plan, optimal_plan)
        optimal_end_jm_state = (((1, 1), n), ((2, 1), n))
        self.assertEqual(end_jm_state, optimal_end_jm_state)
        optimal_finishing_times = (2, 3)
        self.assertEqual(finishing_times, optimal_finishing_times)
def test_with_start_orientations_simple_mdp(self):
jm_planner = or_ml_action_manager_simple.joint_motion_planner
self.simple_mdp_suite(jm_planner)
def test_without_start_orientations_simple_mdp(self):
jm_planner = ml_action_manager_simple.joint_motion_planner
self.simple_mdp_suite(jm_planner)
def simple_mdp_suite(self, jm_planner):
self.simple_mdp_already_at_goal(jm_planner)
self.simple_mdp_only_orientations_switch(jm_planner)
self.simple_mdp_one_at_goal(jm_planner)
self.simple_mdp_position_swap(jm_planner)
self.simple_mdp_one_at_goal_other_conflicting_path(jm_planner)
self.simple_mdp_test_final_orientation_optimization(jm_planner)
def simple_mdp_already_at_goal(self, planner):
a1_start = a1_goal = ((1, 1), n)
a2_start = a2_goal = ((2, 1), n)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, times=(1, 1), min_t=1)
a1_start = a1_goal = ((1, 1), w)
a2_start = a2_goal = ((1, 2), s)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, times=(1, 1), min_t=1)
def simple_mdp_only_orientations_switch(self, planner):
a1_start = ((1, 1), s)
a1_goal = ((1, 1), w)
a2_start = ((1, 2), s)
a2_goal = ((1, 2), w)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, times=(2, 2), min_t=2)
def simple_mdp_one_at_goal(self, planner):
a1_start = ((3, 2), s)
a1_goal = ((3, 2), s)
a2_start = ((2, 1), w)
a2_goal = ((1, 1), w)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, times=(1, 2))
def simple_mdp_position_swap(self, planner):
a1_start = ((1, 1), w)
a2_start = ((3, 2), s)
a1_goal = a2_start
a2_goal = a1_start
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal)
def simple_mdp_one_at_goal_other_conflicting_path(self, planner):
a1_start = ((1, 1), w)
a1_goal = ((3, 1), e)
a2_start = a2_goal = ((2, 1), n)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, min_t=1)
def simple_mdp_test_final_orientation_optimization(self, planner):
a1_start = ((2, 1), n)
a1_goal = ((1, 2), w)
a2_start = a2_goal = ((3, 2), s)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
# NOTE: Not considering all plans with same cost yet, this won't work
# check_joint_plan(planner, mdp, start, goal, times=(3, 1))
a1_goal = ((1, 2), s)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, times=(3, 1))
def test_large_mdp_suite_shared_motion_goals(self):
if large_mdp_tests:
jmp = ml_planner_large.ml_action_manager.joint_motion_planner
self.large_mdp_test_basic_plan(jmp)
self.large_mdp_test_shared_motion_goal(jmp)
self.large_mdp_test_shared_motion_goal_with_conflict(jmp)
self.large_mdp_test_shared_motion_goal_with_conflict_other(jmp)
def large_mdp_test_basic_plan(self, planner):
a1_start = ((5, 1), n)
a2_start = ((8, 1), n)
a1_goal = a2_start
a2_goal = a1_start
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal)
def large_mdp_test_shared_motion_goal(self, planner):
a1_start = ((4, 1), n)
a2_start = ((1, 1), n)
a1_goal = ((5, 1), n)
a2_goal = ((5, 1), n)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, min_t=3)
def large_mdp_test_shared_motion_goal_with_conflict(self, planner):
assert planner.same_motion_goals
# When paths conflict for same goal, will resolve by making
# one agent wait (the one that results in the shortest plan)
a1_start = ((5, 2), n)
a2_start = ((4, 1), n)
a1_goal = ((5, 1), n)
a2_goal = ((5, 1), n)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, min_t=2)
def large_mdp_test_shared_motion_goal_with_conflict_other(self, planner):
assert planner.same_motion_goals
a1_start = ((4, 2), e)
a2_start = ((4, 1), e)
a1_goal = ((5, 1), n)
a2_goal = ((5, 1), n)
start = (a1_start, a2_start)
goal = (a1_goal, a2_goal)
self.check_joint_plan(planner, start, goal, min_t=3)
def check_joint_plan(
self,
joint_motion_planner,
start,
goal,
times=None,
min_t=None,
display=False,
):
"""Runs the plan in the environment and checks that the intended goals are achieved."""
debug = False
(
action_plan,
end_pos_and_orients,
plan_lengths,
) = joint_motion_planner.get_low_level_action_plan(start, goal)
if debug:
print(
"Start state: {}, Goal state: {}, Action plan: {}".format(
start, goal, action_plan
)
)
start_state = OvercookedState(
[P(*start[0]), P(*start[1])],
{},
all_orders=simple_mdp.start_all_orders,
)
env = OvercookedEnv.from_mdp(joint_motion_planner.mdp, horizon=1000)
resulting_state, _ = env.execute_plan(
start_state, action_plan, display=display
)
self.assertTrue(
any(
[
agent_goal in resulting_state.players_pos_and_or
for agent_goal in goal
]
)
)
self.assertEqual(
resulting_state.players_pos_and_or, end_pos_and_orients
)
self.assertEqual(len(action_plan), min(plan_lengths))
if min_t is not None:
self.assertEqual(len(action_plan), min_t)
if times is not None:
self.assertEqual(plan_lengths, times)
# Rewritten because the previous test depended on Heuristic, and Heuristic has been deprecated
class TestMediumLevelActionManagerSimple(unittest.TestCase):
def test_simple_mdp_without_start_orientations(self):
print("Simple - no start orientations (& shared motion goals)")
mlam = ml_action_manager_simple
self.simple_mpd_empty_hands(mlam)
self.simple_mdp_deliver_soup(mlam)
self.simple_mdp_pickup_counter_soup(mlam)
self.simple_mdp_pickup_counter_dish(mlam)
self.simple_mdp_pickup_counter_onion(mlam)
self.simple_mdp_drop_useless_dish_with_soup_idle(mlam)
self.simple_mdp_pickup_soup(mlam)
self.simple_mdp_pickup_dish(mlam)
self.simple_mdp_start_good_soup_cooking(mlam)
self.simple_mdp_start_bad_soup_cooking(mlam)
self.simple_mdp_start_1_onion_soup_cooking(mlam)
self.simple_mdp_drop_useless_onion_good_soup(mlam)
self.simple_mdp_drop_useless_onion_bad_soup(mlam)
self.simple_mdp_add_3rd_onion(mlam)
self.simple_mdp_add_2nd_onion(mlam)
self.simple_mdp_drop_useless_dish(mlam)
def test_simple_mdp_with_start_orientations(self):
print("Simple - with start orientations (no shared motion goals)")
mlam = or_ml_action_manager_simple
self.simple_mpd_empty_hands(mlam, counter_drop_forbidden=True)
self.simple_mdp_deliver_soup(mlam, counter_drop_forbidden=True)
self.simple_mdp_pickup_counter_soup(mlam, counter_drop_forbidden=True)
self.simple_mdp_pickup_counter_dish(mlam, counter_drop_forbidden=True)
self.simple_mdp_pickup_counter_onion(mlam, counter_drop_forbidden=True)
self.simple_mdp_drop_useless_dish_with_soup_idle(
mlam, counter_drop_forbidden=True
)
self.simple_mdp_pickup_soup(mlam, counter_drop_forbidden=True)
self.simple_mdp_pickup_dish(mlam, counter_drop_forbidden=True)
self.simple_mdp_start_good_soup_cooking(
mlam, counter_drop_forbidden=True
)
self.simple_mdp_start_bad_soup_cooking(
mlam, counter_drop_forbidden=True
)
self.simple_mdp_start_1_onion_soup_cooking(
mlam, counter_drop_forbidden=True
)
self.simple_mdp_drop_useless_onion_good_soup(
mlam, counter_drop_forbidden=True
)
self.simple_mdp_drop_useless_onion_bad_soup(
mlam, counter_drop_forbidden=True
)
self.simple_mdp_add_3rd_onion(mlam, counter_drop_forbidden=True)
self.simple_mdp_add_2nd_onion(mlam, counter_drop_forbidden=True)
self.simple_mdp_drop_useless_dish(mlam, counter_drop_forbidden=True)
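    # The goals below are (position, orientation) pairs on the simple_o layout,
    # shared by the medium-level action checks in this class.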
ONION_PICKUP = ((3, 2), (1, 0))
DISH_PICKUP = ((2, 2), (0, 1))
COUNTER_DROP = ((1, 1), (0, -1))
COUNTER_PICKUP = ((1, 2), (-1, 0))
    POT_INTERACT = ((2, 1), (0, -1))
SOUP_DELIVER = ((3, 2), (0, 1))
def simple_mpd_empty_hands(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n)],
{},
all_orders=simple_mdp.start_all_orders,
)
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.ONION_PICKUP, self.DISH_PICKUP],
)
def simple_mdp_deliver_soup(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n, done_soup_obj((2, 1)))],
{},
all_orders=simple_mdp.start_all_orders,
)
if counter_drop_forbidden:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.SOUP_DELIVER],
)
else:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.COUNTER_DROP, self.SOUP_DELIVER],
)
def simple_mdp_pickup_counter_soup(
self, planner, counter_drop_forbidden=False
):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n)],
{(0, 2): done_soup_obj((0, 2))},
all_orders=simple_mdp.start_all_orders,
)
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP],
[self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP],
)
def simple_mdp_pickup_counter_dish(
self, planner, counter_drop_forbidden=False
):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n)],
{(0, 2): Obj("dish", (0, 2))},
all_orders=simple_mdp.start_all_orders,
)
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP],
[self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP],
)
def simple_mdp_pickup_counter_onion(
self, planner, counter_drop_forbidden=False
):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n)],
{(0, 2): Obj("onion", (0, 2))},
all_orders=simple_mdp.start_all_orders,
)
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP],
[self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP],
)
def simple_mdp_drop_useless_dish_with_soup_idle(
self, planner, counter_drop_forbidden=False
):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n, Obj("dish", (2, 1)))],
{(2, 0): idle_soup_obj((2, 0), 3)},
all_orders=simple_mdp.start_all_orders,
)
if counter_drop_forbidden:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[],
)
else:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.COUNTER_DROP],
)
def simple_mdp_pickup_soup(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n, Obj("dish", (2, 1)))],
{(2, 0): done_soup_obj((2, 0))},
all_orders=simple_mdp.start_all_orders,
)
if counter_drop_forbidden:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.POT_INTERACT],
)
else:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.COUNTER_DROP, self.POT_INTERACT],
)
def simple_mdp_pickup_dish(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n)],
{(2, 0): done_soup_obj((2, 0))},
all_orders=simple_mdp.start_all_orders,
)
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.ONION_PICKUP, self.DISH_PICKUP],
)
def simple_mdp_start_good_soup_cooking(
self, planner, counter_drop_forbidden=False
):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n)],
{(2, 0): idle_soup_obj((2, 0), 3)},
all_orders=simple_mdp.start_all_orders,
)
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
)
def simple_mdp_start_bad_soup_cooking(
self, planner, counter_drop_forbidden=False
):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n)],
{(2, 0): idle_soup_obj((2, 0), 2)},
all_orders=simple_mdp.start_all_orders,
)
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
)
def simple_mdp_start_1_onion_soup_cooking(
self, planner, counter_drop_forbidden=False
):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n)],
{(2, 0): idle_soup_obj((2, 0), 1)},
all_orders=simple_mdp.start_all_orders,
)
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
)
def simple_mdp_drop_useless_onion_good_soup(
self, planner, counter_drop_forbidden=False
):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n, Obj("onion", (2, 1)))],
{(2, 0): done_soup_obj((2, 0))},
all_orders=simple_mdp.start_all_orders,
)
if counter_drop_forbidden:
self.check_ml_action_manager(
s, planner, [self.ONION_PICKUP, self.DISH_PICKUP], []
)
else:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.COUNTER_DROP],
)
def simple_mdp_drop_useless_onion_bad_soup(
self, planner, counter_drop_forbidden=False
):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n, Obj("onion", (2, 1)))],
{(2, 0): done_soup_obj((2, 0), 2)},
all_orders=simple_mdp.start_all_orders,
)
if counter_drop_forbidden:
self.check_ml_action_manager(
s, planner, [self.ONION_PICKUP, self.DISH_PICKUP], []
)
else:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP],
[self.COUNTER_DROP],
)
def simple_mdp_add_3rd_onion(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n, Obj("onion", (2, 1)))],
{(2, 0): idle_soup_obj((2, 0), 2)},
all_orders=simple_mdp.start_all_orders,
)
if counter_drop_forbidden:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.POT_INTERACT],
)
else:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.COUNTER_DROP, self.POT_INTERACT],
)
def simple_mdp_add_2nd_onion(self, planner, counter_drop_forbidden=False):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n, Obj("onion", (2, 1)))],
{(2, 0): idle_soup_obj((2, 0), 1)},
all_orders=simple_mdp.start_all_orders,
)
if counter_drop_forbidden:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.POT_INTERACT],
)
else:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.COUNTER_DROP, self.POT_INTERACT],
)
def simple_mdp_drop_useless_dish(
self, planner, counter_drop_forbidden=False
):
s = OvercookedState(
[P((2, 2), n), P((2, 1), n, Obj("dish", (2, 1)))],
{(2, 0): idle_soup_obj((2, 0), 1)},
all_orders=simple_mdp.start_all_orders,
)
if counter_drop_forbidden:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.POT_INTERACT],
)
else:
self.check_ml_action_manager(
s,
planner,
[self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT],
[self.COUNTER_DROP, self.POT_INTERACT],
)
def check_ml_action_manager(
self, state, am, expected_mla_0, expected_mla_1, debug=False
):
"""
args:
state (OvercookedState): an overcooked state
            am (MediumLevelActionManager): the action manager to be tested
This function checks if all the mid-level actions make sense for each player state inside STATE
"""
player_0, player_1 = state.players
mla_0 = am.get_medium_level_actions(state, player_0)
mla_1 = am.get_medium_level_actions(state, player_1)
if debug:
print("Player 0 mla", mla_0)
print("Player 1 mla", mla_1)
print(am.mdp.state_string(state))
self.assertEqual(
set(mla_0),
set(expected_mla_0),
"player 0's ml_action should be "
+ str(expected_mla_0)
+ " but get "
+ str(mla_0),
)
self.assertEqual(
set(mla_1),
set(expected_mla_1),
"player 0's ml_action should be "
+ str(expected_mla_1)
+ " but get "
+ str(mla_1),
)
class TestScenarios(unittest.TestCase):
def repetative_runs(self, evaluator, num_games=10):
trajectory_0 = evaluator.evaluate_human_model_pair(
num_games=num_games, native_eval=True
)
trajectory_1 = evaluator.evaluate_human_model_pair(
num_games=num_games, native_eval=True
)
h0 = GreedyHumanModel(evaluator.env.mlam)
h1 = GreedyHumanModel(evaluator.env.mlam)
ap_hh_2 = AgentPair(h0, h1)
trajectory_2 = evaluator.evaluate_agent_pair(
agent_pair=ap_hh_2, num_games=num_games, native_eval=True
)
h3 = GreedyHumanModel(evaluator.env.mlam)
h4 = GreedyHumanModel(evaluator.env.mlam)
ap_hh_3 = AgentPair(h3, h4)
trajectory_3 = evaluator.evaluate_agent_pair(
agent_pair=ap_hh_3, num_games=num_games, native_eval=True
)
def test_scenario_3_no_counter(self):
# Asymmetric advantage scenario
#
# X X X X X O X X X X
# S X X P X
# X ↑H X
# D X X X X!X X X
# X →R O
# X X X X X X X X X X
#
        # This test does not allow counters, since it relies on the default NO_COUNTERS_PARAMS when calling from_layout_name
mdp_params = {"layout_name": "scenario3"}
mdp = OvercookedGridworld.from_layout_name(**mdp_params)
start_state = mdp.get_standard_start_state()
env_params = {"start_state_fn": lambda: start_state, "horizon": 1000}
eva = AgentEvaluator.from_layout_name(
mdp_params, env_params, force_compute=force_compute
)
self.repetative_runs(eva)
def test_scenario_3_yes_counter(self):
# Asymmetric advantage scenario
#
# X X X X X O X X X X
# S X X P X
# X ↑H X
# D X X X X!X X X
# X →R O
# X X X X X X X X X X
#
        # This test allows only (5, 3) as a valid counter (for counter goals and drops)
mdp_params = {"layout_name": "scenario3"}
mdp = OvercookedGridworld.from_layout_name(**mdp_params)
start_state = mdp.get_standard_start_state()
valid_counters = [(5, 3)]
one_counter_params = {
"start_orientations": False,
"wait_allowed": False,
"counter_goals": valid_counters,
"counter_drop": valid_counters,
"counter_pickup": [],
"same_motion_goals": True,
}
env_params = {"start_state_fn": lambda: start_state, "horizon": 1000}
eva = AgentEvaluator.from_layout_name(
mdp_params,
env_params,
mlam_params=one_counter_params,
force_compute=force_compute,
)
self.repetative_runs(eva)
# # Deprecated. because of Heuristic
# class TestHighLevelPlanner(unittest.TestCase):
# """The HighLevelPlanner class has been mostly discontinued"""
#
# def test_basic_hl_planning(self):
# if large_mdp_tests:
# s = OvercookedState(
# [P((2, 2), n),
# P((2, 1), n)],
# {}, order_list=[])
# h = Heuristic(hlp.mp)
# hlp.get_hl_plan(s, h.simple_heuristic)
#
# s = OvercookedState(
# [P((2, 2), n),
# P((2, 1), n)],
# {}, order_list=['any', 'any', 'any'])
#
# hlp.get_low_level_action_plan(s, h.simple_heuristic)
# # hlp.get_low_level_action_plan(s, h.hard_heuristic)
#
# # heuristic = Heuristic(ml_planner_large.mp)
# # ml_planner_large.get_low_level_action_plan(s, heuristic.simple_heuristic)
# # ml_planner_large.get_low_level_action_plan(s, heuristic.hard_heuristic)
if __name__ == "__main__":
unittest.main()
| 32,663 | 34.084855 | 111 | py |
overcooked_ai | overcooked_ai-master/testing/agent_test.py | import unittest
import numpy as np
from overcooked_ai_py.agents.agent import (
AgentPair,
FixedPlanAgent,
GreedyHumanModel,
RandomAgent,
SampleAgent,
)
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
from overcooked_ai_py.mdp.actions import Action, Direction
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.mdp.overcooked_mdp import (
ObjectState,
OvercookedGridworld,
OvercookedState,
PlayerState,
)
from overcooked_ai_py.planning.planners import (
NO_COUNTERS_PARAMS,
MediumLevelActionManager,
)
np.random.seed(42)
n, s = Direction.NORTH, Direction.SOUTH
e, w = Direction.EAST, Direction.WEST
stay, interact = Action.STAY, Action.INTERACT
P, Obj = PlayerState, ObjectState
force_compute_large = False
force_compute = True
DISPLAY = False
simple_mdp = OvercookedGridworld.from_layout_name("cramped_room")
large_mdp = OvercookedGridworld.from_layout_name("corridor")
class TestAgentEvaluator(unittest.TestCase):
def setUp(self):
self.agent_eval = AgentEvaluator.from_layout_name(
{"layout_name": "cramped_room"}, {"horizon": 100}
)
def test_human_model_pair(self):
trajs = self.agent_eval.evaluate_human_model_pair()
try:
AgentEvaluator.check_trajectories(trajs, verbose=False)
except AssertionError as e:
self.fail(
"Trajectories were not returned in standard format:\n{}".format(
e
)
)
def test_rollouts(self):
ap = AgentPair(RandomAgent(), RandomAgent())
trajs = self.agent_eval.evaluate_agent_pair(ap, num_games=5)
try:
AgentEvaluator.check_trajectories(trajs, verbose=False)
except AssertionError as e:
self.fail(
"Trajectories were not returned in standard format:\n{}".format(
e
)
)
def test_mlam_computation(self):
try:
self.agent_eval.env.mlam
except Exception as e:
self.fail(
"Failed to compute MediumLevelActionManager:\n{}".format(e)
)
class TestBasicAgents(unittest.TestCase):
def setUp(self):
self.mlam_large = MediumLevelActionManager.from_pickle_or_compute(
large_mdp, NO_COUNTERS_PARAMS, force_compute=force_compute_large
)
def test_fixed_plan_agents(self):
a0 = FixedPlanAgent([s, e, n, w])
a1 = FixedPlanAgent([s, w, n, e])
agent_pair = AgentPair(a0, a1)
env = OvercookedEnv.from_mdp(large_mdp, horizon=10)
trajectory, time_taken, _, _ = env.run_agents(
agent_pair, include_final_state=True, display=DISPLAY
)
end_state = trajectory[-1][0]
self.assertEqual(time_taken, 10)
self.assertEqual(
env.mdp.get_standard_start_state().player_positions,
end_state.player_positions,
)
def test_two_greedy_human_open_map(self):
scenario_2_mdp = OvercookedGridworld.from_layout_name("scenario2")
mlam = MediumLevelActionManager.from_pickle_or_compute(
scenario_2_mdp, NO_COUNTERS_PARAMS, force_compute=force_compute
)
a0 = GreedyHumanModel(mlam)
a1 = GreedyHumanModel(mlam)
agent_pair = AgentPair(a0, a1)
start_state = OvercookedState(
[P((8, 1), s), P((1, 1), s)],
{},
all_orders=scenario_2_mdp.start_all_orders,
)
env = OvercookedEnv.from_mdp(
scenario_2_mdp, start_state_fn=lambda: start_state, horizon=100
)
trajectory, time_taken, _, _ = env.run_agents(
agent_pair, include_final_state=True, display=DISPLAY
)
def test_sample_agent(self):
agent = SampleAgent(
[RandomAgent(all_actions=False), RandomAgent(all_actions=True)]
)
probs = agent.action(None)[1]["action_probs"]
expected_probs = np.array(
[
0.18333333,
0.18333333,
0.18333333,
0.18333333,
0.18333333,
0.08333333,
]
)
self.assertTrue(np.allclose(probs, expected_probs))
class TestAgentEvaluatorStatic(unittest.TestCase):
layout_name_lst = [
"asymmetric_advantages",
"asymmetric_advantages_tomato",
"bonus_order_test",
"bottleneck",
"centre_objects",
"centre_pots",
"corridor",
"forced_coordination_tomato",
"unident",
"marshmallow_experiment",
"marshmallow_experiment_coordination",
"you_shall_not_pass",
]
def test_from_mdp(self):
for layout_name in self.layout_name_lst:
orignal_mdp = OvercookedGridworld.from_layout_name(layout_name)
ae = AgentEvaluator.from_mdp(
mdp=orignal_mdp, env_params={"horizon": 400}
)
ae_mdp = ae.env.mdp
self.assertEqual(
orignal_mdp,
ae_mdp,
"mdp with name "
+ layout_name
+ " experienced an inconsistency",
)
def test_from_mdp_params_layout(self):
for layout_name in self.layout_name_lst:
orignal_mdp = OvercookedGridworld.from_layout_name(layout_name)
ae = AgentEvaluator.from_layout_name(
mdp_params={"layout_name": layout_name},
env_params={"horizon": 400},
)
ae_mdp = ae.env.mdp
self.assertEqual(
orignal_mdp,
ae_mdp,
"mdp with name "
+ layout_name
+ " experienced an inconsistency",
)
mdp_gen_params_1 = {
"inner_shape": (10, 7),
"prop_empty": 0.95,
"prop_feats": 0.1,
"start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
"display": False,
}
mdp_gen_params_2 = {
"inner_shape": (10, 7),
"prop_empty": 0.7,
"prop_feats": 0.5,
"start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
"display": False,
}
mdp_gen_params_3 = {
"inner_shape": (10, 7),
"prop_empty": 0.5,
"prop_feats": 0.4,
"start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
"display": False,
}
mdp_gen_params_lst = [mdp_gen_params_1, mdp_gen_params_2, mdp_gen_params_3]
outer_shape = (10, 7)
def test_from_mdp_params_variable_across(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae0 = AgentEvaluator.from_mdp_params_infinite(
mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape,
)
ae1 = AgentEvaluator.from_mdp_params_infinite(
mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape,
)
self.assertFalse(
ae0.env.mdp == ae1.env.mdp,
"2 randomly generated layouts across 2 evaluators are the same, which is wrong",
)
def test_from_mdp_params_variable_infinite(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae = AgentEvaluator.from_mdp_params_infinite(
mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape,
)
mdp_0 = ae.env.mdp.copy()
for _ in range(5):
ae.env.reset(regen_mdp=True)
mdp_1 = ae.env.mdp
self.assertFalse(
mdp_0 == mdp_1,
"with infinite layout generator and regen_mdp=True, the 2 layouts should not be the same",
)
def test_from_mdp_params_variable_infinite_no_regen(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae = AgentEvaluator.from_mdp_params_infinite(
mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape,
)
mdp_0 = ae.env.mdp.copy()
for _ in range(5):
ae.env.reset(regen_mdp=False)
mdp_1 = ae.env.mdp
self.assertTrue(
mdp_0 == mdp_1,
"with infinite layout generator and regen_mdp=False, the 2 layouts should be the same",
)
def test_from_mdp_params_variable_infinite_specified(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae = AgentEvaluator.from_mdp_params_infinite(
mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape,
)
mdp_0 = ae.env.mdp.copy()
for _ in range(5):
ae.env.reset(regen_mdp=True)
mdp_1 = ae.env.mdp
self.assertFalse(
mdp_0 == mdp_1,
"with infinite layout generator and regen_mdp=True, the 2 layouts should not be the same",
)
def test_from_mdp_params_variable_finite(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae = AgentEvaluator.from_mdp_params_finite(
mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": 2},
outer_shape=self.outer_shape,
)
mdp_0 = ae.env.mdp.copy()
seen = [mdp_0]
for _ in range(20):
ae.env.reset(regen_mdp=True)
mdp_i = ae.env.mdp
if len(seen) == 1:
if mdp_i != seen[0]:
seen.append(mdp_i.copy())
elif len(seen) == 2:
mdp_0, mdp_1 = seen
self.assertTrue(
(mdp_i == mdp_0 or mdp_i == mdp_1),
"more than 2 mdp was created, the function failed to perform",
)
else:
self.assertTrue(
False, "theoretically unreachable statement"
)
layout_name_short_lst = [
"cramped_room",
"cramped_room_tomato",
"simple_o",
"simple_tomato",
"simple_o_t",
]
biased = [0.1, 0.15, 0.2, 0.25, 0.3]
num_reset = 200000
def test_from_mdp_lst_default(self):
mdp_lst = [
OvercookedGridworld.from_layout_name(name)
for name in self.layout_name_short_lst
]
ae = AgentEvaluator.from_mdp_lst(
mdp_lst=mdp_lst, env_params={"horizon": 400}
)
counts = {}
for _ in range(self.num_reset):
ae.env.reset(regen_mdp=True)
if ae.env.mdp.layout_name in counts:
counts[ae.env.mdp.layout_name] += 1
else:
counts[ae.env.mdp.layout_name] = 1
for k, v in counts.items():
self.assertAlmostEqual(
0.2, v / self.num_reset, 2, "more than 2 places off for " + k
)
def test_from_mdp_lst_uniform(self):
mdp_lst = [
OvercookedGridworld.from_layout_name(name)
for name in self.layout_name_short_lst
]
ae = AgentEvaluator.from_mdp_lst(
mdp_lst=mdp_lst,
env_params={"horizon": 400},
sampling_freq=[0.2, 0.2, 0.2, 0.2, 0.2],
)
counts = {}
for _ in range(self.num_reset):
ae.env.reset(regen_mdp=True)
if ae.env.mdp.layout_name in counts:
counts[ae.env.mdp.layout_name] += 1
else:
counts[ae.env.mdp.layout_name] = 1
for k, v in counts.items():
self.assertAlmostEqual(
0.2, v / self.num_reset, 2, "more than 2 places off for " + k
)
def test_from_mdp_lst_biased(self):
mdp_lst = [
OvercookedGridworld.from_layout_name(name)
for name in self.layout_name_short_lst
]
ae = AgentEvaluator.from_mdp_lst(
mdp_lst=mdp_lst,
env_params={"horizon": 400},
sampling_freq=self.biased,
)
counts = {}
for _ in range(self.num_reset):
ae.env.reset(regen_mdp=True)
if ae.env.mdp.layout_name in counts:
counts[ae.env.mdp.layout_name] += 1
else:
counts[ae.env.mdp.layout_name] = 1
# construct the ground truth
gt = {
self.layout_name_short_lst[i]: self.biased[i]
for i in range(len(self.layout_name_short_lst))
}
for k, v in counts.items():
self.assertAlmostEqual(
gt[k], v / self.num_reset, 2, "more than 2 places off for " + k
)
if __name__ == "__main__":
unittest.main()
| 13,226 | 32.656489 | 110 | py |
overcooked_ai | overcooked_ai-master/testing/mdp_gen_schedule_test.py | import unittest
import numpy as np
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
from overcooked_ai_py.mdp.actions import Action, Direction
from overcooked_ai_py.mdp.overcooked_mdp import (
ObjectState,
OvercookedGridworld,
PlayerState,
)
np.random.seed(42)
n, s = Direction.NORTH, Direction.SOUTH
e, w = Direction.EAST, Direction.WEST
stay, interact = Action.STAY, Action.INTERACT
P, Obj = PlayerState, ObjectState
force_compute_large = False
force_compute = True
DISPLAY = False
simple_mdp = OvercookedGridworld.from_layout_name("cramped_room")
large_mdp = OvercookedGridworld.from_layout_name("corridor")
def params_schedule_fn_constant_09_01(outside_information):
"""
In this preliminary version, the outside information is ignored
"""
mdp_default_gen_params = {
"inner_shape": (7, 5),
"prop_empty": 0.9,
"prop_feats": 0.1,
"start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
"display": False,
"rew_shaping_params": None,
}
return mdp_default_gen_params
def params_schedule_fn_constant_07_03(outside_info):
"""
In this preliminary version, the outside information is ignored
"""
mdp_default_gen_params = {
"inner_shape": (7, 5),
"prop_empty": 0.7,
"prop_feats": 0.3,
"start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
"display": False,
"rew_shaping_params": None,
}
return mdp_default_gen_params
def params_schedule_fn_constant_05_05(outside_info):
"""
In this preliminary version, the outside information is ignored
"""
mdp_default_gen_params = {
"inner_shape": (7, 5),
"prop_empty": 0.5,
"prop_feats": 0.5,
"start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
"display": False,
"rew_shaping_params": None,
}
return mdp_default_gen_params
def params_schedule_fn_interval(outside_info):
"""
outside_information (dict):
progress (float in [0, 1] interval) a number that indicate progress
"""
assert (
outside_info != {} and "progress" in outside_info
), "if this happens during initialization, please add initial_info to env_params to address the issue"
progress = outside_info["progress"]
prop_empty = 0.9 - 0.4 * progress
prop_feats = 0.1 + 0.4 * progress
mdp_params_generated = {
"inner_shape": (7, 5),
"prop_empty": prop_empty,
"prop_feats": prop_feats,
"start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
"display": False,
"rew_shaping_params": None,
}
return mdp_params_generated
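# Illustrative sketch (not used by the tests below): the interpolation above maps
# progress=0 to the (prop_empty=0.9, prop_feats=0.1) split of
# params_schedule_fn_constant_09_01 and progress=1 to the (0.5, 0.5) split of
# params_schedule_fn_constant_05_05. The helper name is an assumption for the example.
def _example_schedule_endpoints():
    start = params_schedule_fn_interval({"progress": 0.0})  # prop_empty=0.9, prop_feats=0.1
    end = params_schedule_fn_interval({"progress": 1.0})  # prop_empty=0.5, prop_feats=0.5
    return (
        start["prop_empty"],
        start["prop_feats"],
        end["prop_empty"],
        end["prop_feats"],
    )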
default_env_params_infinite = {"horizon": 400, "num_mdp": np.inf}
default_env_params_infinite_interval = {
"horizon": 400,
"num_mdp": np.inf,
"initial_info": {"progress": 0},
}
class TestParamScheduleFnConstant(unittest.TestCase):
    def test_constant_schedule_09_01(self):
ae = AgentEvaluator.from_mdp_params_infinite(
mdp_params=None,
env_params=default_env_params_infinite,
outer_shape=(7, 5),
mdp_params_schedule_fn=params_schedule_fn_constant_09_01,
)
num_empty_grid = []
for i in range(500):
ae.env.reset()
empty_i = len(ae.env.mdp.terrain_pos_dict[" "])
num_empty_grid.append(empty_i)
avg_num_empty = sum(num_empty_grid) / len(num_empty_grid)
print("avg number of empty grid:", avg_num_empty)
        # the number of empty squares should be consistent
self.assertTrue(13.9 < avg_num_empty < 14.1)
def test_constant_schedule_07_03(self):
ae = AgentEvaluator.from_mdp_params_infinite(
mdp_params=None,
env_params=default_env_params_infinite,
outer_shape=(7, 5),
mdp_params_schedule_fn=params_schedule_fn_constant_07_03,
)
num_empty_grid = []
for i in range(500):
ae.env.reset()
empty_i = len(ae.env.mdp.terrain_pos_dict[" "])
num_empty_grid.append(empty_i)
avg_num_empty = sum(num_empty_grid) / len(num_empty_grid)
print("avg number of empty grid:", avg_num_empty)
        # the number of empty squares should be fairly consistent
self.assertTrue(11.5 < avg_num_empty < 11.8)
def test_constant_schedule_05_05(self):
ae = AgentEvaluator.from_mdp_params_infinite(
mdp_params=None,
env_params=default_env_params_infinite,
outer_shape=(7, 5),
mdp_params_schedule_fn=params_schedule_fn_constant_05_05,
)
num_empty_grid = []
for i in range(500):
ae.env.reset()
empty_i = len(ae.env.mdp.terrain_pos_dict[" "])
num_empty_grid.append(empty_i)
avg_num_empty = sum(num_empty_grid) / len(num_empty_grid)
print("avg number of empty grid:", avg_num_empty)
        # the number of empty squares should be fairly consistent
self.assertTrue(10.4 < avg_num_empty < 10.9)
class TestParamScheduleFnInterval(unittest.TestCase):
def test_interval_schedule(self):
ae = AgentEvaluator.from_mdp_params_infinite(
mdp_params=None,
env_params=default_env_params_infinite_interval,
outer_shape=(7, 5),
mdp_params_schedule_fn=params_schedule_fn_interval,
)
num_empty_grid = []
for i in range(4000):
ae.env.reset(outside_info={"progress": i / 4000})
empty_i = len(ae.env.mdp.terrain_pos_dict[" "])
num_empty_grid.append(empty_i)
avg_num_empty_09_01 = sum(num_empty_grid[0:50]) / 50
self.assertTrue(13.9 < avg_num_empty_09_01 < 14.1)
avg_num_empty_07_03 = sum(num_empty_grid[1975:2025]) / 50
self.assertTrue(11.5 < avg_num_empty_07_03 < 11.8)
avg_num_empty_05_05 = sum(num_empty_grid[3950:4000]) / 50
self.assertTrue(10.4 < avg_num_empty_05_05 < 10.9)
print(
"avg number of empty grids:",
avg_num_empty_09_01,
avg_num_empty_07_03,
avg_num_empty_05_05,
)
if __name__ == "__main__":
unittest.main()
| 6,279 | 32.582888 | 106 | py |
overcooked_ai | overcooked_ai-master/testing/__init__.py | 0 | 0 | 0 | py |
|
overcooked_ai | overcooked_ai-master/src/overcooked_demo/__init__.py | import os
import subprocess
def start_server():
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)
subprocess.call("./up.sh")
def move_agent():
from overcooked_demo.server.move_agents import main
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(os.path.join(dir_path, "server"))
main()
| 357 | 20.058824 | 58 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_demo/server/app.py | import os
import sys
# Import and patch the production eventlet server if necessary
if os.getenv("FLASK_ENV", "production") == "production":
import eventlet
eventlet.monkey_patch()
import atexit
import json
import logging
# All other imports must come after patch to ensure eventlet compatibility
import pickle
import queue
from datetime import datetime
from threading import Lock
import game
from flask import Flask, jsonify, render_template, request
from flask_socketio import SocketIO, emit, join_room, leave_room
from game import Game, OvercookedGame, OvercookedTutorial
from utils import ThreadSafeDict, ThreadSafeSet
### Thoughts -- where I'll log potential issues/ideas as they come up
# Should make game driver code more error robust -- if overcooked randomly errors we should catch it and report it to the user
# Right now, if one user's 'join' runs before another user's 'join' finishes, they won't end up in the same game
# Could use a monitor on a conditional to block all global ops during calls to _ensure_consistent_state for debugging
# Could cap number of single- and multi-player games separately since the latter has much higher RAM and CPU usage
###########
# Globals #
###########
# Read in global config
CONF_PATH = os.getenv("CONF_PATH", "config.json")
with open(CONF_PATH, "r") as f:
CONFIG = json.load(f)
# Where errors will be logged
LOGFILE = CONFIG["logfile"]
# Available layout names
LAYOUTS = CONFIG["layouts"]
# Values that are standard across layouts
LAYOUT_GLOBALS = CONFIG["layout_globals"]
# Maximum allowable game length (in seconds)
MAX_GAME_LENGTH = CONFIG["MAX_GAME_LENGTH"]
# Path to where pre-trained agents will be stored on server
AGENT_DIR = CONFIG["AGENT_DIR"]
# Maximum number of games that can run concurrently. Constrained by available memory and CPU
MAX_GAMES = CONFIG["MAX_GAMES"]
# Frames per second cap for serving to client
MAX_FPS = CONFIG["MAX_FPS"]
# Default configuration for predefined experiment
PREDEFINED_CONFIG = json.dumps(CONFIG["predefined"])
# Default configuration for tutorial
TUTORIAL_CONFIG = json.dumps(CONFIG["tutorial"])
# Global queue of available IDs. This is how we synch game creation and keep track of how many games are in memory
FREE_IDS = queue.Queue(maxsize=MAX_GAMES)
# Bitmap that indicates whether ID is currently in use. Game with ID=i is "freed" by setting FREE_MAP[i] = True
FREE_MAP = ThreadSafeDict()
# Initialize our ID tracking data
for i in range(MAX_GAMES):
FREE_IDS.put(i)
FREE_MAP[i] = True
# Mapping of game-id to game objects
GAMES = ThreadSafeDict()
# Set of games IDs that are currently being played
ACTIVE_GAMES = ThreadSafeSet()
# Queue of games IDs that are waiting for additional players to join. Note that some of these IDs might
# be stale (i.e. if FREE_MAP[id] = True)
WAITING_GAMES = queue.Queue()
# Mapping of users to locks associated with the ID. Enforces user-level serialization
USERS = ThreadSafeDict()
# Mapping of user id's to the current game (room) they are in
USER_ROOMS = ThreadSafeDict()
# Mapping of string game names to corresponding classes
GAME_NAME_TO_CLS = {
"overcooked": OvercookedGame,
"tutorial": OvercookedTutorial,
}
game._configure(MAX_GAME_LENGTH, AGENT_DIR)
#######################
# Flask Configuration #
#######################
# Create and configure flask app
app = Flask(__name__, template_folder=os.path.join("static", "templates"))
app.config["DEBUG"] = os.getenv("FLASK_ENV", "production") == "development"
socketio = SocketIO(app, cors_allowed_origins="*", logger=app.config["DEBUG"])
# Attach handler for logging errors to file
handler = logging.FileHandler(LOGFILE)
handler.setLevel(logging.ERROR)
app.logger.addHandler(handler)
#################################
# Global Coordination Functions #
#################################
def try_create_game(game_name, **kwargs):
"""
Tries to create a brand new Game object based on parameters in `kwargs`
    Returns (Game, Error) that represent a pointer to a game object, and any error that occurred
    during creation. In case of error, the `Game` returned is None. In case of success,
    the `Error` returned is None
Possible Errors:
- Runtime error if server is at max game capacity
    - Propagate any error that occurred in the game __init__ function
"""
try:
curr_id = FREE_IDS.get(block=False)
assert FREE_MAP[curr_id], "Current id is already in use"
game_cls = GAME_NAME_TO_CLS.get(game_name, OvercookedGame)
game = game_cls(id=curr_id, **kwargs)
except queue.Empty:
err = RuntimeError("Server at max capacity")
return None, err
except Exception as e:
return None, e
else:
GAMES[game.id] = game
FREE_MAP[game.id] = False
return game, None
def cleanup_game(game: OvercookedGame):
if FREE_MAP[game.id]:
raise ValueError("Double free on a game")
# User tracking
for user_id in game.players:
leave_curr_room(user_id)
# Socketio tracking
socketio.close_room(game.id)
# Game tracking
FREE_MAP[game.id] = True
FREE_IDS.put(game.id)
del GAMES[game.id]
if game.id in ACTIVE_GAMES:
ACTIVE_GAMES.remove(game.id)
def get_game(game_id):
return GAMES.get(game_id, None)
def get_curr_game(user_id):
return get_game(get_curr_room(user_id))
def get_curr_room(user_id):
return USER_ROOMS.get(user_id, None)
def set_curr_room(user_id, room_id):
USER_ROOMS[user_id] = room_id
def leave_curr_room(user_id):
del USER_ROOMS[user_id]
def get_waiting_game():
"""
Return a pointer to a waiting game, if one exists
Note: The use of a queue ensures that no two threads will ever receive the same pointer, unless
the waiting game's ID is re-added to the WAITING_GAMES queue
"""
try:
waiting_id = WAITING_GAMES.get(block=False)
while FREE_MAP[waiting_id]:
waiting_id = WAITING_GAMES.get(block=False)
except queue.Empty:
return None
else:
return get_game(waiting_id)
##########################
# Socket Handler Helpers #
##########################
def _leave_game(user_id):
"""
    Removes `user_id` from its current game, if it exists. Rebroadcasts the updated game state to all
other users in the relevant game.
Leaving an active game force-ends the game for all other users, if they exist
Leaving a waiting game causes the garbage collection of game memory, if no other users are in the
game after `user_id` is removed
"""
# Get pointer to current game if it exists
game = get_curr_game(user_id)
if not game:
# Cannot leave a game if not currently in one
return False
# Acquire this game's lock to ensure all global state updates are atomic
with game.lock:
# Update socket state maintained by socketio
leave_room(game.id)
# Update user data maintained by this app
leave_curr_room(user_id)
# Update game state maintained by game object
if user_id in game.players:
game.remove_player(user_id)
else:
game.remove_spectator(user_id)
# Whether the game was active before the user left
was_active = game.id in ACTIVE_GAMES
# Rebroadcast data and handle cleanup based on the transition caused by leaving
if was_active and game.is_empty():
# Active -> Empty
game.deactivate()
elif game.is_empty():
# Waiting -> Empty
cleanup_game(game)
elif not was_active:
# Waiting -> Waiting
emit("waiting", {"in_game": True}, room=game.id)
elif was_active and game.is_ready():
# Active -> Active
pass
elif was_active and not game.is_empty():
# Active -> Waiting
game.deactivate()
return was_active
def _create_game(user_id, game_name, params={}):
game, err = try_create_game(game_name, **params)
if not game:
emit("creation_failed", {"error": err.__repr__()})
return
spectating = True
with game.lock:
if not game.is_full():
spectating = False
game.add_player(user_id)
else:
spectating = True
game.add_spectator(user_id)
join_room(game.id)
set_curr_room(user_id, game.id)
if game.is_ready():
game.activate()
ACTIVE_GAMES.add(game.id)
emit(
"start_game",
{"spectating": spectating, "start_info": game.to_json()},
room=game.id,
)
socketio.start_background_task(play_game, game, fps=6)
else:
WAITING_GAMES.put(game.id)
emit("waiting", {"in_game": True}, room=game.id)
#####################
# Debugging Helpers #
#####################
def _ensure_consistent_state():
"""
Simple sanity checks of invariants on global state data
Let ACTIVE be the set of all active game IDs, GAMES be the set of all existing
game IDs, and WAITING be the set of all waiting (non-stale) game IDs. Note that
a game could be in the WAITING_GAMES queue but no longer exist (indicated by
the FREE_MAP)
- Intersection of WAITING and ACTIVE games must be empty set
- Union of WAITING and ACTIVE must be equal to GAMES
- id \in FREE_IDS => FREE_MAP[id]
- id \in ACTIVE_GAMES => Game in active state
- id \in WAITING_GAMES => Game in inactive state
"""
waiting_games = set()
active_games = set()
all_games = set(GAMES)
for game_id in list(FREE_IDS.queue):
assert FREE_MAP[game_id], "Freemap in inconsistent state"
for game_id in list(WAITING_GAMES.queue):
if not FREE_MAP[game_id]:
waiting_games.add(game_id)
for game_id in ACTIVE_GAMES:
active_games.add(game_id)
assert (
waiting_games.union(active_games) == all_games
), "WAITING union ACTIVE != ALL"
assert not waiting_games.intersection(
active_games
), "WAITING intersect ACTIVE != EMPTY"
assert all(
[get_game(g_id)._is_active for g_id in active_games]
), "Active ID in waiting state"
assert all(
        [not get_game(g_id)._is_active for g_id in waiting_games]
), "Waiting ID in active state"
def get_agent_names():
return [
d
for d in os.listdir(AGENT_DIR)
if os.path.isdir(os.path.join(AGENT_DIR, d))
]
######################
# Application routes #
######################
# Hitting each of these endpoints creates a brand new socket that is closed
# after the server response is received. Standard HTTP protocol
@app.route("/")
def index():
agent_names = get_agent_names()
return render_template(
"index.html", agent_names=agent_names, layouts=LAYOUTS
)
@app.route("/predefined")
def predefined():
uid = request.args.get("UID")
num_layouts = len(CONFIG["predefined"]["experimentParams"]["layouts"])
return render_template(
"predefined.html",
uid=uid,
config=PREDEFINED_CONFIG,
num_layouts=num_layouts,
)
@app.route("/instructions")
def instructions():
return render_template("instructions.html", layout_conf=LAYOUT_GLOBALS)
@app.route("/tutorial")
def tutorial():
return render_template("tutorial.html", config=TUTORIAL_CONFIG)
@app.route("/debug")
def debug():
resp = {}
games = []
active_games = []
waiting_games = []
users = []
free_ids = []
free_map = {}
for game_id in ACTIVE_GAMES:
game = get_game(game_id)
active_games.append({"id": game_id, "state": game.to_json()})
for game_id in list(WAITING_GAMES.queue):
game = get_game(game_id)
game_state = None if FREE_MAP[game_id] else game.to_json()
waiting_games.append({"id": game_id, "state": game_state})
for game_id in GAMES:
games.append(game_id)
for user_id in USER_ROOMS:
users.append({user_id: get_curr_room(user_id)})
for game_id in list(FREE_IDS.queue):
free_ids.append(game_id)
for game_id in FREE_MAP:
free_map[game_id] = FREE_MAP[game_id]
resp["active_games"] = active_games
resp["waiting_games"] = waiting_games
resp["all_games"] = games
resp["users"] = users
resp["free_ids"] = free_ids
resp["free_map"] = free_map
return jsonify(resp)
#########################
# Socket Event Handlers #
#########################
# Asynchronous handling of client-side socket events. Note that the socket persists even after the
# event has been handled. This allows for more rapid data communication, as a handshake only has to
# happen once at the beginning. Thus, socket events are used for all game updates, where more rapid
# communication is needed
def creation_params(params):
"""
This function extracts the dataCollection and oldDynamics settings from the input and
process them before sending them to game creation
"""
# this params file should be a dictionary that can have these keys:
# playerZero: human/Rllib*agent
# playerOne: human/Rllib*agent
# layout: one of the layouts in the config file, I don't think this one is used
# gameTime: time in seconds
# oldDynamics: on/off
# dataCollection: on/off
# layouts: [layout in the config file], this one determines which layout to use, and if there is more than one layout, a series of game is run back to back
#
use_old = False
if "oldDynamics" in params and params["oldDynamics"] == "on":
params["mdp_params"] = {"old_dynamics": True}
use_old = True
if "dataCollection" in params and params["dataCollection"] == "on":
# config the necessary setting to properly save data
params["dataCollection"] = True
mapping = {"human": "H"}
# gameType is either HH, HA, AH, AA depending on the config
gameType = "{}{}".format(
mapping.get(params["playerZero"], "A"),
mapping.get(params["playerOne"], "A"),
)
params["collection_config"] = {
"time": datetime.today().strftime("%Y-%m-%d_%H-%M-%S"),
"type": gameType,
}
if use_old:
params["collection_config"]["old_dynamics"] = "Old"
else:
params["collection_config"]["old_dynamics"] = "New"
else:
params["dataCollection"] = False
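# Illustrative shape of the incoming params dict (key names from the comment above;
# the agent name "RllibExample" is hypothetical):
#   {"playerZero": "human", "playerOne": "RllibExample", "gameTime": 60,
#    "oldDynamics": "on", "dataCollection": "on", "layouts": ["cramped_room"]}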
@socketio.on("create")
def on_create(data):
user_id = request.sid
with USERS[user_id]:
# Retrieve current game if one exists
curr_game = get_curr_game(user_id)
if curr_game:
# Cannot create if currently in a game
return
params = data.get("params", {})
creation_params(params)
game_name = data.get("game_name", "overcooked")
_create_game(user_id, game_name, params)
@socketio.on("join")
def on_join(data):
user_id = request.sid
with USERS[user_id]:
create_if_not_found = data.get("create_if_not_found", True)
# Retrieve current game if one exists
curr_game = get_curr_game(user_id)
if curr_game:
# Cannot join if currently in a game
return
# Retrieve a currently open game if one exists
game = get_waiting_game()
if not game and create_if_not_found:
# No available game was found so create a game
params = data.get("params", {})
creation_params(params)
game_name = data.get("game_name", "overcooked")
_create_game(user_id, game_name, params)
return
elif not game:
# No available game was found so start waiting to join one
emit("waiting", {"in_game": False})
else:
# Game was found so join it
with game.lock:
join_room(game.id)
set_curr_room(user_id, game.id)
game.add_player(user_id)
if game.is_ready():
# Game is ready to begin play
game.activate()
ACTIVE_GAMES.add(game.id)
emit(
"start_game",
{"spectating": False, "start_info": game.to_json()},
room=game.id,
)
socketio.start_background_task(play_game, game)
else:
# Still need to keep waiting for players
WAITING_GAMES.put(game.id)
emit("waiting", {"in_game": True}, room=game.id)
@socketio.on("leave")
def on_leave(data):
user_id = request.sid
with USERS[user_id]:
was_active = _leave_game(user_id)
if was_active:
emit("end_game", {"status": Game.Status.DONE, "data": {}})
else:
emit("end_lobby")
@socketio.on("action")
def on_action(data):
user_id = request.sid
action = data["action"]
game = get_curr_game(user_id)
if not game:
return
game.enqueue_action(user_id, action)
@socketio.on("connect")
def on_connect():
user_id = request.sid
if user_id in USERS:
return
USERS[user_id] = Lock()
@socketio.on("disconnect")
def on_disconnect():
    print("disconnect triggered", file=sys.stderr)
# Ensure game data is properly cleaned-up in case of unexpected disconnect
user_id = request.sid
if user_id not in USERS:
return
with USERS[user_id]:
_leave_game(user_id)
del USERS[user_id]
# Exit handler for server
def on_exit():
# Force-terminate all games on server termination
for game_id in GAMES:
socketio.emit(
"end_game",
{
"status": Game.Status.INACTIVE,
"data": get_game(game_id).get_data(),
},
room=game_id,
)
#############
# Game Loop #
#############
def play_game(game: OvercookedGame, fps=6):
"""
Asynchronously apply real-time game updates and broadcast state to all clients currently active
in the game. Note that this loop must be initiated by a parallel thread for each active game
    game (Game object): Stores relevant game state. Note that the game id is the same as the socketio
room id for all clients connected to this game
fps (int): Number of game ticks that should happen every second
"""
status = Game.Status.ACTIVE
while status != Game.Status.DONE and status != Game.Status.INACTIVE:
with game.lock:
status = game.tick()
if status == Game.Status.RESET:
with game.lock:
data = game.get_data()
socketio.emit(
"reset_game",
{
"state": game.to_json(),
"timeout": game.reset_timeout,
"data": data,
},
room=game.id,
)
socketio.sleep(game.reset_timeout / 1000)
else:
socketio.emit(
"state_pong", {"state": game.get_state()}, room=game.id
)
socketio.sleep(1 / fps)
with game.lock:
data = game.get_data()
socketio.emit(
"end_game", {"status": status, "data": data}, room=game.id
)
if status != Game.Status.INACTIVE:
game.deactivate()
cleanup_game(game)
if __name__ == "__main__":
# Dynamically parse host and port from environment variables (set by docker build)
host = os.getenv("HOST", "0.0.0.0")
port = int(os.getenv("PORT", 80))
# Attach exit handler to ensure graceful shutdown
atexit.register(on_exit)
# https://localhost:80 is external facing address regardless of build environment
socketio.run(app, host=host, port=port, log_output=app.config["DEBUG"])
| 20,001 | 28.80924 | 159 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_demo/server/utils.py | import os
from threading import Lock
# this is the mounted volume
DOCKER_VOLUME = "/app/data"
class ThreadSafeSet(set):
def __init__(self, *args, **kwargs):
super(ThreadSafeSet, self).__init__(*args, **kwargs)
self.lock = Lock()
def add(self, *args):
with self.lock:
retval = super(ThreadSafeSet, self).add(*args)
return retval
def clear(self, *args):
with self.lock:
retval = super(ThreadSafeSet, self).clear(*args)
return retval
def pop(self, *args):
with self.lock:
if len(self):
retval = super(ThreadSafeSet, self).pop(*args)
else:
retval = None
return retval
def remove(self, item):
with self.lock:
if item in self:
retval = super(ThreadSafeSet, self).remove(item)
else:
retval = None
return retval
class ThreadSafeDict(dict):
def __init__(self, *args, **kwargs):
super(ThreadSafeDict, self).__init__(*args, **kwargs)
self.lock = Lock()
def clear(self, *args, **kwargs):
with self.lock:
retval = super(ThreadSafeDict, self).clear(*args, **kwargs)
return retval
def pop(self, *args, **kwargs):
with self.lock:
retval = super(ThreadSafeDict, self).pop(*args, **kwargs)
return retval
def __setitem__(self, *args, **kwargs):
with self.lock:
retval = super(ThreadSafeDict, self).__setitem__(*args, **kwargs)
return retval
def __delitem__(self, item):
with self.lock:
if item in self:
retval = super(ThreadSafeDict, self).__delitem__(item)
else:
retval = None
return retval
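# Note: these wrappers only serialize individual operations. Compound
# check-then-act sequences (e.g. "if id in FREE_MAP: ...") still need external
# locking, which app.py provides via per-user and per-game Lock objects.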
def create_dirs(config: dict, cur_layout: str):
"""
    config is expected to contain these keys:
    {"time": datetime.today().strftime("%Y-%m-%d_%H-%M-%S"),
     "type": gameType, a str of either "HH", "HA", "AH", "AA",
     "old_dynamics": "Old" or "New"}
    cur_layout is the layout name string.
    We group the data by layout/old_dynamics/type/time
"""
path = os.path.join(
DOCKER_VOLUME,
cur_layout,
config["old_dynamics"],
config["type"],
config["time"],
)
if not os.path.exists(path):
os.makedirs(path)
return path
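# Illustrative resulting path (layout name and timestamp are hypothetical):
#   /app/data/cramped_room/New/HH/2023-05-01_12-00-00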
| 2,329 | 25.781609 | 77 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_demo/server/move_agents.py | import argparse
import json
import os
import shutil
import sys
def main():
with open("config.json", "r") as f:
config = json.load(f)
# the agents dir
agent_dir = config["AGENT_DIR"]
parser = argparse.ArgumentParser(
prog="move_agent",
description="Create a directory for agent to be loaded into the game",
)
parser.add_argument(
"checkpoint",
help="The path to the checkpoint directory, e.g. ~/ray_results/run_xyz/checkpoint_000500",
)
parser.add_argument(
"agent_name",
help="The name you want for this agent; remember to follow the naming conventions: the name must start with 'Rllib'",
)
parser.add_argument(
"-o",
"--overwrite",
default=False,
help="Whether to overwrite existing agent if one with the same name already exists",
)
parser.add_argument(
"-b",
"--bc",
default=None,
help="If the agent was trained with BC agent, provide the path to the saved bc model directory",
)
args = parser.parse_args()
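    # Illustrative invocation (paths and agent name are hypothetical):
    #   python move_agents.py ~/ray_results/run_xyz/checkpoint_000500 RllibPPO \
    #       -o True -b ~/bc_runs/my_bc_model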
checkpoint, agent_name, overwrite, bc_model = (
args.checkpoint,
args.agent_name,
args.overwrite == "True",
args.bc,
)
if agent_name.lower()[:5] != "rllib":
        sys.exit("Incompatible agent name")
elif agent_name in os.listdir(agent_dir) and not overwrite:
sys.exit("agent name already exists")
# make a new directory for the agent
new_agent_dir = os.path.join(agent_dir, agent_name, "agent")
if os.path.exists(new_agent_dir):
parent_dir = os.path.dirname(new_agent_dir)
shutil.rmtree(parent_dir)
# copy over files
shutil.copytree(checkpoint, new_agent_dir)
# copy over the config.pickle file
run_dir = os.path.dirname(checkpoint)
new_dir = os.path.dirname(new_agent_dir)
shutil.copy(
os.path.join(run_dir, "config.pkl"),
os.path.join(new_dir, "config.pkl"),
)
# if bc_model is provided
if bc_model:
bc_params = os.path.join(new_dir, "bc_params")
if not os.path.exists(bc_model):
sys.exit("bc_model dir doesn't exist")
shutil.copytree(bc_model, bc_params)
sys.exit("Copy succeeded")
if __name__ == "__main__":
main()
| 2,297 | 28.088608 | 125 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_demo/server/__init__.py | 0 | 0 | 0 | py |
|
overcooked_ai | overcooked_ai-master/src/overcooked_demo/server/game.py | import json
import os
import pickle
import random
from abc import ABC, abstractmethod
from queue import Empty, Full, LifoQueue, Queue
from threading import Lock, Thread
from time import time
import ray
from utils import DOCKER_VOLUME, create_dirs
from human_aware_rl.rllib.rllib import load_agent
from overcooked_ai_py.mdp.actions import Action, Direction
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld
from overcooked_ai_py.planning.planners import (
NO_COUNTERS_PARAMS,
MotionPlanner,
)
# Relative path to where all static pre-trained agents are stored on server
AGENT_DIR = None
# Maximum allowable game time (in seconds)
MAX_GAME_TIME = None
def _configure(max_game_time, agent_dir):
global AGENT_DIR, MAX_GAME_TIME
MAX_GAME_TIME = max_game_time
AGENT_DIR = agent_dir
def fix_bc_path(path):
"""
Loading a PPO agent trained with a BC agent requires loading the BC model as well when restoring the trainer, even though the BC model is not used in game
For now the solution is to include the saved BC model and fix the relative path to the model in the config.pkl file
"""
import dill
# the path is the agents/Rllib.*/agent directory
agent_path = os.path.dirname(path)
with open(os.path.join(agent_path, "config.pkl"), "rb") as f:
data = dill.load(f)
bc_model_dir = data["bc_params"]["bc_config"]["model_dir"]
last_dir = os.path.basename(bc_model_dir)
bc_model_dir = os.path.join(agent_path, "bc_params", last_dir)
data["bc_params"]["bc_config"]["model_dir"] = bc_model_dir
with open(os.path.join(agent_path, "config.pkl"), "wb") as f:
dill.dump(data, f)
class Game(ABC):
"""
Class representing a game object. Coordinates the simultaneous actions of arbitrary
number of players. Override this base class in order to use.
Players can post actions to a `pending_actions` queue, and driver code can call `tick` to apply these actions.
    It should be noted that most operations in this class are not thread safe on their own. Thus, client code should
acquire `self.lock` before making any modifications to the instance.
One important exception to the above rule is `enqueue_actions` which is thread safe out of the box
"""
# Possible TODO: create a static list of IDs used by the class so far to verify id uniqueness
# This would need to be serialized, however, which might cause too great a performance hit to
# be worth it
EMPTY = "EMPTY"
class Status:
DONE = "done"
ACTIVE = "active"
RESET = "reset"
INACTIVE = "inactive"
ERROR = "error"
def __init__(self, *args, **kwargs):
"""
players (list): List of IDs of players currently in the game
spectators (set): Collection of IDs of players that are not allowed to enqueue actions but are currently watching the game
id (int): Unique identifier for this game
        pending_actions (List[Queue]): Buffer of (player_id, action) pairs that have been submitted but haven't been committed yet
lock (Lock): Used to serialize updates to the game state
is_active(bool): Whether the game is currently being played or not
"""
self.players = []
self.spectators = set()
self.pending_actions = []
self.id = kwargs.get("id", id(self))
self.lock = Lock()
self._is_active = False
@abstractmethod
def is_full(self):
"""
Returns whether there is room for additional players to join or not
"""
pass
@abstractmethod
def apply_action(self, player_idx, action):
"""
Updates the game state by applying a single (player_idx, action) tuple. Subclasses should try to override this method
if possible
"""
pass
@abstractmethod
def is_finished(self):
"""
Returns whether the game has concluded or not
"""
pass
def is_ready(self):
"""
Returns whether the game can be started. Defaults to having enough players
"""
return self.is_full()
@property
def is_active(self):
"""
Whether the game is currently being played
"""
return self._is_active
@property
def reset_timeout(self):
"""
Number of milliseconds to pause game on reset
"""
return 3000
def apply_actions(self):
"""
Updates the game state by applying each of the pending actions in the buffer. Is called by the tick method. Subclasses
should override this method if joint actions are necessary. If actions can be serialized, overriding `apply_action` is
preferred
"""
for i in range(len(self.players)):
try:
while True:
action = self.pending_actions[i].get(block=False)
self.apply_action(i, action)
except Empty:
pass
def activate(self):
"""
Activates the game to let server know real-time updates should start. Provides little functionality but useful as
a check for debugging
"""
self._is_active = True
def deactivate(self):
"""
        Deactivates the game such that subsequent calls to `tick` will be no-ops. Used to handle the case where the game ends but
there is still a buffer of client pings to handle
"""
self._is_active = False
def reset(self):
"""
Restarts the game while keeping all active players by resetting game stats and temporarily disabling `tick`
"""
if not self.is_active:
raise ValueError("Inactive Games cannot be reset")
if self.is_finished():
return self.Status.DONE
self.deactivate()
self.activate()
return self.Status.RESET
def needs_reset(self):
"""
Returns whether the game should be reset on the next call to `tick`
"""
return False
def tick(self):
"""
Updates the game state by applying each of the pending actions. This is done so that players cannot directly modify
the game state, offering an additional level of safety and thread security.
One can think of "enqueue_action" like calling "git add" and "tick" like calling "git commit"
Subclasses should try to override `apply_actions` if possible. Only override this method if necessary
"""
if not self.is_active:
return self.Status.INACTIVE
if self.needs_reset():
self.reset()
return self.Status.RESET
self.apply_actions()
return self.Status.DONE if self.is_finished() else self.Status.ACTIVE
def enqueue_action(self, player_id, action):
"""
Add (player_id, action) pair to the pending action queue, without modifying underlying game state
Note: This function IS thread safe
"""
if not self.is_active:
# Could run into issues with is_active not being thread safe
return
if player_id not in self.players:
# Only players actively in game are allowed to enqueue actions
return
try:
player_idx = self.players.index(player_id)
self.pending_actions[player_idx].put(action)
except Full:
pass
def get_state(self):
"""
Return a JSON compatible serialized state of the game. Note that this should be as minimalistic as possible
as the size of the game state will be the most important factor in game performance. This is sent to the client
every frame update.
"""
return {"players": self.players}
def to_json(self):
"""
Return a JSON compatible serialized state of the game. Contains all information about the game, does not need to
be minimalistic. This is sent to the client only once, upon game creation
"""
return self.get_state()
def is_empty(self):
"""
Return whether it is safe to garbage collect this game instance
"""
return not self.num_players
def add_player(self, player_id, idx=None, buff_size=-1):
"""
Add player_id to the game
"""
if self.is_full():
raise ValueError("Cannot add players to full game")
if self.is_active:
raise ValueError("Cannot add players to active games")
if not idx and self.EMPTY in self.players:
idx = self.players.index(self.EMPTY)
elif not idx:
idx = len(self.players)
padding = max(0, idx - len(self.players) + 1)
for _ in range(padding):
self.players.append(self.EMPTY)
self.pending_actions.append(self.EMPTY)
self.players[idx] = player_id
self.pending_actions[idx] = Queue(maxsize=buff_size)
def add_spectator(self, spectator_id):
"""
Add spectator_id to list of spectators for this game
"""
if spectator_id in self.players:
raise ValueError("Cannot spectate and play at same time")
self.spectators.add(spectator_id)
def remove_player(self, player_id):
"""
Remove player_id from the game
"""
try:
idx = self.players.index(player_id)
self.players[idx] = self.EMPTY
self.pending_actions[idx] = self.EMPTY
except ValueError:
return False
else:
return True
def remove_spectator(self, spectator_id):
"""
Removes spectator_id if they are in list of spectators. Returns True if spectator successfully removed, False otherwise
"""
try:
self.spectators.remove(spectator_id)
except ValueError:
return False
else:
return True
def clear_pending_actions(self):
"""
Remove all queued actions for all players
"""
for i, player in enumerate(self.players):
if player != self.EMPTY:
queue = self.pending_actions[i]
queue.queue.clear()
@property
def num_players(self):
return len([player for player in self.players if player != self.EMPTY])
def get_data(self):
"""
Return any game metadata to server driver.
"""
return {}
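# Typical driver usage of the Game interface (a minimal sketch; the real driver is
# app.py's play_game loop):
#   game.enqueue_action(player_id, action)    # from socket handlers ("git add")
#   with game.lock:
#       status = game.tick()                  # commit all pending actions ("git commit")
#   if status == Game.Status.DONE:
#       ...                                   # broadcast end-of-game data to clients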
class DummyGame(Game):
"""
Standin class used to test basic server logic
"""
def __init__(self, **kwargs):
super(DummyGame, self).__init__(**kwargs)
self.counter = 0
def is_full(self):
return self.num_players == 2
def apply_action(self, idx, action):
pass
def apply_actions(self):
self.counter += 1
def is_finished(self):
return self.counter >= 100
def get_state(self):
state = super(DummyGame, self).get_state()
state["count"] = self.counter
return state
class DummyInteractiveGame(Game):
"""
Standing class used to test interactive components of the server logic
"""
def __init__(self, **kwargs):
super(DummyInteractiveGame, self).__init__(**kwargs)
self.max_players = int(
kwargs.get("playerZero", "human") == "human"
) + int(kwargs.get("playerOne", "human") == "human")
self.max_count = kwargs.get("max_count", 30)
self.counter = 0
self.counts = [0] * self.max_players
def is_full(self):
return self.num_players == self.max_players
def is_finished(self):
return max(self.counts) >= self.max_count
def apply_action(self, player_idx, action):
if action.upper() == Direction.NORTH:
self.counts[player_idx] += 1
if action.upper() == Direction.SOUTH:
self.counts[player_idx] -= 1
def apply_actions(self):
super(DummyInteractiveGame, self).apply_actions()
self.counter += 1
def get_state(self):
state = super(DummyInteractiveGame, self).get_state()
state["count"] = self.counter
for i in range(self.num_players):
state["player_{}_count".format(i)] = self.counts[i]
return state
class OvercookedGame(Game):
"""
Class for bridging the gap between Overcooked_Env and the Game interface
Instance variable:
- max_players (int): Maximum number of players that can be in the game at once
- mdp (OvercookedGridworld): Controls the underlying Overcooked game logic
    - score (int): Current reward achieved by all players
- max_time (int): Number of seconds the game should last
- npc_policies (dict): Maps user_id to policy (Agent) for each AI player
- npc_state_queues (dict): Mapping of NPC user_ids to LIFO queues for the policy to process
- curr_tick (int): How many times the game server has called this instance's `tick` method
    - ticks_per_ai_action (int): How many frames should pass in between NPC policy forward passes.
Note that this is a lower bound; if the policy is computationally expensive the actual frames
per forward pass can be higher
- action_to_overcooked_action (dict): Maps action names returned by client to action names used by OvercookedGridworld
Note that this is an instance variable and not a static variable for efficiency reasons
- human_players (set(str)): Collection of all player IDs that correspond to humans
- npc_players (set(str)): Collection of all player IDs that correspond to AI
- randomized (boolean): Whether the order of the layouts should be randomized
Methods:
- npc_policy_consumer: Background process that asynchronously computes NPC policy forward passes. One thread
spawned for each NPC
- _curr_game_over: Determines whether the game on the current mdp has ended
"""
def __init__(
self,
layouts=["cramped_room"],
mdp_params={},
num_players=2,
gameTime=30,
playerZero="human",
playerOne="human",
showPotential=False,
randomized=False,
ticks_per_ai_action=1,
**kwargs
):
super(OvercookedGame, self).__init__(**kwargs)
self.show_potential = showPotential
self.mdp_params = mdp_params
self.layouts = layouts
self.max_players = int(num_players)
self.mdp = None
self.mp = None
self.score = 0
self.phi = 0
self.max_time = min(int(gameTime), MAX_GAME_TIME)
self.npc_policies = {}
self.npc_state_queues = {}
self.action_to_overcooked_action = {
"STAY": Action.STAY,
"UP": Direction.NORTH,
"DOWN": Direction.SOUTH,
"LEFT": Direction.WEST,
"RIGHT": Direction.EAST,
"SPACE": Action.INTERACT,
}
self.ticks_per_ai_action = ticks_per_ai_action
self.curr_tick = 0
self.human_players = set()
self.npc_players = set()
if randomized:
random.shuffle(self.layouts)
if playerZero != "human":
player_zero_id = playerZero + "_0"
self.add_player(player_zero_id, idx=0, buff_size=1, is_human=False)
self.npc_policies[player_zero_id] = self.get_policy(
playerZero, idx=0
)
self.npc_state_queues[player_zero_id] = LifoQueue()
if playerOne != "human":
player_one_id = playerOne + "_1"
self.add_player(player_one_id, idx=1, buff_size=1, is_human=False)
self.npc_policies[player_one_id] = self.get_policy(
playerOne, idx=1
)
self.npc_state_queues[player_one_id] = LifoQueue()
# Always kill ray after loading agent, otherwise, ray will crash once process exits
# Only kill ray after loading both agents to avoid having to restart ray during loading
if ray.is_initialized():
ray.shutdown()
if kwargs["dataCollection"]:
self.write_data = True
self.write_config = kwargs["collection_config"]
else:
self.write_data = False
self.trajectory = []
def _curr_game_over(self):
return time() - self.start_time >= self.max_time
def needs_reset(self):
return self._curr_game_over() and not self.is_finished()
def add_player(self, player_id, idx=None, buff_size=-1, is_human=True):
super(OvercookedGame, self).add_player(
player_id, idx=idx, buff_size=buff_size
)
if is_human:
self.human_players.add(player_id)
else:
self.npc_players.add(player_id)
def remove_player(self, player_id):
removed = super(OvercookedGame, self).remove_player(player_id)
if removed:
if player_id in self.human_players:
self.human_players.remove(player_id)
elif player_id in self.npc_players:
self.npc_players.remove(player_id)
else:
raise ValueError("Inconsistent state")
def npc_policy_consumer(self, policy_id):
queue = self.npc_state_queues[policy_id]
policy = self.npc_policies[policy_id]
while self._is_active:
state = queue.get()
npc_action, _ = policy.action(state)
super(OvercookedGame, self).enqueue_action(policy_id, npc_action)
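    # NPC actions follow a producer/consumer pattern: apply_actions() pushes the
    # latest state onto a LifoQueue every `ticks_per_ai_action` ticks, and this
    # consumer thread pops it, runs the policy, and enqueues the chosen action.
    # The LIFO queue means a slow policy always reacts to the freshest state
    # rather than working through a stale backlog.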
def is_full(self):
return self.num_players >= self.max_players
def is_finished(self):
val = not self.layouts and self._curr_game_over()
return val
def is_empty(self):
"""
Game is considered safe to scrap if there are no active players or if there are no humans (spectating or playing)
"""
return (
super(OvercookedGame, self).is_empty()
or not self.spectators
and not self.human_players
)
def is_ready(self):
"""
Game is ready to be activated if there are a sufficient number of players and at least one human (spectator or player)
"""
return super(OvercookedGame, self).is_ready() and not self.is_empty()
def apply_action(self, player_id, action):
pass
def apply_actions(self):
# Default joint action, as NPC policies and clients probably don't enqueue actions fast
# enough to produce one at every tick
joint_action = [Action.STAY] * len(self.players)
# Synchronize individual player actions into a joint-action as required by overcooked logic
for i in range(len(self.players)):
# if this is a human, don't block and inject
if self.players[i] in self.human_players:
try:
# we don't block here in case humans want to Stay
joint_action[i] = self.pending_actions[i].get(block=False)
except Empty:
pass
else:
# we block on agent actions to ensure that the agent gets to do one action per state
joint_action[i] = self.pending_actions[i].get(block=True)
# Apply overcooked game logic to get state transition
prev_state = self.state
self.state, info = self.mdp.get_state_transition(
prev_state, joint_action
)
if self.show_potential:
self.phi = self.mdp.potential_function(
prev_state, self.mp, gamma=0.99
)
# Send next state to all background consumers if needed
if self.curr_tick % self.ticks_per_ai_action == 0:
for npc_id in self.npc_policies:
self.npc_state_queues[npc_id].put(self.state, block=False)
        # Update score based on soup deliveries that might have occurred
curr_reward = sum(info["sparse_reward_by_agent"])
self.score += curr_reward
transition = {
"state": json.dumps(prev_state.to_dict()),
"joint_action": json.dumps(joint_action),
"reward": curr_reward,
"time_left": max(self.max_time - (time() - self.start_time), 0),
"score": self.score,
"time_elapsed": time() - self.start_time,
"cur_gameloop": self.curr_tick,
"layout": json.dumps(self.mdp.terrain_mtx),
"layout_name": self.curr_layout,
"trial_id": str(self.start_time),
"player_0_id": self.players[0],
"player_1_id": self.players[1],
"player_0_is_human": self.players[0] in self.human_players,
"player_1_is_human": self.players[1] in self.human_players,
}
self.trajectory.append(transition)
        # Return info about the current transition
return prev_state, joint_action, info
def enqueue_action(self, player_id, action):
overcooked_action = self.action_to_overcooked_action[action]
super(OvercookedGame, self).enqueue_action(
player_id, overcooked_action
)
def reset(self):
status = super(OvercookedGame, self).reset()
if status == self.Status.RESET:
# Hacky way of making sure game timer doesn't "start" until after reset timeout has passed
self.start_time += self.reset_timeout / 1000
def tick(self):
self.curr_tick += 1
return super(OvercookedGame, self).tick()
def activate(self):
super(OvercookedGame, self).activate()
# Sanity check at start of each game
if not self.npc_players.union(self.human_players) == set(self.players):
raise ValueError("Inconsistent State")
self.curr_layout = self.layouts.pop()
self.mdp = OvercookedGridworld.from_layout_name(
self.curr_layout, **self.mdp_params
)
if self.show_potential:
self.mp = MotionPlanner.from_pickle_or_compute(
self.mdp, counter_goals=NO_COUNTERS_PARAMS
)
self.state = self.mdp.get_standard_start_state()
if self.show_potential:
self.phi = self.mdp.potential_function(
self.state, self.mp, gamma=0.99
)
self.start_time = time()
self.curr_tick = 0
self.score = 0
self.threads = []
for npc_policy in self.npc_policies:
self.npc_policies[npc_policy].reset()
self.npc_state_queues[npc_policy].put(self.state)
t = Thread(target=self.npc_policy_consumer, args=(npc_policy,))
self.threads.append(t)
t.start()
def deactivate(self):
super(OvercookedGame, self).deactivate()
# Ensure the background consumers do not hang
for npc_policy in self.npc_policies:
self.npc_state_queues[npc_policy].put(self.state)
# Wait for all background threads to exit
for t in self.threads:
t.join()
# Clear all action queues
self.clear_pending_actions()
def get_state(self):
state_dict = {}
state_dict["potential"] = self.phi if self.show_potential else None
state_dict["state"] = self.state.to_dict()
state_dict["score"] = self.score
state_dict["time_left"] = max(
self.max_time - (time() - self.start_time), 0
)
return state_dict
def to_json(self):
obj_dict = {}
obj_dict["terrain"] = self.mdp.terrain_mtx if self._is_active else None
obj_dict["state"] = self.get_state() if self._is_active else None
return obj_dict
def get_policy(self, npc_id, idx=0):
if npc_id.lower().startswith("rllib"):
try:
# Loading rllib agents requires additional helpers
fpath = os.path.join(AGENT_DIR, npc_id, "agent")
fix_bc_path(fpath)
agent = load_agent(fpath, agent_index=idx)
return agent
except Exception as e:
raise IOError(
"Error loading Rllib Agent\n{}".format(e.__repr__())
)
else:
try:
fpath = os.path.join(AGENT_DIR, npc_id, "agent.pickle")
with open(fpath, "rb") as f:
return pickle.load(f)
except Exception as e:
raise IOError("Error loading agent\n{}".format(e.__repr__()))
def get_data(self):
"""
Returns and then clears the accumulated trajectory
"""
data = {
"uid": str(time()),
"trajectory": self.trajectory,
}
self.trajectory = []
# if we want to store the data and there is data to store
if self.write_data and len(data["trajectory"]) > 0:
configs = self.write_config
# create necessary dirs
data_path = create_dirs(configs, self.curr_layout)
# the 3-layer-directory structure should be able to uniquely define any experiment
with open(os.path.join(data_path, "result.pkl"), "wb") as f:
pickle.dump(data, f)
return data
class OvercookedTutorial(OvercookedGame):
"""
Wrapper on OvercookedGame that includes additional data for tutorial mechanics, most notably the introduction of tutorial "phases"
Instance Variables:
- curr_phase (int): Indicates what tutorial phase we are currently on
- phase_two_score (float): The exact sparse reward the user must obtain to advance past phase 2
"""
def __init__(
self,
layouts=["tutorial_0"],
mdp_params={},
playerZero="human",
playerOne="AI",
phaseTwoScore=15,
**kwargs
):
super(OvercookedTutorial, self).__init__(
layouts=layouts,
mdp_params=mdp_params,
playerZero=playerZero,
playerOne=playerOne,
showPotential=False,
**kwargs
)
self.phase_two_score = phaseTwoScore
self.phase_two_finished = False
self.max_time = 0
self.max_players = 2
self.ticks_per_ai_action = 1
self.curr_phase = 0
# we don't collect tutorial data
self.write_data = False
@property
def reset_timeout(self):
return 1
def needs_reset(self):
if self.curr_phase == 0:
return self.score > 0
elif self.curr_phase == 1:
return self.score > 0
elif self.curr_phase == 2:
return self.phase_two_finished
return False
def is_finished(self):
return not self.layouts and self.score >= float("inf")
def reset(self):
super(OvercookedTutorial, self).reset()
self.curr_phase += 1
def get_policy(self, *args, **kwargs):
return TutorialAI()
def apply_actions(self):
"""
        Apply regular MDP logic with retroactive score adjustment for tutorial purposes
"""
_, _, info = super(OvercookedTutorial, self).apply_actions()
human_reward, ai_reward = info["sparse_reward_by_agent"]
# We only want to keep track of the human's score in the tutorial
self.score -= ai_reward
# Phase two requires a specific reward to complete
if self.curr_phase == 2:
self.score = 0
if human_reward == self.phase_two_score:
self.phase_two_finished = True
class DummyOvercookedGame(OvercookedGame):
"""
Class that hardcodes the AI to be random. Used for debugging
"""
def __init__(self, layouts=["cramped_room"], **kwargs):
super(DummyOvercookedGame, self).__init__(layouts, **kwargs)
def get_policy(self, *args, **kwargs):
return DummyAI()
class DummyAI:
"""
Randomly samples actions. Used for debugging
"""
def action(self, state):
[action] = random.sample(
[
Action.STAY,
Direction.NORTH,
Direction.SOUTH,
Direction.WEST,
Direction.EAST,
Action.INTERACT,
],
1,
)
return action, None
def reset(self):
pass
class DummyComputeAI(DummyAI):
"""
Performs simulated compute before randomly sampling actions. Used for debugging
"""
def __init__(self, compute_unit_iters=1e5):
"""
compute_unit_iters (int): Number of for loop cycles in one "unit" of compute. Number of
units performed each time is randomly sampled
"""
super(DummyComputeAI, self).__init__()
self.compute_unit_iters = int(compute_unit_iters)
def action(self, state):
# Randomly sample amount of time to busy wait
iters = random.randint(1, 10) * self.compute_unit_iters
# Actually compute something (can't sleep) to avoid scheduling optimizations
val = 0
for i in range(iters):
# Avoid branch prediction optimizations
if i % 2 == 0:
val += 1
else:
val += 2
# Return randomly sampled action
return super(DummyComputeAI, self).action(state)
class StayAI:
"""
Always returns "stay" action. Used for debugging
"""
def action(self, state):
return Action.STAY, None
def reset(self):
pass
class TutorialAI:
COOK_SOUP_LOOP = [
# Grab first onion
Direction.WEST,
Direction.WEST,
Direction.WEST,
Action.INTERACT,
# Place onion in pot
Direction.EAST,
Direction.NORTH,
Action.INTERACT,
# Grab second onion
Direction.WEST,
Action.INTERACT,
# Place onion in pot
Direction.EAST,
Direction.NORTH,
Action.INTERACT,
# Grab third onion
Direction.WEST,
Action.INTERACT,
# Place onion in pot
Direction.EAST,
Direction.NORTH,
Action.INTERACT,
# Cook soup
Action.INTERACT,
# Grab plate
Direction.EAST,
Direction.SOUTH,
Action.INTERACT,
Direction.WEST,
Direction.NORTH,
# Deliver soup
Action.INTERACT,
Direction.EAST,
Direction.EAST,
Direction.EAST,
Action.INTERACT,
Direction.WEST,
]
COOK_SOUP_COOP_LOOP = [
# Grab first onion
Direction.WEST,
Direction.WEST,
Direction.WEST,
Action.INTERACT,
# Place onion in pot
Direction.EAST,
Direction.SOUTH,
Action.INTERACT,
# Move to start so this loops
Direction.EAST,
Direction.EAST,
# Pause to make cooperation more real time
Action.STAY,
Action.STAY,
Action.STAY,
Action.STAY,
Action.STAY,
Action.STAY,
Action.STAY,
Action.STAY,
Action.STAY,
]
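    # Both scripts above are replayed cyclically via `curr_tick % len(...)` in
    # action(); reset() bumps curr_phase, and any phase other than 0 and 2 simply
    # returns Action.STAY.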
def __init__(self):
self.curr_phase = -1
self.curr_tick = -1
def action(self, state):
self.curr_tick += 1
if self.curr_phase == 0:
return (
self.COOK_SOUP_LOOP[self.curr_tick % len(self.COOK_SOUP_LOOP)],
None,
)
elif self.curr_phase == 2:
return (
self.COOK_SOUP_COOP_LOOP[
self.curr_tick % len(self.COOK_SOUP_COOP_LOOP)
],
None,
)
return Action.STAY, None
def reset(self):
self.curr_tick = -1
self.curr_phase += 1
| 31,851 | 32.248434 | 158 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/utils.py | import cProfile
import io
import json
import os
import pickle
import pstats
import tempfile
import uuid
from collections import defaultdict
from collections.abc import Iterable
from pathlib import Path
import numpy as np
from numpy import nan
from overcooked_ai_py.static import LAYOUTS_DIR
# I/O
def save_pickle(data, filename):
with open(fix_filetype(filename, ".pickle"), "wb") as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
def load_pickle(filename):
with open(fix_filetype(filename, ".pickle"), "rb") as f:
return pickle.load(f)
def load_dict_from_file(filepath):
with open(filepath, "r") as f:
return eval(f.read())
def save_dict_to_file(dic, filename):
dic = dict(dic)
with open(fix_filetype(filename, ".txt"), "w") as f:
f.write(str(dic))
def load_dict_from_txt(filename):
return load_dict_from_file(fix_filetype(filename, ".txt"))
def save_as_json(data, filename):
with open(fix_filetype(filename, ".json"), "w") as outfile:
json.dump(data, outfile)
return filename
def load_from_json(filename):
with open(fix_filetype(filename, ".json"), "r") as json_file:
return json.load(json_file)
def iterate_over_json_files_in_dir(dir_path):
pathlist = Path(dir_path).glob("*.json")
return [str(path) for path in pathlist]
def fix_filetype(path, filetype):
if path[-len(filetype) :] == filetype:
return path
else:
return path + filetype
def generate_temporary_file_path(
file_name=None, prefix="", suffix="", extension=""
):
if file_name is None:
file_name = str(uuid.uuid1())
if extension and not extension.startswith("."):
extension = "." + extension
file_name = prefix + file_name + suffix + extension
return os.path.join(tempfile.gettempdir(), file_name)
# MDP
def cumulative_rewards_from_rew_list(rews):
return [sum(rews[:t]) for t in range(len(rews))]
# Gridworld
def manhattan_distance(pos1, pos2):
"""Returns manhattan distance between two points in (x, y) format"""
return abs(pos1[0] - pos2[0]) + abs(pos1[1] - pos2[1])
def pos_distance(pos0, pos1):
return tuple(np.array(pos0) - np.array(pos1))
# Randomness
def rnd_uniform(low, high):
if low == high:
return low
return np.random.uniform(low, high)
def rnd_int_uniform(low, high):
if low == high:
return low
return np.random.choice(range(low, high + 1))
# Statistics
def std_err(lst):
"""Computes the standard error"""
sd = np.std(lst)
n = len(lst)
return sd / np.sqrt(n)
def mean_and_std_err(lst):
"Mean and standard error of list"
mu = np.mean(lst)
return mu, std_err(lst)
# Other utils
def dict_mean_and_std_err(d):
"""
    Takes in a dictionary with lists as values, and returns a dictionary
    with (mean, standard error) for each list as values
"""
assert all(isinstance(v, Iterable) for v in d.values())
result = {}
for k, v in d.items():
result[k] = mean_and_std_err(v)
return result
def append_dictionaries(dictionaries):
"""
Append many dictionaries with numbers as values into one dictionary with lists as values.
{a: 1, b: 2}, {a: 3, b: 0} -> {a: [1, 3], b: [2, 0]}
"""
assert all(
set(d.keys()) == set(dictionaries[0].keys()) for d in dictionaries
), "All key sets are the same across all dicts"
final_dict = defaultdict(list)
for d in dictionaries:
for k, v in d.items():
final_dict[k].append(v)
return dict(final_dict)
def merge_dictionaries(dictionaries):
"""
Merge many dictionaries by extending them to one another.
{a: [1, 7], b: [2, 5]}, {a: [3], b: [0]} -> {a: [1, 7, 3], b: [2, 5, 0]}
"""
assert all(
set(d.keys()) == set(dictionaries[0].keys()) for d in dictionaries
), "All key sets are the same across all dicts"
final_dict = defaultdict(list)
for d in dictionaries:
for k, v in d.items():
final_dict[k].extend(v)
return dict(final_dict)
def rm_idx_from_dict(d, idx):
"""
Takes in a dictionary with lists as values, and returns
a dictionary with lists as values, but containing
only the desired index
NOTE: this is a MUTATING METHOD, returns the POPPED IDX
"""
assert all(isinstance(v, Iterable) for v in d.values())
new_d = {}
for k, v in d.items():
new_d[k] = [d[k].pop(idx)]
return new_d
def take_indexes_from_dict(d, indices, keys_to_ignore=[]):
"""
Takes in a dictionary with lists as values, and returns
a dictionary with lists as values, but with subsampled indices
based on the `indices` input
"""
assert all(isinstance(v, Iterable) for v in d.values())
new_d = {}
for k, v in d.items():
if k in keys_to_ignore:
continue
new_d[k] = np.take(d[k], indices)
return new_d
def profile(fnc):
"""A decorator that uses cProfile to profile a function (from https://osf.io/upav8/)"""
def inner(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
retval = fnc(*args, **kwargs)
pr.disable()
s = io.StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats("cumulative")
ps.print_stats()
print(s.getvalue())
return retval
return inner
def read_layout_dict(layout_name):
return load_dict_from_file(
os.path.join(LAYOUTS_DIR, layout_name + ".layout")
)
class classproperty(property):
def __get__(self, cls, owner):
return classmethod(self.fget).__get__(None, owner)()
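# Minimal usage sketch for classproperty (hypothetical class):
#   class Foo:
#       @classproperty
#       def name(cls):
#           return cls.__name__
#   Foo.name  # -> "Foo"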
class OvercookedException(Exception):
pass
def is_iterable(obj):
return isinstance(obj, Iterable)
| 5,751 | 22.966667 | 93 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/__init__.py | from gym.envs.registration import register
register(
id="Overcooked-v0",
entry_point="overcooked_ai_py.mdp.overcooked_env:Overcooked",
)
| 146 | 20 | 65 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/static.py | import os
_current_dir = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(_current_dir, "data")
HUMAN_DATA_DIR = os.path.join(DATA_DIR, "human_data")
PLANNERS_DIR = os.path.join(DATA_DIR, "planners")
LAYOUTS_DIR = os.path.join(DATA_DIR, "layouts")
GRAPHICS_DIR = os.path.join(DATA_DIR, "graphics")
FONTS_DIR = os.path.join(DATA_DIR, "fonts")
TESTING_DATA_DIR = os.path.join(DATA_DIR, "testing")
| 414 | 36.727273 | 57 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/agents/benchmarking.py | import copy
import numpy as np
from overcooked_ai_py.agents.agent import (
AgentPair,
GreedyHumanModel,
RandomAgent,
)
from overcooked_ai_py.mdp.layout_generator import LayoutGenerator
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.mdp.overcooked_mdp import (
Action,
OvercookedGridworld,
OvercookedState,
)
from overcooked_ai_py.mdp.overcooked_trajectory import DEFAULT_TRAJ_KEYS
from overcooked_ai_py.planning.planners import NO_COUNTERS_PARAMS
from overcooked_ai_py.utils import (
cumulative_rewards_from_rew_list,
is_iterable,
load_from_json,
load_pickle,
merge_dictionaries,
rm_idx_from_dict,
save_as_json,
save_pickle,
take_indexes_from_dict,
)
class AgentEvaluator(object):
"""
Class used to get rollouts and evaluate performance of various types of agents.
TODO: This class currently only fully supports fixed mdps, or variable mdps that can be created with the LayoutGenerator class,
but might break with other types of variable mdps. Some methods currently assume that the AgentEvaluator can be reconstructed
from loaded params (which must be pickleable). However, some custom start_state_fns or mdp_generating_fns will not be easily
pickleable. We should think about possible improvements/what makes most sense to do here.
"""
def __init__(
self,
env_params,
mdp_fn,
force_compute=False,
mlam_params=NO_COUNTERS_PARAMS,
debug=False,
):
"""
env_params (dict): params for creation of an OvercookedEnv
mdp_fn (callable function): a function that can be used to create mdp
        force_compute (bool): whether to re-compute the MediumLevelActionManager even if a matching saved file is found
mlam_params (dict): the parameters for mlam, the MediumLevelActionManager
debug (bool): whether to display debugging information on init
"""
assert callable(
mdp_fn
), "mdp generating function must be a callable function"
env_params["mlam_params"] = mlam_params
self.mdp_fn = mdp_fn
self.env = OvercookedEnv(self.mdp_fn, **env_params)
self.force_compute = force_compute
@staticmethod
def from_mdp_params_infinite(
mdp_params,
env_params,
outer_shape=None,
mdp_params_schedule_fn=None,
force_compute=False,
mlam_params=NO_COUNTERS_PARAMS,
debug=False,
):
"""
mdp_params (dict): params for creation of an OvercookedGridworld instance through the `from_layout_name` method
outer_shape: the outer shape of environment
mdp_params_schedule_fn: the schedule for varying mdp params
Information for the rest of params please refer to the __init__ method above
Infinitely generate mdp using the naive mdp_fn
"""
assert (
outer_shape is not None
), "outer_shape needs to be defined for variable mdp"
assert "num_mdp" in env_params and np.isinf(
env_params["num_mdp"]
), "num_mdp needs to be specified and infinite"
mdp_fn_naive = LayoutGenerator.mdp_gen_fn_from_dict(
mdp_params, outer_shape, mdp_params_schedule_fn
)
return AgentEvaluator(
env_params, mdp_fn_naive, force_compute, mlam_params, debug
)
@staticmethod
def from_mdp_params_finite(
mdp_params,
env_params,
outer_shape=None,
mdp_params_schedule_fn=None,
force_compute=False,
mlam_params=NO_COUNTERS_PARAMS,
debug=False,
):
"""
mdp_params (dict): params for creation of an OvercookedGridworld instance through the `from_layout_name` method
outer_shape: the outer shape of environment
mdp_params_schedule_fn: the schedule for varying mdp params
Information for the rest of params please refer to the __init__ method above
Generate a finite list of mdp (mdp_lst) using the naive mdp_fn, and then use the from_mdp_lst to generate
the AgentEvaluator
"""
assert (
outer_shape is not None
), "outer_shape needs to be defined for variable mdp"
assert "num_mdp" in env_params and not np.isinf(
env_params["num_mdp"]
), "num_mdp needs to be specified and finite"
mdp_fn_naive = LayoutGenerator.mdp_gen_fn_from_dict(
mdp_params, outer_shape, mdp_params_schedule_fn
)
# finite mdp, random choice
num_mdp = env_params["num_mdp"]
assert (
type(num_mdp) == int and num_mdp > 0
), "invalid number of mdp: " + str(num_mdp)
mdp_lst = [mdp_fn_naive() for _ in range(num_mdp)]
return AgentEvaluator.from_mdp_lst(
mdp_lst=mdp_lst,
env_params=env_params,
force_compute=force_compute,
mlam_params=mlam_params,
debug=debug,
)
@staticmethod
def from_mdp(
mdp,
env_params,
force_compute=False,
mlam_params=NO_COUNTERS_PARAMS,
debug=False,
):
"""
mdp (OvercookedGridworld): the mdp that we want the AgentEvaluator to always generate
Information for the rest of params please refer to the __init__ method above
"""
assert (
type(mdp) == OvercookedGridworld
), "mdp must be a OvercookedGridworld object"
mdp_fn = lambda _ignored: mdp
return AgentEvaluator(
env_params, mdp_fn, force_compute, mlam_params, debug
)
@staticmethod
def from_layout_name(
mdp_params,
env_params,
force_compute=False,
mlam_params=NO_COUNTERS_PARAMS,
debug=False,
):
"""
mdp_params (dict): params for creation of an OvercookedGridworld instance through the `from_layout_name` method
Information for the rest of params please refer to the __init__ method above
"""
assert type(mdp_params) is dict and "layout_name" in mdp_params
mdp = OvercookedGridworld.from_layout_name(**mdp_params)
return AgentEvaluator.from_mdp(
mdp, env_params, force_compute, mlam_params, debug
)
@staticmethod
def from_mdp_lst(
mdp_lst,
env_params,
sampling_freq=None,
force_compute=False,
mlam_params=NO_COUNTERS_PARAMS,
debug=False,
):
"""
        mdp_lst (list): a list of mdps (OvercookedGridworld) we would like to sample from
sampling_freq (list): a list of number that signify the sampling frequency of each mdp in the mdp_lst
Information for the rest of params please refer to the __init__ method above
"""
assert is_iterable(mdp_lst), "mdp_lst must be a list"
assert all(
[type(mdp) == OvercookedGridworld for mdp in mdp_lst]
), "some mdps are not OvercookedGridworld objects"
if sampling_freq is None:
sampling_freq = np.ones(len(mdp_lst)) / len(mdp_lst)
mdp_fn = lambda _ignored: np.random.choice(mdp_lst, p=sampling_freq)
return AgentEvaluator(
env_params, mdp_fn, force_compute, mlam_params, debug
)
def evaluate_random_pair(
self, num_games=1, all_actions=True, display=False, native_eval=False
):
agent_pair = AgentPair(
RandomAgent(all_actions=all_actions),
RandomAgent(all_actions=all_actions),
)
return self.evaluate_agent_pair(
agent_pair,
num_games=num_games,
display=display,
native_eval=native_eval,
)
def evaluate_human_model_pair(
self, num_games=1, display=False, native_eval=False
):
a0 = GreedyHumanModel(self.env.mlam)
a1 = GreedyHumanModel(self.env.mlam)
agent_pair = AgentPair(a0, a1)
return self.evaluate_agent_pair(
agent_pair,
num_games=num_games,
display=display,
native_eval=native_eval,
)
def evaluate_agent_pair(
self,
agent_pair,
num_games,
game_length=None,
start_state_fn=None,
metadata_fn=None,
metadata_info_fn=None,
display=False,
dir=None,
display_phi=False,
info=True,
native_eval=False,
):
        # this index has to be 0 because the AgentEvaluator only has 1 env initialized
        # if you would like to evaluate on a different env using rllib, please modify
# rllib/ -> rllib.py -> get_rllib_eval_function -> _evaluate
# native eval: using self.env in evaluation instead of creating a copy
        # this is particularly helpful with a variable MDP, where we want to make sure
# the mdp used in evaluation is the same as the native self.env.mdp
if native_eval:
return self.env.get_rollouts(
agent_pair,
num_games=num_games,
display=display,
dir=dir,
display_phi=display_phi,
info=info,
metadata_fn=metadata_fn,
metadata_info_fn=metadata_info_fn,
)
else:
horizon_env = self.env.copy()
horizon_env.horizon = (
self.env.horizon if game_length is None else game_length
)
horizon_env.start_state_fn = (
self.env.start_state_fn
if start_state_fn is None
else start_state_fn
)
horizon_env.reset()
return horizon_env.get_rollouts(
agent_pair,
num_games=num_games,
display=display,
dir=dir,
display_phi=display_phi,
info=info,
metadata_fn=metadata_fn,
metadata_info_fn=metadata_info_fn,
)
def get_agent_pair_trajs(
self,
a0,
a1=None,
num_games=100,
game_length=None,
start_state_fn=None,
display=False,
info=True,
):
"""Evaluate agent pair on both indices, and return trajectories by index"""
if a1 is None:
ap = AgentPair(a0, a0, allow_duplicate_agents=True)
trajs_0 = trajs_1 = self.evaluate_agent_pair(
ap,
num_games=num_games,
game_length=game_length,
start_state_fn=start_state_fn,
display=display,
info=info,
)
else:
trajs_0 = self.evaluate_agent_pair(
AgentPair(a0, a1),
num_games=num_games,
game_length=game_length,
start_state_fn=start_state_fn,
display=display,
info=info,
)
trajs_1 = self.evaluate_agent_pair(
AgentPair(a1, a0),
num_games=num_games,
game_length=game_length,
start_state_fn=start_state_fn,
display=display,
info=info,
)
return trajs_0, trajs_1
@staticmethod
def check_trajectories(trajectories, from_json=False, **kwargs):
"""
        Checks that trajectories are in standard format and are consistent with the dynamics of the mdp.
        If the trajectories were saved as json, do not check that they have standard traj keys.
"""
if not from_json:
AgentEvaluator._check_standard_traj_keys(set(trajectories.keys()))
AgentEvaluator._check_right_types(trajectories)
# TODO: add this back in
# AgentEvaluator._check_trajectories_dynamics(trajectories, **kwargs)
# TODO: Check shapes?
@staticmethod
def _check_standard_traj_keys(traj_keys_set):
default_traj_keys = DEFAULT_TRAJ_KEYS
assert traj_keys_set == set(
default_traj_keys
), "Keys of traj dict did not match standard form.\nMissing keys: {}\nAdditional keys: {}".format(
[k for k in default_traj_keys if k not in traj_keys_set],
[k for k in traj_keys_set if k not in default_traj_keys],
)
@staticmethod
def _check_right_types(trajectories):
for idx in range(len(trajectories["ep_states"])):
states, actions, rewards = (
trajectories["ep_states"][idx],
trajectories["ep_actions"][idx],
trajectories["ep_rewards"][idx],
)
mdp_params, env_params = (
trajectories["mdp_params"][idx],
trajectories["env_params"][idx],
)
assert all(type(j_a) is tuple for j_a in actions)
assert all(type(s) is OvercookedState for s in states)
assert type(mdp_params) is dict
assert type(env_params) is dict
# TODO: check that are all lists
@staticmethod
def _check_trajectories_dynamics(trajectories, verbose=True):
if any(
env_params["num_mdp"] > 1
for env_params in trajectories["env_params"]
):
if verbose:
print(
"Skipping trajectory consistency checking because MDP was recognized as variable. "
"Trajectory consistency checking is not yet supported for variable MDPs."
)
return
_, envs = AgentEvaluator.get_mdps_and_envs_from_trajectories(
trajectories
)
for idx in range(len(trajectories["ep_states"])):
states, actions, rewards = (
trajectories["ep_states"][idx],
trajectories["ep_actions"][idx],
trajectories["ep_rewards"][idx],
)
simulation_env = envs[idx]
assert (
len(states) == len(actions) == len(rewards)
), "# states {}\t# actions {}\t# rewards {}".format(
len(states), len(actions), len(rewards)
)
# Checking that actions would give rise to same behaviour in current MDP
for i in range(len(states) - 1):
curr_state = states[i]
simulation_env.state = curr_state
next_state, reward, done, info = simulation_env.step(
actions[i]
)
assert (
states[i + 1] == next_state
), "States differed (expected vs actual): {}\n\nexpected dict: \t{}\nactual dict: \t{}".format(
simulation_env.display_states(states[i + 1], next_state),
states[i + 1].to_dict(),
next_state.to_dict(),
)
assert rewards[i] == reward, "{} \t {}".format(
rewards[i], reward
)
@staticmethod
def get_mdps_and_envs_from_trajectories(trajectories):
mdps, envs = [], []
for idx in range(len(trajectories["ep_lengths"])):
mdp_params = copy.deepcopy(trajectories["mdp_params"][idx])
env_params = copy.deepcopy(trajectories["env_params"][idx])
mdp = OvercookedGridworld.from_layout_name(**mdp_params)
env = OvercookedEnv.from_mdp(mdp, **env_params)
mdps.append(mdp)
envs.append(env)
return mdps, envs
### I/O METHODS ###
@staticmethod
def save_trajectories(trajectories, filename):
AgentEvaluator.check_trajectories(trajectories)
if any(
t["env_params"]["start_state_fn"] is not None for t in trajectories
):
print(
"Saving trajectories with a custom start state. This can currently "
"cause things to break when loading in the trajectories."
)
save_pickle(trajectories, filename)
@staticmethod
def load_trajectories(filename):
trajs = load_pickle(filename)
AgentEvaluator.check_trajectories(trajs)
return trajs
@staticmethod
def save_traj_as_json(trajectory, filename):
"""Saves the `idx`th trajectory as a list of state action pairs"""
assert set(DEFAULT_TRAJ_KEYS) == set(
trajectory.keys()
), "{} vs\n{}".format(DEFAULT_TRAJ_KEYS, trajectory.keys())
AgentEvaluator.check_trajectories(trajectory)
trajectory = AgentEvaluator.make_trajectories_json_serializable(
trajectory
)
save_as_json(trajectory, filename)
@staticmethod
def make_trajectories_json_serializable(trajectories):
"""
Cannot convert np.arrays or special types of ints to JSON.
This method converts all components of a trajectory to standard types.
"""
dict_traj = copy.deepcopy(trajectories)
dict_traj["ep_states"] = [
[ob.to_dict() for ob in one_ep_obs]
for one_ep_obs in trajectories["ep_states"]
]
for k in dict_traj.keys():
dict_traj[k] = list(dict_traj[k])
dict_traj["ep_actions"] = [
list(lst) for lst in dict_traj["ep_actions"]
]
dict_traj["ep_rewards"] = [
list(lst) for lst in dict_traj["ep_rewards"]
]
dict_traj["ep_dones"] = [list(lst) for lst in dict_traj["ep_dones"]]
dict_traj["ep_returns"] = [int(val) for val in dict_traj["ep_returns"]]
dict_traj["ep_lengths"] = [int(val) for val in dict_traj["ep_lengths"]]
# NOTE: Currently saving to JSON does not support ep_infos (due to nested np.arrays) or metadata
del dict_traj["ep_infos"]
del dict_traj["metadatas"]
return dict_traj
@staticmethod
def load_traj_from_json(filename):
traj_dict = load_from_json(filename)
traj_dict["ep_states"] = [
[OvercookedState.from_dict(ob) for ob in curr_ep_obs]
for curr_ep_obs in traj_dict["ep_states"]
]
traj_dict["ep_actions"] = [
[
tuple(tuple(a) if type(a) is list else a for a in j_a)
for j_a in ep_acts
]
for ep_acts in traj_dict["ep_actions"]
]
return traj_dict
############################
    # TRAJ MANIPULATION UTILS  #
############################
# TODO: add more documentation!
@staticmethod
def merge_trajs(trajs_n):
"""
Takes in multiple trajectory objects and appends all the information into one trajectory object
[trajs0, trajs1] -> trajs
"""
metadatas_merged = merge_dictionaries(
[trajs["metadatas"] for trajs in trajs_n]
)
merged_trajs = merge_dictionaries(trajs_n)
merged_trajs["metadatas"] = metadatas_merged
return merged_trajs
@staticmethod
def remove_traj_idx(trajs, idx):
# NOTE: MUTATING METHOD for trajs, returns the POPPED IDX
metadatas = trajs["metadatas"]
del trajs["metadatas"]
removed_idx_d = rm_idx_from_dict(trajs, idx)
removed_idx_metas = rm_idx_from_dict(metadatas, idx)
trajs["metadatas"] = metadatas
removed_idx_d["metadatas"] = removed_idx_metas
return removed_idx_d
@staticmethod
def take_traj_indices(trajs, indices):
# NOTE: non mutating method
subset_trajs = take_indexes_from_dict(
trajs, indices, keys_to_ignore=["metadatas"]
)
# TODO: Make metadatas field into additional keys for trajs, rather than having a metadatas field?
subset_trajs["metadatas"] = take_indexes_from_dict(
trajs["metadatas"], indices
)
return subset_trajs
@staticmethod
def add_metadata_to_traj(trajs, metadata_fn, input_keys):
"""
Add an additional metadata entry to the trajectory, based on manipulating
the trajectory `input_keys` values
"""
metadata_fn_input = [trajs[k] for k in input_keys]
metadata_key, metadata_data = metadata_fn(metadata_fn_input)
assert metadata_key not in trajs["metadatas"].keys()
trajs["metadatas"][metadata_key] = metadata_data
return trajs
@staticmethod
def add_observations_to_trajs_in_metadata(trajs, encoding_fn):
"""Adds processed observations (for both agent indices) in the metadatas"""
def metadata_fn(data):
traj_ep_states = data[0]
obs_metadata = []
for one_traj_states in traj_ep_states:
obs_metadata.append([encoding_fn(s) for s in one_traj_states])
return "ep_obs_for_both_agents", obs_metadata
return AgentEvaluator.add_metadata_to_traj(
trajs, metadata_fn, ["ep_states"]
)
# EVENTS VISUALIZATION METHODS #
@staticmethod
def events_visualization(trajs, traj_index):
# TODO
pass
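# Editor's illustrative sketch (not part of the original module): building an evaluator
# for a fixed layout and rolling out a pair of random agents. "cramped_room" is assumed
# to be one of the layouts shipped with the package; the horizon value is arbitrary.
if __name__ == "__main__":
    ae = AgentEvaluator.from_layout_name(
        mdp_params={"layout_name": "cramped_room"},
        env_params={"horizon": 400},
    )
    trajs = ae.evaluate_random_pair(num_games=1)
    print("episode returns:", trajs["ep_returns"])
    print("episode lengths:", trajs["ep_lengths"])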
| 21,055 | 35.366149 | 131 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/agents/agent.py | import itertools
import math
import os
from collections import defaultdict
import dill
import numpy as np
from overcooked_ai_py.mdp.actions import Action
from overcooked_ai_py.mdp.overcooked_mdp import Recipe
from overcooked_ai_py.utils import OvercookedException
class Agent(object):
agent_file_name = "agent.pickle"
def __init__(self):
self.reset()
def action(self, state):
"""
Should return an action, and an action info dictionary.
If collecting trajectories of the agent with OvercookedEnv, the action
info data will be included in the trajectory data under `ep_infos`.
This allows agents to optionally store useful information about them
in the trajectory for further analysis.
"""
return NotImplementedError()
def actions(self, states, agent_indices):
"""
        A multi-state version of the action method. This enables parallelized
        implementations that can potentially give speedups in action prediction.
Args:
states (list): list of OvercookedStates for which we want actions for
agent_indices (list): list to inform which agent we are requesting the action for in each state
Returns:
[(action, action_info), (action, action_info), ...]: the actions and action infos for each state-agent_index pair
"""
return NotImplementedError()
@staticmethod
def a_probs_from_action(action):
action_idx = Action.ACTION_TO_INDEX[action]
return np.eye(Action.NUM_ACTIONS)[action_idx]
@staticmethod
def check_action_probs(action_probs, tolerance=1e-4):
"""Check that action probabilities sum to ≈ 1.0"""
probs_sum = sum(action_probs)
assert math.isclose(
probs_sum, 1.0, rel_tol=tolerance
), "Action probabilities {} should sum up to approximately 1 but sum up to {}".format(
list(action_probs), probs_sum
)
def set_agent_index(self, agent_index):
self.agent_index = agent_index
def set_mdp(self, mdp):
self.mdp = mdp
def reset(self):
"""
One should always reset agents in between trajectory rollouts, as resetting
usually clears history or other trajectory-specific attributes.
"""
self.agent_index = None
self.mdp = None
def save(self, path):
if os.path.isfile(path):
raise IOError(
"Must specify a path to directory! Got: {}".format(path)
)
if not os.path.exists(path):
os.makedirs(path)
pickle_path = os.path.join(path, self.agent_file_name)
with open(pickle_path, "wb") as f:
dill.dump(self, f)
return path
@classmethod
def load(cls, path):
if os.path.isdir(path):
path = os.path.join(path, cls.agent_file_name)
try:
with open(path, "rb") as f:
obj = dill.load(f)
return obj
except OvercookedException:
Recipe.configure({})
with open(path, "rb") as f:
obj = dill.load(f)
return obj
class AgentGroup(object):
"""
AgentGroup is a group of N agents used to sample
joint actions in the context of an OvercookedEnv instance.
"""
def __init__(self, *agents, allow_duplicate_agents=False):
self.agents = agents
self.n = len(self.agents)
self.reset()
if not all(
a0 is not a1 for a0, a1 in itertools.combinations(agents, 2)
):
assert (
allow_duplicate_agents
), "All agents should be separate instances, unless allow_duplicate_agents is set to true"
def joint_action(self, state):
actions_and_probs_n = tuple(a.action(state) for a in self.agents)
return actions_and_probs_n
def set_mdp(self, mdp):
for a in self.agents:
a.set_mdp(mdp)
def reset(self):
"""
When resetting an agent group, we know that the agent indices will remain the same,
but we have no guarantee about the mdp, that must be set again separately.
"""
for i, agent in enumerate(self.agents):
agent.reset()
agent.set_agent_index(i)
class AgentPair(AgentGroup):
"""
AgentPair is the N=2 case of AgentGroup. Unlike AgentGroup,
it supports having both agents being the same instance of Agent.
    NOTE: Allowing duplicate agents (using the same instance of an agent
    for both fields) can lead to problems if the agents have state / history
"""
def __init__(self, *agents, allow_duplicate_agents=False):
super().__init__(
*agents, allow_duplicate_agents=allow_duplicate_agents
)
assert self.n == 2
self.a0, self.a1 = self.agents
def joint_action(self, state):
if self.a0 is self.a1:
# When using the same instance of an agent for self-play,
# reset agent index at each turn to prevent overwriting it
self.a0.set_agent_index(0)
action_and_infos_0 = self.a0.action(state)
self.a1.set_agent_index(1)
action_and_infos_1 = self.a1.action(state)
joint_action_and_infos = (action_and_infos_0, action_and_infos_1)
return joint_action_and_infos
else:
return super().joint_action(state)
class NNPolicy(object):
"""
This is a common format for NN-based policies. Once one has wrangled the intended trained neural net
to this format, one can then easily create an Agent with the AgentFromPolicy class.
"""
def __init__(self):
pass
def multi_state_policy(self, states, agent_indices):
"""
A function that takes in multiple OvercookedState instances and their respective agent indices and returns action probabilities.
"""
raise NotImplementedError()
def multi_obs_policy(self, states):
"""
        A function that takes in multiple preprocessed OvercookedState instances and returns action probabilities.
"""
raise NotImplementedError()
class AgentFromPolicy(Agent):
"""
This is a useful Agent class backbone from which to subclass from NN-based agents.
"""
def __init__(self, policy):
"""
Takes as input an NN Policy instance
"""
self.policy = policy
self.reset()
def action(self, state):
return self.actions([state], [self.agent_index])[0]
def actions(self, states, agent_indices):
action_probs_n = self.policy.multi_state_policy(states, agent_indices)
actions_and_infos_n = []
for action_probs in action_probs_n:
action = Action.sample(action_probs)
actions_and_infos_n.append(
(action, {"action_probs": action_probs})
)
return actions_and_infos_n
def set_mdp(self, mdp):
super().set_mdp(mdp)
self.policy.mdp = mdp
def reset(self):
super(AgentFromPolicy, self).reset()
self.policy.mdp = None
class RandomAgent(Agent):
"""
An agent that randomly picks motion actions.
NOTE: Does not perform interact actions, unless specified
"""
def __init__(
self, sim_threads=None, all_actions=False, custom_wait_prob=None
):
self.sim_threads = sim_threads
self.all_actions = all_actions
self.custom_wait_prob = custom_wait_prob
def action(self, state):
action_probs = np.zeros(Action.NUM_ACTIONS)
legal_actions = list(Action.MOTION_ACTIONS)
if self.all_actions:
legal_actions = Action.ALL_ACTIONS
legal_actions_indices = np.array(
[Action.ACTION_TO_INDEX[motion_a] for motion_a in legal_actions]
)
action_probs[legal_actions_indices] = 1 / len(legal_actions_indices)
if self.custom_wait_prob is not None:
stay = Action.STAY
if np.random.random() < self.custom_wait_prob:
return stay, {"action_probs": Agent.a_probs_from_action(stay)}
else:
action_probs = Action.remove_indices_and_renormalize(
action_probs, [Action.ACTION_TO_INDEX[stay]]
)
return Action.sample(action_probs), {"action_probs": action_probs}
def actions(self, states, agent_indices):
return [self.action(state) for state in states]
def direct_action(self, obs):
return [np.random.randint(4) for _ in range(self.sim_threads)]
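# Editor's illustrative sketch (not part of the original module): RandomAgent ignores the
# state argument (unless custom_wait_prob is set), so a pair of them can be exercised
# without building an OvercookedGridworld; state=None below is purely for demonstration.
def _example_random_agent_pair():
    pair = AgentPair(RandomAgent(all_actions=True), RandomAgent(all_actions=True))
    (a0, info0), (a1, info1) = pair.joint_action(state=None)
    Agent.check_action_probs(info0["action_probs"])  # sanity check: probs sum to ~1
    return Action.joint_action_to_char((a0, a1))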
class StayAgent(Agent):
def __init__(self, sim_threads=None):
self.sim_threads = sim_threads
def action(self, state):
a = Action.STAY
return a, {}
def direct_action(self, obs):
return [Action.ACTION_TO_INDEX[Action.STAY]] * self.sim_threads
class FixedPlanAgent(Agent):
"""
An Agent with a fixed plan. Returns Stay actions once pre-defined plan has terminated.
# NOTE: Assumes that calls to action are sequential (agent has history)
"""
def __init__(self, plan):
self.plan = plan
self.i = 0
def action(self, state):
if self.i >= len(self.plan):
return Action.STAY, {}
curr_action = self.plan[self.i]
self.i += 1
return curr_action, {}
def reset(self):
super().reset()
self.i = 0
class GreedyHumanModel(Agent):
"""
Agent that at each step selects a medium level action corresponding
to the most intuitively high-priority thing to do
NOTE: MIGHT NOT WORK IN ALL ENVIRONMENTS, for example forced_coordination.layout,
in which an individual agent cannot complete the task on their own.
Will work only in environments where the only order is 3 onion soup.
"""
def __init__(
self,
mlam,
hl_boltzmann_rational=False,
ll_boltzmann_rational=False,
hl_temp=1,
ll_temp=1,
auto_unstuck=True,
):
self.mlam = mlam
self.mdp = self.mlam.mdp
# Bool for perfect rationality vs Boltzmann rationality for high level and low level action selection
self.hl_boltzmann_rational = hl_boltzmann_rational # For choices among high level goals of same type
self.ll_boltzmann_rational = (
ll_boltzmann_rational # For choices about low level motion
)
# Coefficient for Boltzmann rationality for high level action selection
self.hl_temperature = hl_temp
self.ll_temperature = ll_temp
# Whether to automatically take an action to get the agent unstuck if it's in the same
# state as the previous turn. If false, the agent is history-less, while if true it has history.
self.auto_unstuck = auto_unstuck
self.reset()
def reset(self):
super().reset()
self.prev_state = None
def actions(self, states, agent_indices):
actions_and_infos_n = []
for state, agent_idx in zip(states, agent_indices):
self.set_agent_index(agent_idx)
self.reset()
actions_and_infos_n.append(self.action(state))
return actions_and_infos_n
def action(self, state):
possible_motion_goals = self.ml_action(state)
# Once we have identified the motion goals for the medium
# level action we want to perform, select the one with lowest cost
start_pos_and_or = state.players_pos_and_or[self.agent_index]
chosen_goal, chosen_action, action_probs = self.choose_motion_goal(
start_pos_and_or, possible_motion_goals
)
if (
self.ll_boltzmann_rational
and chosen_goal[0] == start_pos_and_or[0]
):
chosen_action, action_probs = self.boltzmann_rational_ll_action(
start_pos_and_or, chosen_goal
)
if self.auto_unstuck:
# HACK: if two agents get stuck, select an action at random that would
# change the player positions if the other player were not to move
if (
self.prev_state is not None
and state.players_pos_and_or
== self.prev_state.players_pos_and_or
):
if self.agent_index == 0:
joint_actions = list(
itertools.product(Action.ALL_ACTIONS, [Action.STAY])
)
elif self.agent_index == 1:
joint_actions = list(
itertools.product([Action.STAY], Action.ALL_ACTIONS)
)
else:
raise ValueError("Player index not recognized")
unblocking_joint_actions = []
for j_a in joint_actions:
new_state, _ = self.mlam.mdp.get_state_transition(
state, j_a
)
if (
new_state.player_positions
!= self.prev_state.player_positions
):
unblocking_joint_actions.append(j_a)
                # Getting stuck became a possibility simply because of the nature of the layout (having a dip in the middle)
if len(unblocking_joint_actions) == 0:
unblocking_joint_actions.append([Action.STAY, Action.STAY])
chosen_action = unblocking_joint_actions[
np.random.choice(len(unblocking_joint_actions))
][self.agent_index]
action_probs = self.a_probs_from_action(chosen_action)
# NOTE: Assumes that calls to the action method are sequential
self.prev_state = state
return chosen_action, {"action_probs": action_probs}
def choose_motion_goal(self, start_pos_and_or, motion_goals):
"""
For each motion goal, consider the optimal motion plan that reaches the desired location.
Based on the plan's cost, the method chooses a motion goal (either boltzmann rationally
or rationally), and returns the plan and the corresponding first action on that plan.
"""
if self.hl_boltzmann_rational:
possible_plans = [
self.mlam.motion_planner.get_plan(start_pos_and_or, goal)
for goal in motion_goals
]
plan_costs = [plan[2] for plan in possible_plans]
goal_idx, action_probs = self.get_boltzmann_rational_action_idx(
plan_costs, self.hl_temperature
)
chosen_goal = motion_goals[goal_idx]
chosen_goal_action = possible_plans[goal_idx][0][0]
else:
(
chosen_goal,
chosen_goal_action,
) = self.get_lowest_cost_action_and_goal(
start_pos_and_or, motion_goals
)
action_probs = self.a_probs_from_action(chosen_goal_action)
return chosen_goal, chosen_goal_action, action_probs
def get_boltzmann_rational_action_idx(self, costs, temperature):
"""Chooses index based on softmax probabilities obtained from cost array"""
costs = np.array(costs)
softmax_probs = np.exp(-costs * temperature) / np.sum(
np.exp(-costs * temperature)
)
action_idx = np.random.choice(len(costs), p=softmax_probs)
return action_idx, softmax_probs
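    # Worked example (editor's note, numbers assumed for illustration): with costs = [1, 2]
    # and temperature = 1, the softmax over negated costs gives
    #   exp(-1) / (exp(-1) + exp(-2)) ≈ 0.731   and   exp(-2) / (exp(-1) + exp(-2)) ≈ 0.269,
    # so the cheaper goal is sampled roughly 73% of the time. Larger temperature values make
    # the choice sharper, while temperature -> 0 approaches a uniform random pick.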
def get_lowest_cost_action_and_goal(self, start_pos_and_or, motion_goals):
"""
Chooses motion goal that has the lowest cost action plan.
Returns the motion goal itself and the first action on the plan.
"""
        min_cost = np.inf
best_action, best_goal = None, None
for goal in motion_goals:
action_plan, _, plan_cost = self.mlam.motion_planner.get_plan(
start_pos_and_or, goal
)
if plan_cost < min_cost:
best_action = action_plan[0]
min_cost = plan_cost
best_goal = goal
return best_goal, best_action
def boltzmann_rational_ll_action(
self, start_pos_and_or, goal, inverted_costs=False
):
"""
Computes the plan cost to reach the goal after taking each possible low level action.
Selects a low level action boltzmann rationally based on the one-step-ahead plan costs.
If `inverted_costs` is True, it will make a boltzmann "irrational" choice, exponentially
favouring high cost plans rather than low cost ones.
"""
future_costs = []
for action in Action.ALL_ACTIONS:
pos, orient = start_pos_and_or
new_pos_and_or = self.mdp._move_if_direction(pos, orient, action)
_, _, plan_cost = self.mlam.motion_planner.get_plan(
new_pos_and_or, goal
)
sign = (-1) ** int(inverted_costs)
future_costs.append(sign * plan_cost)
action_idx, action_probs = self.get_boltzmann_rational_action_idx(
future_costs, self.ll_temperature
)
return Action.ALL_ACTIONS[action_idx], action_probs
def ml_action(self, state):
"""
Selects a medium level action for the current state.
        Motion goals can be thought of as instructions of the form:
[do X] at location [Y]
In this method, X (e.g. deliver the soup, pick up an onion, etc) is chosen based on
a simple set of greedy heuristics based on the current state.
Effectively, will return a list of all possible locations Y in which the selected
medium level action X can be performed.
"""
player = state.players[self.agent_index]
other_player = state.players[1 - self.agent_index]
am = self.mlam
counter_objects = self.mlam.mdp.get_counter_objects_dict(
state, list(self.mlam.mdp.terrain_pos_dict["X"])
)
pot_states_dict = self.mlam.mdp.get_pot_states(state)
if not player.has_object():
ready_soups = pot_states_dict["ready"]
cooking_soups = pot_states_dict["cooking"]
soup_nearly_ready = len(ready_soups) > 0 or len(cooking_soups) > 0
other_has_dish = (
other_player.has_object()
and other_player.get_object().name == "dish"
)
if soup_nearly_ready and not other_has_dish:
motion_goals = am.pickup_dish_actions(counter_objects)
else:
assert len(state.all_orders) == 1 and list(
state.all_orders[0].ingredients
) == ["onion", "onion", "onion"], (
"The current mid level action manager only support 3-onion-soup order, but got orders"
+ str(state.all_orders)
)
next_order = list(state.all_orders)[0]
soups_ready_to_cook_key = "{}_items".format(
len(next_order.ingredients)
)
soups_ready_to_cook = pot_states_dict[soups_ready_to_cook_key]
if soups_ready_to_cook:
only_pot_states_ready_to_cook = defaultdict(list)
only_pot_states_ready_to_cook[
soups_ready_to_cook_key
] = soups_ready_to_cook
                    # we only want to cook soups that have the same length as the order
motion_goals = am.start_cooking_actions(
only_pot_states_ready_to_cook
)
else:
motion_goals = am.pickup_onion_actions(counter_objects)
# it does not make sense to have tomato logic when the only possible order is 3 onion soup (see assertion above)
# elif 'onion' in next_order:
# motion_goals = am.pickup_onion_actions(counter_objects)
# elif 'tomato' in next_order:
# motion_goals = am.pickup_tomato_actions(counter_objects)
# else:
# motion_goals = am.pickup_onion_actions(counter_objects) + am.pickup_tomato_actions(counter_objects)
else:
player_obj = player.get_object()
if player_obj.name == "onion":
motion_goals = am.put_onion_in_pot_actions(pot_states_dict)
elif player_obj.name == "tomato":
motion_goals = am.put_tomato_in_pot_actions(pot_states_dict)
elif player_obj.name == "dish":
motion_goals = am.pickup_soup_with_dish_actions(
pot_states_dict, only_nearly_ready=True
)
elif player_obj.name == "soup":
motion_goals = am.deliver_soup_actions()
else:
raise ValueError()
motion_goals = [
mg
for mg in motion_goals
if self.mlam.motion_planner.is_valid_motion_start_goal_pair(
player.pos_and_or, mg
)
]
if len(motion_goals) == 0:
motion_goals = am.go_to_closest_feature_actions(player)
motion_goals = [
mg
for mg in motion_goals
if self.mlam.motion_planner.is_valid_motion_start_goal_pair(
player.pos_and_or, mg
)
]
assert len(motion_goals) != 0
return motion_goals
class SampleAgent(Agent):
"""Agent that samples action using the average action_probs across multiple agents"""
def __init__(self, agents):
self.agents = agents
def action(self, state):
action_probs = np.zeros(Action.NUM_ACTIONS)
for agent in self.agents:
action_probs += agent.action(state)[1]["action_probs"]
action_probs = action_probs / len(self.agents)
return Action.sample(action_probs), {"action_probs": action_probs}
# Deprecated. Need to fix Heuristic to work with the new MDP to reactivate Planning
# class CoupledPlanningAgent(Agent):
# """
# An agent that uses a joint planner (mlp, a MediumLevelPlanner) to find near-optimal
# plans. At each timestep the agent re-plans under the assumption that the other agent
# is also a CoupledPlanningAgent, and then takes the first action in the plan.
# """
#
# def __init__(self, mlp, delivery_horizon=2, heuristic=None):
# self.mlp = mlp
# self.mlp.failures = 0
# self.heuristic = heuristic if heuristic is not None else Heuristic(mlp.mp).simple_heuristic
# self.delivery_horizon = delivery_horizon
#
# def action(self, state):
# try:
# joint_action_plan = self.mlp.get_low_level_action_plan(state, self.heuristic, delivery_horizon=self.delivery_horizon, goal_info=True)
# except TimeoutError:
# print("COUPLED PLANNING FAILURE")
# self.mlp.failures += 1
# return Direction.ALL_DIRECTIONS[np.random.randint(4)]
# return (joint_action_plan[0][self.agent_index], {}) if len(joint_action_plan) > 0 else (Action.STAY, {})
#
#
# class EmbeddedPlanningAgent(Agent):
# """
# An agent that uses A* search to find an optimal action based on a model of the other agent,
# `other_agent`. This class approximates the other agent as being deterministic even though it
# might be stochastic in order to perform the search.
# """
#
# def __init__(self, other_agent, mlp, env, delivery_horizon=2, logging_level=0):
# """mlp is a MediumLevelPlanner"""
# self.other_agent = other_agent
# self.delivery_horizon = delivery_horizon
# self.mlp = mlp
# self.env = env
# self.h_fn = Heuristic(mlp.mp).simple_heuristic
# self.logging_level = logging_level
#
# def action(self, state):
# start_state = state.deepcopy()
# order_list = start_state.order_list if start_state.order_list is not None else ["any", "any"]
# start_state.order_list = order_list[:self.delivery_horizon]
# other_agent_index = 1 - self.agent_index
# initial_env_state = self.env.state
# self.other_agent.env = self.env
#
# expand_fn = lambda state: self.mlp.get_successor_states_fixed_other(state, self.other_agent, other_agent_index)
# goal_fn = lambda state: len(state.order_list) == 0
# heuristic_fn = lambda state: self.h_fn(state)
#
# search_problem = SearchTree(start_state, goal_fn, expand_fn, heuristic_fn, max_iter_count=50000)
#
# try:
# ml_s_a_plan, cost = search_problem.A_star_graph_search(info=True)
# except TimeoutError:
# print("A* failed, taking random action")
# idx = np.random.randint(5)
# return Action.ALL_ACTIONS[idx]
#
# # Check estimated cost of the plan equals
# # the sum of the costs of each medium-level action
# assert sum([len(item[0]) for item in ml_s_a_plan[1:]]) == cost
#
# # In this case medium level actions are tuples of low level actions
# # We just care about the first low level action of the first med level action
# first_s_a = ml_s_a_plan[1]
#
# # Print what the agent is expecting to happen
# if self.logging_level >= 2:
# self.env.state = start_state
# for joint_a in first_s_a[0]:
# print(self.env)
# print(joint_a)
# self.env.step(joint_a)
# print(self.env)
# print("======The End======")
#
# self.env.state = initial_env_state
#
# first_joint_action = first_s_a[0][0]
# if self.logging_level >= 1:
# print("expected joint action", first_joint_action)
# action = first_joint_action[self.agent_index]
# return action, {}
#
# Deprecated. Due to Heuristic and MLP
# class CoupledPlanningPair(AgentPair):
# """
# Pair of identical coupled planning agents. Enables to search for optimal
# action once rather than repeating computation to find action of second agent
# """
#
# def __init__(self, agent):
# super().__init__(agent, agent, allow_duplicate_agents=True)
#
# def joint_action(self, state):
# # Reduce computation by half if both agents are coupled planning agents
# joint_action_plan = self.a0.mlp.get_low_level_action_plan(state, self.a0.heuristic, delivery_horizon=self.a0.delivery_horizon, goal_info=True)
#
# if len(joint_action_plan) == 0:
# return ((Action.STAY, {}), (Action.STAY, {}))
#
# joint_action_and_infos = [(a, {}) for a in joint_action_plan[0]]
# return joint_action_and_infos
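# Editor's illustrative sketch (not part of the original module): FixedPlanAgent also
# ignores the state argument, so its plan-then-stay behaviour can be shown directly;
# the two-action plan below is made up for demonstration.
if __name__ == "__main__":
    scripted = FixedPlanAgent([Action.STAY, Action.INTERACT])
    print([scripted.action(None)[0] for _ in range(3)])  # follows the plan, then STAY forever
    print(_example_random_agent_pair())  # e.g. ('↑', 'stay'), sampled uniformly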
| 26,870 | 36.740169 | 152 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/agents/__init__.py | 0 | 0 | 0 | py |
|
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/mdp/layout_generator.py | import copy
import random
import numpy as np
from overcooked_ai_py.mdp.actions import Action, Direction
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld, Recipe
from overcooked_ai_py.utils import rnd_int_uniform, rnd_uniform
EMPTY = " "
COUNTER = "X"
ONION_DISPENSER = "O"
TOMATO_DISPENSER = "T"
POT = "P"
DISH_DISPENSER = "D"
SERVING_LOC = "S"
CODE_TO_TYPE = {
0: EMPTY,
1: COUNTER,
2: ONION_DISPENSER,
3: TOMATO_DISPENSER,
4: POT,
5: DISH_DISPENSER,
6: SERVING_LOC,
}
TYPE_TO_CODE = {v: k for k, v in CODE_TO_TYPE.items()}
def mdp_fn_random_choice(mdp_fn_choices):
assert type(mdp_fn_choices) is list and len(mdp_fn_choices) > 0
return random.choice(mdp_fn_choices)
"""
size_bounds: (min_layout_size, max_layout_size)
prop_empty: (min, max) proportion of empty space in generated layout
prop_feats: (min, max) proportion of counters with features on them
"""
DEFAULT_MDP_GEN_PARAMS = {
"inner_shape": (5, 4),
"prop_empty": 0.95,
"prop_feats": 0.1,
"start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
"recipe_values": [20],
"recipe_times": [20],
"display": False,
}
def DEFAILT_PARAMS_SCHEDULE_FN(outside_information):
mdp_default_gen_params = {
"inner_shape": (5, 4),
"prop_empty": 0.95,
"prop_feats": 0.1,
"start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
"recipe_values": [20],
"recipe_times": [20],
"display": False,
}
return mdp_default_gen_params
class MDPParamsGenerator(object):
def __init__(self, params_schedule_fn):
"""
params_schedule_fn (callable): the function to produce a set of mdp_params for a specific layout
"""
assert callable(
params_schedule_fn
), "params scheduling function must be a callable"
self.params_schedule_fn = params_schedule_fn
@staticmethod
def from_fixed_param(mdp_params_always):
        # a naive schedule function that always returns the same set of parameters
naive_schedule_fn = lambda _ignored: mdp_params_always
return MDPParamsGenerator(naive_schedule_fn)
def generate(self, outside_information={}):
"""
generate a set of mdp_params that can be used to generate a mdp
outside_information (dict): passing in outside information
"""
assert type(outside_information) is dict
mdp_params = self.params_schedule_fn(outside_information)
return mdp_params
DEFAULT_FEATURE_TYPES = (
POT,
ONION_DISPENSER,
DISH_DISPENSER,
SERVING_LOC,
) # NOTE: TOMATO_DISPENSER is disabled by default
class LayoutGenerator(object):
# NOTE: This class hasn't been tested extensively.
def __init__(self, mdp_params_generator, outer_shape=(5, 4)):
"""
Defines a layout generator that will return OvercoookedGridworld instances
using mdp_params_generator
"""
self.mdp_params_generator = mdp_params_generator
self.outer_shape = outer_shape
@staticmethod
def mdp_gen_fn_from_dict(
mdp_params, outer_shape=None, mdp_params_schedule_fn=None
):
"""
        mdp_params: one set of fixed mdp parameters used by the environment
outer_shape: outer shape of the environment
mdp_params_schedule_fn: the schedule for varying mdp params
"""
        # if outer_shape is not defined, we have to be using one of the default layouts from the names bank
if outer_shape is None:
assert type(mdp_params) is dict and "layout_name" in mdp_params
mdp = OvercookedGridworld.from_layout_name(**mdp_params)
mdp_fn = lambda _ignored: mdp
else:
# there is no schedule, we are using the same set of mdp_params all the time
if mdp_params_schedule_fn is None:
assert mdp_params is not None
mdp_pg = MDPParamsGenerator.from_fixed_param(
mdp_params_always=mdp_params
)
else:
assert mdp_params is None, (
"please remove the mdp_params from the variable, "
"because mdp_params_schedule_fn exist and we will "
"always use the schedule_fn if it exist"
)
mdp_pg = MDPParamsGenerator(
params_schedule_fn=mdp_params_schedule_fn
)
lg = LayoutGenerator(mdp_pg, outer_shape)
mdp_fn = lg.generate_padded_mdp
return mdp_fn
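    # Editor's note (illustrative, not part of the original): two typical call patterns are
    #   LayoutGenerator.mdp_gen_fn_from_dict({"layout_name": "cramped_room"})
    #     -> returns a function that always yields the same fixed layout (the layout name
    #        here is an assumed example), and
    #   LayoutGenerator.mdp_gen_fn_from_dict(dict(DEFAULT_MDP_GEN_PARAMS), outer_shape=(5, 4))
    #     -> returns a function that procedurally generates a fresh padded layout per call.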
def generate_padded_mdp(self, outside_information={}):
"""
Return a PADDED MDP with mdp params specified in self.mdp_params
"""
mdp_gen_params = self.mdp_params_generator.generate(
outside_information
)
outer_shape = self.outer_shape
if (
"layout_name" in mdp_gen_params.keys()
and mdp_gen_params["layout_name"] is not None
):
mdp = OvercookedGridworld.from_layout_name(**mdp_gen_params)
mdp_generator_fn = lambda: self.padded_mdp(mdp)
else:
required_keys = [
"inner_shape",
"prop_empty",
"prop_feats",
"display",
]
# with generate_all_orders key start_all_orders will be generated inside make_new_layout method
if not mdp_gen_params.get("generate_all_orders"):
required_keys.append("start_all_orders")
missing_keys = [
k for k in required_keys if k not in mdp_gen_params.keys()
]
if len(missing_keys) != 0:
print("missing keys dict", mdp_gen_params)
assert (
len(missing_keys) == 0
), "These keys were missing from the mdp_params: {}".format(
missing_keys
)
inner_shape = mdp_gen_params["inner_shape"]
assert (
inner_shape[0] <= outer_shape[0]
and inner_shape[1] <= outer_shape[1]
), "inner_shape cannot fit into the outershap"
layout_generator = LayoutGenerator(
self.mdp_params_generator, outer_shape=self.outer_shape
)
if "feature_types" not in mdp_gen_params:
mdp_gen_params["feature_types"] = DEFAULT_FEATURE_TYPES
mdp_generator_fn = lambda: layout_generator.make_new_layout(
mdp_gen_params
)
return mdp_generator_fn()
@staticmethod
def create_base_params(mdp_gen_params):
assert mdp_gen_params.get("start_all_orders") or mdp_gen_params.get(
"generate_all_orders"
)
mdp_gen_params = LayoutGenerator.add_generated_mdp_params_orders(
mdp_gen_params
)
recipe_params = {
"start_all_orders": mdp_gen_params["start_all_orders"]
}
if mdp_gen_params.get("start_bonus_orders"):
recipe_params["start_bonus_orders"] = mdp_gen_params[
"start_bonus_orders"
]
if "recipe_values" in mdp_gen_params:
recipe_params["recipe_values"] = mdp_gen_params["recipe_values"]
if "recipe_times" in mdp_gen_params:
recipe_params["recipe_times"] = mdp_gen_params["recipe_times"]
return recipe_params
@staticmethod
def add_generated_mdp_params_orders(mdp_params):
"""
adds generated parameters (i.e. generated orders) to mdp_params,
        returns an unchanged copy of mdp_params when there are no "generate_all_orders" or "generate_bonus_orders" keys inside mdp_params
"""
mdp_params = copy.deepcopy(mdp_params)
if mdp_params.get("generate_all_orders"):
all_orders_kwargs = copy.deepcopy(
mdp_params["generate_all_orders"]
)
if all_orders_kwargs.get("recipes"):
all_orders_kwargs["recipes"] = [
Recipe.from_dict(r) for r in all_orders_kwargs["recipes"]
]
all_recipes = Recipe.generate_random_recipes(**all_orders_kwargs)
mdp_params["start_all_orders"] = [r.to_dict() for r in all_recipes]
else:
Recipe.configure({})
all_recipes = Recipe.ALL_RECIPES
if mdp_params.get("generate_bonus_orders"):
bonus_orders_kwargs = copy.deepcopy(
mdp_params["generate_bonus_orders"]
)
if not bonus_orders_kwargs.get("recipes"):
bonus_orders_kwargs["recipes"] = all_recipes
bonus_recipes = Recipe.generate_random_recipes(
**bonus_orders_kwargs
)
mdp_params["start_bonus_orders"] = [
r.to_dict() for r in bonus_recipes
]
return mdp_params
def padded_mdp(self, mdp, display=False):
"""Returns a padded MDP from an MDP"""
grid = Grid.from_mdp(mdp)
padded_grid = self.embed_grid(grid)
start_positions = self.get_random_starting_positions(padded_grid)
mdp_grid = self.padded_grid_to_layout_grid(
padded_grid, start_positions, display=display
)
return OvercookedGridworld.from_grid(mdp_grid)
def make_new_layout(self, mdp_gen_params):
return self.make_disjoint_sets_layout(
inner_shape=mdp_gen_params["inner_shape"],
prop_empty=mdp_gen_params["prop_empty"],
prop_features=mdp_gen_params["prop_feats"],
base_param=LayoutGenerator.create_base_params(mdp_gen_params),
feature_types=mdp_gen_params["feature_types"],
display=mdp_gen_params["display"],
)
def make_disjoint_sets_layout(
self,
inner_shape,
prop_empty,
prop_features,
base_param,
feature_types=DEFAULT_FEATURE_TYPES,
display=True,
):
grid = Grid(inner_shape)
self.dig_space_with_disjoint_sets(grid, prop_empty)
self.add_features(grid, prop_features, feature_types)
padded_grid = self.embed_grid(grid)
start_positions = self.get_random_starting_positions(padded_grid)
mdp_grid = self.padded_grid_to_layout_grid(
padded_grid, start_positions, display=display
)
return OvercookedGridworld.from_grid(mdp_grid, base_param)
def padded_grid_to_layout_grid(
self, padded_grid, start_positions, display=False
):
if display:
print("Generated layout")
print(padded_grid)
# Start formatting to actual OvercookedGridworld input type
mdp_grid = padded_grid.convert_to_string()
for i, pos in enumerate(start_positions):
x, y = pos
mdp_grid[y][x] = str(i + 1)
return mdp_grid
def embed_grid(self, grid):
"""Randomly embeds a smaller grid in a grid of size self.outer_shape"""
# Check that smaller grid fits
assert all(grid.shape <= self.outer_shape)
padded_grid = Grid(self.outer_shape)
x_leeway, y_leeway = self.outer_shape - grid.shape
starting_x = np.random.randint(0, x_leeway) if x_leeway else 0
starting_y = np.random.randint(0, y_leeway) if y_leeway else 0
for x in range(grid.shape[0]):
for y in range(grid.shape[1]):
item = grid.terrain_at_loc((x, y))
# Abstraction violation
padded_grid.mtx[x + starting_x][y + starting_y] = item
return padded_grid
def dig_space_with_disjoint_sets(self, grid, prop_empty):
dsets = DisjointSets([])
while not (
grid.proportion_empty() > prop_empty and dsets.num_sets == 1
):
valid_dig_location = False
while not valid_dig_location:
loc = grid.get_random_interior_location()
valid_dig_location = grid.is_valid_dig_location(loc)
grid.dig(loc)
dsets.add_singleton(loc)
for neighbour in grid.get_near_locations(loc):
if dsets.contains(neighbour):
dsets.union(neighbour, loc)
def make_fringe_expansion_layout(self, shape, prop_empty=0.1):
grid = Grid(shape)
self.dig_space_with_fringe_expansion(grid, prop_empty)
self.add_features(grid)
# print(grid)
def dig_space_with_fringe_expansion(self, grid, prop_empty=0.1):
starting_location = grid.get_random_interior_location()
fringe = Fringe(grid)
fringe.add(starting_location)
while grid.proportion_empty() < prop_empty:
curr_location = fringe.pop()
grid.dig(curr_location)
for location in grid.get_near_locations(curr_location):
if grid.is_valid_dig_location(location):
fringe.add(location)
def add_features(
self, grid, prop_features=0, feature_types=DEFAULT_FEATURE_TYPES
):
"""
Places one round of basic features and then adds random features
until prop_features of valid locations are filled"""
valid_locations = grid.valid_feature_locations()
np.random.shuffle(valid_locations)
assert len(valid_locations) > len(feature_types)
num_features_placed = 0
for location in valid_locations:
current_prop = num_features_placed / len(valid_locations)
if num_features_placed < len(feature_types):
grid.add_feature(location, feature_types[num_features_placed])
elif current_prop >= prop_features:
break
else:
random_feature = np.random.choice(feature_types)
grid.add_feature(location, random_feature)
num_features_placed += 1
def get_random_starting_positions(self, grid, divider_x=None):
pos0 = grid.get_random_empty_location()
pos1 = grid.get_random_empty_location()
# NOTE: Assuming more than 1 empty location, hacky code
while pos0 == pos1:
pos0 = grid.get_random_empty_location()
return pos0, pos1
class Grid(object):
def __init__(self, shape):
assert len(shape) == 2, "Grid must be 2 dimensional"
        grid = (np.ones(shape) * TYPE_TO_CODE[COUNTER]).astype(int)  # plain int: np.int was removed in NumPy >= 1.24
self.mtx = grid
self.shape = np.array(shape)
self.width = shape[0]
self.height = shape[1]
@staticmethod
def from_mdp(mdp):
terrain_matrix = np.array(mdp.terrain_mtx)
mdp_grid = Grid((terrain_matrix.shape[1], terrain_matrix.shape[0]))
for y in range(terrain_matrix.shape[0]):
for x in range(terrain_matrix.shape[1]):
feature = terrain_matrix[y][x]
mdp_grid.mtx[x][y] = TYPE_TO_CODE[feature]
return mdp_grid
def terrain_at_loc(self, location):
x, y = location
return self.mtx[x][y]
def dig(self, location):
assert self.is_valid_dig_location(location)
self.change_location(location, EMPTY)
def add_feature(self, location, feature_string):
assert self.is_valid_feature_location(location)
self.change_location(location, feature_string)
def change_location(self, location, feature_string):
x, y = location
self.mtx[x][y] = TYPE_TO_CODE[feature_string]
def proportion_empty(self):
flattened_grid = self.mtx.flatten()
num_eligible = len(flattened_grid) - 2 * sum(self.shape) + 4
num_empty = sum(
[1 for x in flattened_grid if x == TYPE_TO_CODE[EMPTY]]
)
return float(num_empty) / num_eligible
def get_near_locations(self, location):
"""Get neighbouring locations to the passed in location"""
near_locations = []
for d in Direction.ALL_DIRECTIONS:
new_location = Action.move_in_direction(location, d)
if self.is_in_bounds(new_location):
near_locations.append(new_location)
return near_locations
def is_in_bounds(self, location):
x, y = location
return x >= 0 and y >= 0 and x < self.shape[0] and y < self.shape[1]
def is_valid_dig_location(self, location):
x, y = location
# If already empty
if self.location_is_empty(location):
return False
# If one of the edges of the map, or outside the map
if (
x <= 0
or y <= 0
or x >= self.shape[0] - 1
or y >= self.shape[1] - 1
):
return False
return True
def valid_feature_locations(self):
valid_locations = []
for x in range(self.shape[0]):
for y in range(self.shape[1]):
location = (x, y)
if self.is_valid_feature_location(location):
valid_locations.append(location)
return np.array(valid_locations)
def is_valid_feature_location(self, location):
x, y = location
# If is empty or has a feature on it
if not self.mtx[x][y] == TYPE_TO_CODE[COUNTER]:
return False
# If outside the map
if not self.is_in_bounds(location):
return False
# If location is next to at least one empty square
if any(
[
loc
for loc in self.get_near_locations(location)
if CODE_TO_TYPE[self.terrain_at_loc(loc)] == EMPTY
]
):
return True
else:
return False
def location_is_empty(self, location):
x, y = location
return self.mtx[x][y] == TYPE_TO_CODE[EMPTY]
def get_random_interior_location(self):
rand_x = np.random.randint(low=1, high=self.shape[0] - 1)
rand_y = np.random.randint(low=1, high=self.shape[1] - 1)
return rand_x, rand_y
def get_random_empty_location(self):
is_empty = False
while not is_empty:
loc = self.get_random_interior_location()
is_empty = self.location_is_empty(loc)
return loc
def convert_to_string(self):
rows = []
for y in range(self.shape[1]):
column = []
for x in range(self.shape[0]):
column.append(CODE_TO_TYPE[self.mtx[x][y]])
rows.append(column)
string_grid = np.array(rows)
assert np.array_equal(
string_grid.T.shape, self.shape
), "{} vs {}".format(string_grid.shape, self.shape)
return string_grid
def __repr__(self):
s = ""
for y in range(self.shape[1]):
for x in range(self.shape[0]):
s += CODE_TO_TYPE[self.mtx[x][y]]
s += " "
s += "\n"
return s
class Fringe(object):
def __init__(self, grid):
self.fringe_list = []
self.distribution = []
self.grid = grid
def add(self, item):
if item not in self.fringe_list:
self.fringe_list.append(item)
self.update_probs()
def pop(self):
assert len(self.fringe_list) > 0
choice_idx = np.random.choice(
len(self.fringe_list), p=self.distribution
)
removed_pos = self.fringe_list.pop(choice_idx)
self.update_probs()
return removed_pos
def update_probs(self):
self.distribution = np.ones(len(self.fringe_list)) / len(
self.fringe_list
)
class DisjointSets(object):
"""A simple implementation of the Disjoint Sets data structure.
Implements path compression but not union-by-rank.
Taken from https://github.com/HumanCompatibleAI/planner-inference
"""
def __init__(self, elements):
self.num_elements = len(elements)
self.num_sets = len(elements)
self.parents = {element: element for element in elements}
def is_connected(self):
return self.num_sets == 1
def get_num_elements(self):
return self.num_elements
def contains(self, element):
return element in self.parents
def add_singleton(self, element):
assert not self.contains(element)
self.num_elements += 1
self.num_sets += 1
self.parents[element] = element
def find(self, element):
parent = self.parents[element]
if element == parent:
return parent
result = self.find(parent)
self.parents[element] = result
return result
def union(self, e1, e2):
p1, p2 = map(self.find, (e1, e2))
if p1 != p2:
self.num_sets -= 1
self.parents[p1] = p2
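# Editor's illustrative sketch (not part of the original module): exercising the Grid and
# DisjointSets helpers directly; the interior coordinates below are arbitrary.
if __name__ == "__main__":
    grid = Grid((5, 4))
    for loc in [(1, 1), (2, 1), (3, 1)]:
        grid.dig(loc)
    print(grid)                     # counters ("X") with a carved-out empty corridor
    print(grid.proportion_empty())  # fraction of interior cells that are now empty
    dsets = DisjointSets([])
    for loc in [(1, 1), (2, 1), (3, 1)]:
        dsets.add_singleton(loc)
    dsets.union((1, 1), (2, 1))
    dsets.union((2, 1), (3, 1))
    print(dsets.is_connected())     # True: all dug cells belong to one connected set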
| 20,796 | 33.037643 | 134 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/mdp/actions.py | import copy
import itertools
import numpy as np
class Direction(object):
"""
The four possible directions a player can be facing.
"""
NORTH = (0, -1)
SOUTH = (0, 1)
EAST = (1, 0)
WEST = (-1, 0)
ALL_DIRECTIONS = INDEX_TO_DIRECTION = [NORTH, SOUTH, EAST, WEST]
DIRECTION_TO_INDEX = {a: i for i, a in enumerate(INDEX_TO_DIRECTION)}
OPPOSITE_DIRECTIONS = {NORTH: SOUTH, SOUTH: NORTH, EAST: WEST, WEST: EAST}
DIRECTION_TO_NAME = {
d: name
for d, name in zip(
[NORTH, SOUTH, EAST, WEST], ["NORTH", "SOUTH", "EAST", "WEST"]
)
}
@staticmethod
def get_adjacent_directions(direction):
"""Returns the directions within 90 degrees of the given direction.
direction: One of the Directions, except not Direction.STAY.
"""
if direction in [Direction.NORTH, Direction.SOUTH]:
return [Direction.EAST, Direction.WEST]
elif direction in [Direction.EAST, Direction.WEST]:
return [Direction.NORTH, Direction.SOUTH]
raise ValueError("Invalid direction: %s" % direction)
class Action(object):
"""
The six actions available in the OvercookedGridworld.
Includes definitions of the actions as well as utility functions for
manipulating them or applying them.
"""
STAY = (0, 0)
INTERACT = "interact"
ALL_ACTIONS = INDEX_TO_ACTION = Direction.INDEX_TO_DIRECTION + [
STAY,
INTERACT,
]
INDEX_TO_ACTION_INDEX_PAIRS = [
v for v in itertools.product(range(len(INDEX_TO_ACTION)), repeat=2)
]
ACTION_TO_INDEX = {a: i for i, a in enumerate(INDEX_TO_ACTION)}
MOTION_ACTIONS = Direction.ALL_DIRECTIONS + [STAY]
ACTION_TO_CHAR = {
Direction.NORTH: "↑",
Direction.SOUTH: "↓",
Direction.EAST: "→",
Direction.WEST: "←",
STAY: "stay",
INTERACT: INTERACT,
}
NUM_ACTIONS = len(ALL_ACTIONS)
@staticmethod
def move_in_direction(point, direction):
"""
Takes a step in the given direction and returns the new point.
point: Tuple (x, y) representing a point in the x-y plane.
direction: One of the Directions.
"""
assert direction in Action.MOTION_ACTIONS
x, y = point
dx, dy = direction
return (x + dx, y + dy)
@staticmethod
def determine_action_for_change_in_pos(old_pos, new_pos):
"""Determines an action that will enable intended transition"""
if old_pos == new_pos:
return Action.STAY
new_x, new_y = new_pos
old_x, old_y = old_pos
direction = (new_x - old_x, new_y - old_y)
assert direction in Direction.ALL_DIRECTIONS
return direction
@staticmethod
def sample(action_probs):
return np.random.choice(
np.array(Action.ALL_ACTIONS, dtype=object), p=action_probs
)
@staticmethod
def argmax(action_probs):
action_idx = np.argmax(action_probs)
return Action.INDEX_TO_ACTION[action_idx]
@staticmethod
def remove_indices_and_renormalize(probs, indices, eps=0.0):
probs = copy.deepcopy(probs)
if len(np.array(probs).shape) > 1:
probs = np.array(probs)
for row_idx, row in enumerate(indices):
for idx in indices:
probs[row_idx][idx] = eps
norm_probs = probs.T / np.sum(probs, axis=1)
return norm_probs.T
else:
for idx in indices:
probs[idx] = eps
return probs / sum(probs)
@staticmethod
def to_char(action):
assert action in Action.ALL_ACTIONS
return Action.ACTION_TO_CHAR[action]
@staticmethod
def joint_action_to_char(joint_action):
assert all([a in Action.ALL_ACTIONS for a in joint_action])
return tuple(Action.to_char(a) for a in joint_action)
@staticmethod
def uniform_probs_over_actions():
num_acts = len(Action.ALL_ACTIONS)
return np.ones(num_acts) / num_acts
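# Editor's illustrative sketch (not part of the original module): a few of the Action
# helpers in isolation; the point (3, 2) is an arbitrary example coordinate.
if __name__ == "__main__":
    print(Action.move_in_direction((3, 2), Direction.NORTH))            # -> (3, 1): y decreases going north
    print(Action.determine_action_for_change_in_pos((3, 2), (4, 2)))    # -> (1, 0), i.e. Direction.EAST
    probs = Action.uniform_probs_over_actions()
    print(Action.sample(probs))                                         # one random action, e.g. 'interact'
    print(Action.remove_indices_and_renormalize(probs, [Action.ACTION_TO_INDEX[Action.STAY]]))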
| 4,077 | 29.893939 | 78 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/mdp/overcooked_env.py | import copy
import time
import cv2
import gym
import gymnasium
import numpy as np
import pygame
import tqdm
from overcooked_ai_py.mdp.actions import Action
from overcooked_ai_py.mdp.overcooked_mdp import (
EVENT_TYPES,
OvercookedGridworld,
)
from overcooked_ai_py.mdp.overcooked_trajectory import (
DEFAULT_TRAJ_KEYS,
EPISODE_TRAJ_KEYS,
TIMESTEP_TRAJ_KEYS,
)
from overcooked_ai_py.planning.planners import (
NO_COUNTERS_PARAMS,
MediumLevelActionManager,
MotionPlanner,
)
from overcooked_ai_py.utils import append_dictionaries, mean_and_std_err
from overcooked_ai_py.visualization.state_visualizer import StateVisualizer
DEFAULT_ENV_PARAMS = {"horizon": 400}
MAX_HORIZON = 1e10
class OvercookedEnv(object):
"""
An environment wrapper for the OvercookedGridworld Markov Decision Process.
The environment keeps track of the current state of the agent, updates
it as the agent takes actions, and provides rewards to the agent.
E.g. of how to instantiate OvercookedEnv:
> mdp = OvercookedGridworld(...)
> env = OvercookedEnv.from_mdp(mdp, horizon=400)
"""
#########################
# INSTANTIATION METHODS #
#########################
def __init__(
self,
mdp_generator_fn,
start_state_fn=None,
horizon=MAX_HORIZON,
mlam_params=NO_COUNTERS_PARAMS,
info_level=0,
num_mdp=1,
initial_info={},
):
"""
mdp_generator_fn (callable): A no-argument function that returns a OvercookedGridworld instance
start_state_fn (callable): Function that returns start state for the MDP, called at each environment reset
horizon (int): Number of steps before the environment returns done=True
mlam_params (dict): params for MediumLevelActionManager
info_level (int): Change amount of logging
num_mdp (int): the number of mdp if we are using a list of mdps
initial_info (dict): the initial outside information feed into the generator function
TODO: Potentially make changes based on this discussion
https://github.com/HumanCompatibleAI/overcooked_ai/pull/22#discussion_r416786847
"""
assert callable(mdp_generator_fn), (
"OvercookedEnv takes in a OvercookedGridworld generator function. "
"If trying to instantiate directly from a OvercookedGridworld "
"instance, use the OvercookedEnv.from_mdp method"
)
self.num_mdp = num_mdp
self.variable_mdp = num_mdp > 1
self.mdp_generator_fn = mdp_generator_fn
self.horizon = horizon
self._mlam = None
self._mp = None
self.mlam_params = mlam_params
self.start_state_fn = start_state_fn
self.info_level = info_level
self.reset(outside_info=initial_info)
if self.horizon >= MAX_HORIZON and self.info_level > 0:
print(
"Environment has (near-)infinite horizon and no terminal states. \
Reduce info level of OvercookedEnv to not see this message."
)
@property
def mlam(self):
if self._mlam is None:
if self.info_level > 0:
print("Computing MediumLevelActionManager")
self._mlam = MediumLevelActionManager.from_pickle_or_compute(
self.mdp, self.mlam_params, force_compute=False
)
return self._mlam
@property
def mp(self):
if self._mp is None:
if self._mlam is not None:
self._mp = self.mlam.motion_planner
else:
if self.info_level > 0:
print("Computing MotionPlanner")
self._mp = MotionPlanner.from_pickle_or_compute(
self.mdp,
self.mlam_params["counter_goals"],
force_compute=False,
)
return self._mp
@staticmethod
def from_mdp(
mdp,
start_state_fn=None,
horizon=MAX_HORIZON,
mlam_params=NO_COUNTERS_PARAMS,
info_level=1,
num_mdp=None,
):
"""
Create an OvercookedEnv directly from a OvercookedGridworld mdp
rather than a mdp generating function.
"""
assert isinstance(mdp, OvercookedGridworld)
if num_mdp is not None:
assert num_mdp == 1
mdp_generator_fn = lambda _ignored: mdp
return OvercookedEnv(
mdp_generator_fn=mdp_generator_fn,
start_state_fn=start_state_fn,
horizon=horizon,
mlam_params=mlam_params,
info_level=info_level,
num_mdp=1,
)
#####################
# BASIC CLASS UTILS #
#####################
@property
def env_params(self):
"""
Env params should be thought of as all of the params of an env WITHOUT the mdp.
        Alone, env_params is not sufficient to recreate a copy of the Env instance, but it is
        when combined with mdp_params (which is sufficient to build a copy of the Mdp instance).
"""
return {
"start_state_fn": self.start_state_fn,
"horizon": self.horizon,
"info_level": self.info_level,
"num_mdp": self.num_mdp,
}
def copy(self):
# TODO: Add testing for checking that these util methods are up to date?
return OvercookedEnv(
mdp_generator_fn=self.mdp_generator_fn,
start_state_fn=self.start_state_fn,
horizon=self.horizon,
info_level=self.info_level,
num_mdp=self.num_mdp,
)
#############################
# ENV VISUALIZATION METHODS #
#############################
def __repr__(self):
"""
        Standard way to view the state of an environment programmatically
is just to print the Env object
"""
return self.mdp.state_string(self.state)
def display_states(self, *states):
old_state = self.state
for s in states:
self.state = s
print(self)
self.state = old_state
def print_state_transition(
self, a_t, r_t, env_info, fname=None, display_phi=False
):
"""
Terminal graphics visualization of a state transition.
"""
# TODO: turn this into a "formatting action probs" function and add action symbols too
action_probs = [
None
if "action_probs" not in agent_info.keys()
else list(agent_info["action_probs"])
for agent_info in env_info["agent_infos"]
]
action_probs = [
None
if player_action_probs is None
else [round(p, 2) for p in player_action_probs[0]]
for player_action_probs in action_probs
]
if display_phi:
state_potential_str = (
"\nState potential = " + str(env_info["phi_s_prime"]) + "\t"
)
potential_diff_str = (
"Δ potential = "
+ str(0.99 * env_info["phi_s_prime"] - env_info["phi_s"])
+ "\n"
) # Assuming gamma 0.99
else:
state_potential_str = ""
potential_diff_str = ""
output_string = "Timestep: {}\nJoint action taken: {} \t Reward: {} + shaping_factor * {}\nAction probs by index: {} {} {}\n{}\n".format(
self.state.timestep,
tuple(Action.ACTION_TO_CHAR[a] for a in a_t),
r_t,
env_info["shaped_r_by_agent"],
action_probs,
state_potential_str,
potential_diff_str,
self,
)
if fname is None:
print(output_string)
else:
f = open(fname, "a")
print(output_string, file=f)
f.close()
###################
# BASIC ENV LOGIC #
###################
def step(
self, joint_action, joint_agent_action_info=None, display_phi=False
):
"""Performs a joint action, updating the environment state
and providing a reward.
On being done, stats about the episode are added to info:
ep_sparse_r: the environment sparse reward, given only at soup delivery
ep_shaped_r: the component of the reward that is due to reward shaped (excluding sparse rewards)
ep_length: length of rollout
"""
assert not self.is_done()
if joint_agent_action_info is None:
joint_agent_action_info = [{}, {}]
next_state, mdp_infos = self.mdp.get_state_transition(
self.state, joint_action, display_phi, self.mp
)
# Update game_stats
self._update_game_stats(mdp_infos)
# Update state and done
self.state = next_state
done = self.is_done()
env_info = self._prepare_info_dict(joint_agent_action_info, mdp_infos)
if done:
self._add_episode_info(env_info)
timestep_sparse_reward = sum(mdp_infos["sparse_reward_by_agent"])
return (next_state, timestep_sparse_reward, done, env_info)
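    # A hedged stepping sketch (added for illustration, not in the original file).
    # It assumes a layout named "cramped_room" ships with the package and that the
    # env was built as in the class docstring above:
    #
    #   >>> mdp = OvercookedGridworld.from_layout_name("cramped_room")
    #   >>> env = OvercookedEnv.from_mdp(mdp, horizon=400)
    #   >>> next_state, reward, done, info = env.step((Action.STAY, Action.INTERACT))
    #   >>> info["sparse_r_by_agent"], info["shaped_r_by_agent"]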
def lossless_state_encoding_mdp(self, state):
"""
Wrapper of the mdp's lossless_encoding
"""
return self.mdp.lossless_state_encoding(state, self.horizon)
def featurize_state_mdp(self, state, num_pots=2):
"""
Wrapper of the mdp's featurize_state
"""
return self.mdp.featurize_state(state, self.mlam, num_pots=num_pots)
def reset(self, regen_mdp=True, outside_info={}):
"""
Resets the environment. Does NOT reset the agent.
Args:
regen_mdp (bool): gives the option of not re-generating mdp on the reset,
which is particularly helpful with reproducing results on variable mdp
outside_info (dict): the outside information that will be fed into the scheduling_fn (if used), which will
in turn generate a new set of mdp_params that is used to regenerate mdp.
                                 Please note that, if you intend to use this argument throughout the run,
                                 you need to have an "initial_info" dictionary with the same keys in the "env_params"
"""
if regen_mdp:
self.mdp = self.mdp_generator_fn(outside_info)
self._mlam = None
self._mp = None
if self.start_state_fn is None:
self.state = self.mdp.get_standard_start_state()
else:
self.state = self.start_state_fn()
events_dict = {
k: [[] for _ in range(self.mdp.num_players)] for k in EVENT_TYPES
}
rewards_dict = {
"cumulative_sparse_rewards_by_agent": np.array(
[0] * self.mdp.num_players
),
"cumulative_shaped_rewards_by_agent": np.array(
[0] * self.mdp.num_players
),
}
self.game_stats = {**events_dict, **rewards_dict}
def is_done(self):
"""Whether the episode is over."""
return self.state.timestep >= self.horizon or self.mdp.is_terminal(
self.state
)
def potential(self, mlam, state=None, gamma=0.99):
"""
        Return the potential of the environment's current state, if no state is provided.
        Otherwise return the potential of `state`.
args:
mlam (MediumLevelActionManager): the mlam of self.mdp
state (OvercookedState): the current state we are evaluating the potential on
gamma (float): discount rate
"""
state = state if state else self.state
return self.mdp.potential_function(state, mp=self.mp, gamma=gamma)
def _prepare_info_dict(self, joint_agent_action_info, mdp_infos):
"""
        The normal timestep info dict will contain info specific to each agent's action taken,
and reward shaping information.
"""
# Get the agent action info, that could contain info about action probs, or other
# custom user defined information
env_info = {
"agent_infos": [
joint_agent_action_info[agent_idx]
for agent_idx in range(self.mdp.num_players)
]
}
# TODO: This can be further simplified by having all the mdp_infos copied over to the env_infos automatically
env_info["sparse_r_by_agent"] = mdp_infos["sparse_reward_by_agent"]
env_info["shaped_r_by_agent"] = mdp_infos["shaped_reward_by_agent"]
env_info["phi_s"] = (
mdp_infos["phi_s"] if "phi_s" in mdp_infos else None
)
env_info["phi_s_prime"] = (
mdp_infos["phi_s_prime"] if "phi_s_prime" in mdp_infos else None
)
return env_info
def _add_episode_info(self, env_info):
env_info["episode"] = {
"ep_game_stats": self.game_stats,
"ep_sparse_r": sum(
self.game_stats["cumulative_sparse_rewards_by_agent"]
),
"ep_shaped_r": sum(
self.game_stats["cumulative_shaped_rewards_by_agent"]
),
"ep_sparse_r_by_agent": self.game_stats[
"cumulative_sparse_rewards_by_agent"
],
"ep_shaped_r_by_agent": self.game_stats[
"cumulative_shaped_rewards_by_agent"
],
"ep_length": self.state.timestep,
}
return env_info
def _update_game_stats(self, infos):
"""
Update the game stats dict based on the events of the current step
NOTE: the timer ticks after events are logged, so there can be events from time 0 to time self.horizon - 1
"""
self.game_stats["cumulative_sparse_rewards_by_agent"] += np.array(
infos["sparse_reward_by_agent"]
)
self.game_stats["cumulative_shaped_rewards_by_agent"] += np.array(
infos["shaped_reward_by_agent"]
)
for event_type, bool_list_by_agent in infos["event_infos"].items():
# For each event type, store the timestep if it occurred
event_occurred_by_idx = [int(x) for x in bool_list_by_agent]
for idx, event_by_agent in enumerate(event_occurred_by_idx):
if event_by_agent:
self.game_stats[event_type][idx].append(
self.state.timestep
)
####################
# TRAJECTORY LOGIC #
####################
def execute_plan(self, start_state, joint_action_plan, display=False):
"""Executes action_plan (a list of joint actions) from a start
state in the mdp and returns the resulting state."""
self.state = start_state
done = False
if display:
print("Starting state\n{}".format(self))
for joint_action in joint_action_plan:
self.step(joint_action)
done = self.is_done()
if display:
print(self)
if done:
break
successor_state = self.state
self.reset(False)
return successor_state, done
def run_agents(
self,
agent_pair,
include_final_state=False,
display=False,
dir=None,
display_phi=False,
display_until=np.Inf,
):
"""
        The trajectory returned will be a list of state-action tuples (s_t, joint_a_t, r_t, done_t, info_t).
"""
assert (
self.state.timestep == 0
), "Did not reset environment before running agents"
trajectory = []
done = False
# default is to not print to file
fname = None
        if dir is not None:
fname = dir + "/roll_out_" + str(time.time()) + ".txt"
f = open(fname, "w+")
print(self, file=f)
f.close()
while not done:
s_t = self.state
# Getting actions and action infos (optional) for both agents
joint_action_and_infos = agent_pair.joint_action(s_t)
a_t, a_info_t = zip(*joint_action_and_infos)
assert all(a in Action.ALL_ACTIONS for a in a_t)
assert all(type(a_info) is dict for a_info in a_info_t)
s_tp1, r_t, done, info = self.step(a_t, a_info_t, display_phi)
trajectory.append((s_t, a_t, r_t, done, info))
if display and self.state.timestep < display_until:
self.print_state_transition(a_t, r_t, info, fname, display_phi)
assert len(trajectory) == self.state.timestep, "{} vs {}".format(
len(trajectory), self.state.timestep
)
# Add final state
if include_final_state:
trajectory.append((s_tp1, (None, None), 0, True, None))
total_sparse = sum(
self.game_stats["cumulative_sparse_rewards_by_agent"]
)
total_shaped = sum(
self.game_stats["cumulative_shaped_rewards_by_agent"]
)
return (
np.array(trajectory, dtype=object),
self.state.timestep,
total_sparse,
total_shaped,
)
def get_rollouts(
self,
agent_pair,
num_games,
display=False,
dir=None,
final_state=False,
display_phi=False,
display_until=np.Inf,
metadata_fn=None,
metadata_info_fn=None,
info=True,
):
"""
        Simulates `num_games` rollouts with the current agent_pair and returns processed
trajectories.
Returning excessive information to be able to convert trajectories to any required format
(baselines, stable_baselines, etc)
metadata_fn returns some metadata information computed at the end of each trajectory based on
some of the trajectory data.
NOTE: this is the standard trajectories format used throughout the codebase
"""
trajectories = {k: [] for k in DEFAULT_TRAJ_KEYS}
metadata_fn = (lambda x: {}) if metadata_fn is None else metadata_fn
metadata_info_fn = (
(lambda x: "") if metadata_info_fn is None else metadata_info_fn
)
range_iterator = (
tqdm.trange(num_games, desc="", leave=True)
if info
else range(num_games)
)
for i in range_iterator:
agent_pair.set_mdp(self.mdp)
rollout_info = self.run_agents(
agent_pair,
display=display,
dir=dir,
include_final_state=final_state,
display_phi=display_phi,
display_until=display_until,
)
(
trajectory,
time_taken,
tot_rews_sparse,
_tot_rews_shaped,
) = rollout_info
obs, actions, rews, dones, infos = (
trajectory.T[0],
trajectory.T[1],
trajectory.T[2],
trajectory.T[3],
trajectory.T[4],
)
trajectories["ep_states"].append(obs)
trajectories["ep_actions"].append(actions)
trajectories["ep_rewards"].append(rews)
trajectories["ep_dones"].append(dones)
trajectories["ep_infos"].append(infos)
trajectories["ep_returns"].append(tot_rews_sparse)
trajectories["ep_lengths"].append(time_taken)
trajectories["mdp_params"].append(self.mdp.mdp_params)
trajectories["env_params"].append(self.env_params)
trajectories["metadatas"].append(metadata_fn(rollout_info))
# we do not need to regenerate MDP if we are trying to generate a series of rollouts using the same MDP
# Basically, the FALSE here means that we are using the same layout and starting positions
# (if regen_mdp == True, resetting will call mdp_gen_fn to generate another layout & starting position)
self.reset(regen_mdp=False)
agent_pair.reset()
if info:
mu, se = mean_and_std_err(trajectories["ep_returns"])
description = "Avg rew: {:.2f} (std: {:.2f}, se: {:.2f}); avg len: {:.2f}; ".format(
mu,
np.std(trajectories["ep_returns"]),
se,
np.mean(trajectories["ep_lengths"]),
)
description += metadata_info_fn(trajectories["metadatas"])
range_iterator.set_description(description)
range_iterator.refresh()
# Converting to numpy arrays
trajectories = {k: np.array(v) for k, v in trajectories.items()}
# Merging all metadata dictionaries, assumes same keys throughout all
trajectories["metadatas"] = append_dictionaries(
trajectories["metadatas"]
)
# TODO: should probably transfer check methods over to Env class
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
AgentEvaluator.check_trajectories(trajectories, verbose=info)
return trajectories
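    # A hedged sketch of how the returned trajectories dict is typically consumed
    # (added for illustration; key names follow DEFAULT_TRAJ_KEYS, and `agent_pair`
    # is assumed to be an AgentPair):
    #
    #   >>> trajs = env.get_rollouts(agent_pair, num_games=5)
    #   >>> trajs["ep_returns"]          # per-episode sparse returns
    #   >>> trajs["ep_lengths"]          # per-episode lengths
    #   >>> trajs["ep_states"][0][0]     # first OvercookedState of the first episode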
####################
# TRAJECTORY UTILS #
####################
@staticmethod
def get_discounted_rewards(trajectories, gamma):
rews = trajectories["ep_rewards"]
horizon = rews.shape[1]
return OvercookedEnv._get_discounted_rewards_with_horizon(
rews, gamma, horizon
)
@staticmethod
def _get_discounted_rewards_with_horizon(rewards_matrix, gamma, horizon):
rewards_matrix = np.array(rewards_matrix)
discount_array = [gamma**i for i in range(horizon)]
rewards_matrix = rewards_matrix[:, :horizon]
discounted_rews = np.sum(rewards_matrix * discount_array, axis=1)
return discounted_rews
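    # Worked example (hedged, added for illustration): with gamma = 0.5 and a single
    # reward sequence [1, 0, 2], the discounted return is 1*1 + 0*0.5 + 2*0.25 = 1.5:
    #
    #   >>> OvercookedEnv._get_discounted_rewards_with_horizon([[1, 0, 2]], 0.5, 3)
    #   array([1.5])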
@staticmethod
def get_agent_infos_for_trajectories(trajectories, agent_idx):
"""
Returns a dictionary of the form
{
"[agent_info_0]": [ [episode_values], [], ... ],
"[agent_info_1]": [ [], [], ... ],
...
}
        whose keys are the keys returned by the agent in its agent_info dictionary
NOTE: deprecated
"""
agent_infos = []
for traj_idx in range(len(trajectories["ep_lengths"])):
ep_infos = trajectories["ep_infos"][traj_idx]
traj_agent_infos = [
step_info["agent_infos"][agent_idx] for step_info in ep_infos
]
# Append all dictionaries together
traj_agent_infos = append_dictionaries(traj_agent_infos)
agent_infos.append(traj_agent_infos)
# Append all dictionaries together once again
agent_infos = append_dictionaries(agent_infos)
agent_infos = {k: np.array(v) for k, v in agent_infos.items()}
return agent_infos
@staticmethod
def proportion_stuck_time(trajectories, agent_idx, stuck_time=3):
"""
Simple util for calculating a guess for the proportion of time in the trajectories
during which the agent with the desired agent index was stuck.
NOTE: deprecated
"""
stuck_matrix = []
for traj_idx in range(len(trajectories["ep_lengths"])):
stuck_matrix.append([])
obs = trajectories["ep_states"][traj_idx]
for traj_timestep in range(
stuck_time, trajectories["ep_lengths"][traj_idx]
):
if traj_timestep >= stuck_time:
recent_states = obs[
traj_timestep - stuck_time : traj_timestep + 1
]
recent_player_pos_and_or = [
s.players[agent_idx].pos_and_or for s in recent_states
]
if len({item for item in recent_player_pos_and_or}) == 1:
# If there is only one item in the last stuck_time steps, then we classify the agent as stuck
stuck_matrix[traj_idx].append(True)
else:
stuck_matrix[traj_idx].append(False)
else:
stuck_matrix[traj_idx].append(False)
return stuck_matrix
from pettingzoo.utils.env import ParallelEnv
from overcooked_ai_py.agents.agent import AgentPair
class OvercookedEnvPettingZoo(ParallelEnv):
def __init__(self, base_env, agents):
"""
base_env: OvercookedEnv
agents: AgentPair
Example creating a PettingZoo env from a base_env:
mdp = OvercookedGridworld.from_layout_name("asymmetric_advantages")
base_env = OvercookedEnv.from_mdp(mdp, horizon=500)
agent_pair = load_agent_pair("path/to/checkpoint", "ppo", "ppo")
env = OvercookedEnvPettingZoo(base_env, agent_pair)
"""
        # we need an agent-dependent observation space, and the simplest way to get one is to take in an AgentPair
assert isinstance(
agents, AgentPair
), "agents must be an AgentPair object"
self.agents = ["agent_0", "agent_1"]
self.possible_agents = ["agent_0", "agent_1"]
self.agent_map = {"agent_0": agents.a0, "agent_1": agents.a1}
self.base_env = base_env
self.observation_spaces = {
agent: self.observation_space(agent) for agent in self.agents
}
self.action_spaces = {
agent: gymnasium.spaces.Discrete(len(Action.ALL_ACTIONS))
for agent in self.agents
}
# this is the AgentPair object
self.reset()
import functools
# we want to return the same space object every time
@functools.lru_cache(maxsize=2)
def observation_space(self, agent):
# the observation can be different for each agent
agent = self.agent_map[agent]
dummy_mdp = self.base_env.mdp
dummy_state = dummy_mdp.get_standard_start_state()
obs_shape = agent.featurize(dummy_state)[0].shape
high = np.ones(obs_shape) * float("inf")
low = np.zeros(obs_shape)
return gymnasium.spaces.Box(low, high, dtype=np.float32)
# we want to return the same space object every time
@functools.lru_cache(maxsize=1)
def action_space(self, agent):
# the action space is the same for each agent
return gymnasium.spaces.Discrete(len(Action.ALL_ACTIONS))
def step(self, joint_action):
joint_action = [
Action.ALL_ACTIONS[joint_action[agent]] for agent in joint_action
]
obs, reward, done, info = self.base_env.step(joint_action)
# https://gymnasium.farama.org/content/basic_usage/
# we have no early termination condition in this env, and the environment only terminates when the time horizon is reached
# therefore the terminated is always False, and we set truncated to done
terminated = False
truncated = done
def create_dict(value):
"""
Each agent should have the same reward, terminated, truncated, info
"""
return {agent: value for agent in self.agents}
def create_obs_dict(obs):
"""
Observation is potentially different for each agent
"""
return {
agent: self.agent_map[agent].featurize(obs)
for agent in self.agents
}
obs = create_obs_dict(obs)
reward = create_dict(reward)
terminated = create_dict(terminated)
truncated = create_dict(truncated)
info = create_dict(info)
if done:
self.agents = []
return obs, reward, terminated, truncated, info
def reset(self, seed=None, options=None):
"""
        Reset the embedded OvercookedEnv environment to the starting state
"""
self.base_env.reset()
dummy_mdp = self.base_env.mdp
dummy_state = dummy_mdp.get_standard_start_state()
# when an environment terminates/truncates, PettingZoo wants all agents removed, so during reset we re-add them
self.agents = self.possible_agents[:]
        # return the observations as a dict
obs_dict = {
agent: self.agent_map[agent].featurize(dummy_state)[0]
for agent in self.agents
}
return obs_dict, None
def render(self, mode="human", close=False):
pass
class Overcooked(gym.Env):
"""
Wrapper for the Env class above that is SOMEWHAT compatible with the standard gym API.
Why only somewhat? Because we need to flatten a multi-agent env to be a single-agent env (as gym requires).
NOTE: Observations returned are in a dictionary format with various information that is
necessary to be able to handle the multi-agent nature of the environment. There are probably
better ways to handle this, but we found this to work with minor modifications to OpenAI Baselines.
NOTE: The index of the main agent (as gym envs are 'single-agent') in the mdp is randomized at each reset
of the environment, and is kept track of by the self.agent_idx attribute. This means that it is necessary
to pass on this information in the output to know for which agent index featurizations should be made for
other agents.
For example, say one is training A0 paired with A1, and A1 takes a custom state featurization.
Then in the runner.py loop in OpenAI Baselines, we will get the lossless encodings of the state,
and the true Overcooked state. When we encode the true state to feed to A1, we also need to know
what agent index it has in the environment (as encodings will be index dependent).
"""
env_name = "Overcooked-v0"
# gym checks for the action space and obs space while initializing the env and throws an error if none exists
# custom_init after __init__ no longer works
    # might as well move all the initialization into the actual __init__
def __init__(self, base_env, featurize_fn, baselines_reproducible=False):
"""
base_env: OvercookedEnv
        featurize_fn(state): fn used to featurize states returned in the 'both_agent_obs' field
        Example creating a gym env:
        mdp = OvercookedGridworld.from_layout_name("asymmetric_advantages")
        base_env = OvercookedEnv.from_mdp(mdp, horizon=500)
        env = gym.make("Overcooked-v0", base_env=base_env, featurize_fn=base_env.featurize_state_mdp)
"""
if baselines_reproducible:
# NOTE:
# This will cause all agent indices to be chosen in sync across simulation
# envs (for each update, all envs will have index 0 or index 1).
# This is to prevent the randomness of choosing agent indexes
# from leaking when using subprocess-vec-env in baselines (which
# seeding does not reach) i.e. having different results for different
# runs with the same seed.
# The effect of this should be negligible, as all other randomness is
# controlled by the actual run seeds
np.random.seed(0)
self.base_env = base_env
self.featurize_fn = featurize_fn
self.observation_space = self._setup_observation_space()
self.action_space = gym.spaces.Discrete(len(Action.ALL_ACTIONS))
self.reset()
self.visualizer = StateVisualizer()
def _setup_observation_space(self):
dummy_mdp = self.base_env.mdp
dummy_state = dummy_mdp.get_standard_start_state()
obs_shape = self.featurize_fn(dummy_state)[0].shape
high = np.ones(obs_shape) * float("inf")
low = np.zeros(obs_shape)
return gym.spaces.Box(low, high, dtype=np.float32)
def step(self, action):
"""
action:
(agent with index self.agent_idx action, other agent action)
is a tuple with the joint action of the primary and secondary agents in index format
returns:
observation: formatted to be standard input for self.agent_idx's policy
"""
assert all(
self.action_space.contains(a) for a in action
), "%r (%s) invalid" % (
action,
type(action),
)
agent_action, other_agent_action = [
Action.INDEX_TO_ACTION[a] for a in action
]
if self.agent_idx == 0:
joint_action = (agent_action, other_agent_action)
else:
joint_action = (other_agent_action, agent_action)
next_state, reward, done, env_info = self.base_env.step(joint_action)
ob_p0, ob_p1 = self.featurize_fn(next_state)
if self.agent_idx == 0:
both_agents_ob = (ob_p0, ob_p1)
else:
both_agents_ob = (ob_p1, ob_p0)
env_info["policy_agent_idx"] = self.agent_idx
if "episode" in env_info.keys():
env_info["episode"]["policy_agent_idx"] = self.agent_idx
obs = {
"both_agent_obs": both_agents_ob,
"overcooked_state": next_state,
"other_agent_env_idx": 1 - self.agent_idx,
}
return obs, reward, done, env_info
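    # A hedged usage sketch (added for illustration): the gym wrapper takes a pair
    # of *action indices*, ordered (self.agent_idx's action, other agent's action),
    # assuming `env` was created as in the class docstring above:
    #
    #   >>> stay_idx = Action.ACTION_TO_INDEX[Action.STAY]
    #   >>> obs, reward, done, info = env.step((stay_idx, stay_idx))
    #   >>> obs["both_agent_obs"][0]     # featurized obs for the policy agent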
def reset(self):
"""
When training on individual maps, we want to randomize which agent is assigned to which
starting location, in order to make sure that the agents are trained to be able to
complete the task starting at either of the hardcoded positions.
NOTE: a nicer way to do this would be to just randomize starting positions, and not
have to deal with randomizing indices.
"""
self.base_env.reset()
self.mdp = self.base_env.mdp
self.agent_idx = np.random.choice([0, 1])
ob_p0, ob_p1 = self.featurize_fn(self.base_env.state)
if self.agent_idx == 0:
both_agents_ob = (ob_p0, ob_p1)
else:
both_agents_ob = (ob_p1, ob_p0)
return {
"both_agent_obs": both_agents_ob,
"overcooked_state": self.base_env.state,
"other_agent_env_idx": 1 - self.agent_idx,
}
def render(self):
rewards_dict = {} # dictionary of details you want rendered in the UI
for key, value in self.base_env.game_stats.items():
if key in [
"cumulative_shaped_rewards_by_agent",
"cumulative_sparse_rewards_by_agent",
]:
rewards_dict[key] = value
image = self.visualizer.render_state(
state=self.base_env.state,
grid=self.base_env.mdp.terrain_mtx,
hud_data=StateVisualizer.default_hud_data(
self.base_env.state, **rewards_dict
),
)
buffer = pygame.surfarray.array3d(image)
image = copy.deepcopy(buffer)
image = np.flip(np.rot90(image, 3), 1)
image = cv2.resize(image, (2 * 528, 2 * 464))
return image
| 35,211 | 36.619658 | 145 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/mdp/overcooked_mdp.py | import copy
import itertools
import warnings
from collections import Counter, defaultdict
from functools import reduce
import numpy as np
from overcooked_ai_py.mdp.actions import Action, Direction
from overcooked_ai_py.utils import (
OvercookedException,
classproperty,
pos_distance,
read_layout_dict,
)
class Recipe:
MAX_NUM_INGREDIENTS = 3
TOMATO = "tomato"
ONION = "onion"
ALL_INGREDIENTS = [ONION, TOMATO]
ALL_RECIPES_CACHE = {}
STR_REP = {"tomato": "†", "onion": "ø"}
_computed = False
_configured = False
_conf = {}
def __new__(cls, ingredients):
if not cls._configured:
raise OvercookedException(
"Recipe class must be configured before recipes can be created"
)
# Some basic argument verification
if (
not ingredients
or not hasattr(ingredients, "__iter__")
or len(ingredients) == 0
):
raise ValueError(
"Invalid input recipe. Must be ingredients iterable with non-zero length"
)
for elem in ingredients:
if not elem in cls.ALL_INGREDIENTS:
raise ValueError(
"Invalid ingredient: {0}. Recipe can only contain ingredients {1}".format(
elem, cls.ALL_INGREDIENTS
)
)
if not len(ingredients) <= cls.MAX_NUM_INGREDIENTS:
raise ValueError(
"Recipe of length {0} is invalid. Recipe can contain at most {1} ingredients".format(
len(ingredients), cls.MAX_NUM_INGREDIENTS
)
)
key = hash(tuple(sorted(ingredients)))
if key in cls.ALL_RECIPES_CACHE:
return cls.ALL_RECIPES_CACHE[key]
cls.ALL_RECIPES_CACHE[key] = super(Recipe, cls).__new__(cls)
return cls.ALL_RECIPES_CACHE[key]
def __init__(self, ingredients):
self._ingredients = ingredients
def __getnewargs__(self):
return (self._ingredients,)
def __int__(self):
num_tomatoes = len([_ for _ in self.ingredients if _ == Recipe.TOMATO])
num_onions = len([_ for _ in self.ingredients if _ == Recipe.ONION])
mixed_mask = int(bool(num_tomatoes * num_onions))
mixed_shift = (Recipe.MAX_NUM_INGREDIENTS + 1) ** len(
Recipe.ALL_INGREDIENTS
)
encoding = num_onions + (Recipe.MAX_NUM_INGREDIENTS + 1) * num_tomatoes
return mixed_mask * encoding * mixed_shift + encoding
def __hash__(self):
return hash(self.ingredients)
def __eq__(self, other):
# The ingredients property already returns sorted items, so equivalence check is sufficient
return self.ingredients == other.ingredients
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return int(self) < int(other)
def __le__(self, other):
return int(self) <= int(other)
def __gt__(self, other):
return int(self) > int(other)
def __ge__(self, other):
return int(self) >= int(other)
def __repr__(self):
return self.ingredients.__repr__()
def __iter__(self):
return iter(self.ingredients)
def __copy__(self):
return Recipe(self.ingredients)
def __deepcopy__(self, memo):
ingredients_cpy = copy.deepcopy(self.ingredients)
return Recipe(ingredients_cpy)
@classmethod
def _compute_all_recipes(cls):
for i in range(cls.MAX_NUM_INGREDIENTS):
for ingredient_list in itertools.combinations_with_replacement(
cls.ALL_INGREDIENTS, i + 1
):
cls(ingredient_list)
@property
def ingredients(self):
return tuple(sorted(self._ingredients))
@ingredients.setter
def ingredients(self, _):
raise AttributeError(
"Recpes are read-only. Do not modify instance attributes after creation"
)
@property
def value(self):
if self._delivery_reward:
return self._delivery_reward
if self._value_mapping and self in self._value_mapping:
return self._value_mapping[self]
if self._onion_value and self._tomato_value:
num_onions = len(
[
ingredient
for ingredient in self.ingredients
if ingredient == self.ONION
]
)
num_tomatoes = len(
[
ingredient
for ingredient in self.ingredients
if ingredient == self.TOMATO
]
)
return (
self._tomato_value * num_tomatoes
+ self._onion_value * num_onions
)
return 20
@property
def time(self):
if self._cook_time:
return self._cook_time
if self._time_mapping and self in self._time_mapping:
return self._time_mapping[self]
if self._onion_time and self._tomato_time:
num_onions = len(
[
ingredient
for ingredient in self.ingredients
if ingredient == self.ONION
]
)
num_tomatoes = len(
[
ingredient
for ingredient in self.ingredients
if ingredient == self.TOMATO
]
)
return (
self._onion_time * num_onions
+ self._tomato_time * num_tomatoes
)
return 20
def to_dict(self):
return {"ingredients": self.ingredients}
def neighbors(self):
"""
Return all "neighbor" recipes to this recipe. A neighbor recipe is one that can be obtained
by adding exactly one ingredient to the current recipe
"""
neighbors = []
if len(self.ingredients) == self.MAX_NUM_INGREDIENTS:
return neighbors
for ingredient in self.ALL_INGREDIENTS:
new_ingredients = [*self.ingredients, ingredient]
new_recipe = Recipe(new_ingredients)
neighbors.append(new_recipe)
return neighbors
@classproperty
def ALL_RECIPES(cls):
if not cls._computed:
cls._compute_all_recipes()
cls._computed = True
return set(cls.ALL_RECIPES_CACHE.values())
@classproperty
def configuration(cls):
if not cls._configured:
raise ValueError("Recipe class not yet configured")
return cls._conf
@classmethod
def configure(cls, conf):
cls._conf = conf
cls._configured = True
cls._computed = False
cls.MAX_NUM_INGREDIENTS = conf.get("max_num_ingredients", 3)
cls._cook_time = None
cls._delivery_reward = None
cls._value_mapping = None
cls._time_mapping = None
cls._onion_value = None
cls._onion_time = None
cls._tomato_value = None
cls._tomato_time = None
## Basic checks for validity ##
# Mutual Exclusion
if (
"tomato_time" in conf
and not "onion_time" in conf
or "onion_time" in conf
and not "tomato_time" in conf
):
raise ValueError(
"Must specify both 'onion_time' and 'tomato_time'"
)
if (
"tomato_value" in conf
and not "onion_value" in conf
or "onion_value" in conf
and not "tomato_value" in conf
):
raise ValueError(
"Must specify both 'onion_value' and 'tomato_value'"
)
if "tomato_value" in conf and "delivery_reward" in conf:
raise ValueError(
"'delivery_reward' incompatible with '<ingredient>_value'"
)
if "tomato_value" in conf and "recipe_values" in conf:
raise ValueError(
"'recipe_values' incompatible with '<ingredient>_value'"
)
if "recipe_values" in conf and "delivery_reward" in conf:
raise ValueError(
"'delivery_reward' incompatible with 'recipe_values'"
)
if "tomato_time" in conf and "cook_time" in conf:
raise ValueError(
"'cook_time' incompatible with '<ingredient>_time"
)
if "tomato_time" in conf and "recipe_times" in conf:
raise ValueError(
"'recipe_times' incompatible with '<ingredient>_time'"
)
if "recipe_times" in conf and "cook_time" in conf:
raise ValueError(
"'delivery_reward' incompatible with 'recipe_times'"
)
# recipe_ lists and orders compatibility
if "recipe_values" in conf:
if not "all_orders" in conf or not conf["all_orders"]:
raise ValueError(
"Must specify 'all_orders' if 'recipe_values' specified"
)
if not len(conf["all_orders"]) == len(conf["recipe_values"]):
raise ValueError(
"Number of recipes in 'all_orders' must be the same as number in 'recipe_values"
)
if "recipe_times" in conf:
if not "all_orders" in conf or not conf["all_orders"]:
raise ValueError(
"Must specify 'all_orders' if 'recipe_times' specified"
)
if not len(conf["all_orders"]) == len(conf["recipe_times"]):
raise ValueError(
"Number of recipes in 'all_orders' must be the same as number in 'recipe_times"
)
        ## Configure ##
if "cook_time" in conf:
cls._cook_time = conf["cook_time"]
if "delivery_reward" in conf:
cls._delivery_reward = conf["delivery_reward"]
if "recipe_values" in conf:
cls._value_mapping = {
cls.from_dict(recipe): value
for (recipe, value) in zip(
conf["all_orders"], conf["recipe_values"]
)
}
if "recipe_times" in conf:
cls._time_mapping = {
cls.from_dict(recipe): time
for (recipe, time) in zip(
conf["all_orders"], conf["recipe_times"]
)
}
if "tomato_time" in conf:
cls._tomato_time = conf["tomato_time"]
if "onion_time" in conf:
cls._onion_time = conf["onion_time"]
if "tomato_value" in conf:
cls._tomato_value = conf["tomato_value"]
if "onion_value" in conf:
cls._onion_value = conf["onion_value"]
@classmethod
def generate_random_recipes(
cls,
n=1,
min_size=2,
max_size=3,
ingredients=None,
recipes=None,
unique=True,
):
"""
n (int): how many recipes generate
min_size (int): min generated recipe size
max_size (int): max generated recipe size
ingredients (list(str)): list of ingredients used for generating recipes (default is cls.ALL_INGREDIENTS)
recipes (list(Recipe)): list of recipes to choose from (default is cls.ALL_RECIPES)
        unique (bool): whether generated recipes should all be unique (no repeats)
"""
if recipes is None:
recipes = cls.ALL_RECIPES
ingredients = set(ingredients or cls.ALL_INGREDIENTS)
choice_replace = not (unique)
assert 1 <= min_size <= max_size <= cls.MAX_NUM_INGREDIENTS
assert all(
ingredient in cls.ALL_INGREDIENTS for ingredient in ingredients
)
def valid_size(r):
return min_size <= len(r.ingredients) <= max_size
def valid_ingredients(r):
return all(i in ingredients for i in r.ingredients)
relevant_recipes = [
r for r in recipes if valid_size(r) and valid_ingredients(r)
]
assert choice_replace or (n <= len(relevant_recipes))
return np.random.choice(relevant_recipes, n, replace=choice_replace)
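    # A hedged usage sketch (added for illustration). Recipe instances are cached per
    # ingredient multiset, and the class must be configured before any recipe is
    # created (OvercookedGridworld does this on construction):
    #
    #   >>> Recipe.configure({})
    #   >>> r = Recipe([Recipe.ONION, Recipe.ONION])
    #   >>> r.ingredients                          # ('onion', 'onion'), always sorted
    #   >>> Recipe.generate_random_recipes(n=2, min_size=1)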
@classmethod
def from_dict(cls, obj_dict):
return cls(**obj_dict)
class ObjectState(object):
"""
State of an object in OvercookedGridworld.
"""
def __init__(self, name, position, **kwargs):
"""
name (str): The name of the object
position (int, int): Tuple for the current location of the object.
"""
self.name = name
self._position = tuple(position)
@property
def position(self):
return self._position
@position.setter
def position(self, new_pos):
self._position = new_pos
def is_valid(self):
return self.name in ["onion", "tomato", "dish"]
def deepcopy(self):
return ObjectState(self.name, self.position)
def __eq__(self, other):
return (
isinstance(other, ObjectState)
and self.name == other.name
and self.position == other.position
)
def __hash__(self):
return hash((self.name, self.position))
def __repr__(self):
return "{}@{}".format(self.name, self.position)
def to_dict(self):
return {"name": self.name, "position": self.position}
@classmethod
def from_dict(cls, obj_dict):
obj_dict = copy.deepcopy(obj_dict)
return ObjectState(**obj_dict)
class SoupState(ObjectState):
def __init__(
self,
position,
ingredients=[],
cooking_tick=-1,
cook_time=None,
**kwargs
):
"""
Represents a soup object. An object becomes a soup the instant it is placed in a pot. The
soup's recipe is a list of ingredient names used to create it. A soup's recipe is undetermined
until it has begun cooking.
        position (tuple): (x, y) coordinates in the grid
        ingredients (list(ObjectState)): Objects that have been used to cook this soup. Determines @property recipe
        cooking_tick (int): How long the soup has been cooking for. -1 means cooking hasn't started yet
        cook_time (int): How long the soup needs to be cooked; used mostly when building a soup from a dict with a supplied cook_time. If None, self.recipe.time is used
"""
super(SoupState, self).__init__("soup", position)
self._ingredients = ingredients
self._cooking_tick = cooking_tick
self._recipe = None
self._cook_time = cook_time
def __eq__(self, other):
return (
isinstance(other, SoupState)
and self.name == other.name
and self.position == other.position
and self._cooking_tick == other._cooking_tick
and all(
[
this_i == other_i
for this_i, other_i in zip(
self._ingredients, other._ingredients
)
]
)
)
def __hash__(self):
ingredient_hash = hash(tuple([hash(i) for i in self._ingredients]))
supercls_hash = super(SoupState, self).__hash__()
return hash((supercls_hash, self._cooking_tick, ingredient_hash))
def __repr__(self):
supercls_str = super(SoupState, self).__repr__()
ingredients_str = self._ingredients.__repr__()
return "{}\nIngredients:\t{}\nCooking Tick:\t{}".format(
supercls_str, ingredients_str, self._cooking_tick
)
def __str__(self):
res = "{"
for ingredient in sorted(self.ingredients):
res += Recipe.STR_REP[ingredient]
if self.is_cooking:
res += str(self._cooking_tick)
elif self.is_ready:
res += str("✓")
return res
@ObjectState.position.setter
def position(self, new_pos):
self._position = new_pos
for ingredient in self._ingredients:
ingredient.position = new_pos
@property
def ingredients(self):
return [ingredient.name for ingredient in self._ingredients]
@property
def is_cooking(self):
return not self.is_idle and not self.is_ready
@property
def recipe(self):
if self.is_idle:
raise ValueError(
"Recipe is not determined until soup begins cooking"
)
if not self._recipe:
self._recipe = Recipe(self.ingredients)
return self._recipe
@property
def value(self):
return self.recipe.value
@property
def cook_time(self):
# used mostly when cook time is supplied by state dict
if self._cook_time is not None:
return self._cook_time
else:
return self.recipe.time
@property
def cook_time_remaining(self):
return max(0, self.cook_time - self._cooking_tick)
@property
def is_ready(self):
if self.is_idle:
return False
return self._cooking_tick >= self.cook_time
@property
def is_idle(self):
return self._cooking_tick < 0
@property
def is_full(self):
return (
not self.is_idle
or len(self.ingredients) == Recipe.MAX_NUM_INGREDIENTS
)
def is_valid(self):
if not all(
[
ingredient.position == self.position
for ingredient in self._ingredients
]
):
return False
if len(self.ingredients) > Recipe.MAX_NUM_INGREDIENTS:
return False
return True
def auto_finish(self):
if len(self.ingredients) == 0:
raise ValueError("Cannot finish soup with no ingredients")
        # First mark the soup as cooking (tick 0) so that its recipe (and thus
        # cook_time) becomes defined, then jump the tick straight to completion
        self._cooking_tick = 0
        self._cooking_tick = self.cook_time
def add_ingredient(self, ingredient):
if not ingredient.name in Recipe.ALL_INGREDIENTS:
raise ValueError("Invalid ingredient")
if self.is_full:
raise ValueError("Reached maximum number of ingredients in recipe")
ingredient.position = self.position
self._ingredients.append(ingredient)
def add_ingredient_from_str(self, ingredient_str):
ingredient_obj = ObjectState(ingredient_str, self.position)
self.add_ingredient(ingredient_obj)
def pop_ingredient(self):
if not self.is_idle:
raise ValueError(
"Cannot remove an ingredient from this soup at this time"
)
if len(self._ingredients) == 0:
raise ValueError("No ingredient to remove")
return self._ingredients.pop()
def begin_cooking(self):
if not self.is_idle:
raise ValueError("Cannot begin cooking this soup at this time")
if len(self.ingredients) == 0:
raise ValueError(
"Must add at least one ingredient to soup before you can begin cooking"
)
self._cooking_tick = 0
def cook(self):
if self.is_idle:
raise ValueError("Must begin cooking before advancing cook tick")
if self.is_ready:
raise ValueError("Cannot cook a soup that is already done")
self._cooking_tick += 1
def deepcopy(self):
return SoupState(
self.position,
[ingredient.deepcopy() for ingredient in self._ingredients],
self._cooking_tick,
)
def to_dict(self):
info_dict = super(SoupState, self).to_dict()
        ingredients_dict = [
            ingredient.to_dict() for ingredient in self._ingredients
        ]
        info_dict["_ingredients"] = ingredients_dict
info_dict["cooking_tick"] = self._cooking_tick
info_dict["is_cooking"] = self.is_cooking
info_dict["is_ready"] = self.is_ready
info_dict["is_idle"] = self.is_idle
info_dict["cook_time"] = -1 if self.is_idle else self.cook_time
# This is for backwards compatibility w/ overcooked-demo
# Should be removed once overcooked-demo is updated to use 'cooking_tick' instead of '_cooking_tick'
info_dict["_cooking_tick"] = self._cooking_tick
return info_dict
@classmethod
def from_dict(cls, obj_dict):
obj_dict = copy.deepcopy(obj_dict)
if obj_dict["name"] != "soup":
return super(SoupState, cls).from_dict(obj_dict)
if "state" in obj_dict:
# Legacy soup representation
ingredient, num_ingredient, time = obj_dict["state"]
cooking_tick = -1 if time == 0 else time
finished = time >= 20
if ingredient == Recipe.TOMATO:
return SoupState.get_soup(
obj_dict["position"],
num_tomatoes=num_ingredient,
cooking_tick=cooking_tick,
finished=finished,
)
else:
return SoupState.get_soup(
obj_dict["position"],
num_onions=num_ingredient,
cooking_tick=cooking_tick,
finished=finished,
)
ingredients_objs = [
ObjectState.from_dict(ing_dict)
for ing_dict in obj_dict["_ingredients"]
]
obj_dict["ingredients"] = ingredients_objs
return cls(**obj_dict)
@classmethod
def get_soup(
cls,
position,
num_onions=1,
num_tomatoes=0,
cooking_tick=-1,
finished=False,
**kwargs
):
if num_onions < 0 or num_tomatoes < 0:
raise ValueError("Number of active ingredients must be positive")
if num_onions + num_tomatoes > Recipe.MAX_NUM_INGREDIENTS:
raise ValueError("Too many ingredients specified for this soup")
if cooking_tick >= 0 and num_tomatoes + num_onions == 0:
raise ValueError("_cooking_tick must be -1 for empty soup")
if finished and num_tomatoes + num_onions == 0:
raise ValueError("Empty soup cannot be finished")
onions = [
ObjectState(Recipe.ONION, position) for _ in range(num_onions)
]
tomatoes = [
ObjectState(Recipe.TOMATO, position) for _ in range(num_tomatoes)
]
ingredients = onions + tomatoes
soup = cls(position, ingredients, cooking_tick)
if finished:
soup.auto_finish()
return soup
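# A hedged lifecycle sketch for SoupState (added for illustration; assumes Recipe
# has already been configured, e.g. via Recipe.configure({})):
#
#   >>> soup = SoupState.get_soup((2, 0), num_onions=2)   # idle soup sitting in a pot
#   >>> soup.add_ingredient_from_str("onion")
#   >>> soup.begin_cooking()
#   >>> while not soup.is_ready:
#   ...     soup.cook()
#   >>> soup.is_ready, soup.ingredients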
class PlayerState(object):
"""
State of a player in OvercookedGridworld.
position: (x, y) tuple representing the player's location.
orientation: Direction.NORTH/SOUTH/EAST/WEST representing orientation.
held_object: ObjectState representing the object held by the player, or
None if there is no such object.
"""
def __init__(self, position, orientation, held_object=None):
self.position = tuple(position)
self.orientation = tuple(orientation)
self.held_object = held_object
assert self.orientation in Direction.ALL_DIRECTIONS
if self.held_object is not None:
assert isinstance(self.held_object, ObjectState)
assert self.held_object.position == self.position
@property
def pos_and_or(self):
return (self.position, self.orientation)
def has_object(self):
return self.held_object is not None
def get_object(self):
assert self.has_object()
return self.held_object
def set_object(self, obj):
assert not self.has_object()
obj.position = self.position
self.held_object = obj
def remove_object(self):
assert self.has_object()
obj = self.held_object
self.held_object = None
return obj
def update_pos_and_or(self, new_position, new_orientation):
self.position = new_position
self.orientation = new_orientation
if self.has_object():
self.get_object().position = new_position
def deepcopy(self):
new_obj = (
None if self.held_object is None else self.held_object.deepcopy()
)
return PlayerState(self.position, self.orientation, new_obj)
def __eq__(self, other):
return (
isinstance(other, PlayerState)
and self.position == other.position
and self.orientation == other.orientation
and self.held_object == other.held_object
)
def __hash__(self):
return hash((self.position, self.orientation, self.held_object))
def __repr__(self):
return "{} facing {} holding {}".format(
self.position, self.orientation, str(self.held_object)
)
def to_dict(self):
return {
"position": self.position,
"orientation": self.orientation,
"held_object": self.held_object.to_dict()
if self.held_object is not None
else None,
}
@staticmethod
def from_dict(player_dict):
player_dict = copy.deepcopy(player_dict)
held_obj = player_dict.get("held_object", None)
if held_obj is not None:
player_dict["held_object"] = SoupState.from_dict(held_obj)
return PlayerState(**player_dict)
class OvercookedState(object):
"""A state in OvercookedGridworld."""
def __init__(
self,
players,
objects,
bonus_orders=[],
all_orders=[],
timestep=0,
**kwargs
):
"""
players (list(PlayerState)): Currently active PlayerStates (index corresponds to number)
objects (dict({tuple:list(ObjectState)})): Dictionary mapping positions (x, y) to ObjectStates.
NOTE: Does NOT include objects held by players (they are in
the PlayerState objects).
bonus_orders (list(dict)): Current orders worth a bonus
        all_orders (list(dict)): All orders that are currently allowed
timestep (int): The current timestep of the state
"""
bonus_orders = [Recipe.from_dict(order) for order in bonus_orders]
all_orders = [Recipe.from_dict(order) for order in all_orders]
for pos, obj in objects.items():
assert obj.position == pos
self.players = tuple(players)
self.objects = objects
self._bonus_orders = bonus_orders
self._all_orders = all_orders
self.timestep = timestep
assert len(set(self.bonus_orders)) == len(
self.bonus_orders
), "Bonus orders must not have duplicates"
assert len(set(self.all_orders)) == len(
self.all_orders
), "All orders must not have duplicates"
assert set(self.bonus_orders).issubset(
set(self.all_orders)
), "Bonus orders must be a subset of all orders"
@property
def player_positions(self):
return tuple([player.position for player in self.players])
@property
def player_orientations(self):
return tuple([player.orientation for player in self.players])
@property
def players_pos_and_or(self):
"""Returns a ((pos1, or1), (pos2, or2)) tuple"""
return tuple(zip(*[self.player_positions, self.player_orientations]))
@property
def unowned_objects_by_type(self):
"""
Returns dictionary of (obj_name: ObjState)
for all objects in the environment, NOT including
ones held by players.
"""
objects_by_type = defaultdict(list)
for _pos, obj in self.objects.items():
objects_by_type[obj.name].append(obj)
return objects_by_type
@property
def player_objects_by_type(self):
"""
Returns dictionary of (obj_name: ObjState)
for all objects held by players.
"""
player_objects = defaultdict(list)
for player in self.players:
if player.has_object():
player_obj = player.get_object()
player_objects[player_obj.name].append(player_obj)
return player_objects
@property
def all_objects_by_type(self):
"""
Returns dictionary of (obj_name: ObjState)
for all objects in the environment, including
ones held by players.
"""
all_objs_by_type = self.unowned_objects_by_type.copy()
for obj_type, player_objs in self.player_objects_by_type.items():
all_objs_by_type[obj_type].extend(player_objs)
return all_objs_by_type
@property
def all_objects_list(self):
all_objects_lists = list(self.all_objects_by_type.values()) + [[], []]
return reduce(lambda x, y: x + y, all_objects_lists)
@property
def all_orders(self):
return (
sorted(self._all_orders)
if self._all_orders
else sorted(Recipe.ALL_RECIPES)
)
@property
def bonus_orders(self):
return sorted(self._bonus_orders)
def has_object(self, pos):
return pos in self.objects
def get_object(self, pos):
assert self.has_object(pos)
return self.objects[pos]
def add_object(self, obj, pos=None):
if pos is None:
pos = obj.position
assert not self.has_object(pos)
obj.position = pos
self.objects[pos] = obj
def remove_object(self, pos):
assert self.has_object(pos)
obj = self.objects[pos]
del self.objects[pos]
return obj
def reverse_players(self):
reversed = []
for player in self.players:
reversed.insert(0, player)
self.players = tuple(reversed)
return self
@classmethod
def from_players_pos_and_or(
cls, players_pos_and_or, bonus_orders=[], all_orders=[]
):
"""
Make a dummy OvercookedState with no objects based on the passed in player
positions and orientations and order list
"""
return cls(
[
PlayerState(*player_pos_and_or)
for player_pos_and_or in players_pos_and_or
],
objects={},
bonus_orders=bonus_orders,
all_orders=all_orders,
)
@classmethod
def from_player_positions(
cls, player_positions, bonus_orders=[], all_orders=[]
):
"""
Make a dummy OvercookedState with no objects and with players facing
North based on the passed in player positions and order list
"""
dummy_pos_and_or = [(pos, Direction.NORTH) for pos in player_positions]
return cls.from_players_pos_and_or(
dummy_pos_and_or, bonus_orders, all_orders
)
def deepcopy(self):
return OvercookedState(
players=[player.deepcopy() for player in self.players],
objects={pos: obj.deepcopy() for pos, obj in self.objects.items()},
bonus_orders=[order.to_dict() for order in self.bonus_orders],
all_orders=[order.to_dict() for order in self.all_orders],
timestep=self.timestep,
)
def time_independent_equal(self, other):
order_lists_equal = (
self.all_orders == other.all_orders
and self.bonus_orders == other.bonus_orders
)
return (
isinstance(other, OvercookedState)
and self.players == other.players
and set(self.objects.items()) == set(other.objects.items())
and order_lists_equal
)
def __eq__(self, other):
return (
self.time_independent_equal(other)
and self.timestep == other.timestep
)
def __hash__(self):
# NOTE: hash doesn't take into account timestep
order_list_hash = hash(tuple(self.bonus_orders)) + hash(
tuple(self.all_orders)
)
return hash(
(self.players, tuple(self.objects.values()), order_list_hash)
)
def __str__(self):
return "Players: {}, Objects: {}, Bonus orders: {} All orders: {} Timestep: {}".format(
str(self.players),
str(list(self.objects.values())),
str(self.bonus_orders),
str(self.all_orders),
str(self.timestep),
)
def to_dict(self):
return {
"players": [p.to_dict() for p in self.players],
"objects": [obj.to_dict() for obj in self.objects.values()],
"bonus_orders": [order.to_dict() for order in self.bonus_orders],
"all_orders": [order.to_dict() for order in self.all_orders],
"timestep": self.timestep,
}
@staticmethod
def from_dict(state_dict):
state_dict = copy.deepcopy(state_dict)
state_dict["players"] = [
PlayerState.from_dict(p) for p in state_dict["players"]
]
object_list = [SoupState.from_dict(o) for o in state_dict["objects"]]
state_dict["objects"] = {ob.position: ob for ob in object_list}
return OvercookedState(**state_dict)
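# A hedged construction sketch for OvercookedState (added for illustration; assumes
# Recipe has been configured, which OvercookedGridworld does on init):
#
#   >>> state = OvercookedState.from_player_positions([(1, 1), (3, 1)])
#   >>> state.player_positions                 # ((1, 1), (3, 1))
#   >>> state.players_pos_and_or               # both players default to facing NORTH
#   >>> OvercookedState.from_dict(state.to_dict()) == state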
BASE_REW_SHAPING_PARAMS = {
"PLACEMENT_IN_POT_REW": 3,
"DISH_PICKUP_REWARD": 3,
"SOUP_PICKUP_REWARD": 5,
"DISH_DISP_DISTANCE_REW": 0,
"POT_DISTANCE_REW": 0,
"SOUP_DISTANCE_REW": 0,
}
EVENT_TYPES = [
# Tomato events
"tomato_pickup",
"useful_tomato_pickup",
"tomato_drop",
"useful_tomato_drop",
"potting_tomato",
# Onion events
"onion_pickup",
"useful_onion_pickup",
"onion_drop",
"useful_onion_drop",
"potting_onion",
# Dish events
"dish_pickup",
"useful_dish_pickup",
"dish_drop",
"useful_dish_drop",
# Soup events
"soup_pickup",
"soup_delivery",
"soup_drop",
# Potting events
"optimal_onion_potting",
"optimal_tomato_potting",
"viable_onion_potting",
"viable_tomato_potting",
"catastrophic_onion_potting",
"catastrophic_tomato_potting",
"useless_onion_potting",
"useless_tomato_potting",
]
POTENTIAL_CONSTANTS = {
"default": {
"max_delivery_steps": 10,
"max_pickup_steps": 10,
"pot_onion_steps": 10,
"pot_tomato_steps": 10,
},
"mdp_test_tomato": {
"max_delivery_steps": 4,
"max_pickup_steps": 4,
"pot_onion_steps": 5,
"pot_tomato_steps": 6,
},
}
class OvercookedGridworld(object):
"""
An MDP grid world based off of the Overcooked game.
Importantly, an OvercookedGridworld object has no state. Once initialized,
all instance attributes will stay fixed.
TODO: clean the organization of this class further.
"""
#########################
# INSTANTIATION METHODS #
#########################
def __init__(
self,
terrain,
start_player_positions,
start_bonus_orders=[],
rew_shaping_params=None,
layout_name="unnamed_layout",
start_all_orders=[],
num_items_for_soup=3,
order_bonus=2,
start_state=None,
old_dynamics=False,
**kwargs
):
"""
terrain: a matrix of strings that encode the MDP layout
layout_name: string identifier of the layout
start_player_positions: tuple of positions for both players' starting positions
start_bonus_orders: List of recipes dicts that are worth a bonus
rew_shaping_params: reward given for completion of specific subgoals
        start_all_orders: List of all available order dicts the players can make; defaults to all possible recipes if an empty list is provided
num_items_for_soup: Maximum number of ingredients that can be placed in a soup
order_bonus: Multiplicative factor for serving a bonus recipe
start_state: Default start state returned by get_standard_start_state
"""
self._configure_recipes(start_all_orders, num_items_for_soup, **kwargs)
self.start_all_orders = (
[r.to_dict() for r in Recipe.ALL_RECIPES]
if not start_all_orders
else start_all_orders
)
if old_dynamics:
assert all(
[
len(order["ingredients"]) == 3
for order in self.start_all_orders
]
), "Only accept orders with 3 items when using the old_dynamics"
self.height = len(terrain)
self.width = len(terrain[0])
self.shape = (self.width, self.height)
self.terrain_mtx = terrain
self.terrain_pos_dict = self._get_terrain_type_pos_dict()
self.start_player_positions = start_player_positions
self.num_players = len(start_player_positions)
self.start_bonus_orders = start_bonus_orders
self.reward_shaping_params = (
BASE_REW_SHAPING_PARAMS
if rew_shaping_params is None
else rew_shaping_params
)
self.layout_name = layout_name
self.order_bonus = order_bonus
self.start_state = start_state
self._opt_recipe_discount_cache = {}
self._opt_recipe_cache = {}
self._prev_potential_params = {}
# determines whether to start cooking automatically once 3 items are in the pot
self.old_dynamics = old_dynamics
@staticmethod
def from_layout_name(layout_name, **params_to_overwrite):
"""
        Generates an OvercookedGridworld instance from a layout file.
        One can overwrite the default mdp configuration using params_to_overwrite.
"""
params_to_overwrite = params_to_overwrite.copy()
base_layout_params = read_layout_dict(layout_name)
grid = base_layout_params["grid"]
del base_layout_params["grid"]
base_layout_params["layout_name"] = layout_name
if "start_state" in base_layout_params:
base_layout_params["start_state"] = OvercookedState.from_dict(
base_layout_params["start_state"]
)
# Clean grid
grid = [layout_row.strip() for layout_row in grid.split("\n")]
return OvercookedGridworld.from_grid(
grid, base_layout_params, params_to_overwrite
)
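    # A hedged usage sketch (added for illustration; assumes a layout file named
    # "cramped_room" ships with the package and that any keyword arguments are
    # valid OvercookedGridworld params):
    #
    #   >>> mdp = OvercookedGridworld.from_layout_name("cramped_room")
    #   >>> mdp_old = OvercookedGridworld.from_layout_name("cramped_room", old_dynamics=True)
    #   >>> mdp.get_standard_start_state()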
@staticmethod
def from_grid(
layout_grid, base_layout_params={}, params_to_overwrite={}, debug=False
):
"""
Returns instance of OvercookedGridworld with terrain and starting
positions derived from layout_grid.
        One can override default configuration parameters of the mdp via
        params_to_overwrite.
"""
mdp_config = copy.deepcopy(base_layout_params)
layout_grid = [[c for c in row] for row in layout_grid]
OvercookedGridworld._assert_valid_grid(layout_grid)
if "layout_name" not in mdp_config:
layout_name = "|".join(["".join(line) for line in layout_grid])
mdp_config["layout_name"] = layout_name
player_positions = [None] * 9
for y, row in enumerate(layout_grid):
for x, c in enumerate(row):
if c in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]:
layout_grid[y][x] = " "
# -1 is to account for fact that player indexing starts from 1 rather than 0
assert (
player_positions[int(c) - 1] is None
), "Duplicate player in grid"
player_positions[int(c) - 1] = (x, y)
num_players = len([x for x in player_positions if x is not None])
player_positions = player_positions[:num_players]
# After removing player positions from grid we have a terrain mtx
mdp_config["terrain"] = layout_grid
mdp_config["start_player_positions"] = player_positions
for k, v in params_to_overwrite.items():
curr_val = mdp_config.get(k, None)
if debug:
print(
"Overwriting mdp layout standard config value {}:{} -> {}".format(
k, curr_val, v
)
)
mdp_config[k] = v
return OvercookedGridworld(**mdp_config)
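    # A hedged sketch of building a gridworld directly from a grid (added for
    # illustration). The characters below follow the usual Overcooked layout
    # convention (X counter, O onion dispenser, P pot, D dish dispenser, S serving
    # area, digits for player start positions); treat the exact grid as an
    # assumption rather than a shipped layout:
    #
    #   >>> grid = ["XXPXX",
    #   ...         "O 1 O",
    #   ...         "X 2 X",
    #   ...         "XDXSX"]
    #   >>> mdp = OvercookedGridworld.from_grid(grid)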
def _configure_recipes(
self, start_all_orders, num_items_for_soup, **kwargs
):
self.recipe_config = {
"num_items_for_soup": num_items_for_soup,
"all_orders": start_all_orders,
**kwargs,
}
Recipe.configure(self.recipe_config)
#####################
# BASIC CLASS UTILS #
#####################
def __eq__(self, other):
return (
np.array_equal(self.terrain_mtx, other.terrain_mtx)
and self.start_player_positions == other.start_player_positions
and self.start_bonus_orders == other.start_bonus_orders
and self.start_all_orders == other.start_all_orders
and self.reward_shaping_params == other.reward_shaping_params
and self.layout_name == other.layout_name
)
def copy(self):
return OvercookedGridworld(
terrain=self.terrain_mtx.copy(),
start_player_positions=self.start_player_positions,
start_bonus_orders=self.start_bonus_orders,
rew_shaping_params=copy.deepcopy(self.reward_shaping_params),
layout_name=self.layout_name,
start_all_orders=self.start_all_orders,
)
@property
def mdp_params(self):
return {
"layout_name": self.layout_name,
"terrain": self.terrain_mtx,
"start_player_positions": self.start_player_positions,
"start_bonus_orders": self.start_bonus_orders,
"rew_shaping_params": copy.deepcopy(self.reward_shaping_params),
"start_all_orders": self.start_all_orders,
}
##############
# GAME LOGIC #
##############
def get_actions(self, state):
"""
Returns the list of lists of valid actions for 'state'.
The ith element of the list is the list of valid actions that player i
can take.
"""
self._check_valid_state(state)
return [
self._get_player_actions(state, i)
for i in range(len(state.players))
]
def _get_player_actions(self, state, player_num):
"""All actions are allowed to all players in all states."""
return Action.ALL_ACTIONS
def _check_action(self, state, joint_action):
for p_action, p_legal_actions in zip(
joint_action, self.get_actions(state)
):
if p_action not in p_legal_actions:
raise ValueError("Invalid action")
def get_standard_start_state(self):
if self.start_state:
return self.start_state
start_state = OvercookedState.from_player_positions(
self.start_player_positions,
bonus_orders=self.start_bonus_orders,
all_orders=self.start_all_orders,
)
return start_state
def get_random_start_state_fn(
self, random_start_pos=False, rnd_obj_prob_thresh=0.0
):
def start_state_fn():
if random_start_pos:
valid_positions = self.get_valid_joint_player_positions()
start_pos = valid_positions[
np.random.choice(len(valid_positions))
]
else:
start_pos = self.start_player_positions
start_state = OvercookedState.from_player_positions(
start_pos,
bonus_orders=self.start_bonus_orders,
all_orders=self.start_all_orders,
)
if rnd_obj_prob_thresh == 0:
return start_state
# Arbitrary hard-coding for randomization of objects
# For each pot, add a random amount of onions and tomatoes with prob rnd_obj_prob_thresh
# Begin the soup cooking with probability rnd_obj_prob_thresh
pots = self.get_pot_states(start_state)["empty"]
for pot_loc in pots:
p = np.random.rand()
if p < rnd_obj_prob_thresh:
n = int(np.random.randint(low=1, high=4))
m = int(np.random.randint(low=0, high=4 - n))
q = np.random.rand()
cooking_tick = 0 if q < rnd_obj_prob_thresh else -1
start_state.objects[pot_loc] = SoupState.get_soup(
pot_loc,
num_onions=n,
num_tomatoes=m,
cooking_tick=cooking_tick,
)
# For each player, add a random object with prob rnd_obj_prob_thresh
for player in start_state.players:
p = np.random.rand()
if p < rnd_obj_prob_thresh:
# Different objects have different probabilities
obj = np.random.choice(
["dish", "onion", "soup"], p=[0.2, 0.6, 0.2]
)
n = int(np.random.randint(low=1, high=4))
m = int(np.random.randint(low=0, high=4 - n))
if obj == "soup":
player.set_object(
SoupState.get_soup(
player.position,
num_onions=n,
num_tomatoes=m,
finished=True,
)
)
else:
player.set_object(ObjectState(obj, player.position))
return start_state
return start_state_fn
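# Illustrative sketch (variable names are hypothetical): the returned function
# can be called repeatedly to sample fresh randomized start states, e.g.
#   start_fn = mdp.get_random_start_state_fn(random_start_pos=True, rnd_obj_prob_thresh=0.3)
#   s0 = start_fn()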
def is_terminal(self, state):
# There is a finite horizon, handled by the environment.
return False
def get_state_transition(
self, state, joint_action, display_phi=False, motion_planner=None
):
"""Gets information about possible transitions for the action.
Returns the next state and an infos dict with the sparse and shaped rewards by agent.
Assumes all actions are deterministic.
NOTE: Sparse reward is given only when soups are delivered,
shaped reward is given only for completion of subgoals
(not soup deliveries).
"""
events_infos = {
event: [False] * self.num_players for event in EVENT_TYPES
}
assert not self.is_terminal(
state
), "Trying to find successor of a terminal state: {}".format(state)
for action, action_set in zip(joint_action, self.get_actions(state)):
if action not in action_set:
raise ValueError(
"Illegal action %s in state %s" % (action, state)
)
new_state = state.deepcopy()
# Resolve interacts first
(
sparse_reward_by_agent,
shaped_reward_by_agent,
) = self.resolve_interacts(new_state, joint_action, events_infos)
assert new_state.player_positions == state.player_positions
assert new_state.player_orientations == state.player_orientations
# Resolve player movements
self.resolve_movement(new_state, joint_action)
# Finally, environment effects
self.step_environment_effects(new_state)
# Additional dense reward logic
# shaped_reward += self.calculate_distance_based_shaped_reward(state, new_state)
infos = {
"event_infos": events_infos,
"sparse_reward_by_agent": sparse_reward_by_agent,
"shaped_reward_by_agent": shaped_reward_by_agent,
}
if display_phi:
assert (
motion_planner is not None
), "motion planner must be defined if display_phi is true"
infos["phi_s"] = self.potential_function(state, motion_planner)
infos["phi_s_prime"] = self.potential_function(
new_state, motion_planner
)
return new_state, infos
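# Illustrative sketch of a single transition (the joint action is arbitrary;
# reward values depend on the layout and recipes, e.g. 20 for a delivered
# standard soup under default settings):
#   joint_action = (Action.STAY, Action.INTERACT)
#   next_state, infos = mdp.get_state_transition(state, joint_action)
#   sparse = infos["sparse_reward_by_agent"]  # e.g. [0, 20] on a delivery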
def resolve_interacts(self, new_state, joint_action, events_infos):
"""
Resolve any INTERACT actions, if present.
Currently if two players both interact with a terrain, we resolve player 1's interact
first and then player 2's, without doing anything like collision checking.
"""
pot_states = self.get_pot_states(new_state)
# We divide reward by agent to keep track of who contributed
sparse_reward, shaped_reward = (
[0] * self.num_players,
[0] * self.num_players,
)
for player_idx, (player, action) in enumerate(
zip(new_state.players, joint_action)
):
if action != Action.INTERACT:
continue
pos, o = player.position, player.orientation
i_pos = Action.move_in_direction(pos, o)
terrain_type = self.get_terrain_type_at_pos(i_pos)
# NOTE: we always log pickup/drop before performing it, as that's
# what the logic of determining whether the pickup/drop is useful assumes
if terrain_type == "X":
if player.has_object() and not new_state.has_object(i_pos):
obj_name = player.get_object().name
self.log_object_drop(
events_infos,
new_state,
obj_name,
pot_states,
player_idx,
)
# Drop object on counter
obj = player.remove_object()
new_state.add_object(obj, i_pos)
elif not player.has_object() and new_state.has_object(i_pos):
obj_name = new_state.get_object(i_pos).name
self.log_object_pickup(
events_infos,
new_state,
obj_name,
pot_states,
player_idx,
)
# Pick up object from counter
obj = new_state.remove_object(i_pos)
player.set_object(obj)
elif terrain_type == "O" and player.held_object is None:
self.log_object_pickup(
events_infos, new_state, "onion", pot_states, player_idx
)
# Onion pickup from dispenser
obj = ObjectState("onion", pos)
player.set_object(obj)
elif terrain_type == "T" and player.held_object is None:
# Tomato pickup from dispenser
player.set_object(ObjectState("tomato", pos))
elif terrain_type == "D" and player.held_object is None:
self.log_object_pickup(
events_infos, new_state, "dish", pot_states, player_idx
)
# Give shaped reward if pickup is useful
if self.is_dish_pickup_useful(new_state, pot_states):
shaped_reward[player_idx] += self.reward_shaping_params[
"DISH_PICKUP_REWARD"
]
# Perform dish pickup from dispenser
obj = ObjectState("dish", pos)
player.set_object(obj)
elif terrain_type == "P" and not player.has_object():
# An interact action will only start cooking the soup if we are using the new dynamics
if (
not self.old_dynamics
and self.soup_to_be_cooked_at_location(new_state, i_pos)
):
soup = new_state.get_object(i_pos)
soup.begin_cooking()
elif terrain_type == "P" and player.has_object():
if (
player.get_object().name == "dish"
and self.soup_ready_at_location(new_state, i_pos)
):
self.log_object_pickup(
events_infos, new_state, "soup", pot_states, player_idx
)
# Pick up soup
player.remove_object() # Remove the dish
obj = new_state.remove_object(i_pos) # Get soup
player.set_object(obj)
shaped_reward[player_idx] += self.reward_shaping_params[
"SOUP_PICKUP_REWARD"
]
elif player.get_object().name in Recipe.ALL_INGREDIENTS:
# Adding ingredient to soup
if not new_state.has_object(i_pos):
# Pot was empty, add soup to it
new_state.add_object(SoupState(i_pos, ingredients=[]))
# Add ingredient if possible
soup = new_state.get_object(i_pos)
if not soup.is_full:
old_soup = soup.deepcopy()
obj = player.remove_object()
soup.add_ingredient(obj)
shaped_reward[
player_idx
] += self.reward_shaping_params["PLACEMENT_IN_POT_REW"]
# Log potting
self.log_object_potting(
events_infos,
new_state,
old_soup,
soup,
obj.name,
player_idx,
)
if obj.name == Recipe.ONION:
events_infos["potting_onion"][player_idx] = True
elif terrain_type == "S" and player.has_object():
obj = player.get_object()
if obj.name == "soup":
delivery_rew = self.deliver_soup(new_state, player, obj)
sparse_reward[player_idx] += delivery_rew
# Log soup delivery
events_infos["soup_delivery"][player_idx] = True
return sparse_reward, shaped_reward
def get_recipe_value(
self,
state,
recipe,
discounted=False,
base_recipe=None,
potential_params={},
):
"""
Return the reward the player should receive for delivering this recipe
The player receives 0 if recipe not in all_orders, receives base value * order_bonus
if recipe is in bonus orders, and receives base value otherwise
"""
if not discounted:
if recipe not in state.all_orders:
return 0
if recipe not in state.bonus_orders:
return recipe.value
return self.order_bonus * recipe.value
else:
# Calculate missing ingredients needed to complete recipe
missing_ingredients = list(recipe.ingredients)
prev_ingredients = (
list(base_recipe.ingredients) if base_recipe else []
)
for ingredient in prev_ingredients:
missing_ingredients.remove(ingredient)
n_tomatoes = len(
[i for i in missing_ingredients if i == Recipe.TOMATO]
)
n_onions = len(
[i for i in missing_ingredients if i == Recipe.ONION]
)
gamma, pot_onion_steps, pot_tomato_steps = (
potential_params["gamma"],
potential_params["pot_onion_steps"],
potential_params["pot_tomato_steps"],
)
return (
gamma**recipe.time
* gamma ** (pot_onion_steps * n_onions)
* gamma ** (pot_tomato_steps * n_tomatoes)
* self.get_recipe_value(state, recipe, discounted=False)
)
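# Worked example with hypothetical numbers: with gamma = 0.99, a recipe worth
# 20 that needs 20 ticks to cook and is missing one onion (pot_onion_steps = 10)
# contributes roughly 0.99**20 * 0.99**10 * 20 ≈ 14.8 when discounted=True.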
def deliver_soup(self, state, player, soup):
"""
Deliver the soup and return the reward obtained for it, as determined
by get_recipe_value for the delivered soup's recipe.
"""
assert (
soup.name == "soup"
), "Tried to deliver something that wasn't soup"
assert soup.is_ready, "Tried to deliver soup that isn't ready"
player.remove_object()
return self.get_recipe_value(state, soup.recipe)
def resolve_movement(self, state, joint_action):
"""Resolve player movement and deal with possible collisions"""
(
new_positions,
new_orientations,
) = self.compute_new_positions_and_orientations(
state.players, joint_action
)
for player_state, new_pos, new_o in zip(
state.players, new_positions, new_orientations
):
player_state.update_pos_and_or(new_pos, new_o)
def compute_new_positions_and_orientations(
self, old_player_states, joint_action
):
"""Compute new positions and orientations ignoring collisions"""
new_positions, new_orientations = list(
zip(
*[
self._move_if_direction(p.position, p.orientation, a)
for p, a in zip(old_player_states, joint_action)
]
)
)
old_positions = tuple(p.position for p in old_player_states)
new_positions = self._handle_collisions(old_positions, new_positions)
return new_positions, new_orientations
def is_transition_collision(self, old_positions, new_positions):
# Checking for any players ending in same square
if self.is_joint_position_collision(new_positions):
return True
# Check if any two players crossed paths
for idx0, idx1 in itertools.combinations(range(self.num_players), 2):
p1_old, p2_old = old_positions[idx0], old_positions[idx1]
p1_new, p2_new = new_positions[idx0], new_positions[idx1]
if p1_new == p2_old and p1_old == p2_new:
return True
return False
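# Illustrative sketch for a 2-player mdp: a position swap counts as a collision,
#   old = ((1, 1), (2, 1)); new = ((2, 1), (1, 1))
#   is_transition_collision(old, new)  # -> True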
def is_joint_position_collision(self, joint_position):
return any(
pos0 == pos1
for pos0, pos1 in itertools.combinations(joint_position, 2)
)
def step_environment_effects(self, state):
state.timestep += 1
for obj in state.objects.values():
if obj.name == "soup":
# automatically starts cooking when the pot has 3 ingredients
if self.old_dynamics and (
not obj.is_cooking
and not obj.is_ready
and len(obj.ingredients) == 3
):
obj.begin_cooking()
if obj.is_cooking:
obj.cook()
def _handle_collisions(self, old_positions, new_positions):
"""If agents collide, they stay at their old locations"""
if self.is_transition_collision(old_positions, new_positions):
return old_positions
return new_positions
def _get_terrain_type_pos_dict(self):
pos_dict = defaultdict(list)
for y, terrain_row in enumerate(self.terrain_mtx):
for x, terrain_type in enumerate(terrain_row):
pos_dict[terrain_type].append((x, y))
return pos_dict
def _move_if_direction(self, position, orientation, action):
"""Returns position and orientation that would
be obtained after executing action"""
if action not in Action.MOTION_ACTIONS:
return position, orientation
new_pos = Action.move_in_direction(position, action)
new_orientation = orientation if action == Action.STAY else action
if new_pos not in self.get_valid_player_positions():
return position, new_orientation
return new_pos, new_orientation
#######################
# LAYOUT / STATE INFO #
#######################
def get_valid_player_positions(self):
return self.terrain_pos_dict[" "]
def get_valid_joint_player_positions(self):
"""Returns all valid tuples of the form (p0_pos, p1_pos, p2_pos, ...)"""
valid_positions = self.get_valid_player_positions()
all_joint_positions = list(
itertools.product(valid_positions, repeat=self.num_players)
)
valid_joint_positions = [
j_pos
for j_pos in all_joint_positions
if not self.is_joint_position_collision(j_pos)
]
return valid_joint_positions
def get_valid_player_positions_and_orientations(self):
valid_states = []
for pos in self.get_valid_player_positions():
valid_states.extend([(pos, d) for d in Direction.ALL_DIRECTIONS])
return valid_states
def get_valid_joint_player_positions_and_orientations(self):
"""All joint player position and orientation pairs that are not
overlapping and on empty terrain."""
valid_player_states = (
self.get_valid_player_positions_and_orientations()
)
valid_joint_player_states = []
for players_pos_and_orientations in itertools.product(
valid_player_states, repeat=self.num_players
):
joint_position = [
player_pos_and_or[0]
for player_pos_and_or in players_pos_and_orientations
]
if not self.is_joint_position_collision(joint_position):
valid_joint_player_states.append(players_pos_and_orientations)
return valid_joint_player_states
def get_adjacent_features(self, player):
adj_feats = []
pos = player.position
for d in Direction.ALL_DIRECTIONS:
adj_pos = Action.move_in_direction(pos, d)
adj_feats.append((adj_pos, self.get_terrain_type_at_pos(adj_pos)))
return adj_feats
def get_terrain_type_at_pos(self, pos):
x, y = pos
return self.terrain_mtx[y][x]
def get_dish_dispenser_locations(self):
return list(self.terrain_pos_dict["D"])
def get_onion_dispenser_locations(self):
return list(self.terrain_pos_dict["O"])
def get_tomato_dispenser_locations(self):
return list(self.terrain_pos_dict["T"])
def get_serving_locations(self):
return list(self.terrain_pos_dict["S"])
def get_pot_locations(self):
return list(self.terrain_pos_dict["P"])
def get_counter_locations(self):
return list(self.terrain_pos_dict["X"])
@property
def num_pots(self):
return len(self.get_pot_locations())
def get_pot_states(self, state):
"""Returns dict with structure:
{
'empty': [positions of empty pots],
'{x}_items': [positions of pots with x ingredients that have yet to start cooking],
'cooking': [positions of pots that are cooking but not ready],
'ready': [positions of pots with ready soups],
}
NOTE: all returned values are pot positions
"""
pots_states_dict = defaultdict(list)
for pot_pos in self.get_pot_locations():
if not state.has_object(pot_pos):
pots_states_dict["empty"].append(pot_pos)
else:
soup = state.get_object(pot_pos)
assert soup.name == "soup", (
"object at {} is not a soup but a {}".format(pot_pos, soup.name)
)
if soup.is_ready:
pots_states_dict["ready"].append(pot_pos)
elif soup.is_cooking:
pots_states_dict["cooking"].append(pot_pos)
else:
num_ingredients = len(soup.ingredients)
pots_states_dict[
"{}_items".format(num_ingredients)
].append(pot_pos)
return pots_states_dict
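# Illustrative sketch of a possible return value (positions are hypothetical).
# The result is a defaultdict, so absent keys simply read as empty lists:
#   {"empty": [(2, 0)], "1_items": [(4, 0)], "ready": [(6, 0)]}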
def get_counter_objects_dict(self, state, counter_subset=None):
"""Returns a dictionary of pos:objects on counters by type"""
counters_considered = (
self.terrain_pos_dict["X"]
if counter_subset is None
else counter_subset
)
counter_objects_dict = defaultdict(list)
for obj in state.objects.values():
if obj.position in counters_considered:
counter_objects_dict[obj.name].append(obj.position)
return counter_objects_dict
def get_empty_counter_locations(self, state):
counter_locations = self.get_counter_locations()
return [pos for pos in counter_locations if not state.has_object(pos)]
def get_empty_pots(self, pot_states):
"""Returns pots that have 0 items in them"""
return pot_states["empty"]
def get_non_empty_pots(self, pot_states):
return self.get_full_pots(pot_states) + self.get_partially_full_pots(
pot_states
)
def get_ready_pots(self, pot_states):
return pot_states["ready"]
def get_cooking_pots(self, pot_states):
return pot_states["cooking"]
def get_full_but_not_cooking_pots(self, pot_states):
return pot_states["{}_items".format(Recipe.MAX_NUM_INGREDIENTS)]
def get_full_pots(self, pot_states):
return (
self.get_cooking_pots(pot_states)
+ self.get_ready_pots(pot_states)
+ self.get_full_but_not_cooking_pots(pot_states)
)
def get_partially_full_pots(self, pot_states):
return list(
set().union(
*[
pot_states["{}_items".format(i)]
for i in range(1, Recipe.MAX_NUM_INGREDIENTS)
]
)
)
def soup_ready_at_location(self, state, pos):
if not state.has_object(pos):
return False
obj = state.get_object(pos)
assert obj.name == "soup", "Object in pot was not soup"
return obj.is_ready
def soup_to_be_cooked_at_location(self, state, pos):
if not state.has_object(pos):
return False
obj = state.get_object(pos)
return (
obj.name == "soup"
and not obj.is_cooking
and not obj.is_ready
and len(obj.ingredients) > 0
)
def _check_valid_state(self, state):
"""Checks that the state is valid.
Conditions checked:
- Players are on free spaces, not terrain
- Held objects have the same position as the player holding them
- Non-held objects are on terrain
- No two players or non-held objects occupy the same position
- Objects have a valid state (eg. no pot with 4 onions)
"""
all_objects = list(state.objects.values())
for player_state in state.players:
# Check that players are not on terrain
pos = player_state.position
assert pos in self.get_valid_player_positions()
# Check that held objects have the same position
if player_state.held_object is not None:
all_objects.append(player_state.held_object)
assert (
player_state.held_object.position == player_state.position
)
for obj_pos, obj_state in state.objects.items():
# Check that the hash key position agrees with the position stored
# in the object state
assert obj_state.position == obj_pos
# Check that non-held objects are on terrain
assert self.get_terrain_type_at_pos(obj_pos) != " "
# Check that players and non-held objects don't overlap
all_pos = [player_state.position for player_state in state.players]
all_pos += [obj_state.position for obj_state in state.objects.values()]
assert len(all_pos) == len(
set(all_pos)
), "Overlapping players or objects"
# Check that objects have a valid state
for obj_state in all_objects:
assert obj_state.is_valid()
def find_free_counters_valid_for_both_players(self, state, mlam):
"""Finds all empty counter locations that are accessible to both players"""
one_player, other_player = state.players
free_counters = self.get_empty_counter_locations(state)
free_counters_valid_for_both = []
for free_counter in free_counters:
goals = mlam.motion_planner.motion_goals_for_pos[free_counter]
if any(
[
mlam.motion_planner.is_valid_motion_start_goal_pair(
one_player.pos_and_or, goal
)
for goal in goals
]
) and any(
[
mlam.motion_planner.is_valid_motion_start_goal_pair(
other_player.pos_and_or, goal
)
for goal in goals
]
):
free_counters_valid_for_both.append(free_counter)
return free_counters_valid_for_both
def _get_optimal_possible_recipe(
self, state, recipe, discounted, potential_params, return_value
):
"""
Traverse the recipe-space graph using DFS to find the best possible recipe that can be made
from the current recipe
Because we can't have empty recipes, we handle the case by letting recipe==None be a stand-in for empty recipe
"""
start_recipe = recipe
visited = set()
stack = []
best_recipe = recipe
best_value = 0
if not recipe:
for ingredient in Recipe.ALL_INGREDIENTS:
stack.append(Recipe([ingredient]))
else:
stack.append(recipe)
while stack:
curr_recipe = stack.pop()
if curr_recipe not in visited:
visited.add(curr_recipe)
curr_value = self.get_recipe_value(
state,
curr_recipe,
base_recipe=start_recipe,
discounted=discounted,
potential_params=potential_params,
)
if curr_value > best_value:
best_value, best_recipe = curr_value, curr_recipe
for neighbor in curr_recipe.neighbors():
if neighbor not in visited:
stack.append(neighbor)
if return_value:
return best_recipe, best_value
return best_recipe
def get_optimal_possible_recipe(
self,
state,
recipe,
discounted=False,
potential_params={},
return_value=False,
):
"""
Return the best possible recipe that can be made starting with ingredients in `recipe`
Uses self._opt_recipe_cache / self._opt_recipe_discount_cache to avoid re-computing. This only works because
the recipe values are currently static (i.e. bonus_orders doesn't change). The caches would need to be
flushed if order dynamics are introduced
"""
cache_valid = (
not discounted or self._prev_potential_params == potential_params
)
if not cache_valid:
if discounted:
self._opt_recipe_discount_cache = {}
else:
self._opt_recipe_cache = {}
if discounted:
cache = self._opt_recipe_discount_cache
self._prev_potential_params = potential_params
else:
cache = self._opt_recipe_cache
if recipe not in cache:
# Compute best recipe now and store in cache for later use
opt_recipe, value = self._get_optimal_possible_recipe(
state,
recipe,
discounted=discounted,
potential_params=potential_params,
return_value=True,
)
cache[recipe] = (opt_recipe, value)
# Return best recipe (and value) from cache
if return_value:
return cache[recipe]
return cache[recipe][0]
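# Illustrative sketch (recipe=None stands in for an empty pot):
#   best = mdp.get_optimal_possible_recipe(state, None)
#   best, value = mdp.get_optimal_possible_recipe(state, Recipe([Recipe.ONION]), return_value=True)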
@staticmethod
def _assert_valid_grid(grid):
"""Raises an AssertionError if the grid is invalid.
grid: A sequence of sequences of spaces, representing a grid of a
certain height and width. grid[y][x] is the space at row y and column
x. A space must be one of 'X' (a counter), ' ' (an empty space),
'O' (onion supply), 'T' (tomato supply), 'P' (pot), 'D' (dish supply),
'S' (serving location), or a digit '1'-'9' (a player's starting position).
"""
height = len(grid)
width = len(grid[0])
# Make sure the grid is not ragged
assert all(len(row) == width for row in grid), "Ragged grid"
# Borders must not be free spaces
def is_not_free(c):
return c in "XOPDST"
for y in range(height):
assert is_not_free(grid[y][0]), "Left border must not be free"
assert is_not_free(grid[y][-1]), "Right border must not be free"
for x in range(width):
assert is_not_free(grid[0][x]), "Top border must not be free"
assert is_not_free(grid[-1][x]), "Bottom border must not be free"
all_elements = [element for row in grid for element in row]
digits = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
layout_digits = [e for e in all_elements if e in digits]
num_players = len(layout_digits)
assert num_players > 0, "No players (digits) in grid"
layout_digits = list(sorted(map(int, layout_digits)))
assert layout_digits == list(
range(1, num_players + 1)
), "Some players were missing"
assert all(
c in "XOPDST123456789 " for c in all_elements
), "Invalid character in grid"
assert all_elements.count("1") == 1, "'1' must be present exactly once"
assert (
all_elements.count("D") >= 1
), "'D' must be present at least once"
assert (
all_elements.count("S") >= 1
), "'S' must be present at least once"
assert (
all_elements.count("P") >= 1
), "'P' must be present at least once"
assert (
all_elements.count("O") >= 1 or all_elements.count("T") >= 1
), "'O' or 'T' must be present at least once"
################################
# EVENT LOGGING HELPER METHODS #
################################
def log_object_potting(
self, events_infos, state, old_soup, new_soup, obj_name, player_index
):
"""Player added an ingredient to a pot"""
obj_pickup_key = "potting_" + obj_name
if obj_pickup_key not in events_infos:
raise ValueError("Unknown event {}".format(obj_pickup_key))
events_infos[obj_pickup_key][player_index] = True
POTTING_FNS = {
"optimal": self.is_potting_optimal,
"catastrophic": self.is_potting_catastrophic,
"viable": self.is_potting_viable,
"useless": self.is_potting_useless,
}
for outcome, outcome_fn in POTTING_FNS.items():
if outcome_fn(state, old_soup, new_soup):
potting_key = "{}_{}_potting".format(outcome, obj_name)
events_infos[potting_key][player_index] = True
def log_object_pickup(
self, events_infos, state, obj_name, pot_states, player_index
):
"""Player picked an object up from a counter or a dispenser"""
obj_pickup_key = obj_name + "_pickup"
if obj_pickup_key not in events_infos:
raise ValueError("Unknown event {}".format(obj_pickup_key))
events_infos[obj_pickup_key][player_index] = True
USEFUL_PICKUP_FNS = {
"tomato": self.is_ingredient_pickup_useful,
"onion": self.is_ingredient_pickup_useful,
"dish": self.is_dish_pickup_useful,
}
if obj_name in USEFUL_PICKUP_FNS:
if USEFUL_PICKUP_FNS[obj_name](state, pot_states, player_index):
obj_useful_key = "useful_" + obj_name + "_pickup"
events_infos[obj_useful_key][player_index] = True
def log_object_drop(
self, events_infos, state, obj_name, pot_states, player_index
):
"""Player dropped the object on a counter"""
obj_drop_key = obj_name + "_drop"
if obj_drop_key not in events_infos:
raise ValueError("Unknown event {}".format(obj_drop_key))
events_infos[obj_drop_key][player_index] = True
USEFUL_DROP_FNS = {
"tomato": self.is_ingredient_drop_useful,
"onion": self.is_ingredient_drop_useful,
"dish": self.is_dish_drop_useful,
}
if obj_name in USEFUL_DROP_FNS:
if USEFUL_DROP_FNS[obj_name](state, pot_states, player_index):
obj_useful_key = "useful_" + obj_name + "_drop"
events_infos[obj_useful_key][player_index] = True
def is_dish_pickup_useful(self, state, pot_states, player_index=None):
"""
NOTE: this only works if self.num_players == 2
Useful if:
- Pot is ready/cooking and there is no player with a dish \
- 2 pots are ready/cooking and there is one player with a dish | -> number of dishes in players hands < number of ready/cooking/partially full soups
- Partially full pot is ok if the other player is on course to fill it /
We also want to prevent picking up and dropping dishes, so add the condition
that there must be no dishes on counters
"""
if self.num_players != 2:
return False
# This next line is to prevent reward hacking (this logic is also used by reward shaping)
dishes_on_counters = self.get_counter_objects_dict(state)["dish"]
no_dishes_on_counters = len(dishes_on_counters) == 0
num_player_dishes = len(state.player_objects_by_type["dish"])
non_empty_pots = len(
self.get_ready_pots(pot_states)
+ self.get_cooking_pots(pot_states)
+ self.get_partially_full_pots(pot_states)
)
return no_dishes_on_counters and num_player_dishes < non_empty_pots
def is_dish_drop_useful(self, state, pot_states, player_index):
"""
NOTE: this only works if self.num_players == 2
Useful if:
- Onion is needed (all pots are non-full)
- Nobody is holding onions
"""
if self.num_players != 2:
return False
all_non_full = len(self.get_full_pots(pot_states)) == 0
other_player = state.players[1 - player_index]
other_player_holding_onion = (
other_player.has_object()
and other_player.get_object().name == "onion"
)
return all_non_full and not other_player_holding_onion
def is_ingredient_pickup_useful(self, state, pot_states, player_index):
"""
NOTE: this only works if self.num_players == 2
Always useful unless:
- All pots are full & other agent is not holding a dish
"""
if self.num_players != 2:
return False
all_pots_full = self.num_pots == len(self.get_full_pots(pot_states))
other_player = state.players[1 - player_index]
other_player_has_dish = (
other_player.has_object()
and other_player.get_object().name == "dish"
)
return not (all_pots_full and not other_player_has_dish)
def is_ingredient_drop_useful(self, state, pot_states, player_index):
"""
NOTE: this only works if self.num_players == 2
Useful if:
- Dish is needed (all pots are full)
- Nobody is holding a dish
"""
if self.num_players != 2:
return False
all_pots_full = len(self.get_full_pots(pot_states)) == self.num_pots
other_player = state.players[1 - player_index]
other_player_holding_dish = (
other_player.has_object()
and other_player.get_object().name == "dish"
)
return all_pots_full and not other_player_holding_dish
def is_potting_optimal(self, state, old_soup, new_soup):
"""
True if the highest valued soup possible is the same before and after the potting
"""
old_recipe = (
Recipe(old_soup.ingredients) if old_soup.ingredients else None
)
new_recipe = Recipe(new_soup.ingredients)
old_val = self.get_recipe_value(
state, self.get_optimal_possible_recipe(state, old_recipe)
)
new_val = self.get_recipe_value(
state, self.get_optimal_possible_recipe(state, new_recipe)
)
return old_val == new_val
def is_potting_viable(self, state, old_soup, new_soup):
"""
True if there exists a non-zero reward soup possible from new ingredients
"""
new_recipe = Recipe(new_soup.ingredients)
new_val = self.get_recipe_value(
state, self.get_optimal_possible_recipe(state, new_recipe)
)
return new_val > 0
def is_potting_catastrophic(self, state, old_soup, new_soup):
"""
True if no non-zero reward soup is possible from new ingredients
"""
old_recipe = (
Recipe(old_soup.ingredients) if old_soup.ingredients else None
)
new_recipe = Recipe(new_soup.ingredients)
old_val = self.get_recipe_value(
state, self.get_optimal_possible_recipe(state, old_recipe)
)
new_val = self.get_recipe_value(
state, self.get_optimal_possible_recipe(state, new_recipe)
)
return old_val > 0 and new_val == 0
def is_potting_useless(self, state, old_soup, new_soup):
"""
True if an ingredient was added to a soup that was already guaranteed to be worth at most 0 points
"""
old_recipe = (
Recipe(old_soup.ingredients) if old_soup.ingredients else None
)
old_val = self.get_recipe_value(
state, self.get_optimal_possible_recipe(state, old_recipe)
)
return old_val == 0
#####################
# TERMINAL GRAPHICS #
#####################
def state_string(self, state):
"""String representation of the current state"""
players_dict = {player.position: player for player in state.players}
grid_string = ""
for y, terrain_row in enumerate(self.terrain_mtx):
for x, element in enumerate(terrain_row):
grid_string_add = ""
if (x, y) in players_dict.keys():
player = players_dict[(x, y)]
orientation = player.orientation
assert orientation in Direction.ALL_DIRECTIONS
player_idx_lst = [
i
for i, p in enumerate(state.players)
if p.position == player.position
]
assert len(player_idx_lst) == 1
grid_string_add += Action.ACTION_TO_CHAR[orientation]
player_object = player.held_object
if player_object:
grid_string_add += str(player_idx_lst[0])
if player_object.name[0] == "s":
# this is a soup
grid_string_add += str(player_object)
else:
grid_string_add += player_object.name[:1]
else:
grid_string_add += str(player_idx_lst[0])
else:
grid_string_add += element
if element == "X" and state.has_object((x, y)):
state_obj = state.get_object((x, y))
if state_obj.name[0] == "s":
grid_string_add += str(state_obj)
else:
grid_string_add += state_obj.name[:1]
elif element == "P" and state.has_object((x, y)):
soup = state.get_object((x, y))
# display soup
grid_string_add += str(soup)
grid_string += grid_string_add
grid_string += "".join([" "] * (7 - len(grid_string_add)))
grid_string += " "
grid_string += "\n\n"
if state.bonus_orders:
grid_string += "Bonus orders: {}\n".format(state.bonus_orders)
# grid_string += "State potential value: {}\n".format(self.potential_function(state))
return grid_string
###################
# STATE ENCODINGS #
###################
@property
def lossless_state_encoding_shape(self):
warnings.warn(
"Using the `lossless_state_encoding_shape` property is deprecated. Please use `get_lossless_state_encoding_shape` method instead",
DeprecationWarning,
)
return np.array(list(self.shape) + [26])
def get_lossless_state_encoding_shape(self):
return np.array(list(self.shape) + [26])
def lossless_state_encoding(
self, overcooked_state, horizon=400, debug=False
):
"""Featurizes a OvercookedState object into a stack of boolean masks that are easily readable by a CNN"""
assert (
self.num_players == 2
), "Functionality has to be added to support encondings for > 2 players"
assert type(debug) is bool
base_map_features = [
"pot_loc",
"counter_loc",
"onion_disp_loc",
"tomato_disp_loc",
"dish_disp_loc",
"serve_loc",
]
variable_map_features = [
"onions_in_pot",
"tomatoes_in_pot",
"onions_in_soup",
"tomatoes_in_soup",
"soup_cook_time_remaining",
"soup_done",
"dishes",
"onions",
"tomatoes",
]
urgency_features = ["urgency"]
all_objects = overcooked_state.all_objects_list
def make_layer(position, value):
layer = np.zeros(self.shape)
layer[position] = value
return layer
def process_for_player(primary_agent_idx):
# Ensure that primary_agent_idx layers are ordered before other_agent_idx layers
other_agent_idx = 1 - primary_agent_idx
ordered_player_features = [
"player_{}_loc".format(primary_agent_idx),
"player_{}_loc".format(other_agent_idx),
] + [
"player_{}_orientation_{}".format(
i, Direction.DIRECTION_TO_INDEX[d]
)
for i, d in itertools.product(
[primary_agent_idx, other_agent_idx],
Direction.ALL_DIRECTIONS,
)
]
# LAYERS = ordered_player_features + base_map_features + variable_map_features
LAYERS = (
ordered_player_features
+ base_map_features
+ variable_map_features
+ urgency_features
)
state_mask_dict = {k: np.zeros(self.shape) for k in LAYERS}
# MAP LAYERS
if horizon - overcooked_state.timestep < 40:
state_mask_dict["urgency"] = np.ones(self.shape)
for loc in self.get_counter_locations():
state_mask_dict["counter_loc"][loc] = 1
for loc in self.get_pot_locations():
state_mask_dict["pot_loc"][loc] = 1
for loc in self.get_onion_dispenser_locations():
state_mask_dict["onion_disp_loc"][loc] = 1
for loc in self.get_tomato_dispenser_locations():
state_mask_dict["tomato_disp_loc"][loc] = 1
for loc in self.get_dish_dispenser_locations():
state_mask_dict["dish_disp_loc"][loc] = 1
for loc in self.get_serving_locations():
state_mask_dict["serve_loc"][loc] = 1
# PLAYER LAYERS
for i, player in enumerate(overcooked_state.players):
player_orientation_idx = Direction.DIRECTION_TO_INDEX[
player.orientation
]
state_mask_dict["player_{}_loc".format(i)] = make_layer(
player.position, 1
)
state_mask_dict[
"player_{}_orientation_{}".format(
i, player_orientation_idx
)
] = make_layer(player.position, 1)
# OBJECT & STATE LAYERS
for obj in all_objects:
if obj.name == "soup":
# removed the next line because onion doesn't have to be in all the soups?
# if Recipe.ONION in obj.ingredients:
# get the ingredients into a {object: number} dictionary
ingredients_dict = Counter(obj.ingredients)
# assert "onion" in ingredients_dict.keys()
if obj.position in self.get_pot_locations():
if obj.is_idle:
# onions_in_pot and tomatoes_in_pot are used when the soup is idling, and ingredients could still be added
state_mask_dict["onions_in_pot"] += make_layer(
obj.position, ingredients_dict["onion"]
)
state_mask_dict["tomatoes_in_pot"] += make_layer(
obj.position, ingredients_dict["tomato"]
)
else:
state_mask_dict["onions_in_soup"] += make_layer(
obj.position, ingredients_dict["onion"]
)
state_mask_dict["tomatoes_in_soup"] += make_layer(
obj.position, ingredients_dict["tomato"]
)
state_mask_dict[
"soup_cook_time_remaining"
] += make_layer(
obj.position, obj.cook_time - obj._cooking_tick
)
if obj.is_ready:
state_mask_dict["soup_done"] += make_layer(
obj.position, 1
)
else:
# If player soup is not in a pot, treat it like a soup that is cooked with remaining time 0
state_mask_dict["onions_in_soup"] += make_layer(
obj.position, ingredients_dict["onion"]
)
state_mask_dict["tomatoes_in_soup"] += make_layer(
obj.position, ingredients_dict["tomato"]
)
state_mask_dict["soup_done"] += make_layer(
obj.position, 1
)
elif obj.name == "dish":
state_mask_dict["dishes"] += make_layer(obj.position, 1)
elif obj.name == "onion":
state_mask_dict["onions"] += make_layer(obj.position, 1)
elif obj.name == "tomato":
state_mask_dict["tomatoes"] += make_layer(obj.position, 1)
else:
raise ValueError("Unrecognized object")
if debug:
print("terrain----")
print(np.array(self.terrain_mtx))
print("-----------")
print(len(LAYERS))
print(len(state_mask_dict))
for k, v in state_mask_dict.items():
print(k)
print(np.transpose(v, (1, 0)))
# Stack of all the state masks, order decided by order of LAYERS
state_mask_stack = np.array(
[state_mask_dict[layer_id] for layer_id in LAYERS]
)
state_mask_stack = np.transpose(state_mask_stack, (1, 2, 0))
assert state_mask_stack.shape[:2] == self.shape
assert state_mask_stack.shape[2] == len(LAYERS)
# NOTE: currently not including time left or order_list in featurization
return np.array(state_mask_stack).astype(int)
# NOTE: Currently not very efficient, a decent amount of computation repeated here
num_players = len(overcooked_state.players)
final_obs_for_players = tuple(
process_for_player(i) for i in range(num_players)
)
return final_obs_for_players
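# Illustrative sketch: for a 2-player layout of width W and height H this
# returns a tuple with one array per player, each of shape (W, H, 26), e.g.
#   obs_p0, obs_p1 = mdp.lossless_state_encoding(state)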
@property
def featurize_state_shape(self):
warnings.warn(
"Using the `featurize_state_shape` property is deprecated. Please use `get_featurize_state_shape` method instead",
DeprecationWarning,
)
return self.get_featurize_state_shape(2)
def get_featurize_state_shape(self, num_pots=2):
num_pot_features = 10
base_features = 28
total_features = self.num_players * (
num_pots * num_pot_features + base_features
)
return (total_features,)
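# Worked example: with the default num_pots=2 and a 2-player layout this gives
# 2 * (2 * 10 + 28) = 96 features per player observation.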
def featurize_state(self, overcooked_state, mlam, num_pots=2, **kwargs):
"""
Encode state with some manually designed features. Works for arbitrary number of players
Arguments:
overcooked_state (OvercookedState): state we wish to featurize
mlam (MediumLevelActionManager): to be used for distance computations necessary for our higher-level feature encodings
num_pots (int): Encode the state (ingredients, whether cooking or not, etc.) of the 'num_pots' closest pots to each player.
If fewer than num_pots pots are reachable by player i, the remaining pot feature slots are encoded as all zeros. Changing this
impacts the shape of the feature encoding
Returns:
ordered_features (list[np.Array]): The ith element contains a player-centric featurized view for the ith player
The encoding for player i is as follows:
[player_i_features, other_player_features, player_i_dist_to_other_players, player_i_position]
player_{i}_features (length num_pots*10 + 26):
pi_orientation: length 4 one-hot-encoding of direction currently facing
pi_obj: length 4 one-hot-encoding of object currently being held (all 0s if no object held)
pi_wall_{j}: {0, 1} boolean value of whether player i has wall immediately in direction j
pi_closest_{onion|tomato|dish|soup|serving|empty_counter}: (dx, dy) where dx = x dist to item, dy = y dist to item. (0, 0) if item is currently held
pi_closest_soup_n_{onions|tomatoes}: int value for number of this ingredient in closest soup
pi_closest_pot_{j}_exists: {0, 1} depending on whether jth closest pot found. If 0, then all other pot features are 0. Note: can
be 0 even if there are more than j pots on layout, if the pot is not reachable by player i
pi_closest_pot_{j}_{is_empty|is_full|is_cooking|is_ready}: {0, 1} depending on boolean value for jth closest pot
pi_closest_pot_{j}_{num_onions|num_tomatoes}: int value for number of this ingredient in jth closest pot
pi_closest_pot_{j}_cook_time: int value for timesteps remaining on soup. 0 if no soup is cooking
pi_closest_pot_{j}: (dx, dy) to jth closest pot from player i location
other_player_features (length (num_players - 1)*(num_pots*10 + 26)):
ordered concatenation of player_{j}_features for j != i
player_i_dist_to_other_players (length (num_players - 1)*2):
[player_j.pos - player_i.pos for j != i]
player_i_position (length 2)
"""
all_features = {}
def concat_dicts(a, b):
return {**a, **b}
def make_closest_feature(idx, player, name, locations):
"""
Compute (x, y) deltas to closest feature of type `name`, and save it in the features dict
"""
feat_dict = {}
obj = None
held_obj = player.held_object
held_obj_name = held_obj.name if held_obj else "none"
if held_obj_name == name:
obj = held_obj
feat_dict["p{}_closest_{}".format(i, name)] = (0, 0)
else:
loc, deltas = self.get_deltas_to_closest_location(
player, locations, mlam
)
if loc and overcooked_state.has_object(loc):
obj = overcooked_state.get_object(loc)
feat_dict["p{}_closest_{}".format(idx, name)] = deltas
if name == "soup":
num_onions = num_tomatoes = 0
if obj:
ingredients_cnt = Counter(obj.ingredients)
num_onions, num_tomatoes = (
ingredients_cnt["onion"],
ingredients_cnt["tomato"],
)
feat_dict["p{}_closest_soup_n_onions".format(i)] = [num_onions]
feat_dict["p{}_closest_soup_n_tomatoes".format(i)] = [
num_tomatoes
]
return feat_dict
def make_pot_feature(idx, player, pot_idx, pot_loc, pot_states):
"""
Encode pot at pot_loc relative to 'player'
"""
# Pot doesn't exist
feat_dict = {}
if not pot_loc:
feat_dict["p{}_closest_pot_{}_exists".format(idx, pot_idx)] = [
0
]
feat_dict[
"p{}_closest_pot_{}_is_empty".format(idx, pot_idx)
] = [0]
feat_dict[
"p{}_closest_pot_{}_is_full".format(idx, pot_idx)
] = [0]
feat_dict[
"p{}_closest_pot_{}_is_cooking".format(idx, pot_idx)
] = [0]
feat_dict[
"p{}_closest_pot_{}_is_ready".format(idx, pot_idx)
] = [0]
feat_dict[
"p{}_closest_pot_{}_num_onions".format(idx, pot_idx)
] = [0]
feat_dict[
"p{}_closest_pot_{}_num_tomatoes".format(idx, pot_idx)
] = [0]
feat_dict[
"p{}_closest_pot_{}_cook_time".format(idx, pot_idx)
] = [0]
feat_dict["p{}_closest_pot_{}".format(idx, pot_idx)] = (0, 0)
return feat_dict
# Get position information
deltas = self.get_deltas_to_location(player, pot_loc)
# Get pot state info
is_empty = int(pot_loc in self.get_empty_pots(pot_states))
is_full = int(pot_loc in self.get_full_pots(pot_states))
is_cooking = int(pot_loc in self.get_cooking_pots(pot_states))
is_ready = int(pot_loc in self.get_ready_pots(pot_states))
# Get soup state info
num_onions = num_tomatoes = 0
cook_time_remaining = 0
if not is_empty:
soup = overcooked_state.get_object(pot_loc)
ingredients_cnt = Counter(soup.ingredients)
num_onions, num_tomatoes = (
ingredients_cnt["onion"],
ingredients_cnt["tomato"],
)
cook_time_remaining = (
0 if soup.is_idle else soup.cook_time_remaining
)
# Encode pot and soup info
feat_dict["p{}_closest_pot_{}_exists".format(idx, pot_idx)] = [1]
feat_dict["p{}_closest_pot_{}_is_empty".format(idx, pot_idx)] = [
is_empty
]
feat_dict["p{}_closest_pot_{}_is_full".format(idx, pot_idx)] = [
is_full
]
feat_dict["p{}_closest_pot_{}_is_cooking".format(idx, pot_idx)] = [
is_cooking
]
feat_dict["p{}_closest_pot_{}_is_ready".format(idx, pot_idx)] = [
is_ready
]
feat_dict["p{}_closest_pot_{}_num_onions".format(idx, pot_idx)] = [
num_onions
]
feat_dict[
"p{}_closest_pot_{}_num_tomatoes".format(idx, pot_idx)
] = [num_tomatoes]
feat_dict["p{}_closest_pot_{}_cook_time".format(idx, pot_idx)] = [
cook_time_remaining
]
feat_dict["p{}_closest_pot_{}".format(idx, pot_idx)] = deltas
return feat_dict
IDX_TO_OBJ = ["onion", "soup", "dish", "tomato"]
OBJ_TO_IDX = {o_name: idx for idx, o_name in enumerate(IDX_TO_OBJ)}
counter_objects = self.get_counter_objects_dict(overcooked_state)
pot_states = self.get_pot_states(overcooked_state)
for i, player in enumerate(overcooked_state.players):
# Player info
orientation_idx = Direction.DIRECTION_TO_INDEX[player.orientation]
all_features["p{}_orientation".format(i)] = np.eye(4)[
orientation_idx
]
obj = player.held_object
if obj is None:
held_obj_name = "none"
all_features["p{}_objs".format(i)] = np.zeros(len(IDX_TO_OBJ))
else:
held_obj_name = obj.name
obj_idx = OBJ_TO_IDX[held_obj_name]
all_features["p{}_objs".format(i)] = np.eye(len(IDX_TO_OBJ))[
obj_idx
]
# Closest feature for each object type
all_features = concat_dicts(
all_features,
make_closest_feature(
i,
player,
"onion",
self.get_onion_dispenser_locations()
+ counter_objects["onion"],
),
)
all_features = concat_dicts(
all_features,
make_closest_feature(
i,
player,
"tomato",
self.get_tomato_dispenser_locations()
+ counter_objects["tomato"],
),
)
all_features = concat_dicts(
all_features,
make_closest_feature(
i,
player,
"dish",
self.get_dish_dispenser_locations()
+ counter_objects["dish"],
),
)
all_features = concat_dicts(
all_features,
make_closest_feature(
i, player, "soup", counter_objects["soup"]
),
)
all_features = concat_dicts(
all_features,
make_closest_feature(
i, player, "serving", self.get_serving_locations()
),
)
all_features = concat_dicts(
all_features,
make_closest_feature(
i,
player,
"empty_counter",
self.get_empty_counter_locations(overcooked_state),
),
)
# Closest pots info
pot_locations = self.get_pot_locations().copy()
for pot_idx in range(num_pots):
_, closest_pot_loc = mlam.motion_planner.min_cost_to_feature(
player.pos_and_or, pot_locations, with_argmin=True
)
pot_features = make_pot_feature(
i, player, pot_idx, closest_pot_loc, pot_states
)
all_features = concat_dicts(all_features, pot_features)
if closest_pot_loc:
pot_locations.remove(closest_pot_loc)
# Adjacent features info
for direction, pos_and_feat in enumerate(
self.get_adjacent_features(player)
):
_, feat = pos_and_feat
all_features["p{}_wall_{}".format(i, direction)] = (
[0] if feat == " " else [1]
)
# Convert all list and tuple values to np.arrays
features_np = {k: np.array(v) for k, v in all_features.items()}
player_features = [] # Non-position player-specific features
player_absolute_positions = [] # Position player-specific features
player_relative_positions = (
[]
) # Relative position player-specific features
# Compute all player-centric features for each player
for i, player_i in enumerate(overcooked_state.players):
# All absolute player-centric features
player_i_dict = {
k: v
for k, v in features_np.items()
if k[:2] == "p{}".format(i)
}
features = np.concatenate(list(player_i_dict.values()))
abs_pos = np.array(player_i.position)
# Calculate position relative to all other players
rel_pos = []
for player_j in overcooked_state.players:
if player_i == player_j:
continue
pj_rel_to_pi = np.array(
pos_distance(player_j.position, player_i.position)
)
rel_pos.append(pj_rel_to_pi)
rel_pos = np.concatenate(rel_pos)
player_features.append(features)
player_absolute_positions.append(abs_pos)
player_relative_positions.append(rel_pos)
# Compute a symmetric, player-centric encoding of features for each player
ordered_features = []
for i, player_i in enumerate(overcooked_state.players):
player_i_features = player_features[i]
player_i_abs_pos = player_absolute_positions[i]
player_i_rel_pos = player_relative_positions[i]
other_player_features = np.concatenate(
[feats for j, feats in enumerate(player_features) if j != i]
)
player_i_ordered_features = np.squeeze(
np.concatenate(
[
player_i_features,
other_player_features,
player_i_rel_pos,
player_i_abs_pos,
]
)
)
ordered_features.append(player_i_ordered_features)
return ordered_features
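# Illustrative sketch (construction of the MediumLevelActionManager `mlam` is
# assumed to happen elsewhere):
#   feats = mdp.featurize_state(state, mlam)  # one 96-d vector per player for 2 players / 2 pots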
def get_deltas_to_closest_location(self, player, locations, mlam):
_, closest_loc = mlam.motion_planner.min_cost_to_feature(
player.pos_and_or, locations, with_argmin=True
)
deltas = self.get_deltas_to_location(player, closest_loc)
return closest_loc, deltas
def get_deltas_to_location(self, player, location):
if location is None:
# "any object that does not exist or I am carrying is going to show up as a (0,0)
# but I can disambiguate the two possibilities by looking at the features
# for what kind of object I'm carrying"
return (0, 0)
dy_loc, dx_loc = pos_distance(location, player.position)
return dy_loc, dx_loc
###############################
# POTENTIAL REWARD SHAPING FN #
###############################
def potential_function(self, state, mp, gamma=0.99):
"""
Essentially, this is the ɸ(s) function.
The main goal here is to approximately infer the actions of an optimal agent, and derive an estimate for the value
function of the optimal policy. The perfect potential function is indeed the value function.
At a high level, we assume each agent acts independently, and greedily optimally, and then, using the decay factor "gamma",
we calculate the expected discounted reward under this policy.
Some implementation details:
* the process of delivering a soup is broken into 4 steps
* Step 1: placing the first ingredient into an empty pot
* Step 2: placing the remaining ingredients in the pot
* Step 3: cooking the soup/retrieving a dish with which to serve the soup
* Step 4: delivering the soup once it is in a dish
* Here is an exhaustive list of the greedy assumptions made at each step
* step 1:
* If an agent is holding an ingredient that could be used to cook an optimal soup, it will use it in that soup
* If no such optimal soup exists, but there is an empty pot, the agent will place the ingredient there
* If neither of the above cases holds, no potential is awarded for possessing the ingredient
* step 2:
* The agent will always try to cook the highest valued soup possible based on the current ingredients in a pot
* Any agent possessing a missing ingredient for an optimal soup will travel directly to the closest such pot
* If the optimal soup has all ingredients, the closest agent not holding anything will go to cook it
* step 3:
* Any player holding a dish attempts to serve the highest valued soup based on recipe values and cook time remaining
* step 4:
* Any agent holding a soup will go directly to the nearest serving area
* At every step, the expected reward is discounted by multiplying the optimal reward by gamma ^ (estimated #steps to complete greedy action)
* In the case that certain actions are infeasible (i.e. an agent is holding a soup in step 4, but no path exists to a serving
area), estimated number of steps in order to complete the action defaults to `max_steps`
* Cooperative behavior between the two agents is not considered for complexity reasons
* Soups that are worth <1 points are rounded to be worth 1 point. This is to incentivize the agent to cook a worthless soup
that happens to be in a pot in order to free up the pot
Parameters:
state: OvercookedState instance representing the state to evaluate potential for
mp: MotionPlanner instance used to calculate gridworld distances to objects
gamma: float, discount factor
max_steps: int, number of steps a high level action is assumed to take in worst case
Returns
phi(state), the potential of the state
"""
if not hasattr(Recipe, "_tomato_value") or not hasattr(
Recipe, "_onion_value"
):
raise ValueError(
"Potential function requires Recipe onion and tomato values to work properly"
)
# Constants needed for potential function
potential_params = {
"gamma": gamma,
"tomato_value": Recipe._tomato_value
if Recipe._tomato_value
else 13,
"onion_value": Recipe._onion_value if Recipe._onion_value else 21,
**POTENTIAL_CONSTANTS.get(
self.layout_name, POTENTIAL_CONSTANTS["default"]
),
}
pot_states = self.get_pot_states(state)
# Base potential value is the geometric sum of making optimal soups infinitely
(
opt_recipe,
discounted_opt_recipe_value,
) = self.get_optimal_possible_recipe(
state,
None,
discounted=True,
potential_params=potential_params,
return_value=True,
)
opt_recipe_value = self.get_recipe_value(state, opt_recipe)
discount = discounted_opt_recipe_value / opt_recipe_value
steady_state_value = (discount / (1 - discount)) * opt_recipe_value
potential = steady_state_value
# Get list of all soups that have >0 ingredients, sorted based on value of best possible recipe
idle_soups = [
state.get_object(pos)
for pos in self.get_full_but_not_cooking_pots(pot_states)
]
idle_soups.extend(
[
state.get_object(pos)
for pos in self.get_partially_full_pots(pot_states)
]
)
idle_soups = sorted(
idle_soups,
key=lambda soup: self.get_optimal_possible_recipe(
state,
Recipe(soup.ingredients),
discounted=True,
potential_params=potential_params,
return_value=True,
)[1],
reverse=True,
)
# Build mapping of non_idle soups to the potential value each one will contribute
# Default potential value is the maximal discount for the last two steps applied to the optimal recipe value
cooking_soups = [
state.get_object(pos) for pos in self.get_cooking_pots(pot_states)
]
done_soups = [
state.get_object(pos) for pos in self.get_ready_pots(pot_states)
]
non_idle_soup_vals = {
soup: gamma
** (
potential_params["max_delivery_steps"]
+ max(
potential_params["max_pickup_steps"],
soup.cook_time - soup._cooking_tick,
)
)
* max(self.get_recipe_value(state, soup.recipe), 1)
for soup in cooking_soups + done_soups
}
# Get descriptive list of players based on different attributes
# Note that these lists are mutually exclusive
players_holding_soups = [
player
for player in state.players
if player.has_object() and player.get_object().name == "soup"
]
players_holding_dishes = [
player
for player in state.players
if player.has_object() and player.get_object().name == "dish"
]
players_holding_tomatoes = [
player
for player in state.players
if player.has_object()
and player.get_object().name == Recipe.TOMATO
]
players_holding_onions = [
player
for player in state.players
if player.has_object() and player.get_object().name == Recipe.ONION
]
players_holding_nothing = [
player for player in state.players if not player.has_object()
]
### Step 4 potential ###
# Add potential for each player with a soup
for player in players_holding_soups:
# Even if delivery_dist is infinite, we still award potential (as an agent might need to pass the soup to other player first)
delivery_dist = mp.min_cost_to_feature(
player.pos_and_or, self.terrain_pos_dict["S"]
)
potential += gamma ** min(
delivery_dist, potential_params["max_delivery_steps"]
) * max(
self.get_recipe_value(state, player.get_object().recipe), 1
)
### Step 3 potential ###
# Reweight each non-idle soup value based on agents with dishes performing greedily-optimally as outlined in docstring
for player in players_holding_dishes:
best_pickup_soup = None
best_pickup_value = 0
# find best soup to pick up with dish agent currently has
for soup in non_idle_soup_vals:
# How far away the soup is (inf if not-reachable)
pickup_dist = mp.min_cost_to_feature(
player.pos_and_or, [soup.position]
)
# mask to award zero score if not reachable
# Note: this means that potentially "useful" dish pickups (where agent passes dish to other agent
# that can reach the soup) do not receive a potential bump
is_useful = int(pickup_dist < np.inf)
# Always assume worst-case discounting for step 4, and bump zero-valued soups to 1 as mentioned in docstring
pickup_soup_value = gamma ** potential_params[
"max_delivery_steps"
] * max(self.get_recipe_value(state, soup.recipe), 1)
cook_time_remaining = soup.cook_time - soup._cooking_tick
discount = gamma ** max(
cook_time_remaining,
min(pickup_dist, potential_params["max_pickup_steps"]),
)
# Final discount-adjusted value for this player pursuing this soup
pickup_value = discount * pickup_soup_value * is_useful
# Update best soup found for this player
if pickup_dist < np.inf and pickup_value > best_pickup_value:
best_pickup_soup = soup
best_pickup_value = pickup_value
# Set best-case score for this soup. Can only improve upon previous players policies
# Note cooperative policies between players not considered
if best_pickup_soup:
non_idle_soup_vals[best_pickup_soup] = max(
non_idle_soup_vals[best_pickup_soup], best_pickup_value
)
# Apply potential for each idle soup as calculated above
for soup in non_idle_soup_vals:
potential += non_idle_soup_vals[soup]
### Step 2 potential ###
# Iterate over idle soups in decreasing order of value so we greedily prioritize higher valued soups
for soup in idle_soups:
# Calculate optimal recipe
curr_recipe = Recipe(soup.ingredients)
opt_recipe = self.get_optimal_possible_recipe(
state,
curr_recipe,
discounted=True,
potential_params=potential_params,
)
# Calculate missing ingredients needed to complete optimal recipe
missing_ingredients = list(opt_recipe.ingredients)
for ingredient in soup.ingredients:
missing_ingredients.remove(ingredient)
# Base discount for steps 3-4
discount = gamma ** (
max(potential_params["max_pickup_steps"], opt_recipe.time)
+ potential_params["max_delivery_steps"]
)
            # Add a multiplicative discount for each needed ingredient (this has the effect of giving more reward to soups
            # that are closer to being completed)
for ingredient in missing_ingredients:
# Players who might have an ingredient we need
pertinent_players = (
players_holding_tomatoes
if ingredient == Recipe.TOMATO
else players_holding_onions
)
dist = np.inf
closest_player = None
# Find closest player with ingredient we need
for player in pertinent_players:
curr_dist = mp.min_cost_to_feature(
player.pos_and_or, [soup.position]
)
if curr_dist < dist:
dist = curr_dist
closest_player = player
# Update discount to account for adding this missing ingredient (defaults to min_coeff if no pertinent players exist)
discount *= gamma ** min(
dist, potential_params["pot_{}_steps".format(ingredient)]
)
                # Cross off this player's ingredient contribution so it can't be double-counted
if closest_player:
pertinent_players.remove(closest_player)
# Update discount to account for time it takes to start the soup cooking once last ingredient is added
if missing_ingredients:
# We assume it only takes one timestep if there are missing ingredients since the agent delivering the last ingredient
# will be at the pot already
discount *= gamma
else:
# Otherwise, we assume that every player holding nothing will make a beeline to this soup since it's already optimal
cook_dist = min(
[
mp.min_cost_to_feature(
player.pos_and_or, [soup.position]
)
for player in players_holding_nothing
],
default=np.inf,
)
discount *= gamma ** min(
cook_dist, potential_params["max_pickup_steps"]
)
potential += discount * max(
self.get_recipe_value(state, opt_recipe), 1
)
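        # Worked example for the Step 2 discount above (illustrative numbers
        # only): assuming gamma = 0.99, opt_recipe.time = 20,
        # max_pickup_steps = max_delivery_steps = 10, and one missing onion held
        # by a player 3 steps away (pot_onion_steps = 10):
        #   discount  = 0.99 ** (max(10, 20) + 10)   # worst-case steps 3-4
        #   discount *= 0.99 ** min(3, 10)           # fetch the missing onion
        #   discount *= 0.99                         # start the soup cooking
        #   => 0.99 ** 34 ~= 0.71, multiplied by the optimal recipe value.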
### Step 1 Potential ###
# Add potential for each tomato that is left over after using all others to complete optimal recipes
for player in players_holding_tomatoes:
# will be inf if there exists no empty pot that is reachable
dist = mp.min_cost_to_feature(
player.pos_and_or, self.get_empty_pots(pot_states)
)
is_useful = int(dist < np.inf)
discount = (
gamma
** (
min(potential_params["pot_tomato_steps"], dist)
+ potential_params["max_pickup_steps"]
+ potential_params["max_delivery_steps"]
)
* is_useful
)
potential += discount * potential_params["tomato_value"]
# Add potential for each onion that is remaining after using others to complete optimal recipes if possible
for player in players_holding_onions:
dist = mp.min_cost_to_feature(
player.pos_and_or, self.get_empty_pots(pot_states)
)
is_useful = int(dist < np.inf)
discount = (
gamma
** (
min(potential_params["pot_onion_steps"], dist)
+ potential_params["max_pickup_steps"]
+ potential_params["max_delivery_steps"]
)
* is_useful
)
potential += discount * potential_params["onion_value"]
# At last
return potential
##############
# DEPRECATED #
##############
# def calculate_distance_based_shaped_reward(self, state, new_state):
# """
# Adding reward shaping based on distance to certain features.
# """
# distance_based_shaped_reward = 0
#
# pot_states = self.get_pot_states(new_state)
# ready_pots = pot_states["tomato"]["ready"] + pot_states["onion"]["ready"]
# cooking_pots = ready_pots + pot_states["tomato"]["cooking"] + pot_states["onion"]["cooking"]
# nearly_ready_pots = cooking_pots + pot_states["tomato"]["partially_full"] + pot_states["onion"]["partially_full"]
# dishes_in_play = len(new_state.player_objects_by_type['dish'])
# for player_old, player_new in zip(state.players, new_state.players):
# # Linearly increase reward depending on vicinity to certain features, where distance of 10 achieves 0 reward
# max_dist = 8
#
# if player_new.held_object is not None and player_new.held_object.name == 'dish' and len(nearly_ready_pots) >= dishes_in_play:
# min_dist_to_pot_new = np.inf
# min_dist_to_pot_old = np.inf
# for pot in nearly_ready_pots:
# new_dist = np.linalg.norm(np.array(pot) - np.array(player_new.position))
# old_dist = np.linalg.norm(np.array(pot) - np.array(player_old.position))
# if new_dist < min_dist_to_pot_new:
# min_dist_to_pot_new = new_dist
# if old_dist < min_dist_to_pot_old:
# min_dist_to_pot_old = old_dist
# if min_dist_to_pot_old > min_dist_to_pot_new:
# distance_based_shaped_reward += self.reward_shaping_params["POT_DISTANCE_REW"] * (1 - min(min_dist_to_pot_new / max_dist, 1))
#
# if player_new.held_object is None and len(cooking_pots) > 0 and dishes_in_play == 0:
# min_dist_to_d_new = np.inf
# min_dist_to_d_old = np.inf
# for serving_loc in self.terrain_pos_dict['D']:
# new_dist = np.linalg.norm(np.array(serving_loc) - np.array(player_new.position))
# old_dist = np.linalg.norm(np.array(serving_loc) - np.array(player_old.position))
# if new_dist < min_dist_to_d_new:
# min_dist_to_d_new = new_dist
# if old_dist < min_dist_to_d_old:
# min_dist_to_d_old = old_dist
#
# if min_dist_to_d_old > min_dist_to_d_new:
# distance_based_shaped_reward += self.reward_shaping_params["DISH_DISP_DISTANCE_REW"] * (1 - min(min_dist_to_d_new / max_dist, 1))
#
# if player_new.held_object is not None and player_new.held_object.name == 'soup':
# min_dist_to_s_new = np.inf
# min_dist_to_s_old = np.inf
# for serving_loc in self.terrain_pos_dict['S']:
# new_dist = np.linalg.norm(np.array(serving_loc) - np.array(player_new.position))
# old_dist = np.linalg.norm(np.array(serving_loc) - np.array(player_old.position))
# if new_dist < min_dist_to_s_new:
# min_dist_to_s_new = new_dist
#
# if old_dist < min_dist_to_s_old:
# min_dist_to_s_old = old_dist
#
# if min_dist_to_s_old > min_dist_to_s_new:
# distance_based_shaped_reward += self.reward_shaping_params["SOUP_DISTANCE_REW"] * (1 - min(min_dist_to_s_new / max_dist, 1))
#
# return distance_based_shaped_reward
| 127,017 | 37.327701 | 168 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/mdp/__init__.py | 0 | 0 | 0 | py |
|
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/mdp/overcooked_trajectory.py | import numpy as np
"""
NOTE: Currently under construction...
TODO: stretch goal of taking object-oriented approach to trajectories by creating Trajectory class.
This would require changes both throughout this repo and overcooked-ai repo, so it's blue sky goal for now
This file's utility functions represents a primitive first-step towards treating trajectories as first class objects
The standard format for Overcooked trajectories is:
trajs = {
# With shape (n_episodes, game_len), where game_len might vary across games:
"ep_states": [ [traj_1_states], [traj_2_states], ... ], # Individual trajectory states
"ep_actions": [ [traj_1_joint_actions], [traj_2_joint_actions], ... ], # Trajectory joint actions, by agent
"ep_rewards": [ [traj_1_timestep_rewards], [traj_2_timestep_rewards], ... ], # (Sparse) reward values by timestep
"ep_dones": [ [traj_1_timestep_dones], [traj_2_timestep_dones], ... ], # Done values (should be all 0s except last one for each traj) TODO: add this to traj checks
"ep_infos": [ [traj_1_timestep_infos], [traj_2_timestep_infos], ... ], # Info dictionaries
# With shape (n_episodes, ):
"ep_returns": [ cumulative_traj1_reward, cumulative_traj2_reward, ... ], # Sum of sparse rewards across each episode
"ep_lengths": [ traj1_length, traj2_length, ... ], # Lengths (in env timesteps) of each episode
"mdp_params": [ traj1_mdp_params, traj2_mdp_params, ... ], # Custom MDP params for each episode
"env_params": [ traj1_env_params, traj2_env_params, ... ], # Custom Env params for each episode
# Custom metadata key value pairs
"metadatas": [{custom metadata key:value pairs for traj 1}, {...}, ...] # Each metadata dictionary is of similar format to the trajectories dictionary
}
"""
TIMESTEP_TRAJ_KEYS = set(
["ep_states", "ep_actions", "ep_rewards", "ep_dones", "ep_infos"]
)
EPISODE_TRAJ_KEYS = set(
["ep_returns", "ep_lengths", "mdp_params", "env_params"]
)
DEFAULT_TRAJ_KEYS = set(
list(TIMESTEP_TRAJ_KEYS) + list(EPISODE_TRAJ_KEYS) + ["metadatas"]
)
def get_empty_trajectory():
return {k: [] if k != "metadatas" else {} for k in DEFAULT_TRAJ_KEYS}
def append_trajectories(traj_one, traj_two):
# Note: Drops metadatas for now
if not traj_one and not traj_two:
return {}
if not traj_one:
traj_one = get_empty_trajectory()
if not traj_two:
traj_two = get_empty_trajectory()
if (
set(traj_one.keys()) != DEFAULT_TRAJ_KEYS
or set(traj_two.keys()) != DEFAULT_TRAJ_KEYS
):
raise ValueError("Trajectory key mismatch!")
appended_traj = {"metadatas": {}}
for k in traj_one:
if k != "metadatas":
traj_one_value = traj_one[k]
traj_two_value = traj_two[k]
assert type(traj_one_value) == type(
traj_two_value
), "mismatched trajectory types!"
if type(traj_one_value) == list:
appended_traj[k] = traj_one_value + traj_two_value
else:
appended_traj[k] = np.concatenate(
[traj_one_value, traj_two_value], axis=0
)
return appended_traj
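# Illustrative usage sketch (not part of the original module): the per-episode
# values below are hypothetical placeholders rather than real rollout data, and
# the helper name _example_append_trajectories is made up for this example.
def _example_append_trajectories():
    traj_a = get_empty_trajectory()
    traj_b = get_empty_trajectory()
    for traj in (traj_a, traj_b):
        traj["ep_states"].append(["s0", "s1"])  # would normally be OvercookedState objects
        traj["ep_actions"].append([("stay", "stay"), ("interact", "stay")])
        traj["ep_rewards"].append([0, 20])
        traj["ep_dones"].append([False, True])
        traj["ep_infos"].append([{}, {}])
        traj["ep_returns"].append(20)
        traj["ep_lengths"].append(2)
        traj["mdp_params"].append({"layout_name": "cramped_room"})
        traj["env_params"].append({"horizon": 2})
    # episodes from both inputs are concatenated; metadatas are dropped
    merged = append_trajectories(traj_a, traj_b)
    assert len(merged["ep_returns"]) == 2
    return merged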
| 3,430 | 41.358025 | 184 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/visualization/visualization_utils.py | from IPython.display import Image, display
from ipywidgets import IntSlider, interactive
def show_image_in_ipython(data, *args, **kwargs):
display(Image(data, *args, **kwargs))
def ipython_images_slider(image_pathes_list, slider_label="", first_arg=0):
def display_f(**kwargs):
display(Image(image_pathes_list[kwargs[slider_label]]))
return interactive(
display_f,
**{
slider_label: IntSlider(
min=0, max=len(image_pathes_list) - 1, step=1
)
}
)
def show_ipython_images_slider(
image_pathes_list, slider_label="", first_arg=0
):
def display_f(**kwargs):
display(Image(image_pathes_list[kwargs[slider_label]]))
display(
interactive(
display_f,
**{
slider_label: IntSlider(
min=0, max=len(image_pathes_list) - 1, step=1
)
}
)
)
| 950 | 23.384615 | 75 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/visualization/state_visualizer.py | import copy
import math
import os
import pygame
from overcooked_ai_py.mdp.actions import Action, Direction
from overcooked_ai_py.mdp.layout_generator import (
COUNTER,
DISH_DISPENSER,
EMPTY,
ONION_DISPENSER,
POT,
SERVING_LOC,
TOMATO_DISPENSER,
)
from overcooked_ai_py.static import FONTS_DIR, GRAPHICS_DIR
from overcooked_ai_py.utils import (
classproperty,
cumulative_rewards_from_rew_list,
generate_temporary_file_path,
)
from overcooked_ai_py.visualization.pygame_utils import (
MultiFramePygameImage,
blit_on_new_surface_of_size,
run_static_resizeable_window,
scale_surface_by_factor,
vstack_surfaces,
)
from overcooked_ai_py.visualization.visualization_utils import (
show_image_in_ipython,
show_ipython_images_slider,
)
roboto_path = os.path.join(FONTS_DIR, "Roboto-Regular.ttf")
class StateVisualizer:
TERRAINS_IMG = MultiFramePygameImage(
os.path.join(GRAPHICS_DIR, "terrain.png"),
os.path.join(GRAPHICS_DIR, "terrain.json"),
)
OBJECTS_IMG = MultiFramePygameImage(
os.path.join(GRAPHICS_DIR, "objects.png"),
os.path.join(GRAPHICS_DIR, "objects.json"),
)
SOUPS_IMG = MultiFramePygameImage(
os.path.join(GRAPHICS_DIR, "soups.png"),
os.path.join(GRAPHICS_DIR, "soups.json"),
)
CHEFS_IMG = MultiFramePygameImage(
os.path.join(GRAPHICS_DIR, "chefs.png"),
os.path.join(GRAPHICS_DIR, "chefs.json"),
)
ARROW_IMG = pygame.image.load(os.path.join(GRAPHICS_DIR, "arrow.png"))
INTERACT_IMG = pygame.image.load(
os.path.join(GRAPHICS_DIR, "interact.png")
)
STAY_IMG = pygame.image.load(os.path.join(GRAPHICS_DIR, "stay.png"))
UNSCALED_TILE_SIZE = 15
    DEFAULT_VALUES = {
        "height": None, # if None use (hud_height+grid_height)
        "width": None, # if None use grid_width - NOTE: can chop off hud if hud is wider than grid
"tile_size": 75,
"window_fps": 30,
"player_colors": ["blue", "green"],
"is_rendering_hud": True,
"hud_font_size": 10,
"hud_font_path": roboto_path,
"hud_system_font_name": None, # if set to None use hud_font_path
"hud_font_color": (255, 255, 255), # white
"hud_data_default_key_order": [
"all_orders",
"bonus_orders",
"time_left",
"score",
"potential",
        ],
        "hud_interline_size": 10,
        "hud_margin_bottom": 10,
        "hud_margin_top": 10,
        "hud_margin_left": 10,
        "hud_distance_between_orders": 5,
        "hud_order_size": 15,
        "is_rendering_cooking_timer": True,
        "show_timer_when_cooked": True,
        "cooking_timer_font_size": 20,
        "cooking_timer_font_path": roboto_path,
        "cooking_timer_system_font_name": None, # if set to None use cooking_timer_font_path
        "cooking_timer_font_color": (255, 0, 0), # red
        "grid": None,
        "background_color": (155, 101, 0), # color of empty counter
        "is_rendering_action_probs": True, # whether to visually represent on the grid what actions a given agent would take
}
TILE_TO_FRAME_NAME = {
EMPTY: "floor",
COUNTER: "counter",
ONION_DISPENSER: "onions",
TOMATO_DISPENSER: "tomatoes",
POT: "pot",
DISH_DISPENSER: "dishes",
SERVING_LOC: "serve",
}
def __init__(self, **kwargs):
params = copy.deepcopy(self.DEFAULT_VALUES)
params.update(kwargs)
self.configure(**params)
self.reload_fonts()
def reload_fonts(self):
pygame.font.init()
if not hasattr(self, "_font"):
self._fonts = {}
        # initializing fonts only if needed because it can take quite a long time,
# see https://pygame.readthedocs.io/en/latest/4_text/text.html#initialize-a-font
if self.is_rendering_hud:
self.hud_font = self._init_font(
self.hud_font_size,
self.hud_font_path,
self.hud_system_font_name,
)
else:
self.hud_font = None
if self.is_rendering_cooking_timer:
self.cooking_timer_font = self._init_font(
self.cooking_timer_font_size,
self.cooking_timer_font_path,
self.cooking_timer_system_font_name,
)
else:
self.cooking_timer_font = None
@classmethod
def configure_defaults(cls, **kwargs):
cls._check_config_validity(kwargs)
cls.DEFAULT_VALUES.update(copy.deepcopy(kwargs))
def configure(self, **kwargs):
StateVisualizer._check_config_validity(kwargs)
for param_name, param_value in copy.deepcopy(kwargs).items():
setattr(self, param_name, param_value)
@staticmethod
def default_hud_data(state, **kwargs):
result = {
"timestep": state.timestep,
"all_orders": [r.to_dict() for r in state.all_orders],
"bonus_orders": [r.to_dict() for r in state.bonus_orders],
}
result.update(copy.deepcopy(kwargs))
return result
@staticmethod
def default_hud_data_from_trajectories(trajectories, trajectory_idx=0):
scores = cumulative_rewards_from_rew_list(
trajectories["ep_rewards"][trajectory_idx]
)
return [
StateVisualizer.default_hud_data(state, score=scores[i])
for i, state in enumerate(
trajectories["ep_states"][trajectory_idx]
)
]
def display_rendered_trajectory(
self,
trajectories,
trajectory_idx=0,
hud_data=None,
action_probs=None,
img_directory_path=None,
img_extension=".png",
img_prefix="",
ipython_display=True,
):
"""
        saves images of every timestep from trajectory in img_directory_path (or a temporary directory if a path is not specified)
trajectories (dict): trajectories dict, same format as used by AgentEvaluator
trajectory_idx(int): index of trajectory in case of multiple trajectories inside trajectories param
        img_directory_path (str): path to directory where consecutive images will be saved
ipython_display(bool): if True render slider with rendered states
hud_data(list(dict)): hud data for every timestep
        action_probs(list(list((list(float))))): action probs for every player and timestep accessed in the way action_probs[timestep][player][action]
"""
states = trajectories["ep_states"][trajectory_idx]
grid = trajectories["mdp_params"][trajectory_idx]["terrain"]
if hud_data is None:
if self.is_rendering_hud:
hud_data = StateVisualizer.default_hud_data_from_trajectories(
trajectories, trajectory_idx
)
else:
hud_data = [None] * len(states)
if action_probs is None:
action_probs = [None] * len(states)
if not img_directory_path:
img_directory_path = generate_temporary_file_path(
prefix="overcooked_visualized_trajectory", extension=""
)
os.makedirs(img_directory_path, exist_ok=True)
img_pathes = []
for i, state in enumerate(states):
img_name = img_prefix + str(i) + img_extension
img_path = os.path.join(img_directory_path, img_name)
img_pathes.append(
self.display_rendered_state(
state=state,
hud_data=hud_data[i],
action_probs=action_probs[i],
grid=grid,
img_path=img_path,
ipython_display=False,
window_display=False,
)
)
if ipython_display:
return show_ipython_images_slider(img_pathes, "timestep")
return img_directory_path
def display_rendered_state(
self,
state,
hud_data=None,
action_probs=None,
grid=None,
img_path=None,
ipython_display=False,
window_display=False,
):
"""
renders state as image
state (OvercookedState): state to render
        hud_data (dict): dict with hud data; keys are turned into descriptive strings via _key_to_hud_text
        grid (iterable): 2d map of the layout; when not supplied, the grid from the object attribute is used NOTE: if the grid is supplied neither as a method param nor as an object attribute, an error is raised
img_path (str): if it is not None save image to specific path
ipython_display (bool): if True render state in ipython cell, if img_path is None create file with randomized name in /tmp directory
window_display (bool): if True render state into pygame window
        action_probs(list(list(float))): action probs for every player accessed in the way action_probs[player][action]
"""
assert (
window_display or img_path or ipython_display
), "specify at least one of the ways to output result state image: window_display, img_path, or ipython_display"
surface = self.render_state(
state, grid, hud_data, action_probs=action_probs
)
if img_path is None and ipython_display:
img_path = generate_temporary_file_path(
prefix="overcooked_visualized_state_", extension=".png"
)
if img_path is not None:
pygame.image.save(surface, img_path)
if ipython_display:
show_image_in_ipython(img_path)
if window_display:
run_static_resizeable_window(surface, self.window_fps)
return img_path
def render_state(self, state, grid, hud_data=None, action_probs=None):
"""
returns surface with rendered game state scaled to selected size,
decoupled from display_rendered_state function to make testing easier
"""
pygame.init()
grid = grid or self.grid
assert grid
grid_surface = pygame.surface.Surface(
self._unscaled_grid_pixel_size(grid)
)
self._render_grid(grid_surface, grid)
self._render_players(grid_surface, state.players)
self._render_objects(grid_surface, state.objects, grid)
if self.scale_by_factor != 1:
grid_surface = scale_surface_by_factor(
grid_surface, self.scale_by_factor
)
        # render text after rescaling, as text looks bad when it is rendered at a small resolution and then rescaled to a bigger one
if self.is_rendering_cooking_timer:
self._render_cooking_timers(grid_surface, state.objects, grid)
        # arrows do not look good when rendered at a very small resolution
if self.is_rendering_action_probs and action_probs is not None:
self._render_actions_probs(
grid_surface, state.players, action_probs
)
if self.is_rendering_hud and hud_data:
hud_width = self.width or grid_surface.get_width()
hud_surface = pygame.surface.Surface(
(hud_width, self._calculate_hud_height(hud_data))
)
hud_surface.fill(self.background_color)
self._render_hud_data(hud_surface, hud_data)
rendered_surface = vstack_surfaces(
[hud_surface, grid_surface], self.background_color
)
else:
hud_width = None
rendered_surface = grid_surface
result_surface_size = (
self.width or rendered_surface.get_width(),
self.height or rendered_surface.get_height(),
)
if result_surface_size != rendered_surface.get_size():
result_surface = blit_on_new_surface_of_size(
rendered_surface,
result_surface_size,
background_color=self.background_color,
)
else:
result_surface = rendered_surface
return result_surface
@property
def scale_by_factor(self):
return self.tile_size / StateVisualizer.UNSCALED_TILE_SIZE
@property
def hud_line_height(self):
return self.hud_interline_size + self.hud_font_size
@staticmethod
def _check_config_validity(config):
assert set(config.keys()).issubset(
set(StateVisualizer.DEFAULT_VALUES.keys())
)
def _init_font(self, font_size, font_path=None, system_font_name=None):
if system_font_name:
key = "%i-sys:%s" % (font_size, system_font_name)
font = self._fonts.get(key) or pygame.font.SysFont(
system_font_name, font_size
)
else:
key = "%i-path:%s" % (font_size, font_path)
font = self._fonts.get(key) or pygame.font.Font(
font_path, font_size
)
self._fonts[key] = font
return font
def _unscaled_grid_pixel_size(self, grid):
y_tiles = len(grid)
x_tiles = len(grid[0])
return (
x_tiles * self.UNSCALED_TILE_SIZE,
y_tiles * self.UNSCALED_TILE_SIZE,
)
def _render_grid(self, surface, grid):
for y_tile, row in enumerate(grid):
for x_tile, tile in enumerate(row):
self.TERRAINS_IMG.blit_on_surface(
surface,
self._position_in_unscaled_pixels((x_tile, y_tile)),
StateVisualizer.TILE_TO_FRAME_NAME[tile],
)
def _position_in_unscaled_pixels(self, position):
"""
        takes x and y coordinates in tiles, returns x and y coordinates in pixels
"""
(x, y) = position
return (self.UNSCALED_TILE_SIZE * x, self.UNSCALED_TILE_SIZE * y)
def _position_in_scaled_pixels(self, position):
"""
        takes x and y coordinates in tiles, returns x and y coordinates in pixels
"""
(x, y) = position
return (self.tile_size * x, self.tile_size * y)
def _render_players(self, surface, players):
def chef_frame_name(direction_name, held_object_name):
frame_name = direction_name
if held_object_name:
frame_name += "-" + held_object_name
return frame_name
def hat_frame_name(direction_name, player_color_name):
return "%s-%shat" % (direction_name, player_color_name)
for player_num, player in enumerate(players):
player_color_name = self.player_colors[player_num]
direction_name = Direction.DIRECTION_TO_NAME[player.orientation]
held_obj = player.held_object
if held_obj is None:
held_object_name = ""
else:
if held_obj.name == "soup":
if "onion" in held_obj.ingredients:
held_object_name = "soup-onion"
else:
held_object_name = "soup-tomato"
else:
held_object_name = held_obj.name
self.CHEFS_IMG.blit_on_surface(
surface,
self._position_in_unscaled_pixels(player.position),
chef_frame_name(direction_name, held_object_name),
)
self.CHEFS_IMG.blit_on_surface(
surface,
self._position_in_unscaled_pixels(player.position),
hat_frame_name(direction_name, player_color_name),
)
@staticmethod
def _soup_frame_name(ingredients_names, status):
num_onions = ingredients_names.count("onion")
num_tomatoes = ingredients_names.count("tomato")
return "soup_%s_tomato_%i_onion_%i" % (
status,
num_tomatoes,
num_onions,
)
def _render_objects(self, surface, objects, grid):
def render_soup(surface, obj, grid):
(x_pos, y_pos) = obj.position
if grid[y_pos][x_pos] == POT:
if obj.is_ready:
soup_status = "cooked"
else:
soup_status = "idle"
            else: # grid[y_pos][x_pos] != POT
soup_status = "done"
frame_name = StateVisualizer._soup_frame_name(
obj.ingredients, soup_status
)
self.SOUPS_IMG.blit_on_surface(
surface,
self._position_in_unscaled_pixels(obj.position),
frame_name,
)
for obj in objects.values():
if obj.name == "soup":
render_soup(surface, obj, grid)
else:
self.OBJECTS_IMG.blit_on_surface(
surface,
self._position_in_unscaled_pixels(obj.position),
obj.name,
)
def _render_cooking_timers(self, surface, objects, grid):
for key, obj in objects.items():
(x_pos, y_pos) = obj.position
if obj.name == "soup" and grid[y_pos][x_pos] == POT:
if obj._cooking_tick != -1 and (
obj._cooking_tick <= obj.cook_time
or self.show_timer_when_cooked
):
text_surface = self.cooking_timer_font.render(
str(obj._cooking_tick),
True,
self.cooking_timer_font_color,
)
(tile_pos_x, tile_pos_y) = self._position_in_scaled_pixels(
obj.position
)
# calculate font position to be in center on x axis, and 0.9 from top on y axis
font_position = (
tile_pos_x
+ int(
(self.tile_size - text_surface.get_width()) * 0.5
),
tile_pos_y
+ int(
(self.tile_size - text_surface.get_height()) * 0.9
),
)
surface.blit(text_surface, font_position)
def _sorted_hud_items(self, hud_data):
def default_order_then_alphabetic(item):
key = item[0]
try:
i = self.hud_data_default_key_order.index(key)
            except ValueError:
i = 99999
return (i, key)
return sorted(hud_data.items(), key=default_order_then_alphabetic)
def _key_to_hud_text(self, key):
return key.replace("_", " ").title() + ": "
def _render_hud_data(self, surface, hud_data):
def hud_text_position(line_num):
return (
self.hud_margin_left,
self.hud_margin_top + self.hud_line_height * line_num,
)
def hud_recipes_position(text_surface, text_surface_position):
(text_surface_x, text_surface_y) = text_surface_position
return (text_surface_x + text_surface.get_width(), text_surface_y)
def get_hud_recipes_surface(orders_dicts):
order_width = order_height = self.hud_order_size
scaled_order_size = (order_width, order_width)
orders_surface_height = order_height
orders_surface_width = (
len(orders_dicts) * order_width
+ (len(orders_dicts) - 1) * self.hud_distance_between_orders
)
unscaled_order_size = (
self.UNSCALED_TILE_SIZE,
self.UNSCALED_TILE_SIZE,
)
recipes_surface = pygame.surface.Surface(
(orders_surface_width, orders_surface_height)
)
recipes_surface.fill(self.background_color)
next_surface_x = 0
for order_dict in orders_dicts:
frame_name = StateVisualizer._soup_frame_name(
order_dict["ingredients"], "done"
)
unscaled_order_surface = pygame.surface.Surface(
unscaled_order_size
)
unscaled_order_surface.fill(self.background_color)
self.SOUPS_IMG.blit_on_surface(
unscaled_order_surface, (0, 0), frame_name
)
if scaled_order_size == unscaled_order_size:
scaled_order_surface = unscaled_order_surface
else:
scaled_order_surface = pygame.transform.scale(
unscaled_order_surface, (order_width, order_width)
)
recipes_surface.blit(scaled_order_surface, (next_surface_x, 0))
next_surface_x += (
order_width + self.hud_distance_between_orders
)
return recipes_surface
for hud_line_num, (key, value) in enumerate(
self._sorted_hud_items(hud_data)
):
hud_text = self._key_to_hud_text(key)
if key not in [
"all_orders",
"bonus_orders",
"start_all_orders",
"start_bonus_orders",
]:
hud_text += str(value)
text_surface = self.hud_font.render(
hud_text, True, self.hud_font_color
)
text_surface_position = hud_text_position(hud_line_num)
surface.blit(text_surface, text_surface_position)
if (
key
in [
"all_orders",
"bonus_orders",
"start_all_orders",
"start_bonus_orders",
]
and value
):
recipes_surface_position = hud_recipes_position(
text_surface, text_surface_position
)
recipes_surface = get_hud_recipes_surface(value)
assert (
recipes_surface.get_width() + text_surface.get_width()
<= surface.get_width()
), "surface width is too small to fit recipes in single line"
surface.blit(recipes_surface, recipes_surface_position)
def _calculate_hud_height(self, hud_data):
return (
self.hud_margin_top
+ len(hud_data) * self.hud_line_height
+ self.hud_margin_bottom
)
def _render_on_tile_position(
self,
scaled_grid_surface,
source_surface,
tile_position,
horizontal_align="left",
vertical_align="top",
):
assert vertical_align in ["top", "center", "bottom"]
left_x, top_y = self._position_in_scaled_pixels(tile_position)
if horizontal_align == "left":
x = left_x
elif horizontal_align == "center":
x = left_x + (self.tile_size - source_surface.get_width()) / 2
elif horizontal_align == "right":
x = left_x + self.tile_size - source_surface.get_width()
else:
raise ValueError(
"horizontal_align can have one of the values: "
+ str(["left", "center", "right"])
)
if vertical_align == "top":
y = top_y
elif vertical_align == "center":
y = top_y + (self.tile_size - source_surface.get_height()) / 2
elif vertical_align == "bottom":
y = top_y + self.tile_size - source_surface.get_height()
else:
raise ValueError(
"vertical_align can have one of the values: "
+ str(["top", "center", "bottom"])
)
scaled_grid_surface.blit(source_surface, (x, y))
def _render_actions_probs(self, surface, players, action_probs):
direction_to_rotation = {
Direction.NORTH: 0,
Direction.WEST: 90,
Direction.SOUTH: 180,
Direction.EAST: 270,
}
direction_to_aligns = {
Direction.NORTH: {
"horizontal_align": "center",
"vertical_align": "bottom",
},
Direction.WEST: {
"horizontal_align": "right",
"vertical_align": "center",
},
Direction.SOUTH: {
"horizontal_align": "center",
"vertical_align": "top",
},
Direction.EAST: {
"horizontal_align": "left",
"vertical_align": "center",
},
}
rescaled_arrow = pygame.transform.scale(
self.ARROW_IMG, (self.tile_size, self.tile_size)
)
# divide width by math.sqrt(2) to always fit both interact icon and stay icon into single tile
rescaled_interact = pygame.transform.scale(
self.INTERACT_IMG,
(int(self.tile_size / math.sqrt(2)), self.tile_size),
)
rescaled_stay = pygame.transform.scale(
self.STAY_IMG, (int(self.tile_size / math.sqrt(2)), self.tile_size)
)
for player, probs in zip(players, action_probs):
if probs is not None:
for action in Action.ALL_ACTIONS:
# use math sqrt to make probability proportional to area of the image
size = math.sqrt(probs[Action.ACTION_TO_INDEX[action]])
if action == "interact":
img = pygame.transform.rotozoom(
rescaled_interact, 0, size
)
self._render_on_tile_position(
surface,
img,
player.position,
horizontal_align="left",
vertical_align="center",
)
elif action == Action.STAY:
img = pygame.transform.rotozoom(rescaled_stay, 0, size)
self._render_on_tile_position(
surface,
img,
player.position,
horizontal_align="right",
vertical_align="center",
)
else:
position = Action.move_in_direction(
player.position, action
)
img = pygame.transform.rotozoom(
rescaled_arrow, direction_to_rotation[action], size
)
self._render_on_tile_position(
surface,
img,
position,
**direction_to_aligns[action]
)
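# Illustrative usage sketch (not part of the original module). It assumes the
# usual overcooked_ai_py API for building a layout (OvercookedGridworld.from_layout_name,
# get_standard_start_state, terrain_mtx); the layout name and output path are
# placeholders, and the function name is made up for this example.
def _example_render_start_state(img_path="overcooked_state.png"):
    from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld
    mdp = OvercookedGridworld.from_layout_name("cramped_room")
    state = mdp.get_standard_start_state()
    visualizer = StateVisualizer(tile_size=60, is_rendering_hud=False)
    # the grid must be passed here or via the "grid" kwarg of the constructor
    return visualizer.display_rendered_state(
        state, grid=mdp.terrain_mtx, img_path=img_path, ipython_display=False
    )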
| 27,047 | 36.776536 | 191 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/visualization/pygame_utils.py | import pygame
from pygame.locals import DOUBLEBUF, HWSURFACE, QUIT, RESIZABLE, VIDEORESIZE
from overcooked_ai_py.utils import load_from_json
def run_static_resizeable_window(surface, fps=30):
"""
window that can be resized and closed using gui
"""
pygame.init()
clock = pygame.time.Clock()
window = pygame.display.set_mode(
surface.get_size(), HWSURFACE | DOUBLEBUF | RESIZABLE
)
window.blit(surface, (0, 0))
pygame.display.flip()
try:
while True:
pygame.event.pump()
event = pygame.event.wait()
if event.type == QUIT:
pygame.display.quit()
pygame.quit()
elif event.type == VIDEORESIZE:
window = pygame.display.set_mode(
event.dict["size"], HWSURFACE | DOUBLEBUF | RESIZABLE
)
window.blit(
pygame.transform.scale(surface, event.dict["size"]), (0, 0)
)
pygame.display.flip()
clock.tick(fps)
except:
pygame.display.quit()
pygame.quit()
if event.type != QUIT: # if user meant to quit error does not matter
raise
def vstack_surfaces(surfaces, background_color=None):
"""
    stack surfaces vertically (on y axis)
    if surfaces have different widths, fill the remaining area with the background color
"""
result_width = max(surface.get_width() for surface in surfaces)
result_height = sum(surface.get_height() for surface in surfaces)
result_surface = pygame.surface.Surface((result_width, result_height))
if background_color:
result_surface.fill(background_color)
next_surface_y_position = 0
for surface in surfaces:
result_surface.blit(surface, (0, next_surface_y_position))
next_surface_y_position += surface.get_height()
return result_surface
def scale_surface_by_factor(surface, scale_by_factor):
    """return a scaled copy of the input surface (with size multiplied by the scale_by_factor param)
    the content of the surface is scaled as well
"""
unscaled_size = surface.get_size()
scaled_size = tuple(int(dim * scale_by_factor) for dim in unscaled_size)
return pygame.transform.scale(surface, scaled_size)
def blit_on_new_surface_of_size(surface, size, background_color=None):
    """blit surface onto a new surface of the given size (with no resize of its content), filling uncovered parts of the result area with the background color"""
result_surface = pygame.surface.Surface(size)
if background_color:
result_surface.fill(background_color)
result_surface.blit(surface, (0, 0))
return result_surface
class MultiFramePygameImage:
    """used to easily read frames of images from the overcooked-demo repo"""
def __init__(self, img_path, frames_path):
self.image = pygame.image.load(img_path)
self.frames_rectangles = MultiFramePygameImage.load_frames_rectangles(
frames_path
)
def blit_on_surface(
self, surface, top_left_pixel_position, frame_name, **kwargs
):
surface.blit(
self.image,
top_left_pixel_position,
area=self.frames_rectangles[frame_name],
**kwargs
)
@staticmethod
def load_frames_rectangles(json_path):
frames_json = load_from_json(json_path)
if (
"textures" in frames_json.keys()
        ): # check if it's the format of soups.json
assert (
frames_json["textures"][0]["scale"] == 1
) # not implemented support for scale here
frames = frames_json["textures"][0]["frames"]
        else: # assume it's the format of objects.json, terrain.json and chefs.json
frames = []
for filename, frame_dict in frames_json["frames"].items():
frame_dict["filename"] = filename
frames.append(frame_dict)
result = {}
for frame_dict in frames:
assert not frame_dict.get("rotated") # not implemented support yet
assert not frame_dict.get("trimmed") # not implemented support yet
frame_name = frame_dict["filename"].split(".")[0]
frame = frame_dict["frame"]
rect = pygame.Rect(frame["x"], frame["y"], frame["w"], frame["h"])
result[frame_name] = rect
return result
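# Illustrative usage sketch (not part of the original module): stacking two
# plain-colored surfaces and rescaling the result with the helpers above; the
# function name and the sizes/colors are made up for this example.
def _example_stack_and_scale():
    pygame.init()
    top = pygame.surface.Surface((100, 20))
    top.fill((255, 0, 0))
    bottom = pygame.surface.Surface((80, 40))
    bottom.fill((0, 255, 0))
    # the narrower surface leaves a margin filled with the background color
    stacked = vstack_surfaces([top, bottom], background_color=(0, 0, 0))
    assert stacked.get_size() == (100, 60)
    return scale_surface_by_factor(stacked, 2)  # -> 200 x 120 surface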
| 4,406 | 35.122951 | 158 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/visualization/__init__.py | 0 | 0 | 0 | py |
|
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/planning/planners.py | import itertools
import os
import pickle
import time
import numpy as np
from overcooked_ai_py.data.planners import (
PLANNERS_DIR,
load_saved_action_manager,
load_saved_motion_planner,
)
from overcooked_ai_py.mdp.actions import Action, Direction
from overcooked_ai_py.mdp.overcooked_mdp import (
EVENT_TYPES,
OvercookedGridworld,
OvercookedState,
PlayerState,
)
from overcooked_ai_py.planning.search import Graph, NotConnectedError
from overcooked_ai_py.utils import manhattan_distance
# Run planning logic with additional checks and
# computation to prevent or identify possible minor errors
SAFE_RUN = False
NO_COUNTERS_PARAMS = {
"start_orientations": False,
"wait_allowed": False,
"counter_goals": [],
"counter_drop": [],
"counter_pickup": [],
"same_motion_goals": True,
}
NO_COUNTERS_START_OR_PARAMS = {
"start_orientations": True,
"wait_allowed": False,
"counter_goals": [],
"counter_drop": [],
"counter_pickup": [],
"same_motion_goals": True,
}
class MotionPlanner(object):
"""A planner that computes optimal plans for a single agent to
arrive at goal positions and orientations in an OvercookedGridworld.
Args:
mdp (OvercookedGridworld): gridworld of interest
counter_goals (list): list of positions of counters we will consider
as valid motion goals
"""
def __init__(self, mdp, counter_goals=[]):
self.mdp = mdp
# If positions facing counters should be
# allowed as motion goals
self.counter_goals = counter_goals
# Graph problem that solves shortest path problem
# between any position & orientation start-goal pair
self.graph_problem = self._graph_from_grid()
self.motion_goals_for_pos = self._get_goal_dict()
self.all_plans = self._populate_all_plans()
def save_to_file(self, filename):
with open(filename, "wb") as output:
pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
@staticmethod
def from_file(filename):
return load_saved_motion_planner(filename)
@staticmethod
def from_pickle_or_compute(
mdp,
counter_goals,
custom_filename=None,
force_compute=False,
info=False,
):
assert isinstance(mdp, OvercookedGridworld)
filename = (
custom_filename
if custom_filename is not None
else mdp.layout_name + "_mp.pkl"
)
if force_compute:
return MotionPlanner.compute_mp(filename, mdp, counter_goals)
try:
mp = MotionPlanner.from_file(filename)
if mp.counter_goals != counter_goals or mp.mdp != mdp:
if info:
print(
"motion planner with different counter goal or mdp found, computing from scratch"
)
return MotionPlanner.compute_mp(filename, mdp, counter_goals)
except (
FileNotFoundError,
ModuleNotFoundError,
EOFError,
AttributeError,
) as e:
if info:
print("Recomputing motion planner due to:", e)
return MotionPlanner.compute_mp(filename, mdp, counter_goals)
if info:
print(
"Loaded MotionPlanner from {}".format(
os.path.join(PLANNERS_DIR, filename)
)
)
return mp
@staticmethod
def compute_mp(filename, mdp, counter_goals):
final_filepath = os.path.join(PLANNERS_DIR, filename)
print(
"Computing MotionPlanner to be saved in {}".format(final_filepath)
)
start_time = time.time()
mp = MotionPlanner(mdp, counter_goals)
print(
"It took {} seconds to create mp".format(time.time() - start_time)
)
mp.save_to_file(final_filepath)
return mp
def get_plan(self, start_pos_and_or, goal_pos_and_or):
"""
Returns pre-computed plan from initial agent position
and orientation to a goal position and orientation.
Args:
start_pos_and_or (tuple): starting (pos, or) tuple
goal_pos_and_or (tuple): goal (pos, or) tuple
"""
plan_key = (start_pos_and_or, goal_pos_and_or)
action_plan, pos_and_or_path, plan_cost = self.all_plans[plan_key]
return action_plan, pos_and_or_path, plan_cost
def get_gridworld_distance(self, start_pos_and_or, goal_pos_and_or):
"""Number of actions necessary to go from starting position
and orientations to goal position and orientation (not including
interaction action)"""
assert self.is_valid_motion_start_goal_pair(
start_pos_and_or, goal_pos_and_or
), "Goal position and orientation were not a valid motion goal"
_, _, plan_cost = self.get_plan(start_pos_and_or, goal_pos_and_or)
# Removing interaction cost
return plan_cost - 1
def get_gridworld_pos_distance(self, pos1, pos2):
"""Minimum (over possible orientations) number of actions necessary
to go from starting position to goal position (not including
interaction action)."""
# NOTE: currently unused, pretty bad code. If used in future, clean up
min_cost = np.Inf
for d1, d2 in itertools.product(Direction.ALL_DIRECTIONS, repeat=2):
start = (pos1, d1)
end = (pos2, d2)
if self.is_valid_motion_start_goal_pair(start, end):
plan_cost = self.get_gridworld_distance(start, end)
if plan_cost < min_cost:
min_cost = plan_cost
return min_cost
def _populate_all_plans(self):
"""Pre-computes all valid plans from any valid pos_or to any valid motion_goal"""
all_plans = {}
valid_pos_and_ors = (
self.mdp.get_valid_player_positions_and_orientations()
)
valid_motion_goals = filter(
self.is_valid_motion_goal, valid_pos_and_ors
)
for start_motion_state, goal_motion_state in itertools.product(
valid_pos_and_ors, valid_motion_goals
):
if not self.is_valid_motion_start_goal_pair(
start_motion_state, goal_motion_state
):
continue
action_plan, pos_and_or_path, plan_cost = self._compute_plan(
start_motion_state, goal_motion_state
)
plan_key = (start_motion_state, goal_motion_state)
all_plans[plan_key] = (action_plan, pos_and_or_path, plan_cost)
return all_plans
def is_valid_motion_start_goal_pair(
self, start_pos_and_or, goal_pos_and_or
):
if not self.is_valid_motion_goal(goal_pos_and_or):
return False
# the valid motion start goal needs to be in the same connected component
if not self.positions_are_connected(start_pos_and_or, goal_pos_and_or):
return False
return True
def is_valid_motion_goal(self, goal_pos_and_or):
"""Checks that desired single-agent goal state (position and orientation)
is reachable and is facing a terrain feature"""
goal_position, goal_orientation = goal_pos_and_or
if goal_position not in self.mdp.get_valid_player_positions():
return False
# Restricting goals to be facing a terrain feature
pos_of_facing_terrain = Action.move_in_direction(
goal_position, goal_orientation
)
facing_terrain_type = self.mdp.get_terrain_type_at_pos(
pos_of_facing_terrain
)
if facing_terrain_type == " " or (
facing_terrain_type == "X"
and pos_of_facing_terrain not in self.counter_goals
):
return False
return True
def _compute_plan(self, start_motion_state, goal_motion_state):
"""Computes optimal action plan for single agent movement
Args:
start_motion_state (tuple): starting positions and orientations
goal_motion_state (tuple): goal positions and orientations
"""
assert self.is_valid_motion_start_goal_pair(
start_motion_state, goal_motion_state
)
positions_plan = self._get_position_plan_from_graph(
start_motion_state, goal_motion_state
)
(
action_plan,
pos_and_or_path,
plan_length,
) = self.action_plan_from_positions(
positions_plan, start_motion_state, goal_motion_state
)
return action_plan, pos_and_or_path, plan_length
def positions_are_connected(self, start_pos_and_or, goal_pos_and_or):
return self.graph_problem.are_in_same_cc(
start_pos_and_or, goal_pos_and_or
)
    def _get_position_plan_from_graph(self, start_node, end_node):
        """Recovers the positions the agent must reach, after the start node, in order to arrive at the end node"""
node_path = self.graph_problem.get_node_path(start_node, end_node)
assert node_path[0] == start_node and node_path[-1] == end_node
positions_plan = [state_node[0] for state_node in node_path[1:]]
return positions_plan
def action_plan_from_positions(
self, position_list, start_motion_state, goal_motion_state
):
"""
        Recovers an action plan that reaches the goal motion position and orientation, and executes
        an interact action.
Args:
position_list (list): list of positions to be reached after the starting position
(does not include starting position, but includes ending position)
start_motion_state (tuple): starting position and orientation
goal_motion_state (tuple): goal position and orientation
Returns:
action_plan (list): list of actions to reach goal state
pos_and_or_path (list): list of (pos, or) pairs visited during plan execution
(not including start, but including goal)
"""
goal_position, goal_orientation = goal_motion_state
action_plan, pos_and_or_path = [], []
position_to_go = list(position_list)
curr_pos, curr_or = start_motion_state
# Get agent to goal position
while position_to_go and curr_pos != goal_position:
next_pos = position_to_go.pop(0)
action = Action.determine_action_for_change_in_pos(
curr_pos, next_pos
)
action_plan.append(action)
curr_or = action if action != Action.STAY else curr_or
pos_and_or_path.append((next_pos, curr_or))
curr_pos = next_pos
# Fix agent orientation if necessary
if curr_or != goal_orientation:
new_pos, _ = self.mdp._move_if_direction(
curr_pos, curr_or, goal_orientation
)
assert new_pos == goal_position
action_plan.append(goal_orientation)
pos_and_or_path.append((goal_position, goal_orientation))
# Add interact action
action_plan.append(Action.INTERACT)
pos_and_or_path.append((goal_position, goal_orientation))
return action_plan, pos_and_or_path, len(action_plan)
def _graph_from_grid(self):
"""Creates a graph adjacency matrix from an Overcooked MDP class."""
state_decoder = {}
for state_index, motion_state in enumerate(
self.mdp.get_valid_player_positions_and_orientations()
):
state_decoder[state_index] = motion_state
pos_encoder = {
motion_state: state_index
for state_index, motion_state in state_decoder.items()
}
num_graph_nodes = len(state_decoder)
adjacency_matrix = np.zeros((num_graph_nodes, num_graph_nodes))
for state_index, start_motion_state in state_decoder.items():
for (
action,
successor_motion_state,
) in self._get_valid_successor_motion_states(start_motion_state):
adj_pos_index = pos_encoder[successor_motion_state]
adjacency_matrix[state_index][
adj_pos_index
] = self._graph_action_cost(action)
return Graph(adjacency_matrix, pos_encoder, state_decoder)
def _graph_action_cost(self, action):
"""Returns cost of a single-agent action"""
assert action in Action.ALL_ACTIONS
return 1
def _get_valid_successor_motion_states(self, start_motion_state):
"""Get valid motion states one action away from the starting motion state."""
start_position, start_orientation = start_motion_state
return [
(
action,
self.mdp._move_if_direction(
start_position, start_orientation, action
),
)
for action in Action.ALL_ACTIONS
]
def min_cost_between_features(
self, pos_list1, pos_list2, manhattan_if_fail=False
):
"""
Determines the minimum number of timesteps necessary for a player to go from any
terrain feature in list1 to any feature in list2 and perform an interact action
"""
min_dist = np.Inf
min_manhattan = np.Inf
for pos1, pos2 in itertools.product(pos_list1, pos_list2):
for mg1, mg2 in itertools.product(
self.motion_goals_for_pos[pos1],
self.motion_goals_for_pos[pos2],
):
if not self.is_valid_motion_start_goal_pair(mg1, mg2):
if manhattan_if_fail:
pos0, pos1 = mg1[0], mg2[0]
curr_man_dist = manhattan_distance(pos0, pos1)
if curr_man_dist < min_manhattan:
min_manhattan = curr_man_dist
continue
curr_dist = self.get_gridworld_distance(mg1, mg2)
if curr_dist < min_dist:
min_dist = curr_dist
# +1 to account for interaction action
if manhattan_if_fail and min_dist == np.Inf:
min_dist = min_manhattan
min_cost = min_dist + 1
return min_cost
def min_cost_to_feature(
self,
start_pos_and_or,
feature_pos_list,
with_argmin=False,
debug=False,
):
"""
Determines the minimum number of timesteps necessary for a player to go from the starting
position and orientation to any feature in feature_pos_list and perform an interact action
"""
start_pos = start_pos_and_or[0]
assert self.mdp.get_terrain_type_at_pos(start_pos) != "X"
min_dist = np.Inf
best_feature = None
for feature_pos in feature_pos_list:
for feature_goal in self.motion_goals_for_pos[feature_pos]:
if not self.is_valid_motion_start_goal_pair(
start_pos_and_or, feature_goal
):
continue
curr_dist = self.get_gridworld_distance(
start_pos_and_or, feature_goal
)
if curr_dist < min_dist:
best_feature = feature_pos
min_dist = curr_dist
# +1 to account for interaction action
min_cost = min_dist + 1
if with_argmin:
# assert best_feature is not None, "{} vs {}".format(start_pos_and_or, feature_pos_list)
return min_cost, best_feature
return min_cost
def _get_goal_dict(self):
"""Creates a dictionary of all possible goal states for all possible
terrain features that the agent might want to interact with."""
terrain_feature_locations = []
for terrain_type, pos_list in self.mdp.terrain_pos_dict.items():
if terrain_type != " ":
terrain_feature_locations += pos_list
return {
feature_pos: self._get_possible_motion_goals_for_feature(
feature_pos
)
for feature_pos in terrain_feature_locations
}
def _get_possible_motion_goals_for_feature(self, goal_pos):
"""Returns a list of possible goal positions (and orientations)
that could be used for motion planning to get to goal_pos"""
goals = []
valid_positions = self.mdp.get_valid_player_positions()
for d in Direction.ALL_DIRECTIONS:
adjacent_pos = Action.move_in_direction(goal_pos, d)
if adjacent_pos in valid_positions:
goal_orientation = Direction.OPPOSITE_DIRECTIONS[d]
motion_goal = (adjacent_pos, goal_orientation)
goals.append(motion_goal)
return goals
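# Illustrative usage sketch (not part of the original module). It assumes the
# usual overcooked_ai_py helpers for building a layout
# (OvercookedGridworld.from_layout_name, get_serving_locations); the layout
# name, the start state and the function name are placeholders made up for
# this example.
def _example_motion_planner_query():
    mdp = OvercookedGridworld.from_layout_name("cramped_room")
    mp = MotionPlanner(mdp, counter_goals=[])
    start_pos_and_or = ((1, 1), Direction.NORTH)
    # minimum number of timesteps to reach the closest serving location and interact
    return mp.min_cost_to_feature(start_pos_and_or, mdp.get_serving_locations())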
class JointMotionPlanner(object):
    """A planner that computes optimal plans for two agents to
    arrive at goal positions and orientations in an OvercookedGridworld.
Args:
mdp (OvercookedGridworld): gridworld of interest
"""
def __init__(self, mdp, params, debug=False):
self.mdp = mdp
# Whether starting orientations should be accounted for
# when solving all motion problems
# (increases number of plans by a factor of 4)
# but removes additional fudge factor <= 1 for each
# joint motion plan
self.debug = debug
self.start_orientations = params["start_orientations"]
# Enable both agents to have the same motion goal
self.same_motion_goals = params["same_motion_goals"]
# Single agent motion planner
self.motion_planner = MotionPlanner(
mdp, counter_goals=params["counter_goals"]
)
# Graph problem that returns optimal paths from
# starting positions to goal positions (without
# accounting for orientations)
self.joint_graph_problem = self._joint_graph_from_grid()
self.all_plans = self._populate_all_plans()
def get_low_level_action_plan(self, start_jm_state, goal_jm_state):
"""
Returns pre-computed plan from initial joint motion state
to a goal joint motion state.
Args:
start_jm_state (tuple): starting pos & orients ((pos1, or1), (pos2, or2))
goal_jm_state (tuple): goal pos & orients ((pos1, or1), (pos2, or2))
Returns:
joint_action_plan (list): joint actions to be executed to reach end_jm_state
end_jm_state (tuple): the pair of (pos, or) tuples corresponding
to the ending timestep (this will usually be different from
                goal_jm_state, as one agent will end before the other).
plan_lengths (tuple): lengths for each agent's plan
"""
assert self.is_valid_joint_motion_pair(
start_jm_state, goal_jm_state
), "start: {} \t end: {} was not a valid motion goal pair".format(
start_jm_state, goal_jm_state
)
if self.start_orientations:
plan_key = (start_jm_state, goal_jm_state)
else:
starting_positions = tuple(
player_pos_and_or[0] for player_pos_and_or in start_jm_state
)
goal_positions = tuple(
player_pos_and_or[0] for player_pos_and_or in goal_jm_state
)
# If beginning positions are equal to end positions, the pre-stored
# plan (not dependent on initial positions) will likely return a
# wrong answer, so we compute it from scratch.
#
# This is because we only compute plans with starting orientations
# (North, North), so if one of the two agents starts at location X
            # with orientation East, its goal is to get to location X with
# orientation North. The precomputed plan will just tell that agent
# that it is already at the goal, so no actions (or just 'interact')
# are necessary.
#
# We also compute the plan for any shared motion goal with SAFE_RUN,
# as there are some minor edge cases that could not be accounted for
# but I expect should not make a difference in nearly all scenarios
if any(
[s == g for s, g in zip(starting_positions, goal_positions)]
) or (SAFE_RUN and goal_positions[0] == goal_positions[1]):
return self._obtain_plan(start_jm_state, goal_jm_state)
dummy_orientation = Direction.NORTH
dummy_start_jm_state = tuple(
(pos, dummy_orientation) for pos in starting_positions
)
plan_key = (dummy_start_jm_state, goal_jm_state)
if plan_key not in self.all_plans:
num_player = len(goal_jm_state)
return [], None, [np.inf] * num_player
joint_action_plan, end_jm_state, plan_lengths = self.all_plans[
plan_key
]
return joint_action_plan, end_jm_state, plan_lengths
def _populate_all_plans(self):
"""Pre-compute all valid plans"""
all_plans = {}
# Joint states are valid if players are not in same location
if self.start_orientations:
valid_joint_start_states = (
self.mdp.get_valid_joint_player_positions_and_orientations()
)
else:
valid_joint_start_states = (
self.mdp.get_valid_joint_player_positions()
)
valid_player_states = (
self.mdp.get_valid_player_positions_and_orientations()
)
possible_joint_goal_states = list(
itertools.product(valid_player_states, repeat=2)
)
valid_joint_goal_states = list(
filter(self.is_valid_joint_motion_goal, possible_joint_goal_states)
)
if self.debug:
print(
"Number of plans being pre-calculated: ",
len(valid_joint_start_states) * len(valid_joint_goal_states),
)
for joint_start_state, joint_goal_state in itertools.product(
valid_joint_start_states, valid_joint_goal_states
):
# If orientations not present, joint_start_state just includes positions.
if not self.start_orientations:
dummy_orientation = Direction.NORTH
joint_start_state = tuple(
(pos, dummy_orientation) for pos in joint_start_state
)
# If either start-end states are not connected, skip to next plan
if not self.is_valid_jm_start_goal_pair(
joint_start_state, joint_goal_state
):
continue
# Note: we might fail to get the plan, just due to the nature of the layouts
joint_action_list, end_statuses, plan_lengths = self._obtain_plan(
joint_start_state, joint_goal_state
)
if end_statuses is None:
continue
plan_key = (joint_start_state, joint_goal_state)
all_plans[plan_key] = (
joint_action_list,
end_statuses,
plan_lengths,
)
return all_plans
def is_valid_jm_start_goal_pair(self, joint_start_state, joint_goal_state):
"""Checks if the combination of joint start state and joint goal state is valid"""
if not self.is_valid_joint_motion_goal(joint_goal_state):
return False
check_valid_fn = self.motion_planner.is_valid_motion_start_goal_pair
return all(
[
check_valid_fn(joint_start_state[i], joint_goal_state[i])
for i in range(2)
]
)
def _obtain_plan(self, joint_start_state, joint_goal_state):
"""Either use motion planner or actually compute a joint plan"""
# Try using MotionPlanner plans and join them together
(
action_plans,
pos_and_or_paths,
plan_lengths,
) = self._get_plans_from_single_planner(
joint_start_state, joint_goal_state
)
# Check if individual plans conflict
have_conflict = self.plans_have_conflict(
joint_start_state, joint_goal_state, pos_and_or_paths, plan_lengths
)
# If there is no conflict, the joint plan computed by joining single agent MotionPlanner plans is optimal
if not have_conflict:
(
joint_action_plan,
end_pos_and_orientations,
) = self._join_single_agent_action_plans(
joint_start_state,
action_plans,
pos_and_or_paths,
min(plan_lengths),
)
return joint_action_plan, end_pos_and_orientations, plan_lengths
# If there is a conflict in the single motion plan and the agents have the same goal,
        # the graph problem can't be used either, as it can't handle the same goal state: we manually
        # compute the best way to handle the conflict
elif self._agents_are_in_same_position(joint_goal_state):
(
joint_action_plan,
end_pos_and_orientations,
plan_lengths,
) = self._handle_path_conflict_with_same_goal(
joint_start_state,
joint_goal_state,
action_plans,
pos_and_or_paths,
)
return joint_action_plan, end_pos_and_orientations, plan_lengths
        # If there is a conflict and the agents have different goals, we can solve the joint graph problem
return self._compute_plan_from_joint_graph(
joint_start_state, joint_goal_state
)
def _get_plans_from_single_planner(
self, joint_start_state, joint_goal_state
):
"""
Get individual action plans for each agent from the MotionPlanner to get each agent
independently to their goal state. NOTE: these plans might conflict
"""
single_agent_motion_plans = [
self.motion_planner.get_plan(start, goal)
for start, goal in zip(joint_start_state, joint_goal_state)
]
action_plans, pos_and_or_paths = [], []
for action_plan, pos_and_or_path, _ in single_agent_motion_plans:
action_plans.append(action_plan)
pos_and_or_paths.append(pos_and_or_path)
plan_lengths = tuple(len(p) for p in action_plans)
assert all(
[plan_lengths[i] == len(pos_and_or_paths[i]) for i in range(2)]
)
return action_plans, pos_and_or_paths, plan_lengths
def plans_have_conflict(
self,
joint_start_state,
joint_goal_state,
pos_and_or_paths,
plan_lengths,
):
"""Check if the sequence of pos_and_or_paths for the two agents conflict"""
min_length = min(plan_lengths)
prev_positions = tuple(s[0] for s in joint_start_state)
for t in range(min_length):
curr_pos_or0, curr_pos_or1 = (
pos_and_or_paths[0][t],
pos_and_or_paths[1][t],
)
curr_positions = (curr_pos_or0[0], curr_pos_or1[0])
if self.mdp.is_transition_collision(
prev_positions, curr_positions
):
return True
prev_positions = curr_positions
return False
def _join_single_agent_action_plans(
self, joint_start_state, action_plans, pos_and_or_paths, finishing_time
):
"""Returns the joint action plan and end joint state obtained by joining the individual action plans"""
assert finishing_time > 0
end_joint_state = (
pos_and_or_paths[0][finishing_time - 1],
pos_and_or_paths[1][finishing_time - 1],
)
joint_action_plan = list(
zip(
*[
action_plans[0][:finishing_time],
action_plans[1][:finishing_time],
]
)
)
return joint_action_plan, end_joint_state
def _handle_path_conflict_with_same_goal(
self,
joint_start_state,
joint_goal_state,
action_plans,
pos_and_or_paths,
    ):
        """Assumes that the optimal path, in case two agents have the same goal and their paths conflict,
        is for one of the agents to wait. Checks the resulting plans when either agent waits, and selects the
        one with the shortest cost."""
(
joint_plan0,
end_pos_and_or0,
plan_lengths0,
) = self._handle_conflict_with_same_goal_idx(
joint_start_state,
joint_goal_state,
action_plans,
pos_and_or_paths,
wait_agent_idx=0,
)
(
joint_plan1,
end_pos_and_or1,
plan_lengths1,
) = self._handle_conflict_with_same_goal_idx(
joint_start_state,
joint_goal_state,
action_plans,
pos_and_or_paths,
wait_agent_idx=1,
)
assert any([joint_plan0 is not None, joint_plan1 is not None])
best_plan_idx = np.argmin([min(plan_lengths0), min(plan_lengths1)])
solutions = [
(joint_plan0, end_pos_and_or0, plan_lengths0),
(joint_plan1, end_pos_and_or1, plan_lengths1),
]
return solutions[best_plan_idx]
def _handle_conflict_with_same_goal_idx(
self,
joint_start_state,
joint_goal_state,
action_plans,
pos_and_or_paths,
wait_agent_idx,
):
"""
        Determines the best joint plan when, whenever there is a conflict between the two agents' actions,
the agent with index `wait_agent_idx` waits one turn.
If the agent that is assigned to wait is "in front" of the non-waiting agent, this could result
in an endless conflict. In this case, we return infinite finishing times.
"""
idx0, idx1 = 0, 0
prev_positions = [
start_pos_and_or[0] for start_pos_and_or in joint_start_state
]
curr_pos_or0, curr_pos_or1 = joint_start_state
agent0_plan_original, agent1_plan_original = action_plans
joint_plan = []
# While either agent hasn't finished their plan
while idx0 != len(agent0_plan_original) and idx1 != len(
agent1_plan_original
):
next_pos_or0, next_pos_or1 = (
pos_and_or_paths[0][idx0],
pos_and_or_paths[1][idx1],
)
next_positions = (next_pos_or0[0], next_pos_or1[0])
# If agents collide, let the waiting agent wait and the non-waiting
# agent take a step
if self.mdp.is_transition_collision(
prev_positions, next_positions
):
if wait_agent_idx == 0:
curr_pos_or0 = (
curr_pos_or0 # Agent 0 will wait, stays the same
)
curr_pos_or1 = next_pos_or1
curr_joint_action = [
Action.STAY,
agent1_plan_original[idx1],
]
idx1 += 1
elif wait_agent_idx == 1:
curr_pos_or0 = next_pos_or0
curr_pos_or1 = (
curr_pos_or1 # Agent 1 will wait, stays the same
)
curr_joint_action = [
agent0_plan_original[idx0],
Action.STAY,
]
idx0 += 1
curr_positions = (curr_pos_or0[0], curr_pos_or1[0])
# If one agent waiting causes other to crash into it, return None
if self._agents_are_in_same_position(
(curr_pos_or0, curr_pos_or1)
):
return None, None, [np.Inf, np.Inf]
else:
curr_pos_or0, curr_pos_or1 = next_pos_or0, next_pos_or1
curr_positions = next_positions
curr_joint_action = [
agent0_plan_original[idx0],
agent1_plan_original[idx1],
]
idx0 += 1
idx1 += 1
joint_plan.append(curr_joint_action)
prev_positions = curr_positions
assert idx0 != idx1, "No conflict found"
end_pos_and_or = (curr_pos_or0, curr_pos_or1)
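        # Added note: the waiting agent's finishing time is reported as infinite so
        # that the caller, which takes the min over the pair, only considers the
        # time of the agent that actually kept moving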
finishing_times = (
(np.Inf, idx1) if wait_agent_idx == 0 else (idx0, np.Inf)
)
return joint_plan, end_pos_and_or, finishing_times
def is_valid_joint_motion_goal(self, joint_goal_state):
"""Checks whether the goal joint positions and orientations are a valid goal"""
if not self.same_motion_goals and self._agents_are_in_same_position(
joint_goal_state
):
return False
multi_cc_map = (
len(self.motion_planner.graph_problem.connected_components) > 1
)
players_in_same_cc = self.motion_planner.graph_problem.are_in_same_cc(
joint_goal_state[0], joint_goal_state[1]
)
if multi_cc_map and players_in_same_cc:
return False
return all(
[
self.motion_planner.is_valid_motion_goal(player_state)
for player_state in joint_goal_state
]
)
def is_valid_joint_motion_pair(self, joint_start_state, joint_goal_state):
if not self.is_valid_joint_motion_goal(joint_goal_state):
return False
return all(
[
self.motion_planner.is_valid_motion_start_goal_pair(
joint_start_state[i], joint_goal_state[i]
)
for i in range(2)
]
)
def _agents_are_in_same_position(self, joint_motion_state):
agent_positions = [
player_pos_and_or[0] for player_pos_and_or in joint_motion_state
]
return len(agent_positions) != len(set(agent_positions))
def _compute_plan_from_joint_graph(
self, joint_start_state, joint_goal_state
):
"""Compute joint action plan for two agents to achieve a
certain position and orientation with the joint motion graph
Args:
joint_start_state: pair of start (pos, or)
joint_goal_state: pair of goal (pos, or)
"""
assert self.is_valid_joint_motion_pair(
joint_start_state, joint_goal_state
), joint_goal_state
# Solve shortest-path graph problem
start_positions = list(zip(*joint_start_state))[0]
goal_positions = list(zip(*joint_goal_state))[0]
try:
joint_positions_node_path = self.joint_graph_problem.get_node_path(
start_positions, goal_positions
)[1:]
except NotConnectedError:
# The cost will be infinite if there is no path
num_player = len(goal_positions)
return [], None, [np.inf] * num_player
(
joint_actions_list,
end_pos_and_orientations,
finishing_times,
) = self.joint_action_plan_from_positions(
joint_positions_node_path, joint_start_state, joint_goal_state
)
return joint_actions_list, end_pos_and_orientations, finishing_times
def joint_action_plan_from_positions(
self, joint_positions, joint_start_state, joint_goal_state
):
"""
        Finds an action plan and its cost, such that at least one of the agent goal states is achieved
Args:
joint_positions (list): list of joint positions to be reached after the starting position
(does not include starting position, but includes ending position)
joint_start_state (tuple): pair of starting positions and orientations
joint_goal_state (tuple): pair of goal positions and orientations
"""
action_plans = []
for i in range(2):
agent_position_sequence = [
joint_position[i] for joint_position in joint_positions
]
action_plan, _, _ = self.motion_planner.action_plan_from_positions(
agent_position_sequence,
joint_start_state[i],
joint_goal_state[i],
)
action_plans.append(action_plan)
finishing_times = tuple(len(plan) for plan in action_plans)
trimmed_action_plans = self._fix_plan_lengths(action_plans)
joint_action_plan = list(zip(*trimmed_action_plans))
end_pos_and_orientations = self._rollout_end_pos_and_or(
joint_start_state, joint_action_plan
)
return joint_action_plan, end_pos_and_orientations, finishing_times
def _fix_plan_lengths(self, plans):
"""Truncates the longer plan when shorter plan ends"""
plans = list(plans)
finishing_times = [len(p) for p in plans]
delta_length = max(finishing_times) - min(finishing_times)
if delta_length != 0:
index_long_plan = np.argmax(finishing_times)
plans[index_long_plan] = plans[index_long_plan][
: min(finishing_times)
]
return plans
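    # Illustrative sketch (added, not in the original source): with
    #   plans = [[Action.NORTH, Action.NORTH], [Action.SOUTH] * 5]
    # the finishing times are (2, 5), so the longer plan is truncated and the
    # method returns [[NORTH, NORTH], [SOUTH, SOUTH]]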
def _rollout_end_pos_and_or(self, joint_start_state, joint_action_plan):
"""Execute plan in environment to determine ending positions and orientations"""
# Assumes that final pos and orientations only depend on initial ones
# (not on objects and other aspects of state).
# Also assumes can't deliver more than two orders in one motion goal
# (otherwise Environment will terminate)
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
dummy_state = OvercookedState.from_players_pos_and_or(
joint_start_state, all_orders=self.mdp.start_all_orders
)
env = OvercookedEnv.from_mdp(
self.mdp, horizon=200, info_level=int(self.debug)
) # Plans should be shorter than 200 timesteps, or something is likely wrong
successor_state, is_done = env.execute_plan(
dummy_state, joint_action_plan
)
assert not is_done
return successor_state.players_pos_and_or
def _joint_graph_from_grid(self):
"""Creates a graph instance from the mdp instance. Each graph node encodes a pair of positions"""
state_decoder = {}
# Valid positions pairs, not including ones with both players in same spot
valid_joint_positions = self.mdp.get_valid_joint_player_positions()
for state_index, joint_pos in enumerate(valid_joint_positions):
state_decoder[state_index] = joint_pos
state_encoder = {v: k for k, v in state_decoder.items()}
num_graph_nodes = len(state_decoder)
adjacency_matrix = np.zeros((num_graph_nodes, num_graph_nodes))
for start_state_index, start_joint_positions in state_decoder.items():
for (
joint_action,
successor_jm_state,
) in self._get_valid_successor_joint_positions(
start_joint_positions
).items():
successor_node_index = state_encoder[successor_jm_state]
this_action_cost = self._graph_joint_action_cost(joint_action)
current_cost = adjacency_matrix[start_state_index][
successor_node_index
]
if current_cost == 0 or this_action_cost < current_cost:
adjacency_matrix[start_state_index][
successor_node_index
] = this_action_cost
return Graph(adjacency_matrix, state_encoder, state_decoder)
def _graph_joint_action_cost(self, joint_action):
"""The cost used in the graph shortest-path problem for a certain joint-action"""
num_of_non_stay_actions = len(
[a for a in joint_action if a != Action.STAY]
)
# NOTE: Removing the possibility of having 0 cost joint_actions
if num_of_non_stay_actions == 0:
return 1
return num_of_non_stay_actions
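    # Worked example (added for clarity), writing Action.STAY as S and any motion
    # action as M:
    #   _graph_joint_action_cost((S, S)) == 1   (waiting is never free)
    #   _graph_joint_action_cost((M, S)) == 1
    #   _graph_joint_action_cost((M, M)) == 2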
def _get_valid_successor_joint_positions(self, starting_positions):
"""Get all joint positions that can be reached by a joint action.
NOTE: this DOES NOT include joint positions with superimposed agents.
"""
successor_joint_positions = {}
joint_motion_actions = itertools.product(
Action.MOTION_ACTIONS, Action.MOTION_ACTIONS
)
# Under assumption that orientation doesn't matter
dummy_orientation = Direction.NORTH
dummy_player_states = [
PlayerState(pos, dummy_orientation) for pos in starting_positions
]
for joint_action in joint_motion_actions:
new_positions, _ = self.mdp.compute_new_positions_and_orientations(
dummy_player_states, joint_action
)
successor_joint_positions[joint_action] = new_positions
return successor_joint_positions
def derive_state(self, start_state, end_pos_and_ors, action_plans):
"""
Given a start state, end position and orientations, and an action plan, recovers
the resulting state without executing the entire plan.
"""
if len(action_plans) == 0:
return start_state
end_state = start_state.deepcopy()
end_players = []
for player, end_pos_and_or in zip(end_state.players, end_pos_and_ors):
new_player = player.deepcopy()
position, orientation = end_pos_and_or
new_player.update_pos_and_or(position, orientation)
end_players.append(new_player)
end_state.players = tuple(end_players)
# Resolve environment effects for t - 1 turns
plan_length = len(action_plans)
assert plan_length > 0
for _ in range(plan_length - 1):
self.mdp.step_environment_effects(end_state)
# Interacts
last_joint_action = tuple(
a if a == Action.INTERACT else Action.STAY
for a in action_plans[-1]
)
events_dict = {
k: [[] for _ in range(self.mdp.num_players)] for k in EVENT_TYPES
}
self.mdp.resolve_interacts(end_state, last_joint_action, events_dict)
self.mdp.resolve_movement(end_state, last_joint_action)
self.mdp.step_environment_effects(end_state)
return end_state
class MediumLevelActionManager(object):
"""
Manager for medium level actions (specific joint motion goals).
Determines available medium level actions for each state.
Args:
mdp (OvercookedGridWorld): gridworld of interest
mlam_params (dictionary): parameters for the medium level action manager
"""
def __init__(self, mdp, mlam_params):
self.mdp = mdp
self.params = mlam_params
self.wait_allowed = mlam_params["wait_allowed"]
self.counter_drop = mlam_params["counter_drop"]
self.counter_pickup = mlam_params["counter_pickup"]
self.joint_motion_planner = JointMotionPlanner(mdp, mlam_params)
self.motion_planner = self.joint_motion_planner.motion_planner
def save_to_file(self, filename):
with open(filename, "wb") as output:
pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
@staticmethod
def from_file(filename):
return load_saved_action_manager(filename)
@staticmethod
def from_pickle_or_compute(
mdp, mlam_params, custom_filename=None, force_compute=False, info=False
):
assert isinstance(mdp, OvercookedGridworld)
filename = (
custom_filename
if custom_filename is not None
else mdp.layout_name + "_am.pkl"
)
if force_compute:
return MediumLevelActionManager.compute_mlam(
filename, mdp, mlam_params, info=info
)
try:
mlam = MediumLevelActionManager.from_file(filename)
if mlam.params != mlam_params or mlam.mdp != mdp:
if info:
print(
"medium level action manager with different params or mdp found, computing from scratch"
)
return MediumLevelActionManager.compute_mlam(
filename, mdp, mlam_params, info=info
)
except (
FileNotFoundError,
ModuleNotFoundError,
EOFError,
AttributeError,
) as e:
if info:
print("Recomputing planner due to:", e)
return MediumLevelActionManager.compute_mlam(
filename, mdp, mlam_params, info=info
)
if info:
print(
"Loaded MediumLevelActionManager from {}".format(
os.path.join(PLANNERS_DIR, filename)
)
)
return mlam
@staticmethod
def compute_mlam(filename, mdp, mlam_params, info=False):
final_filepath = os.path.join(PLANNERS_DIR, filename)
if info:
print(
"Computing MediumLevelActionManager to be saved in {}".format(
final_filepath
)
)
start_time = time.time()
mlam = MediumLevelActionManager(mdp, mlam_params=mlam_params)
if info:
print(
"It took {} seconds to create mlam".format(
time.time() - start_time
)
)
mlam.save_to_file(final_filepath)
return mlam
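    # Hypothetical usage sketch (added; the layout name, parameter values and the
    # OvercookedGridworld constructor used here are illustrative assumptions, not
    # taken from this file). The params dict must at least provide the keys read
    # in __init__ ("wait_allowed", "counter_drop", "counter_pickup"), plus whatever
    # JointMotionPlanner expects:
    #
    #   mdp = OvercookedGridworld.from_layout_name("cramped_room")  # assumed constructor
    #   mlam_params = {
    #       "wait_allowed": False,
    #       "counter_drop": [],
    #       "counter_pickup": [],
    #       # ... any additional keys required by JointMotionPlanner ...
    #   }
    #   mlam = MediumLevelActionManager.from_pickle_or_compute(
    #       mdp, mlam_params, force_compute=True, info=True
    #   )
    #   joint_goals = mlam.joint_ml_actions(some_overcooked_state)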
def joint_ml_actions(self, state):
"""Determine all possible joint medium level actions for a certain state"""
agent1_actions, agent2_actions = tuple(
self.get_medium_level_actions(state, player)
for player in state.players
)
joint_ml_actions = list(
itertools.product(agent1_actions, agent2_actions)
)
# ml actions are nothing but specific joint motion goals
valid_joint_ml_actions = list(
filter(
lambda a: self.is_valid_ml_action(state, a), joint_ml_actions
)
)
# HACK: Could cause things to break.
# Necessary to prevent states without successors (due to no counters being allowed and no wait actions)
# causing A* to not find a solution
if len(valid_joint_ml_actions) == 0:
agent1_actions, agent2_actions = tuple(
self.get_medium_level_actions(
state, player, waiting_substitute=True
)
for player in state.players
)
joint_ml_actions = list(
itertools.product(agent1_actions, agent2_actions)
)
valid_joint_ml_actions = list(
filter(
lambda a: self.is_valid_ml_action(state, a),
joint_ml_actions,
)
)
if len(valid_joint_ml_actions) == 0:
print(
"WARNING: Found state without valid actions even after adding waiting substitute actions. State: {}".format(
state
)
)
return valid_joint_ml_actions
def is_valid_ml_action(self, state, ml_action):
return self.joint_motion_planner.is_valid_jm_start_goal_pair(
state.players_pos_and_or, ml_action
)
def get_medium_level_actions(
self, state, player, waiting_substitute=False
):
"""
Determine valid medium level actions for a player.
Args:
state (OvercookedState): current state
player (PlayerState): the player's current state
waiting_substitute (bool): add a substitute action that takes the place of
a waiting action (going to closest feature)
Returns:
player_actions (list): possible motion goals (pairs of goal positions and orientations)
"""
player_actions = []
counter_pickup_objects = self.mdp.get_counter_objects_dict(
state, self.counter_pickup
)
if not player.has_object():
onion_pickup = self.pickup_onion_actions(counter_pickup_objects)
tomato_pickup = self.pickup_tomato_actions(counter_pickup_objects)
dish_pickup = self.pickup_dish_actions(counter_pickup_objects)
soup_pickup = self.pickup_counter_soup_actions(
counter_pickup_objects
)
pot_states_dict = self.mdp.get_pot_states(state)
start_cooking = self.start_cooking_actions(pot_states_dict)
player_actions.extend(
onion_pickup
+ tomato_pickup
+ dish_pickup
+ soup_pickup
+ start_cooking
)
else:
player_object = player.get_object()
pot_states_dict = self.mdp.get_pot_states(state)
# No matter the object, we can place it on a counter
if len(self.counter_drop) > 0:
player_actions.extend(self.place_obj_on_counter_actions(state))
if player_object.name == "soup":
player_actions.extend(self.deliver_soup_actions())
elif player_object.name == "onion":
player_actions.extend(
self.put_onion_in_pot_actions(pot_states_dict)
)
elif player_object.name == "tomato":
player_actions.extend(
self.put_tomato_in_pot_actions(pot_states_dict)
)
elif player_object.name == "dish":
                # Currently considering all pots (only_nearly_ready=False), not just
                # the nearly-ready ones. NOTE: restricting to eligible pots would reduce
                # computation, but working out which pots are eligible is itself costly
player_actions.extend(
self.pickup_soup_with_dish_actions(
pot_states_dict, only_nearly_ready=False
)
)
else:
raise ValueError("Unrecognized object")
if self.wait_allowed:
player_actions.extend(self.wait_actions(player))
if waiting_substitute:
# Trying to mimic a "WAIT" action by adding the closest allowed feature to the avaliable actions
# This is because motion plans that aren't facing terrain features (non counter, non empty spots)
# are not considered valid
player_actions.extend(self.go_to_closest_feature_actions(player))
is_valid_goal_given_start = (
lambda goal: self.motion_planner.is_valid_motion_start_goal_pair(
player.pos_and_or, goal
)
)
player_actions = list(
filter(is_valid_goal_given_start, player_actions)
)
return player_actions
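    # Added note: each medium level action returned above is a single motion goal,
    # i.e. a (position, orientation) pair (see wait_actions below, which builds one
    # from the player's current position and orientation); joint_ml_actions pairs
    # one such goal per player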
def pickup_onion_actions(self, counter_objects, only_use_dispensers=False):
"""If only_use_dispensers is True, then only take onions from the dispensers"""
onion_pickup_locations = self.mdp.get_onion_dispenser_locations()
if not only_use_dispensers:
onion_pickup_locations += counter_objects["onion"]
return self._get_ml_actions_for_positions(onion_pickup_locations)
def pickup_tomato_actions(self, counter_objects):
tomato_dispenser_locations = self.mdp.get_tomato_dispenser_locations()
tomato_pickup_locations = (
tomato_dispenser_locations + counter_objects["tomato"]
)
return self._get_ml_actions_for_positions(tomato_pickup_locations)
def pickup_dish_actions(self, counter_objects, only_use_dispensers=False):
"""If only_use_dispensers is True, then only take dishes from the dispensers"""
dish_pickup_locations = self.mdp.get_dish_dispenser_locations()
if not only_use_dispensers:
dish_pickup_locations += counter_objects["dish"]
return self._get_ml_actions_for_positions(dish_pickup_locations)
def pickup_counter_soup_actions(self, counter_objects):
soup_pickup_locations = counter_objects["soup"]
return self._get_ml_actions_for_positions(soup_pickup_locations)
def start_cooking_actions(self, pot_states_dict):
"""This is for start cooking a pot that is cookable"""
cookable_pots_location = self.mdp.get_partially_full_pots(
pot_states_dict
) + self.mdp.get_full_but_not_cooking_pots(pot_states_dict)
return self._get_ml_actions_for_positions(cookable_pots_location)
def place_obj_on_counter_actions(self, state):
all_empty_counters = set(self.mdp.get_empty_counter_locations(state))
valid_empty_counters = [
c_pos for c_pos in self.counter_drop if c_pos in all_empty_counters
]
return self._get_ml_actions_for_positions(valid_empty_counters)
def deliver_soup_actions(self):
serving_locations = self.mdp.get_serving_locations()
return self._get_ml_actions_for_positions(serving_locations)
def put_onion_in_pot_actions(self, pot_states_dict):
partially_full_onion_pots = self.mdp.get_partially_full_pots(
pot_states_dict
)
fillable_pots = partially_full_onion_pots + pot_states_dict["empty"]
return self._get_ml_actions_for_positions(fillable_pots)
def put_tomato_in_pot_actions(self, pot_states_dict):
        partially_full_pots = self.mdp.get_partially_full_pots(
            pot_states_dict
        )
        fillable_pots = partially_full_pots + pot_states_dict["empty"]
return self._get_ml_actions_for_positions(fillable_pots)
def pickup_soup_with_dish_actions(
self, pot_states_dict, only_nearly_ready=False
):
ready_pot_locations = pot_states_dict["ready"]
nearly_ready_pot_locations = pot_states_dict["cooking"]
if not only_nearly_ready:
partially_full_pots = self.mdp.get_partially_full_pots(
pot_states_dict
)
nearly_ready_pot_locations = (
nearly_ready_pot_locations
+ pot_states_dict["empty"]
+ partially_full_pots
)
return self._get_ml_actions_for_positions(
ready_pot_locations + nearly_ready_pot_locations
)
def go_to_closest_feature_actions(self, player):
feature_locations = (
self.mdp.get_onion_dispenser_locations()
+ self.mdp.get_tomato_dispenser_locations()
+ self.mdp.get_pot_locations()
+ self.mdp.get_dish_dispenser_locations()
)
closest_feature_pos = self.motion_planner.min_cost_to_feature(
player.pos_and_or, feature_locations, with_argmin=True
)[1]
return self._get_ml_actions_for_positions([closest_feature_pos])
def go_to_closest_feature_or_counter_to_goal(
self, goal_pos_and_or, goal_location
):
"""Instead of going to goal_pos_and_or, go to the closest feature or counter to this goal, that ISN'T the goal itself"""
valid_locations = (
self.mdp.get_onion_dispenser_locations()
+ self.mdp.get_tomato_dispenser_locations()
+ self.mdp.get_pot_locations()
+ self.mdp.get_dish_dispenser_locations()
+ self.counter_drop
)
valid_locations.remove(goal_location)
closest_non_goal_feature_pos = self.motion_planner.min_cost_to_feature(
goal_pos_and_or, valid_locations, with_argmin=True
)[1]
return self._get_ml_actions_for_positions(
[closest_non_goal_feature_pos]
)
def wait_actions(self, player):
waiting_motion_goal = (player.position, player.orientation)
return [waiting_motion_goal]
def _get_ml_actions_for_positions(self, positions_list):
"""Determine what are the ml actions (joint motion goals) for a list of positions
Args:
positions_list (list): list of target terrain feature positions
"""
possible_motion_goals = []
for pos in positions_list:
# All possible ways to reach the target feature
for (
motion_goal
) in self.joint_motion_planner.motion_planner.motion_goals_for_pos[
pos
]:
possible_motion_goals.append(motion_goal)
return possible_motion_goals
# # Deprecated, since agent-level dynamic planning is no longer used
# class MediumLevelPlanner(object):
# """
# A planner that computes optimal plans for two agents to deliver a certain number of dishes
# in an OvercookedGridworld using medium level actions (single motion goals) in the corresponding
# A* search problem.
# """
#
# def __init__(self, mdp, mlp_params, ml_action_manager=None):
# self.mdp = mdp
# self.params = mlp_params
# self.ml_action_manager = ml_action_manager if ml_action_manager else MediumLevelActionManager(mdp, mlp_params)
# self.jmp = self.ml_action_manager.joint_motion_planner
# self.mp = self.jmp.motion_planner
#
# @staticmethod
# def from_action_manager_file(filename):
# mlp_action_manager = load_saved_action_manager(filename)
# mdp = mlp_action_manager.mdp
# params = mlp_action_manager.params
# return MediumLevelPlanner(mdp, params, mlp_action_manager)
#
# @staticmethod
# def from_pickle_or_compute(mdp, mlp_params, custom_filename=None, force_compute=False, info=True):
# assert isinstance(mdp, OvercookedGridworld)
#
# filename = custom_filename if custom_filename is not None else mdp.layout_name + "_am.pkl"
#
# if force_compute:
# return MediumLevelPlanner.compute_mlp(filename, mdp, mlp_params)
#
# try:
# mlp = MediumLevelPlanner.from_action_manager_file(filename)
#
# if mlp.ml_action_manager.params != mlp_params or mlp.mdp != mdp:
# print("Mlp with different params or mdp found, computing from scratch")
# return MediumLevelPlanner.compute_mlp(filename, mdp, mlp_params)
#
# except (FileNotFoundError, ModuleNotFoundError, EOFError, AttributeError) as e:
# print("Recomputing planner due to:", e)
# return MediumLevelPlanner.compute_mlp(filename, mdp, mlp_params)
#
# if info:
# print("Loaded MediumLevelPlanner from {}".format(os.path.join(PLANNERS_DIR, filename)))
# return mlp
#
# @staticmethod
# def compute_mlp(filename, mdp, mlp_params):
# final_filepath = os.path.join(PLANNERS_DIR, filename)
# print("Computing MediumLevelPlanner to be saved in {}".format(final_filepath))
# start_time = time.time()
# mlp = MediumLevelPlanner(mdp, mlp_params=mlp_params)
# print("It took {} seconds to create mlp".format(time.time() - start_time))
# mlp.ml_action_manager.save_to_file(final_filepath)
# return mlp
#
# Deprecated.
# def get_successor_states(self, start_state):
# """Successor states for medium-level actions are defined as
# the first state in the corresponding motion plan in which
# one of the two agents' subgoals is satisfied.
#
# Returns: list of
# joint_motion_goal: ((pos1, or1), (pos2, or2)) specifying the
# motion plan goal for both agents
#
# successor_state: OvercookedState corresponding to state
# arrived at after executing part of the motion plan
# (until one of the agents arrives at his goal status)
#
# plan_length: Time passed until arrival to the successor state
# """
# if self.mdp.is_terminal(start_state):
# return []
#
# start_jm_state = start_state.players_pos_and_or
# successor_states = []
# for goal_jm_state in self.ml_action_manager.joint_ml_actions(start_state):
# joint_motion_action_plans, end_pos_and_ors, plan_costs = self.jmp.get_low_level_action_plan(start_jm_state, goal_jm_state)
# end_state = self.jmp.derive_state(start_state, end_pos_and_ors, joint_motion_action_plans)
#
# if SAFE_RUN:
# from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
# assert end_pos_and_ors[0] == goal_jm_state[0] or end_pos_and_ors[1] == goal_jm_state[1]
# s_prime, _ = OvercookedEnv.execute_plan(self.mdp, start_state, joint_motion_action_plans, display=False)
# assert end_state == s_prime, [self.mdp.state_string(s_prime), self.mdp.state_string(end_state)]
#
# successor_states.append((goal_jm_state, end_state, min(plan_costs)))
# return successor_states
# Deprecated.
# def get_successor_states_fixed_other(self, start_state, other_agent, other_agent_idx):
# """
# Get the successor states of a given start state, assuming that the other agent is fixed and will act according to the passed in model
# """
# if self.mdp.is_terminal(start_state):
# return []
#
# player = start_state.players[1 - other_agent_idx]
# ml_actions = self.ml_action_manager.get_medium_level_actions(start_state, player)
#
# if len(ml_actions) == 0:
# ml_actions = self.ml_action_manager.get_medium_level_actions(start_state, player, waiting_substitute=True)
#
# successor_high_level_states = []
# for ml_action in ml_actions:
# action_plan, end_state, cost = self.get_embedded_low_level_action_plan(start_state, ml_action, other_agent, other_agent_idx)
#
# if not self.mdp.is_terminal(end_state):
# # Adding interact action and deriving last state
# other_agent_action, _ = other_agent.action(end_state)
# last_joint_action = (Action.INTERACT, other_agent_action) if other_agent_idx == 1 else (other_agent_action, Action.INTERACT)
# action_plan = action_plan + (last_joint_action,)
# cost = cost + 1
#
# end_state, _ = self.embedded_mdp_step(end_state, Action.INTERACT, other_agent_action, other_agent.agent_index)
#
# successor_high_level_states.append((action_plan, end_state, cost))
# return successor_high_level_states
# Deprecated. because no longer used
# def check_heuristic_consistency(self, curr_heuristic_val, prev_heuristic_val, actual_edge_cost):
# delta_h = curr_heuristic_val - prev_heuristic_val
# assert actual_edge_cost >= delta_h, \
# "Heuristic was not consistent. \n Prev h: {}, Curr h: {}, Actual cost: {}, Δh: {}" \
# .format(prev_heuristic_val, curr_heuristic_val, actual_edge_cost, delta_h)
#
# def embedded_mdp_succ_fn(self, state, other_agent):
# other_agent_action, _ = other_agent.action(state)
#
# successors = []
# for a in Action.ALL_ACTIONS:
# successor_state, joint_action = self.embedded_mdp_step(state, a, other_agent_action, other_agent.agent_index)
# cost = 1
# successors.append((joint_action, successor_state, cost))
# return successors
#
# def embedded_mdp_step(self, state, action, other_agent_action, other_agent_index):
# if other_agent_index == 0:
# joint_action = (other_agent_action, action)
# else:
# joint_action = (action, other_agent_action)
# if not self.mdp.is_terminal(state):
# results, _ = self.mdp.get_state_transition(state, joint_action)
# successor_state = results
# else:
# print("Tried to find successor of terminal")
# assert False, "state {} \t action {}".format(state, action)
# successor_state = state
# return successor_state, joint_action
# Deprecated due to Heuristic
# def get_low_level_action_plan(self, start_state, h_fn, delivery_horizon=4, debug=False, goal_info=False):
# """
# Get a plan of joint-actions executable in the environment that will lead to a goal number of deliveries
#
# Args:
# state (OvercookedState): starting state
# h_fn: heuristic function
#
# Returns:
# full_joint_action_plan (list): joint actions to reach goal
# """
# start_state = start_state.deepcopy()
# ml_plan, cost = self.get_ml_plan(start_state, h_fn, delivery_horizon=delivery_horizon, debug=debug)
#
# full_joint_action_plan = self.get_low_level_plan_from_ml_plan(
# start_state, ml_plan, h_fn, debug=debug, goal_info=goal_info
# )
# assert cost == len(full_joint_action_plan), "A* cost {} but full joint action plan cost {}".format(cost, len(full_joint_action_plan))
#
# if debug: print("Found plan with cost {}".format(cost))
# return full_joint_action_plan
# Deprecated due to Heuristic
# def get_low_level_plan_from_ml_plan(self, start_state, ml_plan, heuristic_fn, debug=False, goal_info=False):
# t = 0
# full_joint_action_plan = []
# curr_state = start_state
# curr_motion_state = start_state.players_pos_and_or
# prev_h = heuristic_fn(start_state, t, debug=False)
#
# if len(ml_plan) > 0 and goal_info:
# print("First motion goal: ", ml_plan[0][0])
#
# if not clean and debug:
# print("Start state")
# OvercookedEnv.print_state(self.mdp, start_state)
#
# for joint_motion_goal, goal_state in ml_plan:
# joint_action_plan, end_motion_state, plan_costs = \
# self.ml_action_manager.joint_motion_planner.get_low_level_action_plan(curr_motion_state, joint_motion_goal)
# curr_plan_cost = min(plan_costs)
# full_joint_action_plan.extend(joint_action_plan)
# t += 1
#
# if not clean and debug:
# print(t)
# OvercookedEnv.print_state(self.mdp, goal_state)
#
# if not clean and SAFE_RUN:
# s_prime, _ = OvercookedEnv.execute_plan(self.mdp, curr_state, joint_action_plan)
# assert s_prime == goal_state
#
# curr_h = heuristic_fn(goal_state, t, debug=False)
# self.check_heuristic_consistency(curr_h, prev_h, curr_plan_cost)
# curr_motion_state, prev_h, curr_state = end_motion_state, curr_h, goal_state
# return full_joint_action_plan
# Deprecated due to Heuristic
# def get_ml_plan(self, start_state, h_fn, delivery_horizon=4, debug=False):
# """
# Solves A* Search problem to find optimal sequence of medium level actions
# to reach the goal number of deliveries
#
# Returns:
# ml_plan (list): plan not including starting state in form
# [(joint_action, successor_state), ..., (joint_action, goal_state)]
# cost (int): A* Search cost
# """
# start_state = start_state.deepcopy()
#
# expand_fn = lambda state: self.get_successor_states(state)
# goal_fn = lambda state: state.delivery_rew >= DELIVERY_REW_THRES
# heuristic_fn = lambda state: h_fn(state)
#
# search_problem = SearchTree(start_state, goal_fn, expand_fn, heuristic_fn, debug=debug)
# ml_plan, cost = search_problem.A_star_graph_search(info=True)
# return ml_plan[1:], cost
# Deprecated
# def get_embedded_low_level_action_plan(self, state, goal_pos_and_or, other_agent, other_agent_idx):
# """Find action plan for a specific motion goal with A* considering the other agent"""
# other_agent.set_agent_index(other_agent_idx)
# agent_idx = 1 - other_agent_idx
#
# expand_fn = lambda state: self.embedded_mdp_succ_fn(state, other_agent)
# # FIXME
# goal_fn = lambda state: state.players[agent_idx].pos_and_or == goal_pos_and_or or state.delivery_rew >= DELIVERY_REW_THRES
# heuristic_fn = lambda state: sum(pos_distance(state.players[agent_idx].position, goal_pos_and_or[0]))
#
# search_problem = SearchTree(state, goal_fn, expand_fn, heuristic_fn)
# state_action_plan, cost = search_problem.A_star_graph_search(info=False)
# action_plan, state_plan = zip(*state_action_plan)
# action_plan = action_plan[1:]
# end_state = state_plan[-1]
# return action_plan, end_state, cost
# Deprecated.
# class HighLevelAction:
# """A high level action is given by a set of subsequent motion goals"""
#
# def __init__(self, motion_goals):
# self.motion_goals = motion_goals
#
# def _check_valid(self):
# for goal in self.motion_goals:
# assert len(goal) == 2
# pos, orient = goal
# assert orient in Direction.ALL_DIRECTIONS
# assert type(pos) is tuple
# assert len(pos) == 2
#
# def __getitem__(self, i):
# """Get ith motion goal of the HL Action"""
# return self.motion_goals[i]
#
#
# class HighLevelActionManager(object):
# """
# Manager for high level actions. Determines available high level actions
# for each state and player.
# """
#
# def __init__(self, medium_level_planner):
# self.mdp = medium_level_planner.mdp
#
# self.wait_allowed = medium_level_planner.params['wait_allowed']
# self.counter_drop = medium_level_planner.params["counter_drop"]
# self.counter_pickup = medium_level_planner.params["counter_pickup"]
#
# self.mlp = medium_level_planner
# self.ml_action_manager = medium_level_planner.ml_action_manager
# self.mp = medium_level_planner.mp
#
# def joint_hl_actions(self, state):
# hl_actions_a0, hl_actions_a1 = tuple(self.get_high_level_actions(state, player) for player in state.players)
# joint_hl_actions = list(itertools.product(hl_actions_a0, hl_actions_a1))
#
# assert self.mlp.params["same_motion_goals"]
# valid_joint_hl_actions = joint_hl_actions
#
# if len(valid_joint_hl_actions) == 0:
# print("WARNING: found a state without high level successors")
# return valid_joint_hl_actions
#
# def get_high_level_actions(self, state, player):
# player_hl_actions = []
# counter_pickup_objects = self.mdp.get_counter_objects_dict(state, self.counter_pickup)
# if player.has_object():
# place_obj_ml_actions = self.ml_action_manager.get_medium_level_actions(state, player)
#
# # HACK to prevent some states not having successors due to lack of waiting actions
# if len(place_obj_ml_actions) == 0:
# place_obj_ml_actions = self.ml_action_manager.get_medium_level_actions(state, player, waiting_substitute=True)
#
# place_obj_hl_actions = [HighLevelAction([ml_action]) for ml_action in place_obj_ml_actions]
# player_hl_actions.extend(place_obj_hl_actions)
# else:
# pot_states_dict = self.mdp.get_pot_states(state)
# player_hl_actions.extend(self.get_onion_and_put_in_pot(state, counter_pickup_objects, pot_states_dict))
# player_hl_actions.extend(self.get_tomato_and_put_in_pot(state, counter_pickup_objects, pot_states_dict))
# player_hl_actions.extend(self.get_dish_and_soup_and_serve(state, counter_pickup_objects, pot_states_dict))
# player_hl_actions.extend(self.start_cooking(state, pot_states_dict))
# return player_hl_actions
#
# def get_dish_and_soup_and_serve(self, state, counter_objects, pot_states_dict):
# """Get all sequences of medium-level actions (hl actions) that involve a player getting a dish,
# going to a pot and picking up a soup, and delivering the soup."""
# dish_pickup_actions = self.ml_action_manager.pickup_dish_actions(counter_objects)
# pickup_soup_actions = self.ml_action_manager.pickup_soup_with_dish_actions(pot_states_dict)
# deliver_soup_actions = self.ml_action_manager.deliver_soup_actions()
# hl_level_actions = list(itertools.product(dish_pickup_actions, pickup_soup_actions, deliver_soup_actions))
# return [HighLevelAction(hl_action_list) for hl_action_list in hl_level_actions]
#
# def get_onion_and_put_in_pot(self, state, counter_objects, pot_states_dict):
# """Get all sequences of medium-level actions (hl actions) that involve a player getting an onion
# from a dispenser and placing it in a pot."""
# onion_pickup_actions = self.ml_action_manager.pickup_onion_actions(counter_objects)
# put_in_pot_actions = self.ml_action_manager.put_onion_in_pot_actions(pot_states_dict)
# hl_level_actions = list(itertools.product(onion_pickup_actions, put_in_pot_actions))
# return [HighLevelAction(hl_action_list) for hl_action_list in hl_level_actions]
#
# def get_tomato_and_put_in_pot(self, state, counter_objects, pot_states_dict):
# """Get all sequences of medium-level actions (hl actions) that involve a player getting an tomato
# from a dispenser and placing it in a pot."""
# tomato_pickup_actions = self.ml_action_manager.pickup_tomato_actions(counter_objects)
# put_in_pot_actions = self.ml_action_manager.put_tomato_in_pot_actions(pot_states_dict)
# hl_level_actions = list(itertools.product(tomato_pickup_actions, put_in_pot_actions))
# return [HighLevelAction(hl_action_list) for hl_action_list in hl_level_actions]
#
# def start_cooking(self, state, pot_states_dict):
# """Go to a pot that is not empty and start cooking. Currently, because high level action requires 2 goals,
# we are going to repeat the same goal twice"""
# start_cooking = self.ml_action_manager.start_cooking_actions(pot_states_dict)
# hl_level_actions = [(pot, pot) for pot in start_cooking]
# return [HighLevelAction(hl_action_list) for hl_action_list in hl_level_actions]
#
#
#
# class HighLevelPlanner(object):
# """A planner that computes optimal plans for two agents to
# deliver a certain number of dishes in an OvercookedGridworld
# using high level actions in the corresponding A* search problems
# """
#
# def __init__(self, hl_action_manager):
# self.hl_action_manager = hl_action_manager
# self.mlp = self.hl_action_manager.mlp
# self.jmp = self.mlp.ml_action_manager.joint_motion_planner
# self.mp = self.jmp.motion_planner
# self.mdp = self.mlp.mdp
#
# def get_successor_states(self, start_state):
# """Determines successor states for high-level actions"""
# successor_states = []
#
# if self.mdp.is_terminal(start_state):
# return successor_states
#
# for joint_hl_action in self.hl_action_manager.joint_hl_actions(start_state):
# _, end_state, hl_action_cost = self.perform_hl_action(joint_hl_action, start_state)
#
# successor_states.append((joint_hl_action, end_state, hl_action_cost))
# return successor_states
#
# def perform_hl_action(self, joint_hl_action, curr_state):
# """Determines the end state for a high level action, and the corresponding low level action plan and cost.
# Will return Nones if a pot exploded throughout the execution of the action"""
# full_plan = []
# motion_goal_indices = (0, 0)
# total_cost = 0
# while not self.at_least_one_finished_hl_action(joint_hl_action, motion_goal_indices):
# curr_jm_goal = tuple(joint_hl_action[i].motion_goals[motion_goal_indices[i]] for i in range(2))
# joint_motion_action_plans, end_pos_and_ors, plan_costs = \
# self.jmp.get_low_level_action_plan(curr_state.players_pos_and_or, curr_jm_goal)
# curr_state = self.jmp.derive_state(curr_state, end_pos_and_ors, joint_motion_action_plans)
# motion_goal_indices = self._advance_motion_goal_indices(motion_goal_indices, plan_costs)
# total_cost += min(plan_costs)
# full_plan.extend(joint_motion_action_plans)
# return full_plan, curr_state, total_cost
#
# def at_least_one_finished_hl_action(self, joint_hl_action, motion_goal_indices):
# """Returns whether either agent has reached the end of the motion goal list it was supposed
# to perform to finish it's high level action"""
# return any([len(joint_hl_action[i].motion_goals) == motion_goal_indices[i] for i in range(2)])
#
# def get_low_level_action_plan(self, start_state, h_fn, debug=False):
# """
# Get a plan of joint-actions executable in the environment that will lead to a goal number of deliveries
# by performaing an A* search in high-level action space
#
# Args:
# state (OvercookedState): starting state
#
# Returns:
# full_joint_action_plan (list): joint actions to reach goal
# cost (int): a cost in number of timesteps to reach the goal
# """
# full_joint_low_level_action_plan = []
# hl_plan, cost = self.get_hl_plan(start_state, h_fn)
# curr_state = start_state
# prev_h = h_fn(start_state, debug=False)
# total_cost = 0
# for joint_hl_action, curr_goal_state in hl_plan:
# assert all([type(a) is HighLevelAction for a in joint_hl_action])
# hl_action_plan, curr_state, hl_action_cost = self.perform_hl_action(joint_hl_action, curr_state)
# full_joint_low_level_action_plan.extend(hl_action_plan)
# total_cost += hl_action_cost
# assert curr_state == curr_goal_state
#
# curr_h = h_fn(curr_state, debug=False)
# self.mlp.check_heuristic_consistency(curr_h, prev_h, total_cost)
# prev_h = curr_h
# assert total_cost == cost == len(full_joint_low_level_action_plan), "{} vs {} vs {}"\
# .format(total_cost, cost, len(full_joint_low_level_action_plan))
# return full_joint_low_level_action_plan, cost
#
# # Deprecated due to Heuristic
# # def get_hl_plan(self, start_state, h_fn, debug=False):
# # expand_fn = lambda state: self.get_successor_states(state)
# # goal_fn = lambda state: state.delivery_rew >= DELIVERY_REW_THRES
# # heuristic_fn = lambda state: h_fn(state)
# #
# # search_problem = SearchTree(start_state, goal_fn, expand_fn, heuristic_fn, debug=debug)
# # hl_plan, cost = search_problem.A_star_graph_search(info=True)
# # return hl_plan[1:], cost
#
# def _advance_motion_goal_indices(self, curr_plan_indices, plan_lengths):
# """Advance indices for agents current motion goals
# based on who finished their motion goal this round"""
# idx0, idx1 = curr_plan_indices
# if plan_lengths[0] == plan_lengths[1]:
# return idx0 + 1, idx1 + 1
#
# who_finished = np.argmin(plan_lengths)
# if who_finished == 0:
# return idx0 + 1, idx1
# elif who_finished == 1:
# return idx0, idx1 + 1
# # Deprecated.
# class Heuristic(object):
#
# def __init__(self, mp):
# self.motion_planner = mp
# self.mdp = mp.mdp
# self.heuristic_cost_dict = self._calculate_heuristic_costs()
#
# def hard_heuristic(self, state, goal_deliveries, time=0, debug=False):
# # NOTE: does not support tomatoes – currently deprecated as harder heuristic
# # does not seem worth the additional computational time
#
# """
# From a state, we can calculate exactly how many:
# - soup deliveries we need
# - dishes to pots we need
# - onion to pots we need
#
# We then determine if there are any soups/dishes/onions
# in transit (on counters or on players) than can be
# brought to their destinations faster than starting off from
# a dispenser of the same type. If so, we consider fulfilling
# all demand from these positions.
#
# After all in-transit objects are considered, we consider the
# costs required to fulfill all the rest of the demand, that is
# given by:
# - pot-delivery trips
# - dish-pot trips
# - onion-pot trips
#
# The total cost is obtained by determining an optimistic time
# cost for each of these trip types
# """
# forward_cost = 0
#
# # Obtaining useful quantities
# objects_dict = state.unowned_objects_by_type
# player_objects = state.player_objects_by_type
# pot_states_dict = self.mdp.get_pot_states(state)
# min_pot_delivery_cost = self.heuristic_cost_dict['pot-delivery']
# min_dish_to_pot_cost = self.heuristic_cost_dict['dish-pot']
# min_onion_to_pot_cost = self.heuristic_cost_dict['onion-pot']
#
# pot_locations = self.mdp.get_pot_locations()
# full_soups_in_pots = pot_states_dict['cooking'] + pot_states_dict['ready']
# partially_full_soups = self.mdp.get_partially_full_pots(pot_states_dict)
# num_onions_in_partially_full_pots = sum([state.get_object(loc).state[1] for loc in partially_full_soups])
#
# # Calculating costs
# num_deliveries_to_go = goal_deliveries - state.num_delivered
#
# # SOUP COSTS
# total_num_soups_needed = max([0, num_deliveries_to_go])
#
# soups_on_counters = [soup_obj for soup_obj in objects_dict['soup'] if soup_obj.position not in pot_locations]
# soups_in_transit = player_objects['soup'] + soups_on_counters
# soup_delivery_locations = self.mdp.get_serving_locations()
#
# num_soups_better_than_pot, total_better_than_pot_soup_cost = \
# self.get_costs_better_than_dispenser(soups_in_transit, soup_delivery_locations, min_pot_delivery_cost, total_num_soups_needed, state)
#
# min_pot_to_delivery_trips = max([0, total_num_soups_needed - num_soups_better_than_pot])
# pot_to_delivery_costs = min_pot_delivery_cost * min_pot_to_delivery_trips
#
# forward_cost += total_better_than_pot_soup_cost
# forward_cost += pot_to_delivery_costs
#
# # DISH COSTS
# total_num_dishes_needed = max([0, min_pot_to_delivery_trips])
# dishes_on_counters = objects_dict['dish']
# dishes_in_transit = player_objects['dish'] + dishes_on_counters
#
# num_dishes_better_than_disp, total_better_than_disp_dish_cost = \
# self.get_costs_better_than_dispenser(dishes_in_transit, pot_locations, min_dish_to_pot_cost, total_num_dishes_needed, state)
#
# min_dish_to_pot_trips = max([0, min_pot_to_delivery_trips - num_dishes_better_than_disp])
# dish_to_pot_costs = min_dish_to_pot_cost * min_dish_to_pot_trips
#
# forward_cost += total_better_than_disp_dish_cost
# forward_cost += dish_to_pot_costs
#
# # START COOKING COSTS, each to be filled pots will require 1 INTERACT to start cooking
# num_pots_to_be_filled = min_pot_to_delivery_trips - len(full_soups_in_pots)
# """Note that this is still assuming every soup requires 3 ingredients"""
# forward_cost += num_pots_to_be_filled
#
# # ONION COSTS
# total_num_onions_needed = num_pots_to_be_filled * 3 - num_onions_in_partially_full_pots
# onions_on_counters = objects_dict['onion']
# onions_in_transit = player_objects['onion'] + onions_on_counters
#
# num_onions_better_than_disp, total_better_than_disp_onion_cost = \
# self.get_costs_better_than_dispenser(onions_in_transit, pot_locations, min_onion_to_pot_cost, total_num_onions_needed, state)
#
# min_onion_to_pot_trips = max([0, total_num_onions_needed - num_onions_better_than_disp])
# onion_to_pot_costs = min_onion_to_pot_cost * min_onion_to_pot_trips
#
# forward_cost += total_better_than_disp_onion_cost
# forward_cost += onion_to_pot_costs
#
# # Going to closest feature costs
# # NOTE: as implemented makes heuristic inconsistent
# # for player in state.players:
# # if not player.has_object():
# # counter_objects = soups_on_counters + dishes_on_counters + onions_on_counters
# # possible_features = counter_objects + pot_locations + self.mdp.get_dish_dispenser_locations() + self.mdp.get_onion_dispenser_locations()
# # forward_cost += self.action_manager.min_cost_to_feature(player.pos_and_or, possible_features)
#
# heuristic_cost = forward_cost / 2
#
# if not clean and debug:
# env = OvercookedEnv.from_mdp(self.mdp)
# env.state = state
# print("\n" + "#"*35)
# print("Current state: (ml timestep {})\n".format(time))
#
# print("# in transit: \t\t Soups {} \t Dishes {} \t Onions {}".format(
# len(soups_in_transit), len(dishes_in_transit), len(onions_in_transit)
# ))
#
# # NOTE Possible improvement: consider cost of dish delivery too when considering if a
# # transit soup is better than dispenser equivalent
# print("# better than disp: \t Soups {} \t Dishes {} \t Onions {}".format(
# num_soups_better_than_pot, num_dishes_better_than_disp, num_onions_better_than_disp
# ))
#
# print("# of trips: \t\t pot-del {} \t dish-pot {} \t onion-pot {}".format(
# min_pot_to_delivery_trips, min_dish_to_pot_trips, min_onion_to_pot_trips
# ))
#
# print("Trip costs: \t\t pot-del {} \t dish-pot {} \t onion-pot {}".format(
# pot_to_delivery_costs, dish_to_pot_costs, onion_to_pot_costs
# ))
#
# print(str(env) + "HEURISTIC: {}".format(heuristic_cost))
#
# return heuristic_cost
#
# def get_costs_better_than_dispenser(self, possible_objects, target_locations, baseline_cost, num_needed, state):
# """
# Computes the number of objects whose minimum cost to any of the target locations is smaller than
# the baseline cost (clipping it if greater than the number needed). It also calculates a lower
# bound on the cost of using such objects.
# """
# costs_from_transit_locations = []
# for obj in possible_objects:
# obj_pos = obj.position
# if obj_pos in state.player_positions:
# # If object is being carried by a player
# player = [p for p in state.players if p.position == obj_pos][0]
# # NOTE: not sure if this -1 is justified.
# # Made things work better in practice for greedy heuristic based agents.
# # For now this function is just used from there. Consider removing later if
# # greedy heuristic agents end up not being used.
# min_cost = self.motion_planner.min_cost_to_feature(player.pos_and_or, target_locations) - 1
# else:
# # If object is on a counter
# min_cost = self.motion_planner.min_cost_between_features([obj_pos], target_locations)
# costs_from_transit_locations.append(min_cost)
#
# costs_better_than_dispenser = [cost for cost in costs_from_transit_locations if cost <= baseline_cost]
# better_than_dispenser_total_cost = sum(np.sort(costs_better_than_dispenser)[:num_needed])
# return len(costs_better_than_dispenser), better_than_dispenser_total_cost
#
# def _calculate_heuristic_costs(self, debug=False):
# """Pre-computes the costs between common trip types for this mdp"""
# pot_locations = self.mdp.get_pot_locations()
# delivery_locations = self.mdp.get_serving_locations()
# dish_locations = self.mdp.get_dish_dispenser_locations()
# onion_locations = self.mdp.get_onion_dispenser_locations()
# tomato_locations = self.mdp.get_tomato_dispenser_locations()
#
# heuristic_cost_dict = {
# 'pot-delivery': self.motion_planner.min_cost_between_features(pot_locations, delivery_locations, manhattan_if_fail=True),
# 'pot-cooking': 20, # this assume cooking time is always 20 timesteps
# 'dish-pot': self.motion_planner.min_cost_between_features(dish_locations, pot_locations, manhattan_if_fail=True)
# }
#
# onion_pot_cost = self.motion_planner.min_cost_between_features(onion_locations, pot_locations, manhattan_if_fail=True)
# tomato_pot_cost = self.motion_planner.min_cost_between_features(tomato_locations, pot_locations, manhattan_if_fail=True)
#
# if debug: print("Heuristic cost dict", heuristic_cost_dict)
# assert onion_pot_cost != np.inf or tomato_pot_cost != np.inf
# if onion_pot_cost != np.inf:
# heuristic_cost_dict['onion-pot'] = onion_pot_cost
# if tomato_pot_cost != np.inf:
# heuristic_cost_dict['tomato-pot'] = tomato_pot_cost
#
# return heuristic_cost_dict
#
# # Deprecated. This is out of date with the current MDP, but is no longer needed, so deprecated
# def simple_heuristic(self, state, time=0, debug=False):
# """Simpler heuristic that tends to run faster than current one"""
# # NOTE: State should be modified to have an order list w.r.t. which
# # one can calculate the heuristic
#
# objects_dict = state.unowned_objects_by_type
# player_objects = state.player_objects_by_type
# pot_states_scores_dict = self.mdp.get_pot_states_scores(state)
# max_recipe_value = self.mdp.max_recipe_value(state)
# num_deliveries_to_go = (DELIVERY_REW_THRES - state.delivery_rew)//max_recipe_value
# num_full_soups_in_pots = sum(pot_states_scores_dict['cooking'] + pot_states_scores_dict['ready'])//max_recipe_value
#
# pot_states_dict = self.mdp.get_pot_states(state)
# partially_full_soups = self.mdp.get_partially_full_pots(pot_states_dict)
# num_items_in_partially_full_pots = sum([len(state.get_object(loc).ingredients) for loc in partially_full_soups])
#
# soups_in_transit = player_objects['soup']
# dishes_in_transit = objects_dict['dish'] + player_objects['dish']
# onions_in_transit = objects_dict['onion'] + player_objects['onion']
# tomatoes_in_transit = objects_dict['tomato'] + player_objects['tomato']
#
# num_pot_to_delivery = max([0, num_deliveries_to_go - len(soups_in_transit)])
# num_dish_to_pot = max([0, num_pot_to_delivery - len(dishes_in_transit)])
#
# # FIXME: the following logic might need to be discussed, when incoporating tomatoes
# num_pots_to_be_filled = num_pot_to_delivery - num_full_soups_in_pots
# num_onions_needed_for_pots = num_pots_to_be_filled * 3 - len(onions_in_transit) - num_items_in_partially_full_pots
# num_tomatoes_needed_for_pots = 0
# num_onion_to_pot = max([0, num_onions_needed_for_pots])
# num_tomato_to_pot = max([0, num_tomatoes_needed_for_pots])
#
# pot_to_delivery_costs = (self.heuristic_cost_dict['pot-delivery'] + self.heuristic_cost_dict['pot-cooking']) \
# * num_pot_to_delivery
# dish_to_pot_costs = self.heuristic_cost_dict['dish-pot'] * num_dish_to_pot
#
# items_to_pot_costs = []
# # FIXME: might want to change this for anything beyond 3-onion soup
# if 'onion-pot' in self.heuristic_cost_dict.keys():
# onion_to_pot_costs = self.heuristic_cost_dict['onion-pot'] * num_onion_to_pot
# items_to_pot_costs.append(onion_to_pot_costs)
# if 'tomato-pot' in self.heuristic_cost_dict.keys():
# tomato_to_pot_costs = self.heuristic_cost_dict['tomato-pot'] * num_tomato_to_pot
# items_to_pot_costs.append(tomato_to_pot_costs)
#
# # NOTE: doesn't take into account that a combination of the two might actually be more advantageous.
# # Might cause heuristic to be inadmissable in some edge cases.
# # FIXME: only onion for now
# items_to_pot_cost = onion_to_pot_costs
#
# # num_pot_to_delivery added to account for the additional "INTERACT" to start soup cooking
# heuristic_cost = (pot_to_delivery_costs + dish_to_pot_costs + num_pot_to_delivery + items_to_pot_cost) / 2
#
# if not clean and debug:
# env = OvercookedEnv.from_mdp(self.mdp)
# env.state = state
# print("\n" + "#" * 35)
# print("Current state: (ml timestep {})\n".format(time))
#
# print("# in transit: \t\t Soups {} \t Dishes {} \t Onions {}".format(
# len(soups_in_transit), len(dishes_in_transit), len(onions_in_transit)
# ))
#
# print("Trip costs: \t\t pot-del {} \t dish-pot {} \t onion-pot {}".format(
# pot_to_delivery_costs, dish_to_pot_costs, onion_to_pot_costs
# ))
#
# print(str(env) + "HEURISTIC: {}".format(heuristic_cost))
# if heuristic_cost < 15:
# print(heuristic_cost, (pot_to_delivery_costs, dish_to_pot_costs, num_pot_to_delivery, items_to_pot_cost))
# print(self.mdp.state_string(state))
# return heuristic_cost
| 93,753 | 41.94732 | 156 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/planning/__init__.py | 0 | 0 | 0 | py |
|
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/planning/search.py | import heapq
import time
import numpy as np
import scipy.sparse
import scipy.sparse.csgraph  # explicit import for the csgraph routines used below
class SearchTree(object):
"""
    A class to help perform tree searches of various types. Once a goal state is found, returns a list of tuples
    containing (action, state) pairs. This makes it possible to recover the optimal action and state path.
Args:
root (state): Initial state in our search
goal_fn (func): Takes in a state and returns whether it is a goal state
expand_fn (func): Takes in a state and returns a list of (action, successor, action_cost) tuples
heuristic_fn (func): Takes in a state and returns a heuristic value
"""
def __init__(
self,
root,
goal_fn,
expand_fn,
heuristic_fn,
max_iter_count=10e6,
debug=False,
):
self.debug = debug
self.root = root
self.is_goal = goal_fn
self.expand = expand_fn
self.heuristic_fn = heuristic_fn
self.max_iter_count = max_iter_count
def A_star_graph_search(self, info=False):
"""
Performs a A* Graph Search to find a path to a goal state
"""
start_time = time.time()
iter_count = 0
seen = set()
pq = PriorityQueue()
root_node = SearchNode(
self.root,
action=None,
parent=None,
action_cost=0,
debug=self.debug,
)
pq.push(root_node, self.estimated_total_cost(root_node))
while not pq.isEmpty():
curr_node = pq.pop()
iter_count += 1
if self.debug and iter_count % 1000 == 0:
print([p[0] for p in curr_node.get_path()])
print(iter_count)
curr_state = curr_node.state
if curr_state in seen:
continue
seen.add(curr_state)
if iter_count > self.max_iter_count:
print(
"Expanded more than the maximum number of allowed states"
)
                raise TimeoutError("Too many states expanded")
if self.is_goal(curr_state):
elapsed_time = time.time() - start_time
if info:
print(
"Found goal after: \t{:.2f} seconds, \t{} state expanded ({:.2f} unique) \t ~{:.2f} expansions/s".format(
elapsed_time,
iter_count,
len(seen) / iter_count,
iter_count / elapsed_time,
)
)
return curr_node.get_path(), curr_node.backwards_cost
successors = self.expand(curr_state)
for action, child, cost in successors:
child_node = SearchNode(
child,
action,
parent=curr_node,
action_cost=cost,
debug=self.debug,
)
pq.push(child_node, self.estimated_total_cost(child_node))
print(
"Path for last node expanded: ",
[p[0] for p in curr_node.get_path()],
)
print("State of last node expanded: ", curr_node.state)
print("Successors for last node expanded: ", self.expand(curr_state))
raise TimeoutError(
"A* graph search was unable to find any goal state."
)
def estimated_total_cost(self, node):
"""
Calculates the estimated total cost of going from node to goal
Args:
node (SearchNode): node of the state we are interested in
Returns:
float: h(s) + g(s), where g is the total backwards cost
"""
return node.backwards_cost + self.heuristic_fn(node.state)
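# Minimal usage sketch for SearchTree (added; not part of the original module).
# A toy shortest-path problem on the integers: start at 0, goal is 4, each step
# moves +1 or -1 with unit cost, and the heuristic is the remaining distance:
#
#   tree = SearchTree(
#       root=0,
#       goal_fn=lambda s: s == 4,
#       expand_fn=lambda s: [("inc", s + 1, 1), ("dec", s - 1, 1)],
#       heuristic_fn=lambda s: abs(4 - s),
#   )
#   path, cost = tree.A_star_graph_search()
#   # path == [(None, 0), ("inc", 1), ("inc", 2), ("inc", 3), ("inc", 4)], cost == 4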
class SearchNode(object):
"""
    A helper class that stores a state, action, and parent tuple and makes it possible to restore paths
Args:
state (any): Game state corresponding to the node
action (any): Action that brought to the current state
parent (SearchNode): Parent SearchNode of the current SearchNode
action_cost: Additional cost to get to this node from the parent
"""
def __init__(self, state, action, parent, action_cost, debug=False):
assert state is not None
self.state = state
# Action that led to this state
self.action = action
self.debug = debug
# Parent SearchNode
self.parent = parent
        if parent is not None:
self.depth = self.parent.depth + 1
self.backwards_cost = self.parent.backwards_cost + action_cost
else:
self.depth = 0
self.backwards_cost = 0
def __lt__(self, other):
return self.backwards_cost < other.backwards_cost
def get_path(self):
"""
Returns the path leading from the earliest parent-less node to the current
Returns:
List of tuples (action, state) where action is the action that led to the state.
NOTE: The first entry will be (None, start_state).
"""
path = []
node = self
while node is not None:
path = [(node.action, node.state)] + path
node = node.parent
return path
class Graph(object):
def __init__(self, dense_adjacency_matrix, encoder, decoder, debug=False):
"""
Each graph node is distinguishable by a key, encoded by the encoder into
        an index that corresponds to that node in the adjacency matrix defining the graph.
Arguments:
dense_adjacency_matrix: 2D array with distances between nodes
encoder: Dictionary mapping each graph node key to the adj mtx index it corresponds to
decoder: Dictionary mapping each adj mtx index to a graph node key
"""
self.sparse_adjacency_matrix = scipy.sparse.csr_matrix(
dense_adjacency_matrix
)
        # Start the timer before the all-pairs shortest-path computation so the
        # debug message below reports how long it actually took
        start_time = time.time()
        self.distance_matrix = self.shortest_paths(dense_adjacency_matrix)
        self._encoder = encoder
        self._decoder = decoder
        if debug:
            print(
                "Computing shortest paths took {} seconds".format(
                    time.time() - start_time
                )
            )
self._ccs = None
@property
def connected_components(self):
if self._ccs is not None:
return self._ccs
else:
self._ccs = self._get_connected_components()
return self._ccs
def shortest_paths(self, dense_adjacency_matrix):
"""
Uses scipy's implementation of shortest paths to compute a distance
matrix between all elements of the graph
"""
csgraph = scipy.sparse.csgraph.csgraph_from_dense(
dense_adjacency_matrix
)
return scipy.sparse.csgraph.shortest_path(csgraph)
def dist(self, node1, node2):
"""
Returns the calculated shortest distance between two nodes of the graph.
Takes in as input the node keys.
"""
idx1, idx2 = self._encoder[node1], self._encoder[node2]
return self.distance_matrix[idx1][idx2]
def get_children(self, node):
"""
Returns a list of children node keys, given a node key.
"""
edge_indx = self._get_children(self._encoder[node])
nodes = [self._decoder[i] for i in edge_indx]
return nodes
def _get_children(self, node_index):
"""
Returns a list of children node indices, given a node index.
"""
assert node_index is not None
# NOTE: Assuming successor costs are non-zero
_, children_indices = self.sparse_adjacency_matrix.getrow(
node_index
).nonzero()
return children_indices
def get_node_path(self, start_node, goal_node):
"""
Given a start node key and a goal node key, returns a list of
node keys that trace a shortest path from start to goal.
"""
start_index, goal_index = (
self._encoder[start_node],
self._encoder[goal_node],
)
index_path = self._get_node_index_path(start_index, goal_index)
node_path = [self._decoder[i] for i in index_path]
return node_path
def _get_node_index_path(self, start_index, goal_index):
"""
Given a start node index and a goal node index, returns a list of
node indices that trace a shortest path from start to goal.
"""
assert start_index is not None
if start_index == goal_index:
return [goal_index]
successors = self._get_children(start_index)
# NOTE: Currently does not support multiple equally costly paths
best_index = None
smallest_dist = np.inf
for s in successors:
curr_dist = self.distance_matrix[s][goal_index]
if curr_dist < smallest_dist:
best_index = s
smallest_dist = curr_dist
if best_index is None:
# Basically, for some of the variable mdp, it is possible for an agent to be "trapped" and
# unable to go from one joint state to another joint state
# X S X O X X S X O X
# D 1 X X D 2 X X
# X 2 P ---> X 1 P
# X X X X X X X X X X
# This is actually an absolutely impossible transition
# 08/16/2020 update: This has been addressed by catching NotConnectedError upstream
raise NotConnectedError(
"No path could be found from {} to {}".format(
self._decoder[start_index], self._decoder[goal_index]
)
+ "This could be caused by using another layout's planner on this layout"
)
return [start_index] + self._get_node_index_path(
best_index, goal_index
)
def _get_connected_components(self):
num_ccs, cc_labels = scipy.sparse.csgraph.connected_components(
self.sparse_adjacency_matrix
)
connected_components = [set() for _ in range(num_ccs)]
for node_index, cc_index in enumerate(cc_labels):
node = self._decoder[node_index]
connected_components[cc_index].add(node)
return connected_components
def are_in_same_cc(self, node1, node2):
node1_cc_index = [
i for i, cc in enumerate(self.connected_components) if node1 in cc
]
node2_cc_index = [
i for i, cc in enumerate(self.connected_components) if node2 in cc
]
assert (
len(node1_cc_index) == len(node2_cc_index) == 1
), "Node 1 cc: {} \t Node 2 cc: {}".format(
node1_cc_index, node2_cc_index
)
return node1_cc_index[0] == node2_cc_index[0]
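# A minimal usage sketch (not part of the original module), assuming numpy is available as
# `np` as in the rest of this file: a three-node directed graph keyed "A", "B", "C" with
# unit-cost edges A->B and B->C. The helper name `_example_graph_usage` is made up purely
# for illustration and is never called by the library.
def _example_graph_usage():
    adjacency = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])
    encoder = {"A": 0, "B": 1, "C": 2}
    decoder = {0: "A", 1: "B", 2: "C"}
    g = Graph(adjacency, encoder, decoder)
    assert g.dist("A", "C") == 2.0  # shortest path runs A -> B -> C
    assert g.get_children("A") == ["B"]
    assert g.get_node_path("A", "C") == ["A", "B", "C"]
    assert g.are_in_same_cc("A", "C")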
class NotConnectedError(Exception):
pass
class PriorityQueue:
"""Taken from UC Berkeley's CS188 project utils.
Implements a priority queue data structure. Each inserted item
has a priority associated with it and the client is usually interested
in quick retrieval of the lowest-priority item in the queue. This
data structure allows O(1) access to the lowest-priority item.
Note that this PriorityQueue does not allow you to change the priority
of an item. However, you may insert the same item multiple times with
different priorities."""
def __init__(self):
self.heap = []
def push(self, item, priority):
heapq.heappush(self.heap, (priority, item))
def pop(self):
(priority, item) = heapq.heappop(self.heap)
return item
def isEmpty(self):
return len(self.heap) == 0
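# A minimal usage sketch (not part of the original module): items come back in order of
# increasing priority. The helper name `_example_priority_queue_usage` is made up purely
# for illustration and is never called by the library.
def _example_priority_queue_usage():
    pq = PriorityQueue()
    pq.push("medium", 2)
    pq.push("high", 1)
    pq.push("low", 3)
    assert pq.pop() == "high"
    assert pq.pop() == "medium"
    assert pq.pop() == "low"
    assert pq.isEmpty()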
| 11,855 | 33.365217 | 131 | py |
overcooked_ai | overcooked_ai-master/src/overcooked_ai_py/data/planners/__init__.py | import os
import pickle
from overcooked_ai_py.static import PLANNERS_DIR
from overcooked_ai_py.utils import load_dict_from_file
def load_saved_action_manager(filename):
with open(os.path.join(PLANNERS_DIR, filename), "rb") as f:
mlp_action_manager = pickle.load(f)
return mlp_action_manager
def load_saved_motion_planner(filename):
with open(os.path.join(PLANNERS_DIR, filename), "rb") as f:
motion_planner = pickle.load(f)
return motion_planner
| 491 | 26.333333 | 63 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/data_dir.py | import os
DATA_DIR = os.path.abspath(".")
| 43 | 10 | 31 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/utils.py | import itertools
import json
import os
import random
import re
import shutil
import git
import numpy as np
import tensorflow as tf
WANDB_PROJECT = "Overcooked AI"
def delete_dir_if_exists(dir_path, verbose=False):
if os.path.exists(dir_path):
if verbose:
print("Deleting old dir", dir_path)
shutil.rmtree(dir_path)
def create_dir_if_not_exists(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def reset_tf():
"""Clean up tensorflow graph and session.
NOTE: this also resets the tensorflow seed"""
tf.reset_default_graph()
if tf.get_default_session() is not None:
tf.get_default_session().close()
def num_tf_params():
"""Prints number of trainable parameters defined"""
total_parameters = 0
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
print(total_parameters)
def get_current_commit_hash():
repo = git.Repo(search_parent_directories=True)
return repo.head.object.hexsha
def get_trailing_number(s):
"""
Get the trailing number from a string,
i.e. 'file123' -> '123'
"""
m = re.search(r"\d+$", s)
return int(m.group()) if m else None
def get_max_iter(agent_folder):
"""Return biggest PBT iteration that has been run"""
saved_iters = []
for folder_s in os.listdir(agent_folder):
folder_iter = get_trailing_number(folder_s)
if folder_iter is not None:
saved_iters.append(folder_iter)
if len(saved_iters) == 0:
raise ValueError(
"Agent folder {} seemed to not have any pbt_iter subfolders".format(
agent_folder
)
)
return max(saved_iters)
def cross_entropy(action_probs, y, eps=1e-4):
"""
X is the output from fully connected layer (num_examples x num_classes)
y is labels (num_examples x 1)
Note that y is not one-hot encoded vector.
It can be computed as y.argmax(axis=1) from one-hot encoded vectors of labels if required.
"""
m = y.shape[0]
# We use multidimensional array indexing to extract
# softmax probability of the correct label for each sample.
probs_for_correct = action_probs[range(m), y]
# NOTE: eps was added to correct for some actions being deterministically removed from
# the human model when it would get stuck. It was chosen empirically as to be about an order of
# magnitude less than the smallest probability assigned to any event by the model
probs_for_correct = np.array(
[p if p > eps else eps for p in probs_for_correct]
).astype(float)
log_likelihood = -np.log(probs_for_correct)
cross_entropy_loss = np.sum(log_likelihood) / m
return cross_entropy_loss
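# A worked numeric sketch (not part of the original module): two samples over three actions
# whose correct labels are 0 and 2, so the loss is -(log(0.7) + log(0.5)) / 2. The helper
# name `_example_cross_entropy` is made up purely for illustration and is never called.
def _example_cross_entropy():
    action_probs = np.array([[0.7, 0.2, 0.1], [0.3, 0.2, 0.5]])
    y = np.array([0, 2])
    assert np.isclose(
        cross_entropy(action_probs, y), -(np.log(0.7) + np.log(0.5)) / 2
    )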
def accuracy(action_probs, y):
return np.sum(np.argmax(action_probs, axis=1) == y) / len(y)
def set_global_seed(seed):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def prepare_nested_default_dict_for_pickle(nested_defaultdict):
"""Need to make all nested defaultdicts into normal dicts to pickle"""
for k, v in nested_defaultdict.items():
nested_defaultdict[k] = dict(v)
pickleable_dict = dict(nested_defaultdict)
return pickleable_dict
def set_style(font_scale=1.6):
import matplotlib
import seaborn
seaborn.set(font="serif", font_scale=font_scale)
# Make the background white, and specify the specific font family
seaborn.set_style(
"white",
{
"font.family": "serif",
"font.weight": "normal",
"font.serif": ["Times", "Palatino", "serif"],
"axes.facecolor": "white",
"lines.markeredgewidth": 1,
},
)
matplotlib.rcParams["text.usetex"] = True
matplotlib.rc("font", family="serif", serif=["Palatino"])
def common_keys_equal(dict_a, dict_b):
common_keys = set(dict_a.keys()).intersection(set(dict_b.keys()))
for k in common_keys:
if dict_a[k] != dict_b[k]:
return False
return True
class Node(object):
def __init__(self, agent_name, params, parent=None):
self.agent_name = agent_name
self.params = params
self.parent = parent
def get_flattened_keys(dictionary):
if type(dictionary) != dict:
return []
return list(dictionary.keys()) + list(
itertools.chain(
*[get_flattened_keys(dictionary[key]) for key in dictionary]
)
)
def recursive_dict_update(map, key, value):
if type(map) != dict:
return False
if key in map:
map[key] = value
return True
return any(
[recursive_dict_update(child, key, value) for child in map.values()]
)
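# A minimal usage sketch (not part of the original module) for the two nested-dict helpers
# above: get_flattened_keys lists keys at every nesting level, and recursive_dict_update
# overwrites the value stored under a key at whatever depth it is found. The helper name
# `_example_nested_dict_helpers` is made up purely for illustration and is never called.
def _example_nested_dict_helpers():
    config = {"model": {"lr": 1e-3, "layers": 2}, "seed": 0}
    assert set(get_flattened_keys(config)) == {"model", "lr", "layers", "seed"}
    assert recursive_dict_update(config, "lr", 5e-4)
    assert config["model"]["lr"] == 5e-4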
def equal_dicts(d1, d2, ignore_keys):
ignored = set(ignore_keys)
for k1, v1 in d1.items():
if k1 not in ignored and (k1 not in d2 or d2[k1] != v1):
if k1 not in d2:
print("d2 missing", k1)
else:
if k1 == "objects":
print("object difference")
for o1 in d1[k1]:
print(o1)
print("----")
for o2 in d2[k1]:
print(o2)
else:
print(
"different at ", k1, "one is ", d2[k1], "one is ", v1
)
return False
for k2, v2 in d2.items():
if k2 not in ignored and k2 not in d1:
print("d1 missing", k2)
return False
return True
def get_dict_stats(d):
new_d = d.copy()
for k, v in d.items():
new_d[k] = {
"mean": np.mean(v),
"standard_error": np.std(v) / np.sqrt(len(v)),
"max": np.max(v),
"n": len(v),
}
return new_d
def get_last_episode_rewards(filename):
with open(filename) as f:
j = json.loads(f.readlines()[-1])
result = {
"episode_reward_mean": j["episode_reward_mean"],
"sparse_reward_mean": j["custom_metrics"]["sparse_reward_mean"],
}
return result
| 6,341 | 27.567568 | 99 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/__init__.py | 0 | 0 | 0 | py |
|
overcooked_ai | overcooked_ai-master/src/human_aware_rl/human/human_data_forward_compat.py | import argparse
import os
import numpy as np
import pandas as pd
from human_aware_rl.human.data_processing_utils import AI_ID
from human_aware_rl.static import NEW_SCHEMA, OLD_SCHEMA
"""
Script for converting legacy-schema human data to current schema.
Note: This script, and working with the raw CSV files in general, should only be done by advanced users.
It is recommended that most users work with the pre-processed pickle files in /human_aware_rl/data/cleaned.
See docs for more info
"""
def write_csv(data, output_file_path):
if os.path.exists(output_file_path):
raise FileExistsError(
"File {} already exists, aborting to avoid overwriting".format(
output_file_path
)
)
output_dir = os.path.dirname(output_file_path)
if output_dir and not os.path.exists(output_dir):
os.makedirs(output_dir)
data.to_csv(output_file_path, index=False)
def main(input_file, output_file, is_human_ai=False):
### Load in data ###
print("Loading data from {}...".format(input_file))
data = pd.read_csv(input_file, header=0)
print("Success!")
### Update schema ###
print("Updating schema...")
# Ensure proper legacy schema
    assert set(data.columns) == OLD_SCHEMA, "Input data has unexpected schema"
# add unique trial_id to each game. A game is defined as a single trajectory on a single layout.
# This only works because the data is stored in chronological order
data["trial_id"] = (
data["layout_name"] != data["layout_name"].shift(1)
).astype(int).cumsum() - 1
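    # Illustrative walkthrough (not from the original file): with layout_name values
    # [A, A, B, B, A], comparing against the shifted column gives
    # [True, False, True, False, True] -> cumsum [1, 1, 2, 2, 3] -> trial_id [0, 0, 1, 1, 2],
    # i.e. a new id every time the layout changes between consecutive rows.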
# Unique for each human-human pairing. Note, one pairing will play multiple games
data["pairing_id"] = (
(data["workerid_num"] != data["workerid_num"].shift(1))
.astype(int)
.cumsum()
)
# Drop redundant games
# Note: this is necessary due to how data was collected on the backend. If player A and B are paired, the game is recorded twice.
# once with player A as player 0 and once with player B as player 0
data = data[data["is_leader"]]
if not is_human_ai:
data["player_0_is_human"] = True
data["player_1_is_human"] = True
data["player_0_id"] = (data["pairing_id"] * 2).astype(str)
data["player_1_id"] = (data["pairing_id"] * 2 + 1).astype(str)
else:
data["player_0_is_human"] = True
data["player_1_is_human"] = False
data["player_0_id"] = data["pairing_id"].astype(str)
data["player_1_id"] = AI_ID
columns_to_drop = (OLD_SCHEMA - NEW_SCHEMA).union(set(["pairing_id"]))
data = data.drop(columns=columns_to_drop)
    assert set(data.columns) == NEW_SCHEMA, "Output data has malformed schema"
print("Success!")
### Write out data ###
print("Writing data to {}...".format(output_file))
write_csv(data, output_file)
print("Success!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_file",
"-i",
type=str,
required=True,
help="path to old-schema data",
)
parser.add_argument(
"--output_file",
"-o",
type=str,
required=True,
help="path to save new-schema data",
)
parser.add_argument(
"--is_human_ai",
"-ai",
action="store_true",
help="Provide this flag if data from human-AI games",
)
args = vars(parser.parse_args())
main(**args)
| 3,473 | 30.581818 | 133 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/human/tests.py | import copy
import os
import pickle
import shutil
import sys
import unittest
import numpy as np
from numpy.testing._private.utils import assert_raises
from human_aware_rl.human.process_dataframes import (
csv_to_df_pickle,
get_trajs_from_data,
)
from human_aware_rl.human.process_human_trials import (
main as process_human_trials_main,
)
from human_aware_rl.static import *
from human_aware_rl.utils import equal_dicts
from overcooked_ai_py.agents.agent import AgentPair, GreedyHumanModel
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.mdp.overcooked_mdp import (
OvercookedGridworld,
OvercookedState,
)
from overcooked_ai_py.planning.planners import (
NO_COUNTERS_PARAMS,
MediumLevelActionManager,
)
class TestProcessDataFrames(unittest.TestCase):
temp_data_dir = "this_is_a_temp"
data_len_2019 = 3546
data_len_2020 = 1189
base_csv_to_df_params = {
"csv_path": DUMMY_2020_RAW_HUMAN_DATA_PATH,
"out_dir": "this_is_a_temp",
"out_file_prefix": "unittest",
"button_presses_threshold": 0.25,
"perform_train_test_split": False,
"silent": True,
}
base_get_trajs_from_data_params = {
"data_path": DUMMY_2019_CLEAN_HUMAN_DATA_PATH,
"featurize_states": False,
"check_trajectories": False,
"silent": True,
"layouts": ["cramped_room"],
}
def setUp(self):
print(
"\nIn Class {}, in Method {}".format(
self.__class__.__name__, self._testMethodName
)
)
if not os.path.exists(self.temp_data_dir):
os.makedirs(self.temp_data_dir)
def tearDown(self):
shutil.rmtree(self.temp_data_dir)
def test_csv_to_df_pickle_2019(self):
params = copy.deepcopy(self.base_csv_to_df_params)
params["csv_path"] = DUMMY_2019_RAW_HUMAN_DATA_PATH
params["button_presses_threshold"] = 0.0
data = csv_to_df_pickle(**params)
self.assertEqual(len(data), self.data_len_2019)
params = copy.deepcopy(self.base_csv_to_df_params)
params["csv_path"] = DUMMY_2019_RAW_HUMAN_DATA_PATH
params["button_presses_threshold"] = 0.7
data = csv_to_df_pickle(**params)
self.assertLess(len(data), self.data_len_2019)
def test_csv_to_df_pickle_2020(self):
params = copy.deepcopy(self.base_csv_to_df_params)
params["button_presses_threshold"] = 0.0
data = csv_to_df_pickle(**params)
self.assertEqual(len(data), self.data_len_2020)
params = copy.deepcopy(self.base_csv_to_df_params)
params["button_presses_threshold"] = 0.7
data = csv_to_df_pickle(**params)
self.assertLess(len(data), self.data_len_2020)
def test_csv_to_df_pickle(self):
# Try various button thresholds (hand-picked to lie between different values for dummy data games)
button_thresholds = [0.2, 0.6, 0.7]
lengths = []
for threshold in button_thresholds:
            # dummy dataset is too small to partition so we set train_test_split=False
params = copy.deepcopy(self.base_csv_to_df_params)
params["button_presses_threshold"] = threshold
data = csv_to_df_pickle(**params)
lengths.append(len(data))
# Filtered data size should be monotonically decreasing wrt button_threshold
for i in range(len(lengths) - 1):
self.assertGreaterEqual(lengths[i], lengths[i + 1])
        # Picking a threshold that's sufficiently high discards all data, should result in value error
params = copy.deepcopy(self.base_csv_to_df_params)
params["button_presses_threshold"] = 0.8
self.assertRaises(ValueError, csv_to_df_pickle, **params)
def test_get_trajs_from_data_2019(self):
params = copy.deepcopy(self.base_get_trajs_from_data_params)
trajectories, _ = get_trajs_from_data(**params)
def test_get_trajs_from_data_2019_featurize(self):
params = copy.deepcopy(self.base_get_trajs_from_data_params)
params["featurize_states"] = True
trajectories, _ = get_trajs_from_data(**params)
def test_get_trajs_from_data_2020(self):
# Ensure we can properly deserialize states with updated objects (i.e tomatoes)
params = copy.deepcopy(self.base_get_trajs_from_data_params)
params["layouts"] = ["inverse_marshmallow_experiment"]
params["data_path"] = DUMMY_2020_CLEAN_HUMAN_DATA_PATH
trajectories, _ = get_trajs_from_data(**params)
def test_get_trajs_from_data_2020_featurize(self):
# Ensure we can properly featurize states with updated dynamics and updated objects (i.e tomatoes)
params = copy.deepcopy(self.base_get_trajs_from_data_params)
params["layouts"] = ["inverse_marshmallow_experiment"]
params["data_path"] = DUMMY_2020_CLEAN_HUMAN_DATA_PATH
params["featurize_states"] = True
trajectories, _ = get_trajs_from_data(**params)
def test_csv_to_df_to_trajs_integration(self):
# Ensure the output of 'csv_to_df_pickle' works as valid input to 'get_trajs_from_data'
params = copy.deepcopy(self.base_csv_to_df_params)
_ = csv_to_df_pickle(**params)
params = copy.deepcopy(self.base_get_trajs_from_data_params)
params["data_path"] = os.path.join(
self.temp_data_dir, "unittest_all.pickle"
)
params["layouts"] = ["inverse_marshmallow_experiment"]
_ = get_trajs_from_data(**params)
class TestHumanDataConversion(unittest.TestCase):
temp_dir = "this_is_also_a_temp"
infile = DUMMY_2019_CLEAN_HUMAN_DATA_PATH
horizon = 400
DATA_TYPE = "train"
layout_name = "cramped_room"
def _equal_pickle_and_env_state_dict(
self, pickle_state_dict, env_state_dict
):
return equal_dicts(
pickle_state_dict,
env_state_dict,
["timestep", "all_orders", "bonus_orders"],
)
def setUp(self):
print(
"\nIn Class {}, in Method {}".format(
self.__class__.__name__, self._testMethodName
)
)
if not os.path.exists(self.temp_dir):
os.makedirs(self.temp_dir)
self.base_mdp = OvercookedGridworld.from_layout_name(self.layout_name)
self.mlam = MediumLevelActionManager.from_pickle_or_compute(
self.base_mdp, NO_COUNTERS_PARAMS, force_compute=True, info=False
)
self.env = OvercookedEnv.from_mdp(
self.base_mdp, horizon=self.horizon, info_level=0
)
self.starting_state_dict = (
self.base_mdp.get_standard_start_state().to_dict()
)
outfile = process_human_trials_main(
self.infile,
self.temp_dir,
insert_interacts=True,
verbose=False,
forward_port=False,
fix_json=False,
)
with open(outfile, "rb") as f:
self.human_data = pickle.load(f)[self.layout_name]
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_state(self):
idx = 0
for state_dict, joint_action in self.human_data[:100]:
if state_dict.items() == self.starting_state_dict.items():
self.env.reset()
else:
self.assertTrue(
self._equal_pickle_and_env_state_dict(
state_dict, self.env.state.to_dict()
),
"Expected state:\t\n{}\n\nActual state:\t\n{}".format(
self.env.state.to_dict(), state_dict
),
)
self.env.step(joint_action=joint_action)
idx += 1
if __name__ == "__main__":
unittest.main()
| 7,837 | 35.119816 | 106 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/human/process_human_trials.py | import argparse
import copy
import json
import os
import pickle
import pandas as pd
from human_aware_rl.human.data_processing_utils import (
json_joint_action_to_python_action,
)
from overcooked_ai_py.mdp.overcooked_mdp import (
OvercookedGridworld,
OvercookedState,
)
from overcooked_ai_py.planning.planners import (
NO_COUNTERS_PARAMS,
MediumLevelActionManager,
)
# IMPORTANT FLAG: PLEASE READ BEFORE PROCEEDING
# This flag is meant to correct the fact that in the new dynamics, an additional INTERACT action is required
# to get soups to start cooking after the last onion is dropped in
# (previously, when the last onion is dropped in, the soup automatically start cooking).
JSON_FIXES = [
("'", '"'),
("False", "false"),
("True", "true"),
("INTERACT", "interact"),
]
"""
First parse raw pickle file indicated using the first command
line argument, to generate state action pairs. Then featurize
all collected states for downstream behavior cloning usage.
"""
def insert_cooking_interact(state_dict):
"""
Arguments:
state_dict (dict): dictionary with player and object information for the state that needs insertion before it
"""
# initialize the actions_insert to be returned
actions_insert = [(0, 0), (0, 0)]
# making a deep copy because we need to modify the attributes of the soup
state_dict_insert = copy.deepcopy(state_dict)
players = state_dict_insert["players"]
# get the reach of each players
players_reach = [
(
player["position"][0] + player["orientation"][0],
player["position"][1] + player["orientation"][1],
)
for player in players
]
objects = state_dict_insert["objects"]
for o in objects:
if o["name"] == "soup" and o["_cooking_tick"] == 1:
for i, player_reach in enumerate(players_reach):
if player_reach == o["position"]:
actions_insert[i] = "interact"
            # we need to rewind some of the attributes momentarily
o["_cooking_tick"] = -1
o["cooking_tick"] = -1 # duplicate tag
o["cook_time"] = -1
o["is_idle"] = True
o["is_cooking"] = False
assert (
"interact" in actions_insert
), "was supposed to insert interact but did not find a player_reach to insert"
return state_dict_insert, actions_insert
def is_insertion_needed(state):
"""
Arguments:
state (dict): OvercookedState dictionary. Must be under new dynamics schema; call `forward_port_state*` to update schema
Returns:
insertion_needed (bool): Whether a soup immediately starting cooking this timestep, indicating an 'interact' action is required under new dynamics
"""
if "objects" not in state:
raise ValueError(
"Corrupted data detected, state missing 'objects' key"
)
soups = [obj for obj in state["objects"] if obj["name"] == "soup"]
if not soups:
return False
insertion_needed = False
for soup in soups:
if not "cooking_tick" in soup:
raise ValueError(
"Legacy schema detected! Please ensure you are using updated state schema"
)
# this is the flag to signal if the soup just started cooking (and an additional frame with interact is required)
insertion_needed = insertion_needed or soup["cooking_tick"] == 1
return insertion_needed
def forward_port_state_dict(state):
"""
    Update state schema. If a new-schema state is passed in, this is an identity function
Arguments:
state (dict): Serialized OvercookedState encoding under legacy schema
Returns:
state (dict): Serialized OvercookedState encoding under current schema
"""
assert type(state) == dict, "Expected Dict input"
if "players" in state:
for player in state["players"]:
if not "held_object" in player:
player["held_object"] = None
player
if "objects" in state and type(state["objects"]) == dict:
state["objects"] = list(state["objects"].values())
# Convert all position and orientation lists to tuples
return OvercookedState.from_dict(state).to_dict()
def forward_port_state_json(state):
"""
Update state schema. If new schema JSON passed in, this is an identity function
Arguments:
state (str): Valid JSON encoding of legacy state
Returns:
state (str): Valid JSON encoding of state under current schema
"""
assert type(state) == str, "Expected JSON string input"
state_dict = json.loads(state)
state_dict = forward_port_state_dict(state_dict)
return json.dumps(state_dict)
def process_state(state_json, forward_port=False, fix_json=False):
"""
Arguments:
state_json (str): Valid JSON encoding of an (optionally legacy) OvercookedState
forward_port (bool): Whether state encoding needs to be updated to current schema. Pass in
forward_port=True if working with legacy encodings
fix_json (bool): Whether legacy JSON fixes (such as converting single quotes to double quotes) are necessary.
Probably not necessary, even if working with legacy schema
Returns:
state_dict (dict): New schema encoding of state
insertion_needed (bool): Whether a soup began cooking at this timestep
"""
if fix_json:
for old, new in JSON_FIXES:
state_json = state_json.replace(old, new)
if forward_port:
state_json = forward_port_state_json(state_json)
state_dict = json.loads(state_json)
# Perform housecleaning + necessary casting (i.e position lists get converted into tuples)
state_dict = OvercookedState.to_dict(OvercookedState.from_dict(state_dict))
return state_dict, is_insertion_needed(state_dict)
def process_actions(actions_json):
"""
Arguments:
actions_json (str): JSON encoding of joint agent action
Returns:
actions (Overcooked.Action): Current encoding compatible joint action
Note: `json_joint_action_to_python_action` can handle both legacy and current schema, as well as any necessary JSON fixes under the hood
"""
return json_joint_action_to_python_action(actions_json)
def display_state_dict_and_action(state_dict, actions):
for item in state_dict.items():
if item[0] == "objects":
print("objects ------")
for l in item[1]:
print(l)
print("--------------")
else:
print(item)
print(actions)
print()
def main(
data_infile, data_outdir, insert_interacts, forward_port, fix_json, verbose
):
"""
Arguments:
data_infile (str): Full path to cleaned, pickled DataFrame of human data we wish to work with
data_outdir (str): Directory in which we will save our results. Must exist already
insert_interacts (bool): Whether to impute interact actions to be compatible with modern dynamics
forward_port (bool): Whether states need to be converted from legacy to current schema
fix_json (bool): Whether legacy JSON fixes (ie convert single to double quotes) need to be performed. Unless
you are working with a very outdated version of our data, this is most likely not necessary
verbose (bool): Whether to include debug logs
Behavior:
Converts data as specified by arguments, then saves 'state_action_pairs' dictionary in
{data_outdir}/{data_infile_filename}._state_dict_and_action_{inserted|original}.pickle
Where {data_infile_filename} is the base filename of the loaded datapath. For example, if data_infile='/foo/bar/baz.pickle', then
data_infile_filename='baz'
The structure of 'state_action_pairs' is as follows:
state_action_pairs[layout] = [(state_1, joint_action_1), (state_2, joint_action_2), ...]
"""
raw_data = pd.read_pickle(data_infile)
N = len(raw_data)
if verbose:
print("Processing Raw Data")
state_action_pairs = dict()
for i, datapoint in raw_data.iterrows():
if verbose:
print(f"Processing {i}/{N}", end="\r")
layout_name = datapoint.layout_name
if layout_name == "random0":
layout_name = "forced_coordination"
elif layout_name == "random3":
layout_name = "counter_circuit_o_1order"
if layout_name not in state_action_pairs:
state_action_pairs[layout_name] = []
# Fix formatting issues then parse json state
state = datapoint.state
actions = datapoint.joint_action
state_dict, insertion_needed = process_state(
state, forward_port, fix_json
)
actions = process_actions(actions)
# take care of insertion of interact
if insert_interacts and insertion_needed:
if verbose:
print("INSERTING NEEDED, PERFORMING")
state_dict_insert, actions_insert = insert_cooking_interact(
state_dict
)
if verbose:
display_state_dict_and_action(
state_dict_insert, actions_insert
)
state_action_pairs[layout_name].append(
(state_dict_insert, actions_insert)
)
if verbose:
display_state_dict_and_action(state_dict, actions)
state_action_pairs[layout_name].append((state_dict, actions))
if verbose:
print("Done processing raw data!")
# The tag to the file such that we know whether insertion has been performed
filename = os.path.basename(data_infile).split(".")[0]
tag = "inserted" if insert_interacts else "original"
data_outfile = os.path.join(
data_outdir, filename + "_state_dict_and_action_{}.pickle".format(tag)
)
with open(data_outfile, "wb") as f:
pickle.dump(state_action_pairs, f)
return data_outfile
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--data-infile", type=str, required=True)
parser.add_argument("-o", "--data-outdir", type=str, required=True)
parser.add_argument("-ii", "-insert-interacts", action="store_true")
parser.add_argument("-j", "--fix-json", action="store_true")
parser.add_argument("-fp", "--forward-port", action="store_true")
parser.add_argument("-v", "--verbose", action="store_true")
args = vars(parser.parse_args())
main(**args)
# def process_soup_not_held(soup_not_held):
# # convert the soup not held by a player
# assert soup_not_held['name'] == 'soup'
# position = tuple(soup_not_held['position'])
# new_soup_not_held = {
# 'name': 'soup',
# 'position': position,
# }
# type, num_onion_in_soup, cooking_tick = soup_not_held['state']
# cooking_tick = min(20, cooking_tick)
# assert type == "onion", "data is corrupted, because the type must be onion in old dynamics"
# new_soup_not_held['_ingredients'] = [{'name': 'onion', 'position': position}] * num_onion_in_soup
# new_soup_not_held['_cooking_tick'] = cooking_tick if cooking_tick > 0 else -1
# new_soup_not_held['cooking_tick'] = new_soup_not_held['_cooking_tick'] # duplicate tag
# new_soup_not_held['cook_time'] = 20 if cooking_tick > 0 else -1
# new_soup_not_held['is_ready'] = cooking_tick == 20
# new_soup_not_held['is_idle'] = cooking_tick == 0
# new_soup_not_held['is_cooking'] = not new_soup_not_held['is_idle'] and not new_soup_not_held['is_ready']
# # this is the flag to signal if the soup just started cooking (and an additional frame with interact is required)
# insertion_needed_i = cooking_tick == 1
# return new_soup_not_held, insertion_needed_i
# def process_held_object(held_object):
# # convert held_object from old format to new format
# position = tuple(held_object['position'])
# new_held_object = {
# 'name': held_object['name'],
# 'position': position
# }
# # handling all the new tags for soup
# if held_object['name'] == 'soup':
# # only 3 onion soup is allowed in the old dynamics
# new_held_object['_ingredients'] = [{'name': 'onion', 'position': position}] * 3
# new_held_object['cooking_tick'] = 20
# new_held_object['is_cooking'] = False
# new_held_object['is_ready'] = True
# new_held_object['is_idle'] = False
# new_held_object['cook_time'] = 20
# new_held_object['_cooking_tick'] = 20
# return new_held_object
# def old_state_dict_to_new_state_dict(old_state_dict):
# """
# Arguments:
# old_state_dict (python dictionary): state dict in the old dynamics
# Return:
# new_state_dict (python dictionary): state dict in the new dynamics
# insertion_needed (bool): whether we need to insert an additional frame with interact to start soup cooking
# """
# # default insertion needed to false
# insertion_needed = False
# # players: tuple
# players = old_state_dict["players"]
# new_players = []
# for player in players:
# # convert position and orientation
# new_player = {
# 'position': tuple(player['position']),
# 'orientation': tuple(player['orientation']),
# }
# if player.get('held_object', None):
# new_held_object = process_held_object(player['held_object'])
# else:
# new_held_object = None
# new_player['held_object'] = new_held_object
# new_players.append(new_player)
# objects = old_state_dict["objects"]
# new_objects = []
# for o in objects:
# if o['name'] == 'soup':
# processed_soup, insertion_needed_i = process_soup_not_held(o)
# # update insertion
# insertion_needed = insertion_needed or insertion_needed_i
# new_objects.append(processed_soup)
# else:
# processed_object = {
# 'name': o['name'],
# 'position': tuple(o['position'])
# }
# new_objects.append(processed_object)
# return {
# "players": new_players,
# "objects": new_objects,
# "bonus_orders": [], # no bonus order in old dynamics
# "all_orders": [{'ingredients': ('onion', 'onion', 'onion')}], # 3 onion soup only in old dynamics
# "timestep": 0 # FIXME: This still needs to be fixed
# }, insertion_needed
| 14,590 | 36.032995 | 154 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/human/data_processing_utils.py | import json
import time
import numpy as np
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
from overcooked_ai_py.mdp.actions import Action, Direction
from overcooked_ai_py.mdp.overcooked_mdp import (
ObjectState,
OvercookedGridworld,
OvercookedState,
PlayerState,
)
AI_ID = "I am robot!"
####################
# CONVERSION UTILS #
####################
def json_action_to_python_action(action):
if type(action) is list:
action = tuple(action)
if type(action) is str:
action = action.lower()
assert action in Action.ALL_ACTIONS
return action
def json_joint_action_to_python_action(json_joint_action):
"""Port format from javascript to python version of Overcooked"""
if type(json_joint_action) is str:
try:
json_joint_action = json.loads(json_joint_action)
except json.decoder.JSONDecodeError:
# hacky fix to circumvent 'INTERACT' action being malformed json (because of single quotes)
# Might need to find a more robust way around this in the future
json_joint_action = eval(json_joint_action)
return tuple(json_action_to_python_action(a) for a in json_joint_action)
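# A minimal usage sketch (not part of the original module): a JSON-encoded joint action
# becomes a tuple of Overcooked actions, with "INTERACT" lowercased and movement vectors
# converted to tuples. The helper name `_example_json_joint_action` is made up purely for
# illustration and is never called by the library.
def _example_json_joint_action():
    assert json_joint_action_to_python_action('[[0, -1], "INTERACT"]') == (
        (0, -1),
        "interact",
    )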
def json_state_to_python_state(df_state):
"""Convert from a df cell format of a state to an Overcooked State"""
if type(df_state) is str:
df_state = json.loads(df_state)
return OvercookedState.from_dict(df_state)
def is_interact(joint_action):
joint_action = json_joint_action_to_python_action(joint_action)
return np.array(
[
int(joint_action[0] == Action.INTERACT),
int(joint_action[1] == Action.INTERACT),
]
)
def is_button_press(joint_action):
joint_action = json_joint_action_to_python_action(joint_action)
return np.array(
[
int(joint_action[0] != Action.STAY),
int(joint_action[1] != Action.STAY),
]
)
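# A minimal usage sketch (not part of the original module): for a joint action where player 0
# presses INTERACT and player 1 stays put, is_interact flags only player 0, and
# is_button_press also counts only player 0 since STAY is not a key press. The helper name
# `_example_action_flags` is made up purely for illustration and is never called.
def _example_action_flags():
    joint_action = '["INTERACT", [0, 0]]'
    assert list(is_interact(joint_action)) == [1, 0]
    assert list(is_button_press(joint_action)) == [1, 0]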
def extract_df_for_worker_on_layout(main_trials, worker_id, layout_name):
"""
WARNING: this function has been deprecated and is no longer compatible with current schema
Extract trajectory for a specific layout and worker pair from main_trials df
"""
worker_trajs_df = main_trials[main_trials["workerid_num"] == worker_id]
worker_layout_traj_df = worker_trajs_df[
worker_trajs_df["layout_name"] == layout_name
]
return worker_layout_traj_df
def df_traj_to_python_joint_traj(
traj_df, check_trajectories=True, silent=True, **kwargs
):
if len(traj_df) == 0:
return None
datapoint = traj_df.iloc[0]
layout_name = datapoint["layout_name"]
agent_evaluator = AgentEvaluator.from_layout_name(
mdp_params={"layout_name": layout_name},
env_params={
"horizon": 1250
}, # Defining the horizon of the mdp of origin of the trajectories
)
mdp = agent_evaluator.env.mdp
env = agent_evaluator.env
overcooked_states = [json_state_to_python_state(s) for s in traj_df.state]
overcooked_actions = [
json_joint_action_to_python_action(joint_action)
for joint_action in traj_df.joint_action
]
overcooked_rewards = list(traj_df.reward)
assert (
sum(overcooked_rewards) == datapoint.score_total
), "Rewards didn't sum up to cumulative rewards. Probably trajectory df is corrupted / not complete"
trajectories = {
"ep_states": [overcooked_states],
"ep_actions": [overcooked_actions],
"ep_rewards": [overcooked_rewards], # Individual (dense) reward values
"ep_dones": [
[False] * len(overcooked_states)
], # Individual done values
"ep_infos": [{}] * len(overcooked_states),
"ep_returns": [
sum(overcooked_rewards)
], # Sum of dense rewards across each episode
"ep_lengths": [len(overcooked_states)], # Lengths of each episode
"mdp_params": [mdp.mdp_params],
"env_params": [env.env_params],
"metadatas": {
"player_0_id": [datapoint["player_0_id"]],
"player_1_id": [datapoint["player_1_id"]],
"env": [agent_evaluator.env],
},
}
trajectories = {
k: np.array(v) if k not in ["ep_actions", "metadatas"] else v
for k, v in trajectories.items()
}
if check_trajectories:
agent_evaluator.check_trajectories(trajectories, verbose=not silent)
return trajectories
def convert_joint_df_trajs_to_overcooked_single(
main_trials, layouts, silent=False, **kwargs
):
"""
Takes in a dataframe `main_trials` containing joint trajectories, and extract trajectories of workers `worker_ids`
on layouts `layouts`, with specific options.
"""
single_agent_trajectories = {
# With shape (n_episodes, game_len), where game_len might vary across games:
"ep_states": [],
"ep_actions": [],
"ep_rewards": [], # Individual reward values
"ep_dones": [], # Individual done values
"ep_infos": [],
# With shape (n_episodes, ):
"ep_returns": [], # Sum of rewards across each episode
"ep_lengths": [], # Lengths of each episode
"mdp_params": [],
"env_params": [],
"metadatas": {"ep_agent_idxs": []}, # Agent index for current episode
}
human_indices = []
num_trials_for_layout = {}
for layout_name in layouts:
trial_ids = np.unique(
main_trials[main_trials["layout_name"] == layout_name]["trial_id"]
)
num_trials = len(trial_ids)
num_trials_for_layout[layout_name] = num_trials
if num_trials == 0:
print(
"WARNING: No trajectories found on {} layout!".format(
layout_name
)
)
for trial_id in trial_ids:
# Get an single game
one_traj_df = main_trials[main_trials["trial_id"] == trial_id]
# Get python trajectory data and information on which player(s) was/were human
joint_traj_data = df_traj_to_python_joint_traj(
one_traj_df, silent=silent, **kwargs
)
human_idx = get_human_player_index_for_df(one_traj_df)
human_indices.append(human_idx)
# Convert joint trajectories to single agent trajectories, appending recovered info to the `trajectories` dict
joint_state_trajectory_to_single(
single_agent_trajectories, joint_traj_data, human_idx, **kwargs
)
if not silent:
print(
"Number of trajectories processed for each layout: {}".format(
num_trials_for_layout
)
)
return single_agent_trajectories, human_indices
def get_human_player_index_for_df(one_traj_df):
"""Determines which player index had a human player"""
human_player_indices = []
assert len(one_traj_df["player_0_id"].unique()) == 1
assert len(one_traj_df["player_1_id"].unique()) == 1
datapoint = one_traj_df.iloc[0]
if datapoint["player_0_is_human"]:
human_player_indices.append(0)
if datapoint["player_1_is_human"]:
human_player_indices.append(1)
return human_player_indices
def joint_state_trajectory_to_single(
trajectories,
joint_traj_data,
player_indices_to_convert=None,
featurize_states=True,
silent=False,
**kwargs
):
"""
Take a joint trajectory and split it into two single-agent trajectories, adding data to the `trajectories` dictionary
player_indices_to_convert: which player indexes' trajs we should return
"""
env = joint_traj_data["metadatas"]["env"][0]
assert (
len(joint_traj_data["ep_states"]) == 1
), "This method only takes in one trajectory"
states, joint_actions = (
joint_traj_data["ep_states"][0],
joint_traj_data["ep_actions"][0],
)
rewards, length = (
joint_traj_data["ep_rewards"][0],
joint_traj_data["ep_lengths"][0],
)
# Getting trajectory for each agent
for agent_idx in player_indices_to_convert:
ep_obs, ep_acts, ep_dones = [], [], []
for i in range(len(states)):
state, action = states[i], joint_actions[i][agent_idx]
if featurize_states:
action = np.array([Action.ACTION_TO_INDEX[action]]).astype(int)
state = env.featurize_state_mdp(state)[agent_idx]
ep_obs.append(state)
ep_acts.append(action)
ep_dones.append(False)
ep_dones[-1] = True
trajectories["ep_states"].append(ep_obs)
trajectories["ep_actions"].append(ep_acts)
trajectories["ep_rewards"].append(rewards)
trajectories["ep_dones"].append(ep_dones)
trajectories["ep_infos"].append([{}] * len(rewards))
trajectories["ep_returns"].append(sum(rewards))
trajectories["ep_lengths"].append(length)
trajectories["mdp_params"].append(env.mdp.mdp_params)
trajectories["env_params"].append({})
trajectories["metadatas"]["ep_agent_idxs"].append(agent_idx)
| 9,152 | 32.405109 | 122 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/human/process_dataframes.py | import copy
import json
import os
import random
from collections import defaultdict
import numpy as np
import pandas as pd
from numpy.core.numeric import full
from human_aware_rl.human.data_processing_utils import (
convert_joint_df_trajs_to_overcooked_single,
df_traj_to_python_joint_traj,
is_button_press,
is_interact,
)
from human_aware_rl.static import *
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
from overcooked_ai_py.mdp.overcooked_trajectory import append_trajectories
from overcooked_ai_py.utils import mean_and_std_err
######################
# HIGH LEVEL METHODS #
######################
def get_human_human_trajectories(
layouts, dataset_type="train", data_path=None, **kwargs
):
"""
    Get human-human trajectories for the requested layouts. Automatically determines which cleaned data file to load for each layout when data_path is not specified.
Arguments:
layouts (list): List of strings corresponding to layouts we wish to retrieve data for
data_path (str): Full path to pickled DataFrame we wish to load. If not specified, default to CLEAN_{2019|2020}_HUMAN_DATA_{train|test|all}
dataset_type (str): Either 'train', 'test', or 'all', determines which data to load if data_path=None
Keyword Arguments:
featurize_states (bool): Whether the states in returned trajectories should be OvercookedState objects (false) or vectorized np.Arrays (true)
check_trajectories (bool): If True, we ensure the consistency of the MDP dynamics within the trajectory. This is slow and has lots of overhead
silent (bool): If true, silence logging and print statements
"""
if not set(layouts).issubset(LAYOUTS_WITH_DATA):
# Note: doesn't necessarily mean we'll find data for this layout as this is a loose check
# for example, if layouts=['cramped_room'] and the data path is CLEAN_HUMAN_DATA_{train|test|all}, no data will be found
raise ValueError("Layout for which no data collected detected")
if data_path and not os.path.exists(data_path):
raise FileNotFoundError(
"Tried to load human data from {} but file does not exist!".format(
data_path
)
)
data = {}
# Determine which paths are needed for which layouts (according to hierarchical path resolution rules outlined in docstring)
    data_path_to_layouts = defaultdict(list)
for layout in layouts:
curr_data_path = _get_data_path(layout, dataset_type, data_path)
data_path_to_layouts[curr_data_path].append(layout)
# For each data path, load data once and parse trajectories for all corresponding layouts
for data_path in data_path_to_layouts:
curr_data = get_trajs_from_data(
            data_path, layouts=data_path_to_layouts[data_path], **kwargs
)[0]
data = append_trajectories(data, curr_data)
# Return all accumulated data for desired layouts
return data
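# Example call (illustrative only; assumes the cleaned 2019 training data is available
# locally and that "cramped_room" is one of the layouts with data):
#
#   trajectories = get_human_human_trajectories(
#       layouts=["cramped_room"],
#       dataset_type="train",
#       featurize_states=True,
#       check_trajectories=False,
#       silent=True,
#   )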
def csv_to_df_pickle(
csv_path,
out_dir,
out_file_prefix,
button_presses_threshold=0.25,
perform_train_test_split=True,
silent=True,
**kwargs
):
"""
High level function that converts raw CSV data into well formatted and cleaned pickled pandas dataframes.
Arguments:
- csv_path (str): Full path to human csv data
- out_dir(str): Full path to directory where cleaned data will be saved
- out_file_prefix(str): common prefix for all saved files
- button_presses_threshold (float): minimum button presses per timestep over rollout required to
keep entire game
- perform_train_test_split (bool): Whether to partition dataset into training and testing portions
- kwargs (dict): keyword args to pass to all helper functions
After running, the following files are created
if traintest_split:
/{out_dir}
- {out_file_prefix}_all.pickle
- {out_file_prefix}_train.pickle
- {out_file_prefix}_test.pickle
else:
/{out_dir}
- {out_file_prefix}_all.pickle
Returns:
if perform_train_test_split:
            - tuple(pd.DataFrame, pd.DataFrame): tuple of train data, test data
else:
- clean_trials (pd.DataFrame): Dataframe containing _all_ cleaned and formatted transitions
"""
if not silent:
print("Loading raw data from", csv_path)
all_trials = pd.read_csv(csv_path)
if not silent:
print("Success")
if not silent:
print("Raw data columns:", all_trials.columns)
if not silent:
print("Formatting...")
all_trials = format_trials_df(all_trials, silent=silent, **kwargs)
if not silent:
print("Success!")
def filter_func(row):
return row["button_presses_per_timstep"] >= button_presses_threshold
if not silent:
print("Filtering...")
clean_trials = filter_trials(all_trials, filter_func, **kwargs)
if not silent:
print("Success!")
full_outfile_prefix = os.path.join(out_dir, out_file_prefix)
if not silent:
print("Saving processed pickle data with prefix", full_outfile_prefix)
clean_trials.to_pickle(full_outfile_prefix + "_all.pickle")
if not silent:
print("Success!")
if perform_train_test_split:
if not silent:
print("Performing train/test split...")
cleaned_trials_dict = train_test_split(clean_trials, **kwargs)
layouts = np.unique(clean_trials["layout_name"])
train_trials = pd.concat(
[cleaned_trials_dict[layout]["train"] for layout in layouts]
)
test_trials = pd.concat(
[cleaned_trials_dict[layout]["test"] for layout in layouts]
)
clean_trials = pd.concat([train_trials, test_trials])
train_trials.to_pickle(full_outfile_prefix + "_train.pickle")
test_trials.to_pickle(full_outfile_prefix + "_test.pickle")
if not silent:
print("Success!")
return clean_trials
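# Example call (illustrative only; the CSV path and output locations below are placeholders):
#
#   clean_trials = csv_to_df_pickle(
#       csv_path="path/to/raw_human_data.csv",
#       out_dir="cleaned_data",
#       out_file_prefix="human_data",
#       button_presses_threshold=0.25,
#       perform_train_test_split=True,
#       silent=False,
#   )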
#############################
# DATAFRAME TO TRAJECTORIES #
#############################
def get_trajs_from_data(data_path, layouts, silent=True, **kwargs):
"""
Converts and returns trajectories from dataframe at `data_path` to overcooked trajectories.
"""
if not silent:
print("Loading data from {}".format(data_path))
main_trials = pd.read_pickle(data_path)
trajs, info = convert_joint_df_trajs_to_overcooked_single(
main_trials, layouts, silent=silent, **kwargs
)
return trajs, info
############################
# DATAFRAME PRE-PROCESSING #
############################
def format_trials_df(trials, clip_400=False, silent=False, **kwargs):
"""Get trials for layouts in standard format for data exploration, cumulative reward and length information + interactivity metrics"""
layouts = np.unique(trials["layout_name"])
if not silent:
print("Layouts found", layouts)
if clip_400:
trials = trials[trials["cur_gameloop"] <= 400]
# Add game length for each round
trials = trials.join(
trials.groupby(["trial_id"])["cur_gameloop"].count(),
on=["trial_id"],
rsuffix="_total",
)
# Calculate total reward for each round
trials = trials.join(
trials.groupby(["trial_id"])["score"].max(),
on=["trial_id"],
rsuffix="_total",
)
# Add interactivity metadata
trials = _add_interactivity_metrics(trials)
trials["button_presses_per_timstep"] = (
trials["button_press_total"] / trials["cur_gameloop_total"]
)
return trials
def filter_trials(trials, filter, **kwargs):
"""
    Prune games based on a user-defined filter function
    Note: 'filter' must accept a single row as input and return whether the entire trial should be kept,
    based on its first row
"""
trial_ids = np.unique(trials["trial_id"])
cleaned_trial_dfs = []
for trial_id in trial_ids:
curr_trial = trials[trials["trial_id"] == trial_id]
# Discard entire trials based on filter function applied to first row
element = curr_trial.iloc[0]
keep = filter(element)
if keep:
cleaned_trial_dfs.append(curr_trial)
return pd.concat(cleaned_trial_dfs)
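# Example filter (illustrative only): keep only games whose total score reached at least 20
# points; filter_trials(trials, keep_scoring_games) would then drop every trial whose first
# row fails the predicate. The name keep_scoring_games is made up for illustration.
#
#   def keep_scoring_games(row):
#       return row["score_total"] >= 20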
def filter_transitions(trials, filter):
"""
    Prune games based on a user-defined filter function
Note: 'filter' must accept a pandas Series as input and return a Series of booleans
where the ith boolean is True if the ith entry should be kept
"""
trial_ids = np.unique(trials["trial_id"])
cleaned_trial_dfs = []
for trial_id in trial_ids:
curr_trial = trials[trials["trial_id"] == trial_id]
# Discard entire trials based on filter function applied to first row
keep = filter(curr_trial)
curr_trial_kept = curr_trial[keep]
cleaned_trial_dfs.append(curr_trial_kept)
return pd.concat(cleaned_trial_dfs)
def train_test_split(trials, train_size=0.7, print_stats=False):
cleaned_trials_dict = defaultdict(dict)
layouts = np.unique(trials["layout_name"])
for layout in layouts:
# Gettings trials for curr layout
curr_layout_trials = trials[trials["layout_name"] == layout]
# Get all trial ids for the layout
curr_trial_ids = np.unique(curr_layout_trials["trial_id"])
# Split trials into train and test sets
random.shuffle(curr_trial_ids)
mid_idx = int(np.ceil(len(curr_trial_ids) * train_size))
train_trials, test_trials = (
curr_trial_ids[:mid_idx],
curr_trial_ids[mid_idx:],
)
assert (
len(train_trials) > 0 and len(test_trials) > 0
), "Cannot have empty split"
# Get corresponding trials
layout_train = curr_layout_trials[
curr_layout_trials["trial_id"].isin(train_trials)
]
layout_test = curr_layout_trials[
curr_layout_trials["trial_id"].isin(test_trials)
]
train_dset_avg_rew = int(np.mean(layout_train["score_total"]))
test_dset_avg_rew = int(np.mean(layout_test["score_total"]))
if print_stats:
print(
"Layout: {}\nNum Train Trajs: {}\nTrain Traj Average Rew: {}\nNum Test Trajs: {}\nTest Traj Average Rew: {}".format(
layout,
len(train_trials),
train_dset_avg_rew,
len(test_trials),
test_dset_avg_rew,
)
)
cleaned_trials_dict[layout]["train"] = layout_train
cleaned_trials_dict[layout]["test"] = layout_test
return cleaned_trials_dict
def get_trials_scenario_and_worker_rews(trials):
scenario_rews = defaultdict(list)
worker_rews = defaultdict(list)
for _, trial in trials.groupby("trial_id"):
datapoint = trial.iloc[0]
layout = datapoint["layout_name"]
player_0, player_1 = datapoint["player_0_id"], datapoint["player_1_id"]
tot_rew = datapoint["score_total"]
scenario_rews[layout].append(tot_rew)
worker_rews[player_0].append(tot_rew)
worker_rews[player_1].append(tot_rew)
return dict(scenario_rews), dict(worker_rews)
#####################
# Lower-level Utils #
#####################
def remove_worker(trials, worker_id):
return trials[
trials["player_0_id"] != worker_id & trials["player_1_id"] != worker_id
]
def remove_worker_on_map(trials, workerid_num, layout):
to_remove = (
(trials["player_0_id"] == workerid_num)
| (trials["player_1_id"] == workerid_num)
) & (trials["layout_name"] == layout)
to_keep = ~to_remove
assert to_remove.sum() > 0
return trials[to_keep]
def _add_interactivity_metrics(trials):
# this method is non-destructive
trials = trials.copy()
# whether any human INTERACT actions were performed
is_interact_row = lambda row: int(
np.sum(
np.array([row["player_0_is_human"], row["player_1_is_human"]])
* is_interact(row["joint_action"])
)
> 0
)
    # Whether any human keyboard strokes were performed
is_button_press_row = lambda row: int(
np.sum(
np.array([row["player_0_is_human"], row["player_1_is_human"]])
* is_button_press(row["joint_action"])
)
> 0
)
# temp column to split trajectories on INTERACTs
trials["interact"] = trials.apply(is_interact_row, axis=1).cumsum()
trials["dummy"] = 1
# Temp column indicating whether current timestep required a keyboard press
trials["button_press"] = trials.apply(is_button_press_row, axis=1)
# Add 'button_press_total' column to each game indicating total number of keyboard strokes
trials = trials.join(
trials.groupby(["trial_id"])["button_press"].sum(),
on=["trial_id"],
rsuffix="_total",
)
# Count number of timesteps elapsed since last human INTERACT action
trials["timesteps_since_interact"] = (
trials.groupby(["interact"])["dummy"].cumsum() - 1
)
# Drop temp columns
trials = trials.drop(columns=["interact", "dummy"])
return trials
def _get_data_path(layout, dataset_type, data_path):
if data_path:
return data_path
if dataset_type == "train":
return (
CLEAN_2019_HUMAN_DATA_TRAIN
if layout in LAYOUTS_WITH_DATA_2019
else CLEAN_2020_HUMAN_DATA_TRAIN
)
if dataset_type == "test":
return (
CLEAN_2019_HUMAN_DATA_TEST
if layout in LAYOUTS_WITH_DATA_2019
else CLEAN_2020_HUMAN_DATA_TEST
)
if dataset_type == "all":
return (
CLEAN_2019_HUMAN_DATA_ALL
if layout in LAYOUTS_WITH_DATA_2019
else CLEAN_2020_HUMAN_DATA_ALL
)
##############
# DEPRECATED #
##############
def trial_type_by_unique_id_dict(trial_questions_df):
trial_type_dict = {}
unique_ids = trial_questions_df["workerid"].unique()
for unique_id in unique_ids:
person_data = trial_questions_df[
trial_questions_df["workerid"] == unique_id
]
model_type, player_index = (
person_data["MODEL_TYPE"].iloc[0],
int(person_data["PLAYER_INDEX"].iloc[0]),
)
trial_type_dict[unique_id] = (model_type, player_index)
return trial_type_dict
def add_means_and_stds_from_df(data, main_trials, algo_name):
"""Calculate means and SEs for each layout, and add them to the data dictionary under algo name `algo`"""
layouts = [
"asymmetric_advantages",
"coordination_ring",
"cramped_room",
"random0",
"random3",
]
for layout in layouts:
layout_trials = main_trials[main_trials["layout_name"] == layout]
idx_1_workers = []
idx_0_workers = []
for worker_id in layout_trials["player_0_id"].unique():
if layout_trials[layout_trials["player_0_id"] == worker_id][
"player_0_is_human"
            ].iloc[0]:
idx_0_workers.append(worker_id)
for worker_id in layout_trials["player_1_id"].unique():
if layout_trials[layout_trials["player_1_id"] == worker_id][
"player_1_is_human"
            ].iloc[0]:
idx_1_workers.append(worker_id)
idx_0_trials = layout_trials[
layout_trials["player_0_id"].isin(idx_0_workers)
]
data[layout][algo_name + "_0"] = mean_and_std_err(
idx_0_trials.groupby("player_0_id")["score_total"].mean()
)
idx_1_trials = layout_trials[
layout_trials["plaer_1_id"].isin(idx_1_workers)
]
data[layout][algo_name + "_1"] = mean_and_std_err(
idx_1_trials.groupby("plaer_1_id")["score_total"].mean()
)
def interactive_from_traj_df(df_traj):
python_traj = df_traj_to_python_joint_traj(df_traj)
AgentEvaluator.interactive_from_traj(python_traj, traj_idx=0)
def display_interactive_by_workerid(main_trials, worker_id, limit=None):
print("Displaying main trials for worker", worker_id)
worker_trials = main_trials[
main_trials["player_0_id"]
== worker_id | main_trials["player_1_id"]
== worker_id
]
count = 0
for _, rtrials in worker_trials.groupby(["trial_id"]):
interactive_from_traj_df(rtrials)
count += 1
if limit is not None and count >= limit:
return
def display_interactive_by_layout(main_trials, layout_name, limit=None):
print("Displaying main trials for layout", layout_name)
layout_trials = main_trials[main_trials["layout_name"] == layout_name]
count = 0
for wid, wtrials in layout_trials.groupby("player_0_id"):
print("Worker: ", wid)
for _, rtrials in wtrials.groupby(["trial_id"]):
interactive_from_traj_df(rtrials)
count += 1
if limit is not None and count >= limit:
return
for wid, wtrials in layout_trials.groupby("player_1_id"):
print("Worker: ", wid)
for _, rtrials in wtrials.groupby(["trial_id"]):
interactive_from_traj_df(rtrials)
count += 1
if limit is not None and count >= limit:
return
| 17,347 | 32.555126 | 150 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/human/__init__.py | import os
_curr_directory = os.path.dirname(os.path.abspath(__file__))
| 72 | 17.25 | 60 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/imitation/behavior_cloning_tf2_test.py | import argparse
import os
import pickle
import shutil
import sys
import unittest
import warnings
import numpy as np
import tensorflow as tf
from human_aware_rl.human.process_dataframes import get_trajs_from_data
from human_aware_rl.imitation.behavior_cloning_tf2 import (
BC_SAVE_DIR,
build_bc_model,
evaluate_bc_model,
get_bc_params,
load_bc_model,
save_bc_model,
train_bc_model,
)
from human_aware_rl.static import (
BC_EXPECTED_DATA_PATH,
DUMMY_2019_CLEAN_HUMAN_DATA_PATH,
)
from human_aware_rl.utils import set_global_seed
def _clear_pickle():
with open(BC_EXPECTED_DATA_PATH, "wb") as f:
pickle.dump({}, f)
class TestBCTraining(unittest.TestCase):
"""
Unittests for behavior cloning training and utilities
compute_pickle (bool): Whether the results of this test should be stored as the expected values for future tests
strict (bool): Whether the results of this test should be compared against expected values for exact match
    min_performance (int): Minimum reward achieved in BC-BC rollout after training to consider training successful
Note, this test always performs a basic sanity check to verify some learning is happening, even if the `strict` param is false
"""
def __init__(self, test_name):
super(TestBCTraining, self).__init__(test_name)
self.compute_pickle = False
self.strict = False
self.min_performance = 0
assert not (
self.compute_pickle and self.strict
), "Cannot compute pickle and run strict reproducibility tests at same time"
if self.compute_pickle:
_clear_pickle()
def setUp(self):
set_global_seed(0)
print(
"\nIn Class {}, in Method {}".format(
self.__class__.__name__, self._testMethodName
)
)
# unittest generates a lot of warning msgs due to third-party dependencies (e.g. ray[rllib] using outdated np methods)
# not a problem when directly ran, but when using -m unittest this helps filter out the warnings
warnings.simplefilter("ignore", ResourceWarning)
warnings.simplefilter("ignore", DeprecationWarning)
self.bc_params = get_bc_params(
**{"data_path": DUMMY_2019_CLEAN_HUMAN_DATA_PATH}
)
self.bc_params["mdp_params"]["layout_name"] = "cramped_room"
self.bc_params["training_params"]["epochs"] = 1
self.model_dir = os.path.join(BC_SAVE_DIR, "test_model")
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
processed_trajs, _ = get_trajs_from_data(
**self.bc_params["data_params"], silent=True
)
self.dummy_input = np.vstack(processed_trajs["ep_states"])[:1, :]
self.initial_states = [
np.zeros((1, self.bc_params["cell_size"])),
np.zeros((1, self.bc_params["cell_size"])),
]
with open(BC_EXPECTED_DATA_PATH, "rb") as f:
self.expected = pickle.load(f)
# Disable TF warnings and infos
tf.get_logger().setLevel("ERROR")
def tearDown(self):
if self.compute_pickle:
with open(BC_EXPECTED_DATA_PATH, "wb") as f:
pickle.dump(self.expected, f)
shutil.rmtree(self.model_dir)
def test_model_construction(self):
model = build_bc_model(**self.bc_params)
if self.compute_pickle:
self.expected["test_model_construction"] = model(self.dummy_input)
if self.strict:
self.assertTrue(
np.allclose(
model(self.dummy_input),
self.expected["test_model_construction"],
)
)
def test_save_and_load(self):
model = build_bc_model(**self.bc_params)
save_bc_model(self.model_dir, model, self.bc_params)
loaded_model, loaded_params = load_bc_model(self.model_dir)
self.assertDictEqual(self.bc_params, loaded_params)
self.assertTrue(
np.allclose(
model(self.dummy_input), loaded_model(self.dummy_input)
)
)
def test_training(self):
model = train_bc_model(self.model_dir, self.bc_params)
if self.compute_pickle:
self.expected["test_training"] = model(self.dummy_input)
if self.strict:
self.assertTrue(
np.allclose(
model(self.dummy_input), self.expected["test_training"]
)
)
def test_agent_evaluation(self):
self.bc_params["training_params"]["epochs"] = 20
model = train_bc_model(self.model_dir, self.bc_params)
results = evaluate_bc_model(model, self.bc_params)
# Sanity Check
self.assertGreaterEqual(results, self.min_performance)
if self.compute_pickle:
self.expected["test_agent_evaluation"] = results
if self.strict:
self.assertAlmostEqual(
results, self.expected["test_agent_evaluation"]
)
class TestBCTrainingLSTM(TestBCTraining):
# LSTM tests break on older versions of tensorflow so be careful with this
def test_lstm_construction(self):
self.bc_params["use_lstm"] = True
model = build_bc_model(**self.bc_params)
if self.compute_pickle:
self.expected["test_lstm_construction"] = model(self.dummy_input)
if self.strict:
self.assertTrue(
np.allclose(
model(self.dummy_input),
self.expected["test_lstm_construction"],
)
)
def test_lstm_training(self):
self.bc_params["use_lstm"] = True
model = train_bc_model(self.model_dir, self.bc_params)
if self.compute_pickle:
self.expected["test_lstm_training"] = model(self.dummy_input)
if self.strict:
self.assertTrue(
np.allclose(
model(self.dummy_input),
self.expected["test_lstm_training"],
)
)
def test_lstm_evaluation(self):
self.bc_params["use_lstm"] = True
self.bc_params["training_params"]["epochs"] = 1
model = train_bc_model(self.model_dir, self.bc_params)
results = evaluate_bc_model(model, self.bc_params)
# Sanity Check
self.assertGreaterEqual(results, self.min_performance)
if self.compute_pickle:
self.expected["test_lstm_evaluation"] = results
if self.strict:
self.assertAlmostEqual(
results, self.expected["test_lstm_evaluation"]
)
def test_lstm_save_and_load(self):
self.bc_params["use_lstm"] = True
model = build_bc_model(**self.bc_params)
save_bc_model(self.model_dir, model, self.bc_params)
loaded_model, loaded_params = load_bc_model(self.model_dir)
self.assertDictEqual(self.bc_params, loaded_params)
self.assertTrue(
np.allclose(
self._lstm_forward(model, self.dummy_input)[0],
self._lstm_forward(loaded_model, self.dummy_input)[0],
)
)
def _lstm_forward(self, model, obs_batch, states=None):
obs_batch = np.expand_dims(obs_batch, 1)
seq_lens = np.ones(len(obs_batch))
states_batch = states if states else self.initial_states
model_out = model.predict([obs_batch, seq_lens] + states_batch)
logits, states = model_out[0], model_out[1:]
logits = logits.reshape((logits.shape[0], -1))
return logits, states
if __name__ == "__main__":
unittest.main()
| 7,736 | 33.851351 | 130 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/imitation/reproduce_bc.py | import os
from human_aware_rl.imitation.behavior_cloning_tf2 import (
get_bc_params,
train_bc_model,
)
from human_aware_rl.static import (
CLEAN_2019_HUMAN_DATA_TEST,
CLEAN_2019_HUMAN_DATA_TRAIN,
)
if __name__ == "__main__":
    # random3 is counter_circuit
    # random0 is forced coordination
    # We use these layout names because the cleaned pickled file of human trajectories
    # labels its layouts "random3" and "random0", so the same names are needed to extract
    # the right rows from the dataframe. However, no layouts named random0/random3 exist
    # when loading layouts. Since the same parameter configures both the training layout
    # and the trajectory loading, new layouts were created rather than modifying the dataframes.
for layout in [
"random3",
"coordination_ring",
"cramped_room",
"random0",
"asymmetric_advantages",
]:
current_file_dir = os.path.dirname(os.path.abspath(__file__))
        # this is where the trained BC agent for this layout will be saved
bc_dir = os.path.join(current_file_dir, "bc_runs", "train", layout)
if os.path.isdir(bc_dir):
# if this bc agent has been created, we continue to the next layout
continue
params_to_override = {
"layouts": [layout],
"layout_name": layout,
"data_path": CLEAN_2019_HUMAN_DATA_TRAIN,
"epochs": 100,
"old_dynamics": True,
}
bc_params = get_bc_params(**params_to_override)
train_bc_model(bc_dir, bc_params, True)
| 1,594 | 36.97619 | 161 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/imitation/__init__.py | 0 | 0 | 0 | py |
|
overcooked_ai | overcooked_ai-master/src/human_aware_rl/imitation/behavior_cloning_tf2.py | import copy
import os
import pickle
import numpy as np
import tensorflow as tf
from ray.rllib.policy import Policy as RllibPolicy
from tensorflow import keras
from tensorflow.compat.v1.keras.backend import get_session, set_session
from human_aware_rl.data_dir import DATA_DIR
from human_aware_rl.human.process_dataframes import (
get_human_human_trajectories,
get_trajs_from_data,
)
from human_aware_rl.rllib.rllib import (
RlLibAgent,
evaluate,
get_base_ae,
softmax,
)
from human_aware_rl.static import CLEAN_2019_HUMAN_DATA_TRAIN
from human_aware_rl.utils import get_flattened_keys, recursive_dict_update
from overcooked_ai_py.mdp.actions import Action
from overcooked_ai_py.mdp.overcooked_env import DEFAULT_ENV_PARAMS
#################
# Configuration #
#################
BC_SAVE_DIR = os.path.join(DATA_DIR, "bc_runs")
DEFAULT_DATA_PARAMS = {
"layouts": ["cramped_room"],
"check_trajectories": False,
"featurize_states": True,
"data_path": CLEAN_2019_HUMAN_DATA_TRAIN,
}
DEFAULT_MLP_PARAMS = {
# Number of fully connected layers to use in our network
"num_layers": 2,
# Each int represents a layer of that hidden size
"net_arch": [64, 64],
}
DEFAULT_TRAINING_PARAMS = {
"epochs": 100,
"validation_split": 0.15,
"batch_size": 64,
"learning_rate": 1e-3,
"use_class_weights": False,
}
DEFAULT_EVALUATION_PARAMS = {
"ep_length": 400,
"num_games": 1,
"display": False,
}
DEFAULT_BC_PARAMS = {
"eager": True,
"use_lstm": False,
"cell_size": 256,
"data_params": DEFAULT_DATA_PARAMS,
"mdp_params": {"layout_name": "cramped_room", "old_dynamics": False},
"env_params": DEFAULT_ENV_PARAMS,
"mdp_fn_params": {},
"mlp_params": DEFAULT_MLP_PARAMS,
"training_params": DEFAULT_TRAINING_PARAMS,
"evaluation_params": DEFAULT_EVALUATION_PARAMS,
"action_shape": (len(Action.ALL_ACTIONS),),
}
# Boolean indicating whether all param dependencies have been loaded. Used to prevent re-loading unnecessarily
_params_initalized = False
def _get_base_ae(bc_params):
return get_base_ae(bc_params["mdp_params"], bc_params["env_params"])
def _get_observation_shape(bc_params):
"""
Helper function for creating a dummy environment from "mdp_params" and "env_params" specified
in bc_params and returning the shape of the observation space
"""
base_ae = _get_base_ae(bc_params)
base_env = base_ae.env
dummy_state = base_env.mdp.get_standard_start_state()
obs_shape = base_env.featurize_state_mdp(dummy_state)[0].shape
return obs_shape
# For lazily loading the default params. Prevents loading on every import of this module
def get_bc_params(**args_to_override):
"""
Loads default bc params defined globally. For each key in args_to_override, overrides the default with the
value specified for that key. Recursively checks all children. If key not found, creates new top level parameter.
    Note: Even though children can share keys, for simplicity, we enforce the condition that all keys at all levels must be distinct
"""
global _params_initalized, DEFAULT_BC_PARAMS
if not _params_initalized:
DEFAULT_BC_PARAMS["observation_shape"] = _get_observation_shape(
DEFAULT_BC_PARAMS
)
        _params_initalized = True
params = copy.deepcopy(DEFAULT_BC_PARAMS)
for arg, val in args_to_override.items():
updated = recursive_dict_update(params, arg, val)
if not updated:
print(
"WARNING, no value for specified bc argument {} found in schema. Adding as top level parameter".format(
arg
)
)
all_keys = get_flattened_keys(params)
if len(all_keys) != len(set(all_keys)):
raise ValueError(
"Every key at every level must be distict for BC params!"
)
return params
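# A rough usage sketch (the override values below are illustrative, not defaults):
# nested keys are matched by name, e.g. "epochs" lives under "training_params" and
# "layouts" under "data_params".
#   bc_params = get_bc_params(layouts=["cramped_room"], epochs=20)
#   bc_params["training_params"]["epochs"]  # -> 20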
##############
# Model code #
##############
class LstmStateResetCallback(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
self.model.reset_states()
def _pad(sequences, maxlen=None, default=0):
if not maxlen:
maxlen = max([len(seq) for seq in sequences])
for seq in sequences:
pad_len = maxlen - len(seq)
seq.extend([default] * pad_len)
return sequences
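# Illustrative behavior of _pad (sequences are extended in place and also returned):
#   _pad([[1, 2], [3]])            # -> [[1, 2], [3, 0]]
#   _pad([[1], [2]], maxlen=3)     # -> [[1, 0, 0], [2, 0, 0]]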
def load_data(bc_params, verbose=False):
processed_trajs = get_human_human_trajectories(
**bc_params["data_params"], silent=not verbose
)
inputs, targets = (
processed_trajs["ep_states"],
processed_trajs["ep_actions"],
)
if bc_params["use_lstm"]:
seq_lens = np.array([len(seq) for seq in inputs])
        seq_padded = _pad(
            inputs,
            default=np.zeros(len(inputs[0][0])),
        )
targets_padded = _pad(targets, default=np.zeros(1))
seq_t = np.dstack(seq_padded).transpose((2, 0, 1))
targets_t = np.dstack(targets_padded).transpose((2, 0, 1))
return seq_t, seq_lens, targets_t
else:
return np.vstack(inputs), None, np.vstack(targets)
def build_bc_model(use_lstm=True, eager=False, **kwargs):
if not eager:
tf.compat.v1.disable_eager_execution()
if use_lstm:
return _build_lstm_model(**kwargs)
else:
return _build_model(**kwargs)
def train_bc_model(model_dir, bc_params, verbose=False):
inputs, seq_lens, targets = load_data(bc_params, verbose)
training_params = bc_params["training_params"]
if training_params["use_class_weights"]:
# Get class counts, and use these to compute balanced class weights
classes, counts = np.unique(targets.flatten(), return_counts=True)
weights = sum(counts) / counts
class_weights = dict(zip(classes, weights))
else:
# Default is uniform class weights
class_weights = None
# Retrieve un-initialized keras model
model = build_bc_model(
**bc_params, max_seq_len=np.max(seq_lens), verbose=verbose
)
# Initialize the model
    # Note: have to use lists for multi-output model support and not dicts because of tensorflow 2.0.0 bug
if bc_params["use_lstm"]:
loss = [
keras.losses.SparseCategoricalCrossentropy(from_logits=True),
None,
None,
]
metrics = [["sparse_categorical_accuracy"], [], []]
else:
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metrics = ["sparse_categorical_accuracy"]
model.compile(
optimizer=keras.optimizers.Adam(training_params["learning_rate"]),
loss=loss,
metrics=metrics,
)
# Customize our training loop with callbacks
callbacks = [
# Early terminate training if loss doesn't improve for "patience" epochs
keras.callbacks.EarlyStopping(monitor="loss", patience=20),
# Reduce lr by "factor" after "patience" epochs of no improvement in loss
keras.callbacks.ReduceLROnPlateau(
monitor="loss", patience=3, factor=0.1
),
# Log all metrics model was compiled with to tensorboard every epoch
keras.callbacks.TensorBoard(
log_dir=os.path.join(model_dir, "logs"), write_graph=False
),
# Save checkpoints of the models at the end of every epoch (saving only the best one so far)
keras.callbacks.ModelCheckpoint(
filepath=os.path.join(model_dir, "checkpoints"),
monitor="loss",
save_best_only=True,
),
]
## Actually train our model
# Create input dict for both models
N = inputs.shape[0]
inputs = {"Overcooked_observation": inputs}
targets = {"logits": targets}
# Inputs unique to lstm model
if bc_params["use_lstm"]:
inputs["seq_in"] = seq_lens
inputs["hidden_in"] = np.zeros((N, bc_params["cell_size"]))
inputs["memory_in"] = np.zeros((N, bc_params["cell_size"]))
# Batch size doesn't include time dimension (seq_len) so it should be smaller for rnn model
batch_size = 1 if bc_params["use_lstm"] else training_params["batch_size"]
model.fit(
inputs,
targets,
callbacks=callbacks,
batch_size=batch_size,
epochs=training_params["epochs"],
validation_split=training_params["validation_split"],
class_weight=class_weights,
verbose=2 if verbose else 0,
)
# Save the model
save_bc_model(model_dir, model, bc_params, verbose=verbose)
return model
def save_bc_model(model_dir, model, bc_params, verbose=False):
"""
Saves the specified model under the directory model_dir. This creates three items
assets/ stores information essential to reconstructing the context and tf graph
variables/ stores the model's trainable weights
        saved_model.pb the saved state of the model object
Additionally, saves a pickled dictionary containing all the parameters used to construct this model
at model_dir/metadata.pickle
"""
if verbose:
print("Saving bc model at ", model_dir)
model.save(model_dir, save_format="tf")
with open(os.path.join(model_dir, "metadata.pickle"), "wb") as f:
pickle.dump(bc_params, f)
def load_bc_model(model_dir, verbose=False):
"""
Returns the model instance (including all compilation data like optimizer state) and a dictionary of parameters
used to create the model
"""
if verbose:
print("Loading bc model from ", model_dir)
model = keras.models.load_model(model_dir, custom_objects={"tf": tf})
with open(os.path.join(model_dir, "metadata.pickle"), "rb") as f:
bc_params = pickle.load(f)
return model, bc_params
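# A minimal save/load round-trip sketch (the directory path is illustrative):
#   save_bc_model("bc_runs/my_model", model, bc_params)
#   model, bc_params = load_bc_model("bc_runs/my_model")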
def evaluate_bc_model(model, bc_params, verbose=False):
"""
Creates an AgentPair object containing two instances of BC Agents, whose policies are specified by `model`. Runs
a rollout using AgentEvaluator class in an environment specified by bc_params
Arguments
- model (tf.keras.Model) A function that maps featurized overcooked states to action logits
    - bc_params (dict) Specifies the environment in which to evaluate the agent (i.e. layout, reward_shaping_param)
as well as the configuration for the rollout (rollout_length)
Returns
- reward (int) Total sparse reward achieved by AgentPair during rollout
"""
evaluation_params = bc_params["evaluation_params"]
mdp_params = bc_params["mdp_params"]
# Get reference to state encoding function used by bc agents, with compatible signature
base_ae = _get_base_ae(bc_params)
base_env = base_ae.env
def featurize_fn(state):
return base_env.featurize_state_mdp(state)
# Wrap Keras models in rllib policies
agent_0_policy = BehaviorCloningPolicy.from_model(
model, bc_params, stochastic=True
)
agent_1_policy = BehaviorCloningPolicy.from_model(
model, bc_params, stochastic=True
)
# Compute the results of the rollout(s)
results = evaluate(
eval_params=evaluation_params,
mdp_params=mdp_params,
outer_shape=None,
agent_0_policy=agent_0_policy,
agent_1_policy=agent_1_policy,
agent_0_featurize_fn=featurize_fn,
agent_1_featurize_fn=featurize_fn,
verbose=verbose,
)
# Compute the average sparse return obtained in each rollout
reward = np.mean(results["ep_returns"])
return reward
def _build_model(observation_shape, action_shape, mlp_params, **kwargs):
## Inputs
inputs = keras.Input(
shape=observation_shape, name="Overcooked_observation"
)
x = inputs
## Build fully connected layers
assert (
len(mlp_params["net_arch"]) == mlp_params["num_layers"]
), "Invalid Fully Connected params"
for i in range(mlp_params["num_layers"]):
units = mlp_params["net_arch"][i]
x = keras.layers.Dense(
units, activation="relu", name="fc_{0}".format(i)
)(x)
## output layer
logits = keras.layers.Dense(action_shape[0], name="logits")(x)
return keras.Model(inputs=inputs, outputs=logits)
def _build_lstm_model(
observation_shape,
action_shape,
mlp_params,
cell_size,
max_seq_len=20,
**kwargs
):
## Inputs
obs_in = keras.Input(
shape=(None, *observation_shape), name="Overcooked_observation"
)
seq_in = keras.Input(shape=(), name="seq_in", dtype=tf.int32)
h_in = keras.Input(shape=(cell_size,), name="hidden_in")
c_in = keras.Input(shape=(cell_size,), name="memory_in")
x = obs_in
## Build fully connected layers
assert (
len(mlp_params["net_arch"]) == mlp_params["num_layers"]
), "Invalid Fully Connected params"
for i in range(mlp_params["num_layers"]):
units = mlp_params["net_arch"][i]
x = keras.layers.TimeDistributed(
keras.layers.Dense(
units, activation="relu", name="fc_{0}".format(i)
)
)(x)
mask = keras.layers.Lambda(
lambda x: tf.sequence_mask(x, maxlen=max_seq_len)
)(seq_in)
## LSTM layer
lstm_out, h_out, c_out = keras.layers.LSTM(
cell_size,
return_sequences=True,
return_state=True,
stateful=False,
name="lstm",
)(inputs=x, mask=mask, initial_state=[h_in, c_in])
## output layer
logits = keras.layers.TimeDistributed(
keras.layers.Dense(action_shape[0]), name="logits"
)(lstm_out)
return keras.Model(
inputs=[obs_in, seq_in, h_in, c_in], outputs=[logits, h_out, c_out]
)
################
# Rllib Policy #
################
class NullContextManager:
"""
No-op context manager that does nothing
"""
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
class TfContextManager:
"""
Properly sets the execution graph and session of the keras backend given a "session" object as input
Used for isolating tf execution in graph mode. Do not use with eager models or with eager mode on
"""
def __init__(self, session):
self.session = session
def __enter__(self):
self.ctx = self.session.graph.as_default()
self.ctx.__enter__()
set_session(self.session)
def __exit__(self, *args):
self.ctx.__exit__(*args)
class BehaviorCloningPolicy(RllibPolicy):
def __init__(self, observation_space, action_space, config):
"""
RLLib compatible constructor for initializing a behavior cloning model
observation_space (gym.Space|tuple) Shape of the featurized observations
        action_space (gym.Space|tuple) Shape of the action space (len(Action.ALL_ACTIONS),)
        config (dict) Dictionary of relevant bc params
- model_dir (str) Path to pickled keras.Model used to map observations to action logits
- stochastic (bool) Whether action should return logit argmax or sample over distribution
- bc_model (keras.Model) Pointer to loaded policy model. Overrides model_dir
- bc_params (dict) Dictionary of parameters used to train model. Required if "model" is present
- eager (bool) Whether the model should run in eager (or graph) mode. Overrides bc_params['eager'] if present
"""
super(BehaviorCloningPolicy, self).__init__(
observation_space, action_space, config
)
if "bc_model" in config and config["bc_model"]:
assert (
"bc_params" in config
), "must specify params in addition to model"
assert issubclass(
type(config["bc_model"]), keras.Model
), "model must be of type keras.Model"
model, bc_params = config["bc_model"], config["bc_params"]
else:
assert (
"model_dir" in config
), "must specify model directory if model not specified"
model, bc_params = load_bc_model(config["model_dir"])
# Save the session that the model was loaded into so it is available at inference time if necessary
self._sess = get_session()
self._setup_shapes()
# Basic check to make sure model dimensions match
assert self.observation_shape == bc_params["observation_shape"]
assert self.action_shape == bc_params["action_shape"]
self.model = model
self.stochastic = config["stochastic"]
self.use_lstm = bc_params["use_lstm"]
self.cell_size = bc_params["cell_size"]
self.eager = (
config["eager"] if "eager" in config else bc_params["eager"]
)
self.context = self._create_execution_context()
def _setup_shapes(self):
# This is here to make the class compatible with both tuples or gym.Space objs for the spaces
# Note: action_space = (len(Action.ALL_ACTIONS,)) is technically NOT the action space shape, which would be () since actions are scalars
self.observation_shape = (
self.observation_space
if type(self.observation_space) == tuple
else self.observation_space.shape
)
self.action_shape = (
self.action_space
if type(self.action_space) == tuple
else (self.action_space.n,)
)
@classmethod
def from_model_dir(cls, model_dir, stochastic=True):
model, bc_params = load_bc_model(model_dir)
config = {
"bc_model": model,
"bc_params": bc_params,
"stochastic": stochastic,
}
return cls(
bc_params["observation_shape"], bc_params["action_shape"], config
)
@classmethod
def from_model(cls, model, bc_params, stochastic=True):
config = {
"bc_model": model,
"bc_params": bc_params,
"stochastic": stochastic,
}
return cls(
bc_params["observation_shape"], bc_params["action_shape"], config
)
def compute_actions(
self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
info_batch=None,
episodes=None,
**kwargs
):
"""
Computes sampled actions for each of the corresponding OvercookedEnv states in obs_batch
Args:
            obs_batch (np.array): batch of pre-processed (lossless state encoded) observations
Returns:
actions (list|np.array): batch of output actions shape [BATCH_SIZE, ACTION_SHAPE]
state_outs (list): only necessary for rnn hidden states
infos (dict): dictionary of extra feature batches { "action_dist_inputs" : [BATCH_SIZE, ...] }
"""
# Cast to np.array if list (no-op if already np.array)
obs_batch = np.array(obs_batch)
# Run the model
with self.context:
action_logits, states = self._forward(obs_batch, state_batches)
# Softmax in numpy to convert logits to probabilities
action_probs = softmax(action_logits)
if self.stochastic:
# Sample according to action_probs for each row in the output
actions = np.array(
[
np.random.choice(self.action_shape[0], p=action_probs[i])
for i in range(len(action_probs))
]
)
else:
actions = np.argmax(action_logits, axis=1)
return actions, states, {"action_dist_inputs": action_logits}
def get_initial_state(self):
"""
Returns the initial hidden and memory states for the model if it is recursive
Note, this shadows the rllib.Model.get_initial_state function, but had to be added here as
keras does not allow mixins in custom model classes
Also note, either this function or self.model.get_initial_state (if it exists) must be called at
start of an episode
"""
if self.use_lstm:
return [
np.zeros(
self.cell_size,
),
np.zeros(
self.cell_size,
),
]
return []
def get_weights(self):
"""
No-op to keep rllib from breaking, won't be necessary in future rllib releases
"""
pass
def set_weights(self, weights):
"""
No-op to keep rllib from breaking
"""
pass
def learn_on_batch(self, samples):
"""
Static policy requires no learning
"""
return {}
def _forward(self, obs_batch, state_batches):
if self.use_lstm:
obs_batch = np.expand_dims(obs_batch, 1)
seq_lens = np.ones(len(obs_batch))
model_out = self.model.predict(
[obs_batch, seq_lens] + state_batches
)
logits, states = model_out[0], model_out[1:]
logits = logits.reshape((logits.shape[0], -1))
return logits, states
else:
return self.model.predict(obs_batch, verbose=0), []
def _create_execution_context(self):
"""
Creates a private execution context for the model
Necessary if using with rllib in order to isolate this policy model from others
"""
if self.eager:
return NullContextManager()
return TfContextManager(self._sess)
if __name__ == "__main__":
params = get_bc_params()
model = train_bc_model(
os.path.join(BC_SAVE_DIR, "default"), params, verbose=True
)
# Evaluate our model's performance in a rollout
evaluate_bc_model(model, params)
| 22,092 | 31.925484 | 144 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/rllib/tests.py | import copy
import unittest
from math import isclose
import numpy as np
from human_aware_rl.rllib.rllib import OvercookedMultiAgent
from human_aware_rl.rllib.utils import (
get_required_arguments,
iterable_equal,
softmax,
)
class RllibEnvTest(unittest.TestCase):
def setUp(self):
print(
"\nIn Class {}, in Method {}".format(
self.__class__.__name__, self._testMethodName
)
)
self.params = copy.deepcopy(OvercookedMultiAgent.DEFAULT_CONFIG)
self.timesteps = [0, 10, 100, 500, 1000, 1500, 2000, 2500]
def tearDown(self):
pass
def _assert_lists_almost_equal(self, first, second, places=7):
for a, b in zip(first, second):
self.assertAlmostEqual(a, b, places=places)
def _test_bc_schedule(self, bc_schedule, expected_bc_factors):
self.params["multi_agent_params"]["bc_schedule"] = bc_schedule
env = OvercookedMultiAgent.from_config(self.params)
actual_bc_factors = []
for t in self.timesteps:
env.anneal_bc_factor(t)
actual_bc_factors.append(env.bc_factor)
self._assert_lists_almost_equal(expected_bc_factors, actual_bc_factors)
def _test_bc_creation_proportion(self, env, factor, trials=10000):
env.bc_factor = factor
tot_bc = 0
for _ in range(trials):
env.reset(regen_mdp=False)
num_bc = sum(
map(lambda agent: int(agent.startswith("bc")), env.curr_agents)
)
self.assertLessEqual(num_bc, 1)
tot_bc += num_bc
actual_factor = tot_bc / trials
self.assertAlmostEqual(actual_factor, factor, places=1)
def test_env_creation(self):
# Valid creation
env = OvercookedMultiAgent.from_config(self.params)
for param, expected in self.params["multi_agent_params"].items():
self.assertEqual(expected, getattr(env, param))
# Invalid bc_schedules
invalid_schedules = [
[(-1, 0.0), (1.0, 1e5)],
[(0.0, 0.0), (10, 1), (5, 0.5)],
[(0, 0), (5, 1), (10, 1.5)],
]
for sched in invalid_schedules:
self.params["multi_agent_params"]["bc_schedule"] = sched
self.assertRaises(
AssertionError, OvercookedMultiAgent.from_config, self.params
)
def test_reward_shaping_annealing(self):
self.params["multi_agent_params"]["reward_shaping_factor"] = 1
self.params["multi_agent_params"]["reward_shaping_horizon"] = 1e3
expected_rew_factors = [
1,
990 / 1e3,
900 / 1e3,
500 / 1e3,
0.0,
0.0,
0.0,
0.0,
]
actual_rew_factors = []
env = OvercookedMultiAgent.from_config(self.params)
for t in self.timesteps:
env.anneal_reward_shaping_factor(t)
actual_rew_factors.append(env.reward_shaping_factor)
self._assert_lists_almost_equal(
expected_rew_factors, actual_rew_factors
)
def test_bc_annealing(self):
# Test no annealing
self._test_bc_schedule(
OvercookedMultiAgent.self_play_bc_schedule,
[0.0] * len(self.timesteps),
)
# Test annealing
anneal_bc_schedule = [(0, 0.0), (1e3, 1.0), (2e3, 0.0)]
expected_bc_factors = [
0.0,
10 / 1e3,
100 / 1e3,
500 / 1e3,
1.0,
500 / 1e3,
0.0,
0.0,
]
self._test_bc_schedule(anneal_bc_schedule, expected_bc_factors)
def test_agent_creation(self):
env = OvercookedMultiAgent.from_config(self.params)
obs = env.reset()
# Check that we have the right number of agents with valid names
self.assertEqual(len(env.curr_agents), 2)
self.assertListEqual(list(obs.keys()), env.curr_agents)
# Ensure that bc agents are created 'factor' percentage of the time
bc_factors = [0.0, 0.1, 0.5, 0.9, 1.0]
for factor in bc_factors:
self._test_bc_creation_proportion(env, factor)
class RllibUtilsTest(unittest.TestCase):
def setUp(self):
print(
"\nIn Class {}, in Method {}".format(
self.__class__.__name__, self._testMethodName
)
)
pass
def tearDown(self):
pass
def test_softmax(self):
logits = np.array(
[
[0.1, 0.1, 0.1],
[-0.1, 0.0, 0.1],
[0.5, -1.2, 3.2],
[-1.6, -2.0, -1.5],
]
)
expected = np.array(
[
[0.33333333, 0.33333333, 0.33333333],
[0.30060961, 0.33222499, 0.3671654],
[0.06225714, 0.01137335, 0.92636951],
[0.36029662, 0.24151404, 0.39818934],
]
)
actual = softmax(logits)
self.assertTrue(np.allclose(expected, actual))
def test_iterable_equal(self):
a = [(1,), (1, 2)]
b = ([1], [1, 2])
self.assertTrue(iterable_equal(a, b))
a = [(1, 2), (1)]
b = [(1,), (1, 2)]
self.assertFalse(iterable_equal(a, b))
def test_get_required_arguments(self):
def foo1(a):
pass
def foo2(a, b):
pass
def foo3(a, b, c):
pass
def foo4(a, b, c="bar"):
pass
def foo5(a, b="bar", d="baz", **kwargs):
pass
fns = [foo1, foo2, foo3, foo4, foo5]
expected = [1, 2, 3, 2, 1]
for fn, expected in zip(fns, expected):
self.assertEqual(expected, len(get_required_arguments(fn)))
if __name__ == "__main__":
unittest.main()
| 5,864 | 27.609756 | 79 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/rllib/utils.py | import inspect
import numpy as np
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
def softmax(logits):
e_x = np.exp(logits.T - np.max(logits))
return (e_x / np.sum(e_x, axis=0)).T
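# Rough sanity check (illustrative): softmax is applied row-wise, so each row of the
# output sums to 1, e.g.
#   softmax(np.array([[0.1, 0.1, 0.1]]))  # -> approximately [[0.333, 0.333, 0.333]]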
def get_base_env(
mdp_params, env_params, outer_shape=None, mdp_params_schedule_fn=None
):
ae = get_base_ae(
mdp_params, env_params, outer_shape, mdp_params_schedule_fn
)
return ae.env
def get_base_mlam(
mdp_params, env_params, outer_shape=None, mdp_params_schedule_fn=None
):
ae = get_base_ae(
mdp_params, env_params, outer_shape, mdp_params_schedule_fn
)
return ae.mlam
def get_base_ae(
mdp_params, env_params, outer_shape=None, mdp_params_schedule_fn=None
):
"""
    mdp_params: one set of fixed mdp parameters used by the environment
env_params: env parameters (horizon, etc)
outer_shape: outer shape of the environment
mdp_params_schedule_fn: the schedule for varying mdp params
return: the base agent evaluator
"""
    assert (
        mdp_params is None or mdp_params_schedule_fn is None
    ), "at most one of mdp_params and mdp_params_schedule_fn can be specified"
if type(mdp_params) == dict and "layout_name" in mdp_params:
ae = AgentEvaluator.from_layout_name(
mdp_params=mdp_params, env_params=env_params
)
elif "num_mdp" in env_params:
if np.isinf(env_params["num_mdp"]):
ae = AgentEvaluator.from_mdp_params_infinite(
mdp_params=mdp_params,
env_params=env_params,
outer_shape=outer_shape,
mdp_params_schedule_fn=mdp_params_schedule_fn,
)
else:
ae = AgentEvaluator.from_mdp_params_finite(
mdp_params=mdp_params,
env_params=env_params,
outer_shape=outer_shape,
mdp_params_schedule_fn=mdp_params_schedule_fn,
)
else:
# should not reach this case
raise NotImplementedError()
return ae
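# Illustrative call for a fixed, named layout (parameter values are examples):
#   ae = get_base_ae({"layout_name": "cramped_room"}, {"horizon": 400})
#   env = ae.env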
# Returns the required arguments as inspect.Parameter objects in a list
def get_required_arguments(fn):
required = []
params = inspect.signature(fn).parameters.values()
for param in params:
if (
param.default == inspect.Parameter.empty
and param.kind == param.POSITIONAL_OR_KEYWORD
):
required.append(param)
return required
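# Illustrative counts (arguments with defaults and **kwargs are not required):
#   def foo(a, b, c="bar"): ...        # -> 2 required arguments
#   def bar(a, b="x", **kwargs): ...   # -> 1 required argument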
def iterable_equal(a, b):
if hasattr(a, "__iter__") != hasattr(b, "__iter__"):
return False
if not hasattr(a, "__iter__"):
return a == b
if len(a) != len(b):
return False
for elem_a, elem_b in zip(a, b):
if not iterable_equal(elem_a, elem_b):
return False
return True
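# Illustrative behavior: container types are ignored, only nesting structure and values matter.
#   iterable_equal([(1,), (1, 2)], ([1], [1, 2]))  # -> True
#   iterable_equal([(1, 2), 1], [(1,), (1, 2)])    # -> False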
| 2,730 | 27.154639 | 73 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/rllib/__init__.py | 0 | 0 | 0 | py |
|
overcooked_ai | overcooked_ai-master/src/human_aware_rl/rllib/rllib.py | import copy
import logging
import os
import random
import tempfile
from datetime import datetime
import dill
import gym
import numpy as np
import ray
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.algorithms.callbacks import DefaultCallbacks
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.models import ModelCatalog
from ray.tune.logger import UnifiedLogger
from ray.tune.registry import register_env
from ray.tune.result import DEFAULT_RESULTS_DIR
from human_aware_rl.rllib.utils import (
get_base_ae,
get_required_arguments,
iterable_equal,
softmax,
)
from overcooked_ai_py.agents.agent import Agent, AgentPair
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
from overcooked_ai_py.mdp.actions import Action
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.mdp.overcooked_mdp import (
EVENT_TYPES,
OvercookedGridworld,
)
action_space = gym.spaces.Discrete(len(Action.ALL_ACTIONS))
obs_space = gym.spaces.Discrete(len(Action.ALL_ACTIONS))
timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
class RlLibAgent(Agent):
"""
Class for wrapping a trained RLLib Policy object into an Overcooked compatible Agent
"""
def __init__(self, policy, agent_index, featurize_fn):
self.policy = policy
self.agent_index = agent_index
self.featurize = featurize_fn
def reset(self):
# Get initial rnn states and add batch dimension to each
if hasattr(self.policy.model, "get_initial_state"):
self.rnn_state = [
np.expand_dims(state, axis=0)
for state in self.policy.model.get_initial_state()
]
elif hasattr(self.policy, "get_initial_state"):
self.rnn_state = [
np.expand_dims(state, axis=0)
for state in self.policy.get_initial_state()
]
else:
self.rnn_state = []
def action_probabilities(self, state):
"""
Arguments:
- state (Overcooked_mdp.OvercookedState) object encoding the global view of the environment
returns:
- Normalized action probabilities determined by self.policy
"""
# Preprocess the environment state
obs = self.featurize(state, debug=False)
my_obs = obs[self.agent_index]
# Compute non-normalized log probabilities from the underlying model
logits = self.policy.compute_actions(
np.array([my_obs]), self.rnn_state
)[2]["action_dist_inputs"]
# Softmax in numpy to convert logits to normalized probabilities
return softmax(logits)
def action(self, state):
"""
Arguments:
- state (Overcooked_mdp.OvercookedState) object encoding the global view of the environment
returns:
- the argmax action for a single observation state
- action_info (dict) that stores action probabilities under 'action_probs' key
"""
# Preprocess the environment state
obs = self.featurize(state)
my_obs = obs[self.agent_index]
# Use Rllib.Policy class to compute action argmax and action probabilities
# The first value is action_idx, which we will recompute below so the results are stochastic
_, rnn_state, info = self.policy.compute_actions(
np.array([my_obs]), self.rnn_state
)
# Softmax in numpy to convert logits to normalized probabilities
logits = info["action_dist_inputs"]
action_probabilities = softmax(logits)
# The original design is stochastic across different games,
# Though if we are reloading from a checkpoint it would inherit the seed at that point, producing deterministic results
[action_idx] = random.choices(
list(range(Action.NUM_ACTIONS)), action_probabilities[0]
)
agent_action = Action.INDEX_TO_ACTION[action_idx]
agent_action_info = {"action_probs": action_probabilities}
self.rnn_state = rnn_state
return agent_action, agent_action_info
class OvercookedMultiAgent(MultiAgentEnv):
"""
Class used to wrap OvercookedEnv in an Rllib compatible multi-agent environment
"""
# List of all agent types currently supported
supported_agents = ["ppo", "bc"]
# Default bc_schedule, includes no bc agent at any time
bc_schedule = self_play_bc_schedule = [(0, 0), (float("inf"), 0)]
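    # An illustrative annealing schedule (example values, not a default): bc_factor
    # ramps from 0 to 1 over the first 1e3 timesteps, then back down to 0 by 2e3:
    #   [(0, 0.0), (1e3, 1.0), (2e3, 0.0)]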
# Default environment params used for creation
DEFAULT_CONFIG = {
# To be passed into OvercookedGridWorld constructor
"mdp_params": {
"layout_name": "cramped_room",
"rew_shaping_params": {},
},
# To be passed into OvercookedEnv constructor
"env_params": {"horizon": 400},
# To be passed into OvercookedMultiAgent constructor
"multi_agent_params": {
"reward_shaping_factor": 0.0,
"reward_shaping_horizon": 0,
"bc_schedule": self_play_bc_schedule,
"use_phi": True,
},
}
def __init__(
self,
base_env,
reward_shaping_factor=0.0,
reward_shaping_horizon=0,
bc_schedule=None,
use_phi=True,
):
"""
base_env: OvercookedEnv
reward_shaping_factor (float): Coefficient multiplied by dense reward before adding to sparse reward to determine shaped reward
reward_shaping_horizon (int): Timestep by which the reward_shaping_factor reaches zero through linear annealing
bc_schedule (list[tuple]): List of (t_i, v_i) pairs where v_i represents the value of bc_factor at timestep t_i
with linear interpolation in between the t_i
use_phi (bool): Whether to use 'shaped_r_by_agent' or 'phi_s_prime' - 'phi_s' to determine dense reward
"""
if bc_schedule:
self.bc_schedule = bc_schedule
self._validate_schedule(self.bc_schedule)
self.base_env = base_env
# since we are not passing featurize_fn in as an argument, we create it here and check its validity
self.featurize_fn_map = {
"ppo": lambda state: self.base_env.lossless_state_encoding_mdp(
state
),
"bc": lambda state: self.base_env.featurize_state_mdp(state),
}
self._validate_featurize_fns(self.featurize_fn_map)
self._initial_reward_shaping_factor = reward_shaping_factor
self.reward_shaping_factor = reward_shaping_factor
self.reward_shaping_horizon = reward_shaping_horizon
self.use_phi = use_phi
self.anneal_bc_factor(0)
self._agent_ids = set(self.reset().keys())
# fixes deprecation warnings
self._spaces_in_preferred_format = True
def _validate_featurize_fns(self, mapping):
assert "ppo" in mapping, "At least one ppo agent must be specified"
for k, v in mapping.items():
assert (
k in self.supported_agents
), "Unsuported agent type in featurize mapping {0}".format(k)
assert callable(v), "Featurize_fn values must be functions"
assert (
len(get_required_arguments(v)) == 1
), "Featurize_fn value must accept exactly one argument"
def _validate_schedule(self, schedule):
timesteps = [p[0] for p in schedule]
values = [p[1] for p in schedule]
assert (
len(schedule) >= 2
), "Need at least 2 points to linearly interpolate schedule"
assert schedule[0][0] == 0, "Schedule must start at timestep 0"
assert all(
[t >= 0 for t in timesteps]
), "All timesteps in schedule must be non-negative"
assert all(
[v >= 0 and v <= 1 for v in values]
), "All values in schedule must be between 0 and 1"
assert (
sorted(timesteps) == timesteps
), "Timesteps must be in increasing order in schedule"
# To ensure we flatline after passing last timestep
if schedule[-1][0] < float("inf"):
schedule.append((float("inf"), schedule[-1][1]))
def _setup_action_space(self, agents):
action_sp = {}
for agent in agents:
action_sp[agent] = gym.spaces.Discrete(len(Action.ALL_ACTIONS))
self.action_space = gym.spaces.Dict(action_sp)
self.shared_action_space = gym.spaces.Discrete(len(Action.ALL_ACTIONS))
def _setup_observation_space(self, agents):
dummy_state = self.base_env.mdp.get_standard_start_state()
# ppo observation
featurize_fn_ppo = (
lambda state: self.base_env.lossless_state_encoding_mdp(state)
)
obs_shape = featurize_fn_ppo(dummy_state)[0].shape
high = np.ones(obs_shape) * float("inf")
low = np.ones(obs_shape) * 0
self.ppo_observation_space = gym.spaces.Box(
np.float32(low), np.float32(high), dtype=np.float32
)
# bc observation
featurize_fn_bc = lambda state: self.base_env.featurize_state_mdp(
state
)
obs_shape = featurize_fn_bc(dummy_state)[0].shape
high = np.ones(obs_shape) * 100
low = np.ones(obs_shape) * -100
self.bc_observation_space = gym.spaces.Box(
np.float32(low), np.float32(high), dtype=np.float32
)
        # hardcode mapping between observation space and agent
ob_space = {}
for agent in agents:
if agent.startswith("ppo"):
ob_space[agent] = self.ppo_observation_space
else:
ob_space[agent] = self.bc_observation_space
self.observation_space = gym.spaces.Dict(ob_space)
def _get_featurize_fn(self, agent_id):
if agent_id.startswith("ppo"):
return lambda state: self.base_env.lossless_state_encoding_mdp(
state
)
if agent_id.startswith("bc"):
return lambda state: self.base_env.featurize_state_mdp(state)
raise ValueError("Unsupported agent type {0}".format(agent_id))
def _get_obs(self, state):
ob_p0 = self._get_featurize_fn(self.curr_agents[0])(state)[0]
ob_p1 = self._get_featurize_fn(self.curr_agents[1])(state)[1]
return ob_p0.astype(np.float32), ob_p1.astype(np.float32)
def _populate_agents(self):
# Always include at least one ppo agent (i.e. bc_sp not supported for simplicity)
agents = ["ppo"]
# Coin flip to determine whether other agent should be ppo or bc
other_agent = "bc" if np.random.uniform() < self.bc_factor else "ppo"
agents.append(other_agent)
# Randomize starting indices
np.random.shuffle(agents)
# Ensure agent names are unique
agents[0] = agents[0] + "_0"
agents[1] = agents[1] + "_1"
# logically the action_space and the observation_space should be set along with the generated agents
# the agents are also randomized in each iteration if bc agents are allowed, which requires reestablishing the action & observation space
self._setup_action_space(agents)
self._setup_observation_space(agents)
return agents
def _anneal(self, start_v, curr_t, end_t, end_v=0, start_t=0):
if end_t == 0:
# No annealing if horizon is zero
return start_v
else:
off_t = curr_t - start_t
# Calculate the new value based on linear annealing formula
fraction = max(1 - float(off_t) / (end_t - start_t), 0)
return fraction * start_v + (1 - fraction) * end_v
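    # Worked example of the linear anneal above (illustrative numbers):
    #   _anneal(start_v=1.0, curr_t=500, end_t=1000) -> 0.5 * 1.0 + 0.5 * 0 = 0.5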
def step(self, action_dict):
"""
action:
(agent with index self.agent_idx action, other agent action)
is a tuple with the joint action of the primary and secondary agents in index format
returns:
observation: formatted to be standard input for self.agent_idx's policy
"""
action = [
action_dict[self.curr_agents[0]],
action_dict[self.curr_agents[1]],
]
assert all(
self.action_space[agent].contains(action_dict[agent])
for agent in action_dict
), "%r (%s) invalid" % (action, type(action))
joint_action = [Action.INDEX_TO_ACTION[a] for a in action]
# take a step in the current base environment
if self.use_phi:
next_state, sparse_reward, done, info = self.base_env.step(
joint_action, display_phi=True
)
potential = info["phi_s_prime"] - info["phi_s"]
dense_reward = (potential, potential)
else:
next_state, sparse_reward, done, info = self.base_env.step(
joint_action, display_phi=False
)
dense_reward = info["shaped_r_by_agent"]
ob_p0, ob_p1 = self._get_obs(next_state)
shaped_reward_p0 = (
sparse_reward + self.reward_shaping_factor * dense_reward[0]
)
shaped_reward_p1 = (
sparse_reward + self.reward_shaping_factor * dense_reward[1]
)
obs = {self.curr_agents[0]: ob_p0, self.curr_agents[1]: ob_p1}
rewards = {
self.curr_agents[0]: shaped_reward_p0,
self.curr_agents[1]: shaped_reward_p1,
}
dones = {
self.curr_agents[0]: done,
self.curr_agents[1]: done,
"__all__": done,
}
infos = {self.curr_agents[0]: info, self.curr_agents[1]: info}
return obs, rewards, dones, infos
def reset(self, regen_mdp=True):
"""
When training on individual maps, we want to randomize which agent is assigned to which
starting location, in order to make sure that the agents are trained to be able to
complete the task starting at either of the hardcoded positions.
NOTE: a nicer way to do this would be to just randomize starting positions, and not
have to deal with randomizing indices.
"""
self.base_env.reset(regen_mdp)
self.curr_agents = self._populate_agents()
ob_p0, ob_p1 = self._get_obs(self.base_env.state)
return {self.curr_agents[0]: ob_p0, self.curr_agents[1]: ob_p1}
def anneal_reward_shaping_factor(self, timesteps):
"""
Set the current reward shaping factor such that we anneal linearly until self.reward_shaping_horizon
timesteps, given that we are currently at timestep "timesteps"
"""
new_factor = self._anneal(
self._initial_reward_shaping_factor,
timesteps,
self.reward_shaping_horizon,
)
self.set_reward_shaping_factor(new_factor)
def anneal_bc_factor(self, timesteps):
"""
Set the current bc factor such that we anneal linearly until self.bc_factor_horizon
timesteps, given that we are currently at timestep "timesteps"
"""
p_0 = self.bc_schedule[0]
p_1 = self.bc_schedule[1]
i = 2
while timesteps > p_1[0] and i < len(self.bc_schedule):
p_0 = p_1
p_1 = self.bc_schedule[i]
i += 1
start_t, start_v = p_0
end_t, end_v = p_1
new_factor = self._anneal(start_v, timesteps, end_t, end_v, start_t)
self.set_bc_factor(new_factor)
def set_reward_shaping_factor(self, factor):
self.reward_shaping_factor = factor
def set_bc_factor(self, factor):
self.bc_factor = factor
def seed(self, seed):
"""
set global random seed to make environment deterministic
"""
# Our environment is already deterministic
pass
@classmethod
def from_config(cls, env_config):
"""
        Factory method for generating environments in the style of rllib guidelines
        env_config (dict): Must contain keys 'mdp_params', 'env_params' and 'multi_agent_params', the last of which
        gets fed into the OvercookedMultiAgent constructor
Returns:
OvercookedMultiAgent instance specified by env_config params
"""
assert (
env_config
and "env_params" in env_config
and "multi_agent_params" in env_config
)
assert (
"mdp_params" in env_config
or "mdp_params_schedule_fn" in env_config
), "either a fixed set of mdp params or a schedule function needs to be given"
# "layout_name" and "rew_shaping_params"
if "mdp_params" in env_config:
mdp_params = env_config["mdp_params"]
outer_shape = None
mdp_params_schedule_fn = None
elif "mdp_params_schedule_fn" in env_config:
mdp_params = None
outer_shape = env_config["outer_shape"]
mdp_params_schedule_fn = env_config["mdp_params_schedule_fn"]
# "start_state_fn" and "horizon"
env_params = env_config["env_params"]
# "reward_shaping_factor"
multi_agent_params = env_config["multi_agent_params"]
base_ae = get_base_ae(
mdp_params, env_params, outer_shape, mdp_params_schedule_fn
)
base_env = base_ae.env
return cls(base_env, **multi_agent_params)
##################
# Training Utils #
##################
class TrainingCallbacks(DefaultCallbacks):
def on_episode_start(self, worker, base_env, policies, episode, **kwargs):
pass
def on_episode_step(self, worker, base_env, episode, **kwargs):
pass
def on_episode_end(self, worker, base_env, policies, episode, **kwargs):
"""
Used in order to add custom metrics to our tensorboard data
sparse_reward (int) - total reward from deliveries agent earned this episode
shaped_reward (int) - total reward shaping reward the agent earned this episode
"""
        # Get rllib.OvercookedMultiAgentEnv reference from the rllib wrapper
env = base_env.get_sub_environments()[0]
# Both agents share the same info so it doesn't matter whose we use, just use 0th agent's
info_dict = episode.last_info_for(env.curr_agents[0])
ep_info = info_dict["episode"]
game_stats = ep_info["ep_game_stats"]
# List of episode stats we'd like to collect by agent
stats_to_collect = EVENT_TYPES
# Parse info dicts generated by OvercookedEnv
tot_sparse_reward = ep_info["ep_sparse_r"]
tot_shaped_reward = ep_info["ep_shaped_r"]
# Store metrics where they will be visible to rllib for tensorboard logging
episode.custom_metrics["sparse_reward"] = tot_sparse_reward
episode.custom_metrics["shaped_reward"] = tot_shaped_reward
# Store per-agent game stats to rllib info dicts
for stat in stats_to_collect:
stats = game_stats[stat]
episode.custom_metrics[stat + "_agent_0"] = len(stats[0])
episode.custom_metrics[stat + "_agent_1"] = len(stats[1])
def on_sample_end(self, worker, samples, **kwargs):
pass
# Executes at the end of a call to Trainer.train, we'll update environment params (like annealing shaped rewards)
def on_train_result(self, trainer, result, **kwargs):
        # Anneal the reward shaping coefficient based on environment parameters and current timestep
timestep = result["timesteps_total"]
trainer.workers.foreach_worker(
lambda ev: ev.foreach_env(
lambda env: env.anneal_reward_shaping_factor(timestep)
)
)
        # Anneal the bc factor based on environment parameters and current timestep
trainer.workers.foreach_worker(
lambda ev: ev.foreach_env(
lambda env: env.anneal_bc_factor(timestep)
)
)
def on_postprocess_trajectory(
self,
worker,
episode,
agent_id,
policy_id,
policies,
postprocessed_batch,
original_batches,
**kwargs
):
pass
def get_rllib_eval_function(
eval_params,
eval_mdp_params,
env_params,
outer_shape,
agent_0_policy_str="ppo",
agent_1_policy_str="ppo",
verbose=False,
):
"""
Used to "curry" rllib evaluation function by wrapping additional parameters needed in a local scope, and returning a
function with rllib custom_evaluation_function compatible signature
eval_params (dict): Contains 'num_games' (int), 'display' (bool), and 'ep_length' (int)
mdp_params (dict): Used to create underlying OvercookedMDP (see that class for configuration)
env_params (dict): Used to create underlying OvercookedEnv (see that class for configuration)
outer_shape (list): a list of 2 item specifying the outer shape of the evaluation layout
agent_0_policy_str (str): Key associated with the rllib policy object used to select actions (must be either 'ppo' or 'bc')
agent_1_policy_str (str): Key associated with the rllib policy object used to select actions (must be either 'ppo' or 'bc')
Note: Agent policies are shuffled each time, so agent_0_policy_str and agent_1_policy_str are symmetric
Returns:
_evaluate (func): Runs an evaluation specified by the curried params, ignores the rllib parameter 'evaluation_workers'
"""
def _evaluate(trainer, evaluation_workers):
if verbose:
print("Computing rollout of current trained policy")
# Randomize starting indices
policies = [agent_0_policy_str, agent_1_policy_str]
np.random.shuffle(policies)
agent_0_policy, agent_1_policy = policies
# Get the corresponding rllib policy objects for each policy string name
agent_0_policy = trainer.get_policy(agent_0_policy)
agent_1_policy = trainer.get_policy(agent_1_policy)
agent_0_feat_fn = agent_1_feat_fn = None
if "bc" in policies:
base_ae = get_base_ae(eval_mdp_params, env_params)
base_env = base_ae.env
bc_featurize_fn = lambda state: base_env.featurize_state_mdp(state)
if policies[0] == "bc":
agent_0_feat_fn = bc_featurize_fn
if policies[1] == "bc":
agent_1_feat_fn = bc_featurize_fn
        # Compute the evaluation rollout. Note this doesn't use the rllib-provided evaluation_workers, so this
# computation all happens on the CPU. Could change this if evaluation becomes a bottleneck
results = evaluate(
eval_params,
eval_mdp_params,
outer_shape,
agent_0_policy,
agent_1_policy,
agent_0_feat_fn,
agent_1_feat_fn,
verbose=verbose,
)
# Log any metrics we care about for rllib tensorboard visualization
metrics = {}
metrics["average_sparse_reward"] = np.mean(results["ep_returns"])
return metrics
return _evaluate
def evaluate(
eval_params,
mdp_params,
outer_shape,
agent_0_policy,
agent_1_policy,
agent_0_featurize_fn=None,
agent_1_featurize_fn=None,
verbose=False,
):
"""
Used to visualize rollouts of trained policies
eval_params (dict): Contains configurations such as the rollout length, number of games, and whether to display rollouts
mdp_params (dict): OvercookedMDP compatible configuration used to create environment used for evaluation
outer_shape (list): a list of 2 item specifying the outer shape of the evaluation layout
agent_0_policy (rllib.Policy): Policy instance used to map states to action logits for agent 0
agent_1_policy (rllib.Policy): Policy instance used to map states to action logits for agent 1
agent_0_featurize_fn (func): Used to preprocess states for agent 0, defaults to lossless_state_encoding if 'None'
agent_1_featurize_fn (func): Used to preprocess states for agent 1, defaults to lossless_state_encoding if 'None'
"""
if verbose:
print("eval mdp params", mdp_params)
evaluator = get_base_ae(
mdp_params,
{"horizon": eval_params["ep_length"], "num_mdp": 1},
outer_shape,
)
# Override pre-processing functions with defaults if necessary
agent_0_featurize_fn = (
agent_0_featurize_fn
if agent_0_featurize_fn
else evaluator.env.lossless_state_encoding_mdp
)
agent_1_featurize_fn = (
agent_1_featurize_fn
if agent_1_featurize_fn
else evaluator.env.lossless_state_encoding_mdp
)
# Wrap rllib policies in overcooked agents to be compatible with Evaluator code
agent0 = RlLibAgent(
agent_0_policy, agent_index=0, featurize_fn=agent_0_featurize_fn
)
agent1 = RlLibAgent(
agent_1_policy, agent_index=1, featurize_fn=agent_1_featurize_fn
)
# Compute rollouts
if "store_dir" not in eval_params:
eval_params["store_dir"] = None
if "display_phi" not in eval_params:
eval_params["display_phi"] = False
results = evaluator.evaluate_agent_pair(
AgentPair(agent0, agent1),
num_games=eval_params["num_games"],
display=eval_params["display"],
dir=eval_params["store_dir"],
display_phi=eval_params["display_phi"],
info=verbose,
)
return results
###########################
# rllib.Trainer functions #
###########################
def gen_trainer_from_params(params):
# All ray environment set-up
if not ray.is_initialized():
init_params = {
"ignore_reinit_error": True,
"include_dashboard": False,
"_temp_dir": params["ray_params"]["temp_dir"],
"log_to_driver": params["verbose"],
"logging_level": logging.INFO
if params["verbose"]
else logging.CRITICAL,
}
ray.init(**init_params)
register_env("overcooked_multi_agent", params["ray_params"]["env_creator"])
ModelCatalog.register_custom_model(
params["ray_params"]["custom_model_id"],
params["ray_params"]["custom_model_cls"],
)
# Parse params
model_params = params["model_params"]
training_params = params["training_params"]
environment_params = params["environment_params"]
evaluation_params = params["evaluation_params"]
bc_params = params["bc_params"]
multi_agent_params = params["environment_params"]["multi_agent_params"]
env = OvercookedMultiAgent.from_config(environment_params)
# Returns a properly formatted policy tuple to be passed into ppotrainer config
def gen_policy(policy_type="ppo"):
# supported policy types thus far
assert policy_type in ["ppo", "bc"]
if policy_type == "ppo":
config = {
"model": {
"custom_model_config": model_params,
"custom_model": "MyPPOModel",
}
}
return (
None,
env.ppo_observation_space,
env.shared_action_space,
config,
)
elif policy_type == "bc":
bc_cls = bc_params["bc_policy_cls"]
bc_config = bc_params["bc_config"]
return (
bc_cls,
env.bc_observation_space,
env.shared_action_space,
bc_config,
)
# Rllib compatible way of setting the directory we store agent checkpoints in
logdir_prefix = "{0}_{1}_{2}".format(
params["experiment_name"], params["training_params"]["seed"], timestr
)
def custom_logger_creator(config):
"""Creates a Unified logger that stores results in <params['results_dir']>/<params["experiment_name"]>_<seed>_<timestamp>"""
results_dir = params["results_dir"]
if not os.path.exists(results_dir):
try:
os.makedirs(results_dir)
except Exception as e:
print(
"error creating custom logging dir. Falling back to default logdir {}".format(
DEFAULT_RESULTS_DIR
)
)
results_dir = DEFAULT_RESULTS_DIR
logdir = tempfile.mkdtemp(prefix=logdir_prefix, dir=results_dir)
logger = UnifiedLogger(config, logdir, loggers=None)
return logger
# Create rllib compatible multi-agent config based on params
multi_agent_config = {}
all_policies = ["ppo"]
# Whether both agents should be learned
self_play = iterable_equal(
multi_agent_params["bc_schedule"],
OvercookedMultiAgent.self_play_bc_schedule,
)
if not self_play:
all_policies.append("bc")
multi_agent_config["policies"] = {
policy: gen_policy(policy) for policy in all_policies
}
def select_policy(agent_id, episode, worker, **kwargs):
if agent_id.startswith("ppo"):
return "ppo"
if agent_id.startswith("bc"):
return "bc"
multi_agent_config["policy_mapping_fn"] = select_policy
multi_agent_config["policies_to_train"] = {"ppo"}
if "outer_shape" not in environment_params:
environment_params["outer_shape"] = None
if "mdp_params" in environment_params:
environment_params["eval_mdp_params"] = environment_params[
"mdp_params"
]
trainer = PPOTrainer(
env="overcooked_multi_agent",
config={
"multiagent": multi_agent_config,
"callbacks": TrainingCallbacks,
"custom_eval_function": get_rllib_eval_function(
evaluation_params,
environment_params["eval_mdp_params"],
environment_params["env_params"],
environment_params["outer_shape"],
"ppo",
"ppo" if self_play else "bc",
verbose=params["verbose"],
),
"env_config": environment_params,
"eager_tracing": False,
**training_params,
},
logger_creator=custom_logger_creator,
)
return trainer
### Serialization ###
def save_trainer(trainer, params, path=None):
"""
Saves a serialized trainer checkpoint at `path`. If none provided, the default path is
~/ray_results/<experiment_results_dir>/checkpoint_<i>
Note that `params` should follow the same schema as the dict passed into `gen_trainer_from_params`
"""
# Save trainer
save_path = trainer.save(path)
# Save params used to create trainer in /path/to/checkpoint_dir/config.pkl
config = copy.deepcopy(params)
config_path = os.path.join(os.path.dirname(save_path), "config.pkl")
# Note that we use dill (not pickle) here because it supports function serialization
with open(config_path, "wb") as f:
dill.dump(config, f)
return save_path
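# Illustrative round trip (the checkpoint path is whatever `save_trainer` returns):
#   path = save_trainer(trainer, params)
#   trainer = load_trainer(path)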
def load_trainer(save_path, true_num_workers=False):
"""
Returns a ray compatible trainer object that was previously saved at `save_path` by a call to `save_trainer`
Note that `save_path` is the full path to the checkpoint directory
    Additionally, `true_num_workers` decides whether to use the same number of remote workers
    (see ray library Training APIs) as stored in the previous configuration; by default (False)
    only the local worker is used (see ray library API)
"""
# Read in params used to create trainer
config_path = os.path.join(os.path.dirname(save_path), "config.pkl")
with open(config_path, "rb") as f:
# We use dill (instead of pickle) here because we must deserialize functions
config = dill.load(f)
if not true_num_workers:
# Override this param to lower overhead in trainer creation
config["training_params"]["num_workers"] = 0
if config["training_params"]["num_gpus"] == 1:
# all other configs for the server can be kept for local testing
config["training_params"]["num_gpus"] = 0
if "trained_example" in save_path:
# For the unit testing we update the result directory in order to avoid an error
config[
"results_dir"
] = "/Users/runner/work/human_aware_rl/human_aware_rl/human_aware_rl/ppo/results_temp"
# Get un-trained trainer object with proper config
trainer = gen_trainer_from_params(config)
# Load weights into dummy object
trainer.restore(save_path)
return trainer
def get_agent_from_trainer(trainer, policy_id="ppo", agent_index=0):
policy = trainer.get_policy(policy_id)
dummy_env = trainer.env_creator(trainer.config["env_config"])
featurize_fn = dummy_env.featurize_fn_map[policy_id]
agent = RlLibAgent(policy, agent_index, featurize_fn=featurize_fn)
return agent
def get_agent_pair_from_trainer(trainer, policy_id_0="ppo", policy_id_1="ppo"):
agent0 = get_agent_from_trainer(trainer, policy_id=policy_id_0)
agent1 = get_agent_from_trainer(trainer, policy_id=policy_id_1)
return AgentPair(agent0, agent1)
def load_agent_pair(save_path, policy_id_0="ppo", policy_id_1="ppo"):
"""
Returns an Overcooked AgentPair object that has as player 0 and player 1 policies with
ID policy_id_0 and policy_id_1, respectively
"""
trainer = load_trainer(save_path)
return get_agent_pair_from_trainer(trainer, policy_id_0, policy_id_1)
def load_agent(save_path, policy_id="ppo", agent_index=0):
"""
Returns an RllibAgent (compatible with the Overcooked Agent API) from the `save_path` to a previously
serialized trainer object created with `save_trainer`
The trainer can have multiple independent policies, so extract the one with ID `policy_id` to wrap in
an RllibAgent
Agent index indicates whether the agent is player zero or player one (or player n in the general case)
as the featurization is not symmetric for both players
"""
trainer = load_trainer(save_path)
return get_agent_from_trainer(
trainer, policy_id=policy_id, agent_index=agent_index
)
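# Example usage of load_agent (illustrative only; the checkpoint path below is hypothetical):
#   agent = load_agent("results_dir/my_run/checkpoint_000100", policy_id="ppo", agent_index=0)
#   agent.reset()
#   action, action_info = agent.action(state)  # state is an OvercookedState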
| 34,142 | 36.936667 | 145 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/ppo/ppo_rllib_client.py | # All imports except rllib
import argparse
import os
import sys
import warnings
import numpy as np
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
warnings.simplefilter("ignore")
# environment variable that tells us whether this code is running on the server or not
LOCAL_TESTING = os.getenv("RUN_ENV", "production") == "local"
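# For example, an illustrative local invocation would be `RUN_ENV=local python ppo_rllib_client.py`,
# which switches on the lighter-weight LOCAL_TESTING parameter values used throughout my_config below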
# Sacred setup (must be before rllib imports)
from sacred import Experiment
ex = Experiment("PPO RLLib")
# Necessary work-around to make sacred pickling compatible with rllib
from sacred import SETTINGS
SETTINGS.CONFIG.READ_ONLY_CONFIG = False
# Slack notification configuration
from sacred.observers import SlackObserver
if os.path.exists("slack.json") and not LOCAL_TESTING:
slack_obs = SlackObserver.from_config("slack.json")
ex.observers.append(slack_obs)
# Necessary for capturing stdout in multiprocessing setting
SETTINGS.CAPTURE_MODE = "sys"
# rllib and rllib-dependent imports
# Note: tensorflow and tensorflow dependent imports must also come after rllib imports
# This is because rllib disables eager execution. Otherwise, it must be manually disabled
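# (If eager execution ever had to be disabled by hand, the usual call would be
# tf.compat.v1.disable_eager_execution(); noted here only as a reference, not used below.)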
import ray
from ray.rllib.agents.ppo.ppo import PPOTrainer
from ray.rllib.models import ModelCatalog
from ray.tune.registry import register_env
from ray.tune.result import DEFAULT_RESULTS_DIR
from human_aware_rl.imitation.behavior_cloning_tf2 import (
BC_SAVE_DIR,
BehaviorCloningPolicy,
)
from human_aware_rl.ppo.ppo_rllib import RllibLSTMPPOModel, RllibPPOModel
from human_aware_rl.rllib.rllib import (
OvercookedMultiAgent,
gen_trainer_from_params,
save_trainer,
)
from human_aware_rl.utils import WANDB_PROJECT
###################### Temp Documentation #######################
# run the following command in order to train a PPO self-play #
# agent with the static parameters listed in my_config #
# #
# python ppo_rllib_client.py #
# #
# In order to view the results of training, run the following #
# command #
# #
# tensorboard --log-dir ~/ray_results/ #
# #
#################################################################
# Dummy wrapper to pass rllib type checks
def _env_creator(env_config):
# Re-import required here to work with serialization
from human_aware_rl.rllib.rllib import OvercookedMultiAgent
return OvercookedMultiAgent.from_config(env_config)
@ex.config
def my_config():
    ### Resume checkpoint_path ###
resume_checkpoint_path = None
### Model params ###
# Whether dense reward should come from potential function or not
use_phi = True
# whether to use recurrence in ppo model
use_lstm = False
# Base model params
NUM_HIDDEN_LAYERS = 3
SIZE_HIDDEN_LAYERS = 64
NUM_FILTERS = 25
NUM_CONV_LAYERS = 3
# LSTM memory cell size (only used if use_lstm=True)
CELL_SIZE = 256
    # whether to use D2RL https://arxiv.org/pdf/2010.09163.pdf (concatenating the result of the last conv layer to each hidden layer); works only when use_lstm is False
D2RL = False
### Training Params ###
num_workers = 30 if not LOCAL_TESTING else 2
# list of all random seeds to use for experiments, used to reproduce results
seeds = [0]
    # Placeholder for the random seed of the current trial
seed = None
# Number of gpus the central driver should use
num_gpus = 0 if LOCAL_TESTING else 1
# How many environment timesteps will be simulated (across all environments)
# for one set of gradient updates. Is divided equally across environments
train_batch_size = 12000 if not LOCAL_TESTING else 800
# size of minibatches we divide up each batch into before
# performing gradient steps
sgd_minibatch_size = 2000 if not LOCAL_TESTING else 800
# Rollout length
rollout_fragment_length = 400
# Whether all PPO agents should share the same policy network
shared_policy = True
# Number of training iterations to run
num_training_iters = 420 if not LOCAL_TESTING else 2
# Stepsize of SGD.
lr = 5e-5
# Learning rate schedule.
lr_schedule = None
# If specified, clip the global norm of gradients by this amount
grad_clip = 0.1
# Discount factor
gamma = 0.99
# Exponential decay factor for GAE (how much weight to put on monte carlo samples)
# Reference: https://arxiv.org/pdf/1506.02438.pdf
lmbda = 0.98
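    # (For reference, GAE estimates advantages as A_t = sum_{l>=0} (gamma * lmbda)^l * delta_{t+l},
    # where delta_t = r_t + gamma * V(s_{t+1}) - V(s_t); lmbda=1 recovers Monte Carlo returns and
    # lmbda=0 the one-step TD estimate.)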
# Whether the value function shares layers with the policy model
vf_share_layers = True
# How much the loss of the value network is weighted in overall loss
vf_loss_coeff = 1e-4
# Entropy bonus coefficient, will anneal linearly from _start to _end over _horizon steps
entropy_coeff_start = 0.2
entropy_coeff_end = 0.1
entropy_coeff_horizon = 3e5
# Initial coefficient for KL divergence.
kl_coeff = 0.2
# PPO clipping factor
clip_param = 0.05
# Number of SGD iterations in each outer loop (i.e., number of epochs to
# execute per train batch).
num_sgd_iter = 8 if not LOCAL_TESTING else 1
    # How many training iterations (calls to trainer.train()) to run before saving model checkpoint
save_freq = 25
# How many training iterations to run between each evaluation
evaluation_interval = 50 if not LOCAL_TESTING else 1
# How many timesteps should be in an evaluation episode
evaluation_ep_length = 400
    # Number of games to simulate during each evaluation
evaluation_num_games = 1
# Whether to display rollouts in evaluation
evaluation_display = False
# Where to log the ray dashboard stats
temp_dir = os.path.join(os.path.abspath(os.sep), "tmp", "ray_tmp")
# Where to store model checkpoints and training stats
results_dir = DEFAULT_RESULTS_DIR
# Whether tensorflow should execute eagerly or not
eager = False
# Whether to log training progress and debugging info
verbose = True
### BC Params ###
# path to pickled policy model for behavior cloning
bc_model_dir = os.path.join(BC_SAVE_DIR, "default")
# Whether bc agents should return action logit argmax or sample
bc_stochastic = True
### Environment Params ###
# Which overcooked level to use
layout_name = "cramped_room"
# all_layout_names = '_'.join(layout_names)
# Name of directory to store training results in (stored in ~/ray_results/<experiment_name>)
params_str = str(use_phi) + "_nw=%d_vf=%f_es=%f_en=%f_kl=%f" % (
num_workers,
vf_loss_coeff,
entropy_coeff_start,
entropy_coeff_end,
kl_coeff,
)
experiment_name = "{0}_{1}_{2}".format("PPO", layout_name, params_str)
# Rewards the agent will receive for intermediate actions
rew_shaping_params = {
"PLACEMENT_IN_POT_REW": 3,
"DISH_PICKUP_REWARD": 3,
"SOUP_PICKUP_REWARD": 5,
"DISH_DISP_DISTANCE_REW": 0,
"POT_DISTANCE_REW": 0,
"SOUP_DISTANCE_REW": 0,
}
# whether to start cooking automatically when pot has 3 items in it
old_dynamics = False
# Max episode length
horizon = 400
# Constant by which shaped rewards are multiplied by when calculating total reward
reward_shaping_factor = 1.0
# Linearly anneal the reward shaping factor such that it reaches zero after this number of timesteps
reward_shaping_horizon = float("inf")
    # bc_factor represents the probability that the ppo agent gets paired with a bc agent for any episode
# schedule for bc_factor is represented by a list of points (t_i, v_i) where v_i represents the
# value of bc_factor at timestep t_i. Values are linearly interpolated between points
# The default listed below represents bc_factor=0 for all timesteps
bc_schedule = OvercookedMultiAgent.self_play_bc_schedule
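    # For illustration (hypothetical values): a schedule that linearly anneals bc_factor from 0 to 1
    # over the first 1e6 timesteps and then holds it there could be written as
    #   bc_schedule = [(0, 0.0), (1e6, 1.0), (float("inf"), 1.0)]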
# To be passed into rl-lib model/custom_options config
model_params = {
"use_lstm": use_lstm,
"NUM_HIDDEN_LAYERS": NUM_HIDDEN_LAYERS,
"SIZE_HIDDEN_LAYERS": SIZE_HIDDEN_LAYERS,
"NUM_FILTERS": NUM_FILTERS,
"NUM_CONV_LAYERS": NUM_CONV_LAYERS,
"CELL_SIZE": CELL_SIZE,
"D2RL": D2RL,
}
# to be passed into the rllib.PPOTrainer class
training_params = {
"num_workers": num_workers,
"train_batch_size": train_batch_size,
"sgd_minibatch_size": sgd_minibatch_size,
"rollout_fragment_length": rollout_fragment_length,
"num_sgd_iter": num_sgd_iter,
"lr": lr,
"lr_schedule": lr_schedule,
"grad_clip": grad_clip,
"gamma": gamma,
"lambda": lmbda,
"vf_share_layers": vf_share_layers,
"vf_loss_coeff": vf_loss_coeff,
"kl_coeff": kl_coeff,
"clip_param": clip_param,
"num_gpus": num_gpus,
"seed": seed,
"evaluation_interval": evaluation_interval,
"entropy_coeff_schedule": [
(0, entropy_coeff_start),
(entropy_coeff_horizon, entropy_coeff_end),
],
"eager_tracing": eager,
"log_level": "WARN" if verbose else "ERROR",
}
# To be passed into AgentEvaluator constructor and _evaluate function
evaluation_params = {
"ep_length": evaluation_ep_length,
"num_games": evaluation_num_games,
"display": evaluation_display,
}
environment_params = {
# To be passed into OvercookedGridWorld constructor
"mdp_params": {
"layout_name": layout_name,
"rew_shaping_params": rew_shaping_params,
            # old_dynamics == True makes cooking start automatically without INTERACT
# allows only 3-item recipes
"old_dynamics": old_dynamics,
},
# To be passed into OvercookedEnv constructor
"env_params": {"horizon": horizon},
# To be passed into OvercookedMultiAgent constructor
"multi_agent_params": {
"reward_shaping_factor": reward_shaping_factor,
"reward_shaping_horizon": reward_shaping_horizon,
"use_phi": use_phi,
"bc_schedule": bc_schedule,
},
}
bc_params = {
"bc_policy_cls": BehaviorCloningPolicy,
"bc_config": {
"model_dir": bc_model_dir,
"stochastic": bc_stochastic,
"eager": eager,
},
}
ray_params = {
"custom_model_id": "MyPPOModel",
"custom_model_cls": RllibLSTMPPOModel
if model_params["use_lstm"]
else RllibPPOModel,
"temp_dir": temp_dir,
"env_creator": _env_creator,
}
params = {
"model_params": model_params,
"training_params": training_params,
"environment_params": environment_params,
"bc_params": bc_params,
"shared_policy": shared_policy,
"num_training_iters": num_training_iters,
"evaluation_params": evaluation_params,
"experiment_name": experiment_name,
"save_every": save_freq,
"seeds": seeds,
"results_dir": results_dir,
"ray_params": ray_params,
"resume_checkpoint_path": resume_checkpoint_path,
"verbose": verbose,
}
def run(params):
run_name = params["experiment_name"]
if params["verbose"]:
import wandb
wandb.init(project=WANDB_PROJECT, sync_tensorboard=True)
wandb.run.name = run_name
# Retrieve the tune.Trainable object that is used for the experiment
trainer = gen_trainer_from_params(params)
# Object to store training results in
result = {}
# Training loop
for i in range(params["num_training_iters"]):
if params["verbose"]:
print("Starting training iteration", i)
result = trainer.train()
if i % params["save_every"] == 0:
save_path = save_trainer(trainer, params)
if params["verbose"]:
print("saved trainer at", save_path)
# Save the state of the experiment at end
save_path = save_trainer(trainer, params)
if params["verbose"]:
print("saved trainer at", save_path)
# quiet = True so wandb doesn't log to console
wandb.finish(quiet=True)
return result
@ex.automain
def main(params):
# List of each random seed to run
seeds = params["seeds"]
del params["seeds"]
    # this is required if we want to pass schedules in as command-line args; the strings need to be converted back into a list of tuples
bc_schedule = params["environment_params"]["multi_agent_params"][
"bc_schedule"
]
if not isinstance(bc_schedule[0], list):
tuples_lst = []
for i in range(0, len(bc_schedule), 2):
x = int(bc_schedule[i].strip("("))
y = int(bc_schedule[i + 1].strip(")"))
tuples_lst.append((x, y))
params["environment_params"]["multi_agent_params"][
"bc_schedule"
] = tuples_lst
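        # e.g. (illustrative) a command-line override passed as "(0" "0)" "(8000" "1)" arrives here as
        # ['(0', '0)', '(8000', '1)'] and the loop above converts it back into [(0, 0), (8000, 1)]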
# List to store results dicts (to be passed to sacred slack observer)
results = []
# Train an agent to completion for each random seed specified
for seed in seeds:
# Override the seed
params["training_params"]["seed"] = seed
# Do the thing
result = run(params)
results.append(result)
# Return value gets sent to our slack observer for notification
average_sparse_reward = np.mean(
[res["custom_metrics"]["sparse_reward_mean"] for res in results]
)
average_episode_reward = np.mean(
[res["episode_reward_mean"] for res in results]
)
return {
"average_sparse_reward": average_sparse_reward,
"average_total_reward": average_episode_reward,
}
| 13,930 | 31.625293 | 164 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/ppo/evaluate.py | import os
import warnings
import numpy as np
from human_aware_rl.imitation.behavior_cloning_tf2 import (
BehaviorCloningPolicy,
_get_base_ae,
evaluate_bc_model,
load_bc_model,
)
from human_aware_rl.rllib.rllib import (
AgentPair,
RlLibAgent,
evaluate,
get_agent_from_trainer,
load_agent,
load_agent_pair,
)
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
# Ignore all warnings
warnings.filterwarnings("ignore")
# Customized evaluation functions
def evaluate_hp_bc(bc_model_path, hp_model_path, layout, order=0):
"""
This function evaluates the performance between a BC model (trained with the human training data) and a human proxy model (trained with the human testing data)
The order parameter determines the placement of the agents
"""
bc_model, bc_params = load_bc_model(bc_model_path)
bc_policy = BehaviorCloningPolicy.from_model(
bc_model, bc_params, stochastic=True
)
hp_model, hp_params = load_bc_model(hp_model_path)
hp_policy = BehaviorCloningPolicy.from_model(
hp_model, hp_params, stochastic=True
)
base_ae = _get_base_ae(bc_params)
base_env = base_ae.env
bc_agent = RlLibAgent(bc_policy, 0, base_env.featurize_state_mdp)
hp_agent = RlLibAgent(hp_policy, 1, base_env.featurize_state_mdp)
ae = AgentEvaluator.from_layout_name(
mdp_params={"layout_name": layout, "old_dynamics": True},
env_params={"horizon": 400},
)
if order == 0:
ap = AgentPair(hp_agent, bc_agent)
else:
ap = AgentPair(bc_agent, hp_agent)
result = ae.evaluate_agent_pair(ap, 1, 400)
return result, result["ep_returns"]
def evaluate_ppo_bc(path, layout, order=0):
"""
This function loads and evaluates a PPO agent and a BC agent that was trained together, thus stored in the same trainer
Order determines the starting position of the agents
"""
ae = AgentEvaluator.from_layout_name(
{"layout_name": layout, "old_dynamics": True}, {"horizon": 400}
)
if order == 0:
ap = load_agent_pair(path, "ppo", "bc")
else:
ap = load_agent_pair(path, "bc", "ppo")
result = ae.evaluate_agent_pair(ap, 1, 400)
return result, result["ep_returns"]
def evaluate_ppo(path, layout):
"""
This function loads and evaluates the performance of 2 PPO self-play agents
Order doesn't matter here since the agents are self-play
"""
ae = AgentEvaluator.from_layout_name(
{"layout_name": layout, "old_dynamics": True}, {"horizon": 400}
)
ap = load_agent_pair(path, "ppo", "ppo")
result = ae.evaluate_agent_pair(ap, 1, 400)
return result, result["ep_returns"]
def evaluate_hp_ppo(bc_model_path, trainer_path, layout, order=0):
"""
This function evaluates the performance between a PPO agent and a human proxy model (trained with the human testing data)
The order parameter determines the placement of the agents
"""
bc_model, bc_params = load_bc_model(bc_model_path)
bc_policy = BehaviorCloningPolicy.from_model(
bc_model, bc_params, stochastic=True
)
base_ae = _get_base_ae(bc_params)
base_env = base_ae.env
bc_agent = RlLibAgent(bc_policy, 0, base_env.featurize_state_mdp)
print(trainer_path)
ppo_agent = load_agent(trainer_path, policy_id="ppo", agent_index=1)
ae = AgentEvaluator.from_layout_name(
mdp_params={"layout_name": layout, "old_dynamics": True},
env_params={"horizon": 400},
)
if order == 0:
ap = AgentPair(ppo_agent, bc_agent)
else:
ap = AgentPair(bc_agent, ppo_agent)
result = ae.evaluate_agent_pair(ap, 1, 400)
return result, result["ep_returns"]
# the order of layouts we want to evaluate
layouts = [
"cramped_room",
"asymmetric_advantages",
"coordination_ring",
"forced_coordination",
"counter_circuit_o_1order",
]
file_dir = os.path.dirname(os.path.abspath(__file__))
bc_path = os.path.join(file_dir, "../imitation/bc_runs")
# directories where the BC agents are stored
bc = [
os.path.join(bc_path, "train/cramped_room"),
os.path.join(bc_path, "train/asymmetric_advantages"),
os.path.join(bc_path, "train/coordination_ring"),
os.path.join(bc_path, "train/random0"),
os.path.join(bc_path, "train/random3"),
]
# directories where the human proxy agents are stored
hp = [
os.path.join(bc_path, "test/cramped_room"),
os.path.join(bc_path, "test/asymmetric_advantages"),
os.path.join(bc_path, "test/coordination_ring"),
os.path.join(bc_path, "test/random0"),
os.path.join(bc_path, "test/random3"),
]
# reproduced ppo agents trained with bc; change the comments to the paths of your trained agents
# change this to one of the agents created after running the run_ppo_bc_experiments.sh bash script
ppo_bc = [
# ppo_bc_crammed_room,
# ppo_bc_asymmetric_advantages,
# ppo_bc_coordination_ring,
# ppo_bc_forced_coordination,
# ppo_bc_counter_circuit_o_1order,
]
# reproduced ppo agents trained with self-play; change the comments to the paths of your trained agents
# change this to one of the agents created after running the run_experiments.sh bash script
ppo_sp = [
# ppo_sp_crammed_room,
# ppo_sp_asymmetric_advantages,
# ppo_sp_coordination_ring,
# ppo_sp_forced_coordination,
# ppo_sp_counter_circuit_o_1order,
]
def eval_models(order):
hp_PBC = {}
hp_PSP = {}
bc_PBC = {}
PSP_PSP = {}
hp_BC = {}
for i in range(5):
# hp vs ppo_bc
_, res = evaluate_hp_ppo(hp[i], ppo_bc[i], layouts[i], order)
hp_PBC[layouts[i]] = (np.mean(res), np.std(res) / len(res) ** 0.5)
# hp vs ppo_sp
_, res = evaluate_hp_ppo(hp[i], ppo_sp[i], layouts[i], order)
hp_PSP[layouts[i]] = (np.mean(res), np.std(res) / len(res) ** 0.5)
# bc vs ppo_bc
_, res = evaluate_ppo_bc(ppo_bc[i], layouts[i], order)
bc_PBC[layouts[i]] = (np.mean(res), np.std(res) / len(res) ** 0.5)
# ppo_sp vs ppo_sp
_, res = evaluate_ppo(ppo_sp[i], layouts[i])
PSP_PSP[layouts[i]] = (np.mean(res), np.std(res) / len(res) ** 0.5)
# bc vs hp
_, res = evaluate_hp_bc(bc[i], hp[i], layouts[i], order)
hp_BC[layouts[i]] = (np.mean(res), np.std(res) / len(res) ** 0.5)
return PSP_PSP, hp_PSP, hp_PBC, hp_BC, bc_PBC
| 6,394 | 32.657895 | 163 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/ppo/ppo_rllib_from_params_client.py | # All imports except rllib
import argparse
import logging
import os
import sys
import numpy as np
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
# environment variable that tells us whether this code is running on the server or not
LOCAL_TESTING = os.getenv("RUN_ENV", "production") == "local"
# Sacred setup (must be before rllib imports)
from sacred import Experiment
ex_fp = Experiment("PPO RLLib From Params")
# Necessary work-around to make sacred pickling compatible with rllib
from sacred import SETTINGS
SETTINGS.CONFIG.READ_ONLY_CONFIG = False
# Slack notification configuration
from sacred.observers import SlackObserver
if os.path.exists("slack.json") and not LOCAL_TESTING:
slack_obs = SlackObserver.from_config("slack.json")
ex_fp.observers.append(slack_obs)
# Necessary for capturing stdout in multiprocessing setting
SETTINGS.CAPTURE_MODE = "sys"
# rllib and rllib-dependent imports
# Note: tensorflow and tensorflow dependent imports must also come after rllib imports
# This is because rllib disables eager execution. Otherwise, it must be manually disabled
import ray
from ray.rllib.agents.ppo.ppo import PPOTrainer
from ray.rllib.models import ModelCatalog
from ray.tune.registry import register_env
from ray.tune.result import DEFAULT_RESULTS_DIR
from human_aware_rl.imitation.behavior_cloning_tf2 import (
BC_SAVE_DIR,
BehaviorCloningPolicy,
)
from human_aware_rl.ppo.ppo_rllib import RllibLSTMPPOModel, RllibPPOModel
from human_aware_rl.rllib.rllib import (
OvercookedMultiAgent,
gen_trainer_from_params,
load_trainer,
save_trainer,
)
from human_aware_rl.utils import WANDB_PROJECT
###################### Temp Documentation #######################
# run the following command in order to train a PPO self-play #
# agent with the static parameters listed in my_config #
# #
# python ppo_rllib_client.py #
# #
# In order to view the results of training, run the following #
# command #
# #
# tensorboard --log-dir ~/ray_results/ #
# #
#################################################################
# Dummy wrapper to pass rllib type checks
def _env_creator(env_config):
# Re-import required here to work with serialization
from human_aware_rl.rllib.rllib import OvercookedMultiAgent
return OvercookedMultiAgent.from_config(env_config)
def naive_params_schedule_fn(outside_information):
"""
In this preliminary version, the outside information is ignored
"""
# Rewards the agent will receive for intermediate actions
rew_shaping_params = {
"PLACEMENT_IN_POT_REW": 3,
"DISH_PICKUP_REWARD": 3,
"SOUP_PICKUP_REWARD": 5,
"DISH_DISP_DISTANCE_REW": 0,
"POT_DISTANCE_REW": 0,
"SOUP_DISTANCE_REW": 0,
}
mdp_default_gen_params = {
"inner_shape": (5, 4),
"prop_empty": 0.95,
"prop_feats": 0.1,
"start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
"display": False,
"rew_shaping_params": rew_shaping_params,
"old_dynamics": False,
}
return mdp_default_gen_params
@ex_fp.config
def my_config():
    ### Resume checkpoint_path ###
resume_checkpoint_path = None
### Model params ###
# whether to use recurrence in ppo model
use_lstm = False
# Base model params
NUM_HIDDEN_LAYERS = 3
SIZE_HIDDEN_LAYERS = 64
NUM_FILTERS = 25
NUM_CONV_LAYERS = 3
D2RL = False
# LSTM memory cell size (only used if use_lstm=True)
CELL_SIZE = 256
### Training Params ###
num_workers = 20 if not LOCAL_TESTING else 2
# list of all random seeds to use for experiments, used to reproduce results
seeds = [0]
    # Placeholder for the random seed of the current trial
seed = None
# Number of gpus the central driver should use
num_gpus = 0 if LOCAL_TESTING else 1
# How many environment timesteps will be simulated (across all environments)
# for one set of gradient updates. Is divided equally across environments
# train_batch_size = 40000 if not LOCAL_TESTING else 800
train_batch_size = 100000 if not LOCAL_TESTING else 800
# size of minibatches we divide up each batch into before
# performing gradient steps
# sgd_minibatch_size = 10000 if not LOCAL_TESTING else 800
sgd_minibatch_size = 25000 if not LOCAL_TESTING else 800
# Rollout length
rollout_fragment_length = 400
# Whether all PPO agents should share the same policy network
shared_policy = True
# Number of training iterations to run
num_training_iters = 400 if not LOCAL_TESTING else 2
# Stepsize of SGD.
lr = 5e-3
# Learning rate schedule.
lr_schedule = None
# If specified, clip the global norm of gradients by this amount
grad_clip = 0.1
# Discount factor
gamma = 0.99
# Exponential decay factor for GAE (how much weight to put on monte carlo samples)
# Reference: https://arxiv.org/pdf/1506.02438.pdf
lmbda = 0.98
# Whether the value function shares layers with the policy model
vf_share_layers = True
# How much the loss of the value network is weighted in overall loss
vf_loss_coeff = 1e-4
# Entropy bonus coefficient, will anneal linearly from _start to _end over _horizon steps
entropy_coeff_start = 0.02
entropy_coeff_end = 0.00005
entropy_coeff_horizon = 3e5
# Initial coefficient for KL divergence.
kl_coeff = 0.2
# PPO clipping factor
clip_param = 0.05
# Number of SGD iterations in each outer loop (i.e., number of epochs to
# execute per train batch).
num_sgd_iter = 8 if not LOCAL_TESTING else 1
    # How many training iterations (calls to trainer.train()) to run before saving model checkpoint
save_freq = 250
# How many training iterations to run between each evaluation
evaluation_interval = 50 if not LOCAL_TESTING else 1
# How many timesteps should be in an evaluation episode
evaluation_ep_length = 400
    # Number of games to simulate during each evaluation
evaluation_num_games = 2
# Whether to display rollouts in evaluation
evaluation_display = True
# Where to store replay txt files
evaluation_replay_store_dir = None
# Where to log the ray dashboard stats
temp_dir = (
os.path.join(os.path.abspath(os.sep), "tmp", "ray_tmp")
if not LOCAL_TESTING
else None
)
# Where to store model checkpoints and training stats
results_dir = os.path.join(os.path.abspath("."), "results_client_temp")
# Whether tensorflow should execute eagerly or not
eager = False
# Whether to log training progress and debugging info
verbose = True
### BC Params ###
# path to pickled policy model for behavior cloning
bc_model_dir = os.path.join(BC_SAVE_DIR, "default")
# Whether bc agents should return action logit argmax or sample
bc_stochastic = True
### Environment Params ###
outer_shape = (5, 4)
params_str = (
"nw=%d_vf=%f_es=%f_en=%f_kl=%f_outer_shape=%d_%d--inner_shape=%d_%d--prop_empty=%f--prop_feats=%f"
% (
num_workers,
vf_loss_coeff,
entropy_coeff_start,
entropy_coeff_end,
kl_coeff,
outer_shape[0],
outer_shape[1],
5,
4,
0.95,
0.1,
)
)
# Name of directory to store training results in (stored in ~/ray_results/<experiment_name>)
experiment_name = "{0}_{1}".format("PPO_fp_", params_str)
# Whether dense reward should come from potential function or not
use_phi = True
# Max episode length
horizon = 400
# The number of MDP in the env.mdp_lst
num_mdp = 1
# num_mdp = np.inf # for infinite mdp
# Constant by which shaped rewards are multiplied by when calculating total reward
reward_shaping_factor = 1.0
# Linearly anneal the reward shaping factor such that it reaches zero after this number of timesteps
reward_shaping_horizon = 1e6
    # bc_factor represents the probability that the ppo agent gets paired with a bc agent for any episode
# schedule for bc_factor is represented by a list of points (t_i, v_i) where v_i represents the
# value of bc_factor at timestep t_i. Values are linearly interpolated between points
# The default listed below represents bc_factor=0 for all timesteps
bc_schedule = OvercookedMultiAgent.self_play_bc_schedule
# To be passed into rl-lib model/custom_options config
model_params = {
"use_lstm": use_lstm,
"NUM_HIDDEN_LAYERS": NUM_HIDDEN_LAYERS,
"SIZE_HIDDEN_LAYERS": SIZE_HIDDEN_LAYERS,
"NUM_FILTERS": NUM_FILTERS,
"NUM_CONV_LAYERS": NUM_CONV_LAYERS,
"CELL_SIZE": CELL_SIZE,
"D2RL": D2RL,
}
# to be passed into the rllib.PPOTrainer class
training_params = {
"num_workers": num_workers,
"train_batch_size": train_batch_size,
"sgd_minibatch_size": sgd_minibatch_size,
"rollout_fragment_length": rollout_fragment_length,
"num_sgd_iter": num_sgd_iter,
"lr": lr,
"lr_schedule": lr_schedule,
"grad_clip": grad_clip,
"gamma": gamma,
"lambda": lmbda,
"vf_share_layers": vf_share_layers,
"vf_loss_coeff": vf_loss_coeff,
"kl_coeff": kl_coeff,
"clip_param": clip_param,
"num_gpus": num_gpus,
"seed": seed,
"evaluation_interval": evaluation_interval,
"entropy_coeff_schedule": [
(0, entropy_coeff_start),
(entropy_coeff_horizon, entropy_coeff_end),
],
"eager_tracing": eager,
}
# To be passed into AgentEvaluator constructor and _evaluate function
evaluation_params = {
"ep_length": evaluation_ep_length,
"num_games": evaluation_num_games,
"display": evaluation_display,
"store_dir": evaluation_replay_store_dir,
"display_phi": True,
}
environment_params = {
# To be passed into OvercookedGridWorld constructor
"outer_shape": outer_shape,
"mdp_params_schedule_fn": naive_params_schedule_fn,
# To be passed into OvercookedEnv constructor
"env_params": {
"horizon": horizon,
"num_mdp": num_mdp,
"initial_info": {},
},
# evaluation mdp params
"eval_mdp_params": {
"inner_shape": (5, 4),
"prop_empty": 0.95,
"prop_feats": 0.1,
"start_all_orders": [{"ingredients": ["onion", "onion", "onion"]}],
"display": False,
},
# "eval_mdp_params" :{
# "layout_name": "cramped_room"
# },
# To be passed into OvercookedMultiAgent constructor
"multi_agent_params": {
"reward_shaping_factor": reward_shaping_factor,
"reward_shaping_horizon": reward_shaping_horizon,
"use_phi": use_phi,
"bc_schedule": bc_schedule,
},
}
bc_params = {
"bc_policy_cls": BehaviorCloningPolicy,
"bc_config": {
"model_dir": bc_model_dir,
"stochastic": bc_stochastic,
"eager": eager,
},
}
ray_params = {
"custom_model_id": "MyPPOModel",
"custom_model_cls": RllibLSTMPPOModel
if model_params["use_lstm"]
else RllibPPOModel,
"temp_dir": temp_dir,
"env_creator": _env_creator,
}
params = {
"model_params": model_params,
"training_params": training_params,
"environment_params": environment_params,
"bc_params": bc_params,
"shared_policy": shared_policy,
"num_training_iters": num_training_iters,
"evaluation_params": evaluation_params,
"experiment_name": experiment_name,
"save_every": save_freq,
"seeds": seeds,
"temp_dir": temp_dir,
"results_dir": results_dir,
"ray_params": ray_params,
"resume_checkpoint_path": resume_checkpoint_path,
"verbose": verbose,
}
# Dummy wrapper to pass rllib type checks
def _env_creater(env_config):
return OvercookedMultiAgent.from_config(env_config)
def run(params):
run_name = params["experiment_name"]
if params["verbose"]:
import wandb
wandb.init(project=WANDB_PROJECT, sync_tensorboard=True)
wandb.run.name = run_name
# Check if any resume checkpoint given
saved_path = params["resume_checkpoint_path"]
if saved_path:
trainer = load_trainer(save_path=saved_path, true_num_workers=False)
else:
# Retrieve the tune.Trainable object that is used for the experiment
trainer = gen_trainer_from_params(params)
# Object to store training results in
result = {}
# Training loop
for i in range(params["num_training_iters"]):
if params["verbose"]:
print("Starting training iteration", i)
result = trainer.train()
if i % params["save_every"] == 0:
save_path = save_trainer(trainer, params)
if params["verbose"]:
print("saved trainer at", save_path)
# Save the state of the experiment at end
save_path = save_trainer(trainer, params)
if params["verbose"]:
print("saved trainer at", save_path)
# quiet = True so wandb doesn't log to console
wandb.finish(quiet=True)
return result
@ex_fp.automain
def main(params):
# All ray environment set-up
init_params = {
"ignore_reinit_error": True,
"include_dashboard": False,
"_temp_dir": params["ray_params"]["temp_dir"],
"log_to_driver": params["verbose"],
"logging_level": logging.INFO
if params["verbose"]
else logging.CRITICAL,
}
ray.init(**init_params)
register_env("overcooked_multi_agent", _env_creater)
ModelCatalog.register_custom_model(
"MyPPOModel",
RllibLSTMPPOModel
if params["model_params"]["use_lstm"]
else RllibPPOModel,
)
# List of each random seed to run
seeds = params["seeds"]
del params["seeds"]
# List to store results dicts (to be passed to sacred slack observer)
results = []
# Train an agent to completion for each random seed specified
for seed in seeds:
# Override the seed
params["training_params"]["seed"] = seed
# Do the thing
result = run(params)
results.append(result)
# Return value gets sent to our slack observer for notification
average_sparse_reward = np.mean(
[res["custom_metrics"]["sparse_reward_mean"] for res in results]
)
average_episode_reward = np.mean(
[res["episode_reward_mean"] for res in results]
)
return {
"average_sparse_reward": average_sparse_reward,
"average_total_reward": average_episode_reward,
}
| 15,308 | 30.630165 | 106 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/ppo/ppo_rllib.py | import numpy as np
import tensorflow as tf
from ray.rllib.models.tf.recurrent_net import RecurrentNetwork
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
class RllibPPOModel(TFModelV2):
"""
Model that will map environment states to action probabilities. Will be shared across agents
"""
def __init__(
self,
obs_space,
action_space,
num_outputs,
model_config,
name,
**kwargs
):
super(RllibPPOModel, self).__init__(
obs_space, action_space, num_outputs, model_config, name
)
# params we got to pass in from the call to "run"
custom_params = model_config["custom_model_config"]
## Parse custom network params
num_hidden_layers = custom_params["NUM_HIDDEN_LAYERS"]
size_hidden_layers = custom_params["SIZE_HIDDEN_LAYERS"]
num_filters = custom_params["NUM_FILTERS"]
num_convs = custom_params["NUM_CONV_LAYERS"]
d2rl = custom_params["D2RL"]
assert type(d2rl) == bool
## Create graph of custom network. It will under a shared tf scope such that all agents
## use the same model
self.inputs = tf.keras.Input(
shape=obs_space.shape, name="observations"
)
out = self.inputs
        # Apply initial conv layer with a larger kernel (why?)
if num_convs > 0:
y = tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.leaky_relu,
name="conv_initial",
)
out = y(out)
# Apply remaining conv layers, if any
for i in range(0, num_convs - 1):
padding = "same" if i < num_convs - 2 else "valid"
out = tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=[3, 3],
padding=padding,
activation=tf.nn.leaky_relu,
name="conv_{}".format(i),
)(out)
# Apply dense hidden layers, if any
conv_out = tf.keras.layers.Flatten()(out)
out = conv_out
for i in range(num_hidden_layers):
if i > 0 and d2rl:
out = tf.keras.layers.Concatenate()([out, conv_out])
out = tf.keras.layers.Dense(size_hidden_layers)(out)
out = tf.keras.layers.LeakyReLU()(out)
# Linear last layer for action distribution logits
layer_out = tf.keras.layers.Dense(self.num_outputs)(out)
# Linear last layer for value function branch of model
value_out = tf.keras.layers.Dense(1)(out)
self.base_model = tf.keras.Model(self.inputs, [layer_out, value_out])
def forward(self, input_dict, state=None, seq_lens=None):
model_out, self._value_out = self.base_model(input_dict["obs"])
return model_out, state
def value_function(self):
return tf.reshape(self._value_out, [-1])
class RllibLSTMPPOModel(RecurrentNetwork):
"""
Model that will map encoded environment observations to action logits
|_______|
/-> | value |
___________ _________ ________ / |_______|
state -> | conv_net | -> | fc_net | -> | lstm |
|__________| |________| |______| \\ |_______________|
/ \\ \\-> | action_logits |
h_in c_in |_______________|
"""
def __init__(
self,
obs_space,
action_space,
num_outputs,
model_config,
name,
**kwargs
):
super(RllibLSTMPPOModel, self).__init__(
obs_space, action_space, num_outputs, model_config, name
)
# params we passed in from rllib client
custom_params = model_config["custom_model_config"]
## Parse custom network params
num_hidden_layers = custom_params["NUM_HIDDEN_LAYERS"]
size_hidden_layers = custom_params["SIZE_HIDDEN_LAYERS"]
num_filters = custom_params["NUM_FILTERS"]
num_convs = custom_params["NUM_CONV_LAYERS"]
cell_size = custom_params["CELL_SIZE"]
### Create graph of the model ###
flattened_dim = np.prod(obs_space.shape)
# Need an extra batch dimension (None) for time dimension
flattened_obs_inputs = tf.keras.Input(
shape=(None, flattened_dim), name="input"
)
lstm_h_in = tf.keras.Input(shape=(cell_size,), name="h_in")
lstm_c_in = tf.keras.Input(shape=(cell_size,), name="c_in")
seq_in = tf.keras.Input(shape=(), name="seq_in", dtype=tf.int32)
# Restore initial observation shape
obs_inputs = tf.keras.layers.Reshape(
target_shape=(-1, *obs_space.shape)
)(flattened_obs_inputs)
out = obs_inputs
## Initial "vision" network
        # Apply initial conv layer with a larger kernel (why?)
if num_convs > 0:
out = tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.leaky_relu,
name="conv_initial",
)
)(out)
# Apply remaining conv layers, if any
for i in range(0, num_convs - 1):
padding = "same" if i < num_convs - 2 else "valid"
out = tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=[3, 3],
padding=padding,
activation=tf.nn.leaky_relu,
name="conv_{}".format(i),
)
)(out)
# Flatten spatial features
out = tf.keras.layers.TimeDistributed(tf.keras.layers.Flatten())(out)
# Apply dense hidden layers, if any
for i in range(num_hidden_layers):
out = tf.keras.layers.TimeDistributed(
tf.keras.layers.Dense(
units=size_hidden_layers,
activation=tf.nn.leaky_relu,
name="fc_{0}".format(i),
)
)(out)
## LSTM network
lstm_out, h_out, c_out = tf.keras.layers.LSTM(
cell_size, return_sequences=True, return_state=True, name="lstm"
)(
inputs=out,
mask=tf.sequence_mask(seq_in),
initial_state=[lstm_h_in, lstm_c_in],
)
# Linear last layer for action distribution logits
layer_out = tf.keras.layers.Dense(self.num_outputs, name="logits")(
lstm_out
)
# Linear last layer for value function branch of model
value_out = tf.keras.layers.Dense(1, name="values")(lstm_out)
self.cell_size = cell_size
self.base_model = tf.keras.Model(
inputs=[flattened_obs_inputs, seq_in, lstm_h_in, lstm_c_in],
outputs=[layer_out, value_out, h_out, c_out],
)
def forward_rnn(self, inputs, state, seq_lens):
"""
Run the forward pass of the model
Arguments:
inputs: np.array of shape [BATCH, T, obs_shape]
state: list of np.arrays [h_in, c_in] each of shape [BATCH, self.cell_size]
seq_lens: np.array of shape [BATCH] where the ith element is the length of the ith sequence
Output:
model_out: tensor of shape [BATCH, T, self.num_outputs] representing action logits
state: list of tensors [h_out, c_out] each of shape [BATCH, self.cell_size]
"""
model_out, self._value_out, h_out, c_out = self.base_model(
[inputs, seq_lens, state]
)
return model_out, [h_out, c_out]
def value_function(self):
"""
Returns a tensor of shape [BATCH * T] representing the value function for the most recent forward pass
"""
return tf.reshape(self._value_out, [-1])
def get_initial_state(self):
"""
Returns the initial hidden state for the LSTM
"""
return [
np.zeros(self.cell_size, np.float32),
np.zeros(self.cell_size, np.float32),
]
| 8,450 | 34.508403 | 110 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/ppo/plot_graph.py | import os
import matplotlib.font_manager
import matplotlib.pyplot as plt
import numpy as np
from evaluate import eval_models
from matplotlib.patches import Patch
# importing from utils causes weird dependency conflicts. Copying here
def set_style(font_scale=1.6):
import matplotlib
import seaborn
seaborn.set(font="serif", font_scale=font_scale)
# Make the background white, and specify the specific font family
seaborn.set_style(
"white",
{
"font.family": "serif",
"font.weight": "normal",
"font.serif": ["Times", "Palatino", "serif"],
"axes.facecolor": "white",
"lines.markeredgewidth": 1,
},
)
matplotlib.rcParams["text.usetex"] = True
matplotlib.rc("font", family="serif", serif=["Palatino"])
# each one is a len-5 dictionary, each value is a tuple of mean and se
PSP_PSP_0, hp_PSP_0, hp_PBC_0, hp_BC_0, bc_PBC_0 = eval_models(0)
_, hp_PSP_1, hp_PBC_1, hp_BC_1, bc_PBC_1 = eval_models(1)
def get_value(dic, pos):
"""
The dictionary consists of layout:(mean, std), and we extract either the mean or the std based on its position
"""
assert pos == 0 or pos == 1
ls = []
for key, values in dic.items():
ls.append(values[pos])
return ls
results_0 = [
get_value(PSP_PSP_0, 0),
get_value(hp_PSP_0, 0),
get_value(hp_PBC_0, 0),
get_value(hp_BC_0, 0),
get_value(hp_PSP_1, 0),
get_value(hp_PBC_1, 0),
get_value(hp_BC_1, 0),
]
dotted_line = [get_value(bc_PBC_0, 0), get_value(bc_PBC_1, 0)]
stds = [
get_value(PSP_PSP_0, 1),
get_value(hp_PSP_0, 1),
get_value(hp_PBC_0, 1),
get_value(hp_BC_0, 1),
get_value(hp_PSP_1, 1),
get_value(hp_PBC_1, 1),
get_value(hp_BC_1, 1),
]
hist_algos = [
"PPO_SP+PPO_SP",
"PPO_SP+HP",
"PPO_BC+HP",
"BC+HP",
"HP+PPO_SP",
"HP+PPO_BC",
"HP+BC",
]
set_style()
fig, ax0 = plt.subplots(1, figsize=(18, 6)) # figsize=(20,6))
plt.rc("legend", fontsize=21)
plt.rc("axes", titlesize=25)
ax0.tick_params(axis="x", labelsize=18.5)
ax0.tick_params(axis="y", labelsize=18.5)
# there are 5 layouts
ind = np.arange(5)
width = 0.1
deltas = [-2.9, -1.5, -0.5, 0.5, 1.9, 2.9, 3.9] # [-1, 0, 1, 2, 2.5, 3]
colors = ["#aeaeae", "#2d6777", "#F79646"]
# for each algo, total of 7
# in each loop, we plot the result for all 5 layouts for each algo
for i in range(len(hist_algos)):
delta, algo = deltas[i], hist_algos[i]
offset = ind + delta * width
if i == 0:
# this is the self-play vs self-play results, we don't want any color
color = "none"
else:
color = colors[i % 3]
if i == 0:
ax0.bar(
offset,
results_0[i],
width,
color=color,
edgecolor="gray",
lw=1.0,
zorder=0,
label=algo,
linestyle=":",
yerr=stds[i],
)
elif 1 <= i <= 3:
ax0.bar(
offset,
results_0[i],
width,
color=color,
lw=1.0,
zorder=0,
label=algo,
yerr=stds[i],
)
else:
ax0.bar(
offset,
results_0[i],
width,
color=color,
edgecolor="white",
lw=1.0,
zorder=0,
hatch="/",
yerr=stds[i],
)
fst = True
for h_line in dotted_line:
if fst:
ax0.hlines(
h_line[0],
xmin=-0.4,
xmax=0.4,
colors="red",
label="PPO_BC+BC",
linestyle=":",
)
fst = False
else:
ax0.hlines(h_line[0], xmin=-0.4, xmax=0.4, colors="red", linestyle=":")
ax0.hlines(h_line[1], xmin=0.6, xmax=1.4, colors="red", linestyle=":")
ax0.hlines(h_line[2], xmin=1.6, xmax=2.4, colors="red", linestyle=":")
ax0.hlines(h_line[3], xmin=2.6, xmax=3.45, colors="red", linestyle=":")
ax0.hlines(h_line[4], xmin=3.6, xmax=4.4, colors="red", linestyle=":")
ax0.set_ylabel("Average reward per episode")
ax0.set_title("Performance with Human Proxy Models")
ax0.set_xticks(ind + width / 3)
ax0.set_xticklabels(
(
"Cramped Rm.",
"Asymm. Adv.",
"Coord. Ring",
"Forced Coord.",
"Counter Circ.",
)
)
ax0.tick_params(axis="x", labelsize=18)
handles, labels = ax0.get_legend_handles_labels()
patch = Patch(
facecolor="white",
edgecolor="black",
hatch="/",
alpha=0.5,
label="Switched start indices",
)
handles.append(patch)
# plot the legend
ax0.legend(handles=handles, loc="best")
ax0.set_ylim(0, 250)
plt.savefig("graph.jpg", format="jpg", bbox_inches="tight")
plt.show()
| 4,747 | 24.390374 | 114 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/ppo/__init__.py | 0 | 0 | 0 | py |
|
overcooked_ai | overcooked_ai-master/src/human_aware_rl/ppo/ppo_rllib_test.py | import glob
import os
import pickle
import random
import shutil
import unittest
import warnings
import ray
os.environ["RUN_ENV"] = "local"
import numpy as np
import tensorflow as tf
from human_aware_rl.data_dir import DATA_DIR
from human_aware_rl.imitation.behavior_cloning_tf2 import (
get_bc_params,
train_bc_model,
)
from human_aware_rl.ppo.ppo_rllib_client import ex
from human_aware_rl.ppo.ppo_rllib_from_params_client import ex_fp
from human_aware_rl.rllib.rllib import load_agent, load_agent_pair
from human_aware_rl.static import PPO_EXPECTED_DATA_PATH
from human_aware_rl.utils import get_last_episode_rewards
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld
# Note: using the same seed across architectures can still result in differing values
def set_global_seed(seed):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
tf.compat.v1.set_random_seed(seed)
def _clear_pickle():
# Write an empty dictionary to our static "expected" results location
with open(PPO_EXPECTED_DATA_PATH, "wb") as f:
pickle.dump({}, f)
class TestPPORllib(unittest.TestCase):
"""
Unittests for rllib PPO training loop
compute_pickle (bool): Whether the results of this test should be stored as the expected values for future tests
strict (bool): Whether the results of this test should be compared against expected values for exact match
min_performance (int): Minimum sparse reward that must be achieved during training for test to count as "success"
Note, this test always performs a basic sanity check to verify some learning is happening, even if the `strict` param is false
"""
def __init__(self, test_name):
super(TestPPORllib, self).__init__(test_name)
# default parameters, feel free to change
self.compute_pickle = False
# Reproducibility test
self.strict = False
        # this value exists as a sanity check since random actions can still obtain at least this much dense reward
        # some performance thresholds are hardcoded and are determined by comparing empirical performances with and without actual training/gradient updates
self.min_performance = 5
assert not (
self.compute_pickle and self.strict
), "Cannot compute pickle and run strict reproducibility tests at same time"
if self.compute_pickle:
_clear_pickle()
def setUp(self):
set_global_seed(0)
print(
"\nIn Class {}, in Method {}".format(
self.__class__.__name__, self._testMethodName
)
)
# unittest generates a lot of warning msgs due to third-party dependencies (e.g. ray[rllib] using outdated np methods)
        # not a problem when run directly, but when using -m unittest this helps filter out the warnings
warnings.filterwarnings("ignore")
# Setting CWD
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Temporary disk space to store logging results from tests
self.temp_results_dir = os.path.join(
os.path.abspath("."), "results_temp"
)
self.temp_model_dir = os.path.join(os.path.abspath("."), "model_temp")
# Make all necessary directories
if not os.path.exists(self.temp_model_dir):
os.makedirs(self.temp_model_dir)
if not os.path.exists(self.temp_results_dir):
os.makedirs(self.temp_results_dir)
# Load in expected values (this is an empty dict if compute_pickle=True)
with open(PPO_EXPECTED_DATA_PATH, "rb") as f:
self.expected = pickle.load(f)
def tearDown(self):
# Write results of this test to disk for future reproducibility tests
# Note: This causes unit tests to have a side effect (generally frowned upon) and only works because
# unittest is single threaded. If tests were run concurrently this could result in a race condition!
if self.compute_pickle:
with open(PPO_EXPECTED_DATA_PATH, "wb") as f:
pickle.dump(self.expected, f)
# Cleanup
shutil.rmtree(self.temp_results_dir)
shutil.rmtree(self.temp_model_dir)
ray.shutdown()
def test_save_load(self):
# Train a quick self play agent for 2 iterations
ex.run(
config_updates={
# Please feel free to modify the parameters below
"results_dir": self.temp_results_dir,
"experiment_name": "save_load_test",
"layout_name": "cramped_room",
"num_workers": 1,
"train_batch_size": 800,
"sgd_minibatch_size": 800,
"num_training_iters": 2,
"evaluation_interval": 10,
"entropy_coeff_start": 0.0,
"entropy_coeff_end": 0.0,
"use_phi": False,
"evaluation_display": False,
"verbose": False,
},
options={"--loglevel": "ERROR"},
)
        # Kill all ray processes to ensure loading works in a vacuum
ray.shutdown()
# Where the agent is stored (this is kind of hardcoded, would like for it to be more easily obtainable)
# 2 checkpoints(checkpoint_000001 and checkpoint_000002) are saved
# since we are only interested in reproducing the same actions, either one should be fine
load_path = os.path.join(
glob.glob(os.path.join(self.temp_results_dir, "save_load_test*"))[
0
],
"checkpoint_000002",
)
# Load a dummy state
mdp = OvercookedGridworld.from_layout_name("cramped_room")
state = mdp.get_standard_start_state()
# Ensure simple single-agent loading works
agent_0 = load_agent(load_path)
agent_0.reset()
agent_1 = load_agent(load_path)
agent_1.reset()
# Ensure forward pass of policy network still works
_, _ = agent_0.action(state)
_, _ = agent_1.action(state)
# Now let's load an agent pair and evaluate it
agent_pair = load_agent_pair(load_path)
ae = AgentEvaluator.from_layout_name(
mdp_params={"layout_name": "cramped_room"},
env_params={"horizon": 400},
)
# We assume no runtime errors => success, no performance consistency check for now
ae.evaluate_agent_pair(agent_pair, 1, info=False)
def test_ppo_sp_no_phi(self):
# Train a self play agent for 20 iterations
results = ex.run(
config_updates={
# Please feel free to modify the parameters below
"results_dir": self.temp_results_dir,
"num_workers": 2,
"train_batch_size": 800,
"sgd_minibatch_size": 800,
"num_training_iters": 30,
"evaluation_interval": 10,
"entropy_coeff_start": 0.0,
"entropy_coeff_end": 0.0,
"use_phi": False,
"evaluation_display": False,
"verbose": False,
},
options={"--loglevel": "ERROR"},
).result
# Sanity check (make sure it begins to learn to receive dense reward)
self.assertGreaterEqual(
results["average_total_reward"], self.min_performance
)
if self.compute_pickle:
self.expected["test_ppo_sp_no_phi"] = results
# Reproducibility test
if self.strict:
self.assertDictEqual(results, self.expected["test_ppo_sp_no_phi"])
def test_ppo_sp_yes_phi(self):
# Train a self play agent for 20 iterations
results = ex.run(
config_updates={
# Please feel free to modify the parameters below
"results_dir": self.temp_results_dir,
"num_workers": 2,
"train_batch_size": 1600,
"sgd_minibatch_size": 800,
"num_training_iters": 30,
"evaluation_interval": 10,
"entropy_coeff_start": 0.0,
"entropy_coeff_end": 0.0,
"use_phi": True,
"evaluation_display": False,
"verbose": False,
"lr": 5e-3,
},
options={"--loglevel": "ERROR"},
).result
# Sanity check (make sure it begins to learn to receive dense reward)
        # This value is determined by comparing empirical performances with and without actual training updates
self.assertGreaterEqual(results["average_total_reward"], 15)
if self.compute_pickle:
self.expected["test_ppo_sp_yes_phi"] = results
# Reproducibility test
if self.strict:
self.assertDictEqual(results, self.expected["test_ppo_sp_yes_phi"])
def test_ppo_fp_sp_no_phi(self):
# Train a self play agent for 20 iterations
results = ex_fp.run(
config_updates={
"results_dir": self.temp_results_dir,
"num_workers": 2,
"train_batch_size": 2400,
"sgd_minibatch_size": 800,
"num_training_iters": 30,
"evaluation_interval": 10,
"use_phi": False,
"entropy_coeff_start": 0.0002,
"entropy_coeff_end": 0.00005,
"lr": 5e-3,
"seeds": [0],
"outer_shape": (5, 4),
"evaluation_display": False,
"verbose": False,
},
options={"--loglevel": "ERROR"},
).result
# Sanity check (make sure it begins to learn to receive dense reward)
self.assertGreaterEqual(results["average_total_reward"], 7)
if self.compute_pickle:
self.expected["test_ppo_fp_sp_no_phi"] = results
# Reproducibility test
if self.strict:
self.assertDictEqual(
results, self.expected["test_ppo_fp_sp_no_phi"]
)
def test_ppo_fp_sp_yes_phi(self):
# Train a self play agent for 20 iterations
results = ex_fp.run(
config_updates={
"results_dir": self.temp_results_dir,
"num_workers": 2,
"train_batch_size": 1600,
"sgd_minibatch_size": 800,
"num_training_iters": 30,
"evaluation_interval": 10,
"use_phi": True,
"entropy_coeff_start": 0.0002,
"entropy_coeff_end": 0.00005,
"lr": 7e-4,
"seeds": [0],
"outer_shape": (5, 4),
"evaluation_display": False,
"verbose": False,
},
options={"--loglevel": "ERROR"},
).result
# Sanity check (make sure it begins to learn to receive dense reward)
self.assertGreaterEqual(
results["average_total_reward"], self.min_performance
)
if self.compute_pickle:
self.expected["test_ppo_fp_sp_yes_phi"] = results
# Reproducibility test
if self.strict:
self.assertDictEqual(
results, self.expected["test_ppo_fp_sp_yes_phi"]
)
def test_ppo_bc(self):
# Train bc model
model_dir = self.temp_model_dir
params_to_override = {
"layouts": ["asymmetric_advantages_tomato"],
"data_path": None,
"epochs": 10,
}
bc_params = get_bc_params(**params_to_override)
train_bc_model(model_dir, bc_params)
# Train rllib model
config_updates = {
"results_dir": self.temp_results_dir,
"bc_schedule": [(0.0, 0.0), (8e3, 1.0)],
"num_training_iters": 20,
"bc_model_dir": model_dir,
"evaluation_interval": 5,
"verbose": False,
"layout_name": "asymmetric_advantages_tomato",
}
results = ex.run(
config_updates=config_updates, options={"--loglevel": "ERROR"}
).result
# Sanity check
        # This value is determined by comparing empirical performances with and without actual training updates
self.assertGreaterEqual(results["average_total_reward"], 30)
if self.compute_pickle:
self.expected["test_ppo_bc"] = results
# Reproducibility test
if self.strict:
self.assertDictEqual(results, self.expected["test_ppo_bc"])
def test_resume_functionality(self):
load_path = os.path.join(
os.path.abspath("."),
"trained_example/checkpoint_000500",
)
# Load and train an agent for another iteration
results = ex_fp.run(
config_updates={
"results_dir": self.temp_results_dir,
"num_workers": 1,
"num_training_iters": 1,
"resume_checkpoint_path": load_path,
"verbose": False,
"evaluation_display": False,
},
options={"--loglevel": "ERROR"},
).result
# Test that the rewards from 1 additional iteration are not too different from the original model
# performance
threshold = 0.1
rewards = get_last_episode_rewards("trained_example/result.json")
# Test total reward
self.assertAlmostEqual(
rewards["episode_reward_mean"],
results["average_total_reward"],
delta=threshold * rewards["episode_reward_mean"],
)
# Test sparse reward
self.assertAlmostEqual(
rewards["sparse_reward_mean"],
results["average_sparse_reward"],
delta=threshold * rewards["sparse_reward_mean"],
)
if __name__ == "__main__":
warnings.filterwarnings("ignore")
unittest.main()
| 14,076 | 36.044737 | 154 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/ppo/plot_example_experiments.py | import os
import re
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from human_aware_rl.utils import *
from human_aware_rl.utils import set_style
envs = [
"cramped_room",
"forced_coordination",
"counter_circuit_o_1",
"coordination_ring",
"asymmetric_advantages",
]
def get_list_experiments(path):
result = {}
subdirs = [
name
for name in os.listdir(path)
if os.path.isdir(os.path.join(path, name))
]
for env in envs:
result[env] = {
"files": [path + "/" + x for x in subdirs if re.search(env, x)]
}
return result
def get_statistics(dict):
for env in dict:
rewards = [
get_last_episode_rewards(file + "/result.json")[
"sparse_reward_mean"
]
for file in dict[env]["files"]
]
dict[env]["rewards"] = rewards
dict[env]["std"] = np.std(rewards)
dict[env]["mean"] = np.mean(rewards)
return dict
def plot_statistics(dict):
names = []
stds = []
means = []
for env in dict:
names.append(env)
stds.append(dict[env]["std"])
means.append(dict[env]["mean"])
x_pos = np.arange(len(names))
matplotlib.rc("xtick", labelsize=7)
fig, ax = plt.subplots()
ax.bar(
x_pos,
means,
yerr=stds,
align="center",
alpha=0.5,
ecolor="black",
capsize=10,
)
ax.set_ylabel("Average reward per episode")
ax.set_xticks(x_pos)
ax.set_xticklabels(names)
ax.yaxis.grid(True)
# Save the figure and show
plt.tight_layout()
plt.savefig("example_rewards.png")
plt.show()
if __name__ == "__main__":
experiments = get_list_experiments("results")
experiments_results = get_statistics(experiments)
plot_statistics(experiments_results)
| 1,874 | 21.321429 | 75 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/static/__init__.py | import os
_curr_directory = os.path.dirname(os.path.abspath(__file__))
# Root dir where all human data is located
HUMAN_DATA_DIR = os.path.join(_curr_directory, "human_data")
# Paths to pre-processed data
CLEAN_HUMAN_DATA_DIR = os.path.join(HUMAN_DATA_DIR, "cleaned")
CLEAN_2020_HUMAN_DATA_ALL = os.path.join(
CLEAN_HUMAN_DATA_DIR, "2020_hh_trials_all.pickle"
)
CLEAN_2020_HUMAN_DATA_TRAIN = os.path.join(
CLEAN_HUMAN_DATA_DIR, "2020_hh_trials_train.pickle"
)
CLEAN_2020_HUMAN_DATA_TEST = os.path.join(
CLEAN_HUMAN_DATA_DIR, "2020_hh_trials_test.pickle"
)
CLEAN_2019_HUMAN_DATA_ALL = os.path.join(
CLEAN_HUMAN_DATA_DIR, "2019_hh_trials_all.pickle"
)
CLEAN_2019_HUMAN_DATA_TRAIN = os.path.join(
CLEAN_HUMAN_DATA_DIR, "2019_hh_trials_train.pickle"
)
CLEAN_2019_HUMAN_DATA_TEST = os.path.join(
CLEAN_HUMAN_DATA_DIR, "2019_hh_trials_test.pickle"
)
# Human data tests (smaller datasets for more efficient sanity checks)
DUMMY_HUMAN_DATA_DIR = os.path.join(HUMAN_DATA_DIR, "dummy")
DUMMY_2020_CLEAN_HUMAN_DATA_PATH = os.path.join(
DUMMY_HUMAN_DATA_DIR, "dummy_2020_hh_trials.pickle"
)
DUMMY_2020_RAW_HUMAN_DATA_PATH = os.path.join(
DUMMY_HUMAN_DATA_DIR, "dummy_2020_hh_trials.csv"
)
DUMMY_2019_CLEAN_HUMAN_DATA_PATH = os.path.join(
DUMMY_HUMAN_DATA_DIR, "dummy_2019_hh_trials_all.pickle"
)
DUMMY_2019_RAW_HUMAN_DATA_PATH = os.path.join(
DUMMY_HUMAN_DATA_DIR, "dummy_2019_hh_trials.csv"
)
# Expected values for reproducibility unit tests
BC_EXPECTED_DATA_PATH = os.path.join(
_curr_directory, "testing_data", "bc", "expected.pickle"
)
PPO_EXPECTED_DATA_PATH = os.path.join(
_curr_directory, "testing_data", "ppo", "expected.pickle"
)
# Human data constants
OLD_SCHEMA = set(
[
"Unnamed: 0",
"Unnamed: 0.1",
"cur_gameloop",
"datetime",
"is_leader",
"joint_action",
"layout",
"layout_name",
"next_state",
"reward",
"round_num",
"round_type",
"score",
"state",
"time_elapsed",
"time_left",
"is_wait",
"completed",
"run",
"workerid_num",
]
)
NEW_SCHEMA = set(
[
"state",
"joint_action",
"reward",
"time_left",
"score",
"time_elapsed",
"cur_gameloop",
"layout",
"layout_name",
"trial_id",
"player_0_id",
"player_1_id",
"player_0_is_human",
"player_1_is_human",
]
)
LAYOUTS_WITH_DATA_2019 = set(
[
"asymmetric_advantages",
"coordination_ring",
"cramped_room",
"random0",
"random3",
]
)
LAYOUTS_WITH_DATA_2020 = set(
[
"asymmetric_advantages_tomato",
"counter_circuit",
"cramped_corridor",
"inverse_marshmallow_experiment",
"marshmallow_experiment",
"marshmallow_experiment_coordination",
"soup_coordination",
"you_shall_not_pass",
]
)
LAYOUTS_WITH_DATA = LAYOUTS_WITH_DATA_2019.union(LAYOUTS_WITH_DATA_2020)
| 3,062 | 24.106557 | 72 | py |
CBA | CBA-main/vignette.py | #this file is to teach you how to use CBA
"""
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import kBET
import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
import sklearn.metrics as sm
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from imblearn.over_sampling import SMOTE,ADASYN
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import dendrogram, linkage
###############################################################################
#input the data
RAWseries1= #batch one, gene * cell
RAWseries2= #batch two, gene * cell
#input the label
choose_seriestype1= #cluster1, cell * 1, the element should be like 'gamma', not number
choose_seriestype2= #cluster2, cell * 1, the element should be like 'gamma', not number
#input the gene name
genename= #gene name, (gene * none)
fromname= #a name prefix used when saving the output figures
#we choose some parameters
min_cells= #remove some genes, expressed in less than 50 cells
pca_dim= #the number of PCs, you can choose as you like
minnumberofcluster= #this parameter is used for doing Louvain clustering again
#because the clusters obtained by Louvain are sometimes quite big, you can do Louvain again for each obtained cluster
#there is no strict rule: if you think the clusters are big, you can do it, judge it by yourself
#clusters with more than $minnumberofcluster$ cells will be clustered again to make them smaller
#I think this hardly influences the result, it just makes it beautiful, so you can choose it!
clusternumber= #the number of neighbors when doing the cluster matching, we choose one neighbor, but you can choose more
chosen_cluster= #select your target cell types, like ['alpha','beta','ductal','acinar','delta','gamma','endothelial','epsilon']
cluster_index2= #give each cell type an index, like {'alpha':0,'beta':1,'ductal':2,'acinar':3,'delta':4,'gamma':5,'endothelial':6,'epsilon':7}
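#for illustration only, a hypothetical way these parameters could be filled in (values are assumptions, adjust for your own data):
#min_cells=50
#pca_dim=50
#minnumberofcluster=300
#clusternumber=1
#chosen_cluster=['alpha','beta','ductal','acinar','delta','gamma','endothelial','epsilon']
#cluster_index2={'alpha':0,'beta':1,'ductal':2,'acinar':3,'delta':4,'gamma':5,'endothelial':6,'epsilon':7}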
###############################################################################
#merge them
Alldata=np.concatenate([RAWseries1.T,RAWseries2.T])
Alllabel=np.concatenate([choose_seriestype1,choose_seriestype2])
Allbatch=np.concatenate([np.zeros(choose_seriestype1.shape[0]),np.zeros(choose_seriestype2.shape[0])+1])
###############################################################################
#ok, we select some interesting cell types
chosen_index=np.arange(Alllabel.shape[0])
for i in range(Alllabel.shape[0]):
if Alllabel[i] in chosen_cluster:
chosen_index[i]=1
else:
chosen_index[i]=0
Alldata=Alldata[chosen_index==1,:]
Allbatch=Allbatch[chosen_index==1]
Alllabel=Alllabel[chosen_index==1]
###############################################################################
#and then, use numbers to replace the names of the cell types
Numlabel=np.zeros(Alllabel.shape[0])
for i in range(Alllabel.shape[0]):
Numlabel[i]=cluster_index2[Alllabel[i][0]]
###############################################################################
#use Scanpy!!!
anndata=sc.AnnData(pd.DataFrame(Alldata,columns=genename))
sc.pp.filter_genes(anndata,min_cells=min_cells)
sc.pp.normalize_per_cell(anndata,counts_per_cell_after=1e4)
sc.pp.log1p(anndata)
sc.pp.highly_variable_genes(anndata)
sc.pl.highly_variable_genes(anndata)
anndata=anndata[:,anndata.var['highly_variable']]
sc.pl.highest_expr_genes(anndata,n_top=20)
sc.tl.pca(anndata,n_comps=100,svd_solver='arpack')
sc.pl.pca(anndata)
sc.pl.pca_variance_ratio(anndata,log=True,n_pcs=100,save=[True,'pancreas'])
#after preprocessing, we rename these datasets
Alldata_aft=anndata.obsm['X_pca'][:,0:pca_dim]
#this is for the preparation of deep learning training, the training is hard if you don't do that
Alldata_aft=preprocessing.StandardScaler().fit_transform(Alldata_aft)
Alldata_aft=preprocessing.MinMaxScaler().fit_transform(Alldata_aft)
PCAseries1=Alldata_aft[Allbatch==0,:][Numlabel[Allbatch==0].argsort()]
PCAseries2=Alldata_aft[Allbatch==1,:][Numlabel[Allbatch==1].argsort()]
choose_seriestype1=Numlabel[Allbatch==0][Numlabel[Allbatch==0].argsort()].astype('int')
choose_seriestype2=Numlabel[Allbatch==1][Numlabel[Allbatch==1].argsort()].astype('int')
###############################################################################
#do Louvain clustering
cluster_series1=sc.AnnData(PCAseries1)
cluster_series2=sc.AnnData(PCAseries2)
sc.pp.neighbors(cluster_series1,n_pcs=0)
sc.pp.neighbors(cluster_series2,n_pcs=0)
sc.tl.umap(cluster_series1)
sc.tl.umap(cluster_series2)
sc.tl.louvain(cluster_series1)
sc.tl.louvain(cluster_series2)
sc.pl.umap(cluster_series1,color='louvain',size=30)
sc.pl.umap(cluster_series2,color='louvain',size=30)
cluster1=np.array(list(map(int,cluster_series1.obs['louvain'])))
cluster2=np.array(list(map(int,cluster_series2.obs['louvain'])))
###############################################################################
#ok, as you like, you can do clustering for each cluster, or not
recluster1=np.zeros(cluster1.shape[0])
recluster2=np.zeros(cluster2.shape[0])
palsecluster1=cluster1
count_cluster1=pd.value_counts(cluster_series1.obs['louvain'])
while True:#until there are no clusters with more than $minnumberofcluster$ cells
if count_cluster1.max()<minnumberofcluster:
break
else:
print(count_cluster1.max())
recluster1=np.zeros(cluster1.shape[0])
recluster1_number=0
for i in np.unique(palsecluster1):
index=palsecluster1==i
if index.sum()<minnumberofcluster:
thisrecluster=np.zeros(index.sum())
recluster1[index]=thisrecluster+recluster1_number
recluster1_number=len(np.unique(recluster1))
else:
data=PCAseries1[index]
anndata=sc.AnnData(data)
sc.pp.neighbors(anndata,n_pcs=0)
sc.tl.louvain(anndata)
thisrecluster=np.array(list(map(int,anndata.obs['louvain'])))
recluster1[index]=thisrecluster+recluster1_number
recluster1_number=len(np.unique(recluster1))
palsecluster1=recluster1.astype('int')
count_cluster1=pd.value_counts(palsecluster1)
palsecluster2=cluster2
count_cluster2=pd.value_counts(cluster_series2.obs['louvain'])
while True:#until there are no clusters with more than $minnumberofcluster$ cells
if count_cluster2.max()<minnumberofcluster:
break
else:
print(count_cluster2.max())
recluster2=np.zeros(cluster2.shape[0])
recluster2_number=0
for i in np.unique(palsecluster2):
index=palsecluster2==i
if index.sum()<minnumberofcluster:
thisrecluster=np.zeros(index.sum())
recluster2[index]=thisrecluster+recluster2_number
recluster2_number=len(np.unique(recluster2))
else:
data=PCAseries2[index]
anndata=sc.AnnData(data)
sc.pp.neighbors(anndata,n_pcs=0)
sc.tl.louvain(anndata)
thisrecluster=np.array(list(map(int,anndata.obs['louvain'])))
recluster2[index]=thisrecluster+recluster2_number
recluster2_number=len(np.unique(recluster2))
palsecluster2=recluster2.astype('int')
count_cluster2=pd.value_counts(palsecluster2)
recluster1=palsecluster1
recluster2=palsecluster2
###############################################################################
#show the Louvain results
series1=sc.AnnData(PCAseries1)
series2=sc.AnnData(PCAseries2)
sc.pp.neighbors(series1,n_pcs=0)
sc.pp.neighbors(series2,n_pcs=0)
sc.tl.umap(series1)
sc.tl.umap(series2)
df1=pd.DataFrame(choose_seriestype1)
df1=pd.Series(np.reshape(df1.values,df1.values.shape[0]), dtype="category")
series1.obs['real']=df1.values
df2=pd.DataFrame(choose_seriestype2)
df2=pd.Series(np.reshape(df2.values,df2.values.shape[0]), dtype="category")
series2.obs['real']=df2.values
sc.pl.umap(series1,color='real',size=30)
sc.pl.umap(series2,color='real',size=30)
df1=pd.DataFrame(recluster1.astype('int'))
df1=pd.Series(np.reshape(df1.values,df1.values.shape[0]), dtype="category")
series1.obs['recluster']=df1.values
df2=pd.DataFrame(recluster2.astype('int'))
df2=pd.Series(np.reshape(df2.values,df2.values.shape[0]), dtype="category")
series2.obs['recluster']=df2.values
sc.pl.umap(series1,color='recluster',size=30)
sc.pl.umap(series2,color='recluster',size=30)
###############################################################################
#this is used to select the metric when selecting neighbor clusters
def dis(P,Q,distance_method):
if distance_method==0:#euclidean distance
return np.sqrt(np.sum(np.square(P-Q)))
if distance_method==1:#cos distance
return 1-(np.multiply(P,Q).sum()/(np.sqrt(np.sum(np.square(P)))*np.sqrt(np.sum(np.square(Q)))))
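#quick worked example (illustrative): for the orthogonal unit vectors P=[1,0] and Q=[0,1],
#dis(P,Q,0) gives the euclidean distance sqrt(2) (about 1.414) and dis(P,Q,1) gives the cosine distance 1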
###############################################################################
#you can choose whether to swap the two batches or not (here the batch with fewer re-clusters is used as batch 1)
if len(np.unique(recluster1))>=len(np.unique(recluster2)):
a=PCAseries1
PCAseries1=PCAseries2
PCAseries2=a
b=choose_seriestype1
choose_seriestype1=choose_seriestype2
choose_seriestype2=b
c=cluster1
cluster1=cluster2
cluster2=c
d=recluster1
recluster1=recluster2
recluster2=d
###############################################################################
#ok, let's calculate the similarity of cells/clusters
correlation_recluster=np.zeros([len(np.unique(recluster1)),len(np.unique(recluster2))])
correlation_recluster_cell=np.zeros([recluster1.shape[0],recluster2.shape[0]])
for i in range(len(np.unique(recluster1))):
for j in range(len(np.unique(recluster2))):
print(i,j)
index_series1=np.where(recluster1==i)[0]
index_series2=np.where(recluster2==j)[0]
cell_series1=PCAseries1[index_series1,:]
cell_series2=PCAseries2[index_series2,:]
mean1=0
for iq in range(cell_series1.shape[0]):
for jq in range(cell_series2.shape[0]):
mean1+=dis(cell_series1[iq,:],cell_series2[jq,:],1)
correlation_recluster[i,j]=mean1/(cell_series1.shape[0]*cell_series2.shape[0])
for ii in range(cell_series1.shape[0]):
for jj in range(cell_series2.shape[0]):
mean2=dis(cell_series1[ii,:],cell_series2[jj,:],0)
correlation_recluster_cell[index_series1[ii],index_series2[jj]]=mean2
plt.imshow(correlation_recluster)
plt.imshow(correlation_recluster_cell)
correlation_recluster_div=-np.log10(correlation_recluster)
correlation_recluster_cell_div=-np.log10(correlation_recluster_cell)
correlation_recluster_norm=(correlation_recluster_div-correlation_recluster_div.min())/(correlation_recluster_div.max()-correlation_recluster_div.min())
correlation_recluster_cell_norm=(correlation_recluster_cell_div-correlation_recluster_cell_div.min())/(correlation_recluster_cell_div.max()-correlation_recluster_cell_div.min())
#show them
plt.imshow(correlation_recluster_norm)
plt.imshow(correlation_recluster_cell_norm)
###############################################################################
#remove bad parts, do the matching
correlation_recluster_select=np.zeros(correlation_recluster_norm.shape)
recluster_mid=np.zeros(recluster1.shape)
for kk in range(correlation_recluster_norm.shape[0]):
ind=np.sort(correlation_recluster_norm[kk,:])
select=correlation_recluster_norm[kk,:]<ind[-clusternumber]
select=(select==False)
recluster_mid[recluster1==kk]+=int(np.where(select==True)[0])
correlation_recluster_select[kk,:]=correlation_recluster_norm[kk,:]*select
plt.imshow(correlation_recluster_select)
correlation_recluster_cell_final=correlation_recluster_cell*0
for i in range(correlation_recluster_cell_norm.shape[0]):
for j in range(correlation_recluster_cell_norm.shape[1]):
label1=recluster1[i]
label2=recluster2[j]
mean1=correlation_recluster_select[label1,label2]
mean2=correlation_recluster_cell_norm[i,j]
if mean1==0:
correlation_recluster_cell_final[i,j]=0
else:
correlation_recluster_cell_final[i,j]=mean2
plt.imshow(correlation_recluster_select)
plt.imshow(correlation_recluster_cell_final)
recluster1=recluster_mid.astype('int')
sort_correlation_recluster_cell_final=correlation_recluster_cell_final[recluster1.argsort(),:]
sort_correlation_recluster_cell_final=sort_correlation_recluster_cell_final[:,recluster2.argsort()]
###############################################################################
#heatmap
heatmap(correlation_recluster_cell_final,choose_seriestype1,choose_seriestype2,save=False,name='pancreasmatrix')
heatmap(sort_correlation_recluster_cell_final,np.sort(recluster1)+9,np.sort(recluster2)+9,save=False,name='ourpancreasmatrix')
###############################################################################
#ok, I use keras; cells in each input are randomly selected, and I don't know how to match cells with their similarity
#I also don't know how to match the cell part with their distance, so I design the following inputs
#it will waste some time, and it's not easy or clear for readers, but it works!
x_input1=np.zeros([PCAseries1.shape[0],PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]+recluster2.max()+1])
x_input2=np.zeros([PCAseries2.shape[0],PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]+recluster2.max()+1])
for i in range(PCAseries1.shape[0]):
print(i)
x_input1[i,0:PCAseries1.shape[1]]=PCAseries1[i,:]
x_input1[i,PCAseries1.shape[1]:PCAseries1.shape[1]+PCAseries1.shape[0]]=K.utils.np_utils.to_categorical(i,PCAseries1.shape[0])
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]=correlation_recluster_cell_final[i,:]
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]:]=K.utils.np_utils.to_categorical(recluster1[i],recluster2.max()+1)
for j in range(PCAseries2.shape[0]):
print(j)
x_input2[j,0:PCAseries2.shape[1]]=PCAseries2[j,:]
x_input2[j,PCAseries2.shape[1]:PCAseries2.shape[1]+PCAseries2.shape[0]]=K.utils.np_utils.to_categorical(j,PCAseries2.shape[0])
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]:PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]]=correlation_recluster_cell_final[:,j]
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]:]=K.utils.np_utils.to_categorical(recluster2[j],recluster2.max()+1)
###############################################################################
#interesting, I need to make two batches have the same number of cells, so I have to copy cells again and again
if x_input1.shape[0]>=x_input2.shape[0]:
x_test1=x_input1
y_test1=recluster1
y_testreal1=choose_seriestype1
repeat_num=int(np.ceil(x_input1.shape[0]/x_input2.shape[0]))
x_test2=np.tile(x_input2,(repeat_num,1))
y_test2=np.tile(recluster2,repeat_num)
y_testreal2=np.tile(choose_seriestype2,repeat_num)
x_test2=x_test2[0:x_test1.shape[0],:]
y_test2=y_test2[0:x_test1.shape[0]]
y_testreal2=y_testreal2[0:x_test1.shape[0]]
elif x_input1.shape[0]<x_input2.shape[0]:
x_test2=x_input2
y_test2=recluster2
y_testreal2=choose_seriestype2
repeat_num=int(np.ceil(x_input2.shape[0]/x_input1.shape[0]))
x_test1=np.tile(x_input1,(repeat_num,1))
y_test1=np.tile(recluster1,repeat_num)
y_testreal1=np.tile(choose_seriestype1,repeat_num)
x_test1=x_test1[0:x_test2.shape[0],:]
y_test1=y_test1[0:x_test2.shape[0]]
y_testreal1=y_testreal1[0:x_test2.shape[0]]
###############################################################################
def choose_info(x,info_number):
return x[:,0:info_number]
def choose_index(x,info_number,x_samplenumber):
return x[:,info_number:info_number+x_samplenumber]
def choose_corrlation(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber:info_number+x_samplenumber+cor_number]
def choose_relabel(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber+cor_number:]
def slic(input_):
return input_[:,0]
###############################################################################
activation='relu'
info_number=PCAseries1.shape[1]
layer=PCAseries1.shape[1]
input1=K.Input(shape=(x_test1.shape[1],))#line1 species1
input2=K.Input(shape=(x_test2.shape[1],))#line1 species2
input3=K.Input(shape=(x_test1.shape[1],))#line2 species1
input4=K.Input(shape=(x_test2.shape[1],))#line2 species2
Data1=Lambda(choose_info,arguments={'info_number':info_number})(input1)
Data2=Lambda(choose_info,arguments={'info_number':info_number})(input2)
Data3=Lambda(choose_info,arguments={'info_number':info_number})(input3)
Data4=Lambda(choose_info,arguments={'info_number':info_number})(input4)
Index1=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input1)
Index2=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input2)
Index3=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input3)
Index4=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input4)
Cor1=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Cor2=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Cor3=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Cor4=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
Relabel1=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Relabel2=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Relabel3=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Relabel4=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
x_concat1=layers.concatenate([Data1,Data3])#batch1
x_concat2=layers.concatenate([Data2,Data4])#batch2
x1=layers.Dense(layer,activation=activation)(Data1)
x2=layers.Dense(layer,activation=activation)(Data2)
x3=layers.Dense(layer,activation=activation)(Data3)
x4=layers.Dense(layer,activation=activation)(Data4)
x1=layers.BatchNormalization()(x1)
x2=layers.BatchNormalization()(x2)
x3=layers.BatchNormalization()(x3)
x4=layers.BatchNormalization()(x4)
x1_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x2_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x1_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x2_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x1_mid1=layers.BatchNormalization()(x1_mid1)
x2_mid1=layers.BatchNormalization()(x2_mid1)
x1_mid2=layers.BatchNormalization()(x1_mid2)
x2_mid2=layers.BatchNormalization()(x2_mid2)
layer_classify=layers.Dense(recluster2.max()+1,activation='relu')
y1=layer_classify(x1_mid1)
y2=layer_classify(x2_mid1)
y3=layer_classify(x1_mid2)
y4=layer_classify(x2_mid2)
x1=layers.concatenate([x1_mid1,x1_mid2])#batch1
x2=layers.concatenate([x2_mid1,x2_mid2])#batch2
output1=layers.Dense(2*layer,activation=activation)(x1)
output2=layers.Dense(2*layer,activation=activation)(x2)
output1=layers.BatchNormalization()(output1)
output2=layers.BatchNormalization()(output2)
def loss_weight(input_):
return tf.reduce_sum(tf.multiply(input_[0],input_[1]),axis=-1)
def MSE(input_):
return tf.reduce_mean(tf.square(input_[0]-input_[1]),axis=-1)
def multi_classification_loss(input_):
return tf.keras.losses.categorical_crossentropy(input_[0],input_[1])
AE_loss_1=Lambda(MSE)([output1,x_concat1])
AE_loss_2=Lambda(MSE)([output2,x_concat2])
cls_loss_1=Lambda(MSE)([y1,Relabel1])
cls_loss_2=Lambda(MSE)([y2,Relabel2])
cls_loss_3=Lambda(MSE)([y3,Relabel3])
cls_loss_4=Lambda(MSE)([y4,Relabel4])
interweight1=Lambda(loss_weight)([Index1,Cor2])
interweight4=Lambda(loss_weight)([Index3,Cor4])
interloss_1=Lambda(MSE)([x1_mid1,x2_mid1])
interloss_4=Lambda(MSE)([x1_mid2,x2_mid2])
interloss_1=layers.Multiply()([interweight1,interloss_1])
interloss_4=layers.Multiply()([interweight4,interloss_4])
intraweight1=Lambda(loss_weight)([Relabel1,Relabel3])
intraweight2=Lambda(loss_weight)([Relabel2,Relabel4])
intraloss_1=Lambda(MSE)([x1_mid1,x1_mid2])
intraloss_2=Lambda(MSE)([x2_mid1,x2_mid2])
intraloss_1=layers.Multiply()([intraweight1,intraloss_1])
intraloss_2=layers.Multiply()([intraweight2,intraloss_2])
Loss1=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss1')([AE_loss_1,AE_loss_2])
Loss2=Lambda(lambda x:(x[0]*1+x[1]*1+x[2]*1+x[3]*1)/4,name='loss2')([cls_loss_1,cls_loss_2,cls_loss_3,cls_loss_4])
Loss3=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss3')([interloss_1,interloss_4])
Loss4=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss4')([intraloss_1,intraloss_2])
###############################################################################
network_train=K.models.Model([input1,input2,input3,input4],[Loss1,Loss2,Loss3,Loss4])
network_train.summary()
###############################################################################
intra_data1={}
inter_data1={}
for i in range(x_test1.shape[0]):
label_i=y_test1[i]
intra_data1[i]=np.where(y_test1==label_i)
inter_data1[i]=np.where(y_test1!=label_i)
intra_data2={}
inter_data2={}
for i in range(x_test2.shape[0]):
label_i=y_test2[i]
intra_data2[i]=np.where(y_test2==label_i)
inter_data2[i]=np.where(y_test2!=label_i)
###############################################################################
batch_size=256
train_loss=[]
loss1=[]
loss2=[]
loss3=[]
loss4=[]
###############################################################################
iterations=10000000
lr=1e-4
optimizer=K.optimizers.Adam(lr=lr)
loss_weights=[1,1,1,1]
#these four parts will not converge at the same speed, and I don't know how to resolve it
#so I choose a manual strategy: if any one of them is too small, stop the training, enlarge its weight, and do the training again
#I think you can train this model better...or maybe you can teach me how to auto-balance the weights, thank you!
network_train.compile(optimizer=optimizer,
loss=[lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred],
loss_weights=loss_weights)
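#a hypothetical sketch of the manual re-weighting strategy described above (not run automatically, values are assumptions):
#if you observe that, e.g., loss3 stays much smaller than the other three, you could stop training,
#enlarge its weight and recompile before training again, for example:
#loss_weights=[1,1,10,1]
#network_train.compile(optimizer=optimizer,
#                      loss=[lambda y_true,y_pred: y_pred]*4,
#                      loss_weights=loss_weights)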
for i in range(iterations):
x_input1_series1_train=np.zeros(x_test1.shape)
index0=np.zeros(x_input1_series1_train.shape[0])
x_input1_series2_train=np.zeros(x_test2.shape)
index1=np.zeros(x_input1_series2_train.shape[0])
x_input2_series1_train=np.zeros(x_test1.shape)
index2=np.zeros(x_input2_series1_train.shape[0])
x_input2_series2_train=np.zeros(x_test2.shape)
index3=np.zeros(x_input2_series2_train.shape[0])
for ii in range(x_test1.shape[0]):
index0[ii]=random.choice(range(x_test1.shape[0]))
rand1=random.random()
in_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]>0)[0]
out_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]<=0)[0]
if rand1>=0.5:
index1[ii]=random.choice(in_rand1)
elif rand1<0.5:
index1[ii]=random.choice(out_rand1)
rand2=random.random()
if rand2>=0.5:
index2[ii]=random.choice(intra_data1[index0[ii]][0])
elif rand2<0.5:
index2[ii]=random.choice(inter_data1[index0[ii]][0])
rand3=random.random()
if rand3>=0.5:
index3[ii]=random.choice(intra_data2[index1[ii]][0])
elif rand3<0.5:
index3[ii]=random.choice(inter_data2[index1[ii]][0])
train1=x_test1[index0.astype('int'),:]
train2=x_test2[index1.astype('int'),:]
train3=x_test1[index2.astype('int'),:]
train4=x_test2[index3.astype('int'),:]
Train=network_train.fit([train1,train2,train3,train4],
[np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1])],
batch_size=batch_size,shuffle=True)
train_loss.append(Train.history['loss'][:][0])
loss1.append(Train.history['loss1_loss'][:][0]*loss_weights[0])
loss2.append(Train.history['loss2_loss'][:][0]*loss_weights[1])
loss3.append(Train.history['loss3_loss'][:][0]*loss_weights[2])
loss4.append(Train.history['loss4_loss'][:][0]*loss_weights[3])
print(i,'loss=',
Train.history['loss'][:][0],
Train.history['loss1_loss'][:][0]*loss_weights[0],
Train.history['loss2_loss'][:][0]*loss_weights[1],
Train.history['loss3_loss'][:][0]*loss_weights[2],
Train.history['loss4_loss'][:][0]*loss_weights[3])
if i>500:
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
        plt.ylim(0,max(max(train_loss[i-500:]),max(loss1[i-500:]),max(loss2[i-500:]),max(loss3[i-500:]),max(loss4[i-500:])))
plt.xlim(i-500,i)
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
else:
        plt.plot(train_loss[:])
        plt.plot(loss1[:])
        plt.plot(loss2[:])
        plt.plot(loss3[:])
        plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
###############################################################################
network_predict=K.models.Model([input1,input2,input3,input4],[x1_mid1,x2_mid1,x1_mid2,x2_mid2])
[low_dim1,low_dim2,low_dim3,low_dim4]=network_predict.predict([x_test1,x_test2,x_test1,x_test2])
low_dim1=low_dim1[0:x_input1.shape[0]]
low_dim2=low_dim2[0:x_input2.shape[0]]
low_dim3=low_dim3[0:x_input1.shape[0]]
low_dim4=low_dim4[0:x_input2.shape[0]]
low_dim1=np.concatenate([low_dim1,low_dim3],axis=1)
low_dim2=np.concatenate([low_dim2,low_dim4],axis=1)
y_real_no1=y_testreal1[0:x_input1.shape[0]]
y_recluster_no1=recluster1[0:x_input1.shape[0]]
y_real_no2=y_testreal2[0:x_input2.shape[0]]
y_recluster_no2=recluster2[0:x_input2.shape[0]]
total_real_type=np.concatenate([y_real_no1,y_real_no2])
total_recluster_type=np.concatenate([y_recluster_no1,y_recluster_no2])
###############################################################################
series1=sc.AnnData(low_dim1)
series2=sc.AnnData(low_dim2)
mergedata=series1.concatenate(series2)
mergedata.obsm['NN']=mergedata.X
sc.pp.neighbors(mergedata,n_pcs=0)
sc.tl.louvain(mergedata)
sc.tl.leiden(mergedata)
sc.tl.umap(mergedata)
df=pd.DataFrame(total_real_type.astype('int'))
df=pd.Series(np.reshape(df.values,df.values.shape[0]), dtype="category")
mergedata.obs['real']=df.values
sc.pl.umap(mergedata,color='louvain',size=30)
sc.pl.umap(mergedata,color='leiden',size=30)
sc.pl.umap(mergedata,color='batch',size=30)
sc.pl.umap(mergedata,color='real',size=30)
type_louvain=mergedata.obs['louvain']
type_leiden=mergedata.obs['leiden']
type_batch=mergedata.obs['batch']
type_real=mergedata.obs['real']
###############################################################################
umapdata=pd.DataFrame(mergedata.obsm['X_umap'].T,index=['tSNE1','tSNE2'])
umapdata1=pd.DataFrame(mergedata.obsm['X_umap'][0:PCAseries1.shape[0],:].T,index=['tSNE1','tSNE2'])
umapdata2=pd.DataFrame(mergedata.obsm['X_umap'][PCAseries1.shape[0]:,:].T,index=['tSNE1','tSNE2'])
###############################################################################
plot_tSNE_batchclusters(umapdata1,umapdata2,choose_seriestype1,choose_seriestype2,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'batch1')
plot_tSNE_batchclusters(umapdata2,umapdata1,choose_seriestype2,choose_seriestype1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'batch2')
plot_tSNE_clusters(umapdata,list(map(int,type_batch)), cluster_colors=cluster_colors,save=False,name=fromname+'batch')
plot_tSNE_sepclusters(umapdata1,umapdata2,choose_seriestype1,choose_seriestype2,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label1')
plot_tSNE_sepclusters(umapdata2,umapdata1,choose_seriestype2,choose_seriestype1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label2')
plot_tSNE_clusters(umapdata,list(map(int,type_real)), cluster_colors=cluster_colors,save=False, name=fromname+'label')
#sio.savemat('pancreas_ourdata.mat',{'mergedata':mergedata.X,'umapdata':umapdata.values})#you need to check whether the two batches were swapped above; if so, swap them back by yourself!!! | 30,327 | 46.76063 | 185 | py |
CBA | CBA-main/evaluation/evaluation_pancreas.py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import keras as K
import pandas as pd
from keras import layers
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.decomposition import PCA
import scanpy as sc
import scipy
import pickle
from sklearn.manifold import TSNE
from keras.layers.core import Lambda
import scipy.io as sio
import seaborn as sns
import umap
import numpy as np
import metrics
from ywb_function import *
import scanorama
import sklearn.metrics as sm
import kBET
we_use=[1,2]#we try to integrate pancreas1 and pancreas2
#input the data
RAWseries1=pd.read_csv('RAWseries_'+str(we_use[0])+'.csv',header=None)[1:].values.astype('single')
RAWseries2=pd.read_csv('RAWseries_'+str(we_use[1])+'.csv',header=None)[1:].values.astype('single')
#input the label
choose_seriestype1=pd.read_csv('realseries_'+str(we_use[0])+'.csv',header=None)[1:].values
choose_seriestype2=pd.read_csv('realseries_'+str(we_use[1])+'.csv',header=None)[1:].values
Alldata=np.concatenate([RAWseries1.T,RAWseries2.T])
Alllabel=np.concatenate([choose_seriestype1,choose_seriestype2])
Allbatch=np.concatenate([np.zeros(choose_seriestype1.shape[0]),np.zeros(choose_seriestype2.shape[0])+1])
###############################################################################
chosen_cluster=['alpha','beta','ductal','acinar','delta','gamma','endothelial','epsilon']
chosen_index=np.arange(Alllabel.shape[0])
for i in range(Alllabel.shape[0]):
if Alllabel[i] in chosen_cluster:
chosen_index[i]=1
else:
chosen_index[i]=0
Alldata=Alldata[chosen_index==1,:]
Allbatch=Allbatch[chosen_index==1]
Alllabel=Alllabel[chosen_index==1]
###############################################################################
Numlabel=np.zeros(Alllabel.shape[0])
cluster_index2={'alpha':0,'beta':1,'ductal':2,'acinar':3,'delta':4,'gamma':5,'endothelial':6,'epsilon':7}
for i in range(Alllabel.shape[0]):
Numlabel[i]=cluster_index2[Alllabel[i][0]]
###############################################################################
choose_seriestype1=Numlabel[Allbatch==0][Numlabel[Allbatch==0].argsort()].astype('int')
choose_seriestype2=Numlabel[Allbatch==1][Numlabel[Allbatch==1].argsort()].astype('int')
Numlabel[Allbatch==0]=choose_seriestype1
Numlabel[Allbatch==1]=choose_seriestype2
total_given_type=Numlabel
merge=sio.loadmat('pancreas_ourdata')['mergedata']
#this part is tricky: you need to check which part is batch1 and which part is batch2, I do that manually
mergedata=sc.AnnData(merge)
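#a simple (hypothetical) sanity check of the batch order: the saved matrix should have one row per cell,
#with batch1 cells expected to come first, e.g.:
#print(merge.shape[0], (Allbatch==0).sum(), (Allbatch==1).sum())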
total_batch_type=np.concatenate([choose_seriestype1*0,choose_seriestype2*0+1])
total_batch_type=np.reshape(total_batch_type,total_batch_type.shape[0])
mergedata.obs['batch']=total_batch_type
zero_type=np.concatenate([choose_seriestype1*0,choose_seriestype2*0])
zero_type=np.reshape(zero_type,zero_type.shape[0])
mergedata.obs['zero']=zero_type
total_given_type=np.concatenate([choose_seriestype1,choose_seriestype2])
total_given_type=np.reshape(total_given_type,total_given_type.shape[0])
mergedata.obs['real']=total_given_type
mergedata.obsm["embedding"]=mergedata.X
sc.pp.neighbors(mergedata,n_pcs=0)
mergedata.obsm['NN']=mergedata.X
sc.tl.louvain(mergedata,resolution=0.5)
sc.tl.umap(mergedata)
sc.pl.umap(mergedata,color=['batch','louvain','real'])
type_louvain=mergedata.obs['louvain']
type_batch=mergedata.obs['batch']
type_real=mergedata.obs['real']
type_batch=type_batch.replace('ref',0)
type_batch=type_batch.replace('new',1)
###############################################################################
kBET_score=kBET.kbet(mergedata,'batch','real',embed='embedding')
for i in range(8):
print(kBET_score['kBET'][i])
print(kBET_score.mean()[1])
kBET_score_whole=kBET.kbet(mergedata,'batch','zero',embed='embedding')
print(kBET_score_whole.mean()[1])
S_score=kBET.silhouette(mergedata,'real',metric='euclidean',embed='embedding')
print(S_score)
NMI_louvain=kBET.nmi(mergedata,'louvain','real')
print(NMI_louvain)
ARI_louvain=kBET.ari(mergedata,'louvain','real')
print(ARI_louvain)
FMI_louvain=sm.fowlkes_mallows_score(type_real,type_louvain)
print(FMI_louvain)
###############################################################################
umapdata=pd.DataFrame(mergedata.obsm['X_umap'].T,index=['tSNE1','tSNE2'])
umapdata1=pd.DataFrame(mergedata.obsm['X_umap'][0:(Allbatch==0).sum(),:].T,index=['tSNE1','tSNE2'])
umapdata2=pd.DataFrame(mergedata.obsm['X_umap'][(Allbatch==0).sum():,:].T,index=['tSNE1','tSNE2'])
##############################################################################
fromname='do'
plot_tSNE_clusters(umapdata,list(map(int,type_real)), cluster_colors=cluster_colors,save=False, name=fromname+'label') | 4,698 | 38.487395 | 118 | py |
CBA | CBA-main/lung/kBET.py | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 31 10:41:54 2021
@author: 17b90
"""
import numpy as np
import anndata
import scanpy as sc
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.sparse.csgraph import connected_components
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#import networkx as nx
from scIB.utils import *
from scIB.preprocessing import score_cell_cycle
from scIB.clustering import opt_louvain
from scipy import sparse
from scipy.sparse.csgraph import connected_components
from scipy.io import mmwrite
import sklearn
import sklearn
import sklearn.metrics
import sklearn.linear_model
import cProfile
from pstats import Stats
import memory_profiler
import itertools
import multiprocessing as multipro
import subprocess
import tempfile
import pathlib
from os import mkdir, path, remove, stat
import gc
import rpy2.rinterface_lib.callbacks
import logging
rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR) # Ignore R warning messages
import rpy2.robjects as ro
import anndata2ri
def checkAdata(adata):
if type(adata) is not anndata.AnnData:
raise TypeError('Input is not a valid AnnData object')
def checkBatch(batch, obs, verbose=False):
if batch not in obs:
raise ValueError(f'column {batch} is not in obs')
elif verbose:
print(f'Object contains {obs[batch].nunique()} batches.')
class NeighborsError(Exception):
    #fallback definition so that diffusion_nn can raise this error even if it is not provided by scIB.utils
    pass
def diffusion_conn(adata, min_k=50, copy=True, max_iterations=26):
'''
This function performs graph diffusion on the connectivities matrix until a
minimum number `min_k` of entries per row are non-zero.
Note:
    Due to self-loops min_k-1 non-zero connectivities entries is actually the stopping
criterion. This is equivalent to `sc.pp.neighbors`.
Returns:
The diffusion-enhanced connectivities matrix of a copy of the AnnData object
with the diffusion-enhanced connectivities matrix is in
`adata.uns["neighbors"]["conectivities"]`
'''
if 'neighbors' not in adata.uns:
raise ValueError('`neighbors` not in adata object. '
'Please compute a neighbourhood graph!')
if 'connectivities' not in adata.uns['neighbors']:
raise ValueError('`connectivities` not in `adata.uns["neighbors"]`. '
'Please pass an object with connectivities computed!')
T = adata.uns['neighbors']['connectivities']
#Normalize T with max row sum
# Note: This keeps the matrix symmetric and ensures |M| doesn't keep growing
T = sparse.diags(1/np.array([T.sum(1).max()]*T.shape[0]))*T
M = T
# Check for disconnected component
n_comp, labs = connected_components(adata.uns['neighbors']['connectivities'],
connection='strong')
if n_comp > 1:
tab = pd.value_counts(labs)
small_comps = tab.index[tab<min_k]
large_comp_mask = np.array(~pd.Series(labs).isin(small_comps))
else:
large_comp_mask = np.array([True]*M.shape[0])
T_agg = T
i = 2
while ((M[large_comp_mask,:][:,large_comp_mask]>0).sum(1).min() < min_k) and (i < max_iterations):
print(f'Adding diffusion to step {i}')
T_agg *= T
M += T_agg
i+=1
if (M[large_comp_mask,:][:,large_comp_mask]>0).sum(1).min() < min_k:
raise ValueError('could not create diffusion connectivities matrix'
f'with at least {min_k} non-zero entries in'
f'{max_iterations} iterations.\n Please increase the'
'value of max_iterations or reduce k_min.\n')
M.setdiag(0)
if copy:
adata_tmp = adata.copy()
adata_tmp.uns['neighbors'].update({'diffusion_connectivities': M})
return adata_tmp
else:
return M
def diffusion_nn(adata, k, max_iterations=26):
'''
This function generates a nearest neighbour list from a connectivities matrix
as supplied by BBKNN or Conos. This allows us to select a consistent number
of nearest neighbours across all methods.
Return:
`k_indices` a numpy.ndarray of the indices of the k-nearest neighbors.
'''
if 'neighbors' not in adata.uns:
raise ValueError('`neighbors` not in adata object. '
'Please compute a neighbourhood graph!')
if 'connectivities' not in adata.uns['neighbors']:
raise ValueError('`connectivities` not in `adata.uns["neighbors"]`. '
'Please pass an object with connectivities computed!')
T = adata.uns['neighbors']['connectivities']
# Row-normalize T
T = sparse.diags(1/T.sum(1).A.ravel())*T
T_agg = T**3
M = T+T**2+T_agg
i = 4
while ((M>0).sum(1).min() < (k+1)) and (i < max_iterations):
#note: k+1 is used as diag is non-zero (self-loops)
print(f'Adding diffusion to step {i}')
T_agg *= T
M += T_agg
i+=1
if (M>0).sum(1).min() < (k+1):
raise NeighborsError(f'could not find {k} nearest neighbors in {max_iterations}'
'diffusion steps.\n Please increase max_iterations or reduce'
' k.\n')
M.setdiag(0)
k_indices = np.argpartition(M.A, -k, axis=1)[:, -k:]
return k_indices
def kBET_single(matrix, batch, type_ = None, k0 = 10, knn=None, subsample=0.5, heuristic=True, verbose=False):
"""
params:
        matrix: expression matrix (at the moment: a PCA matrix, so do.pca is set to FALSE)
batch: series or list of batch assignemnts
subsample: fraction to be subsampled. No subsampling if `subsample=None`
returns:
kBET p-value
"""
anndata2ri.activate()
ro.r("library(kBET)")
if verbose:
print("importing expression matrix")
ro.globalenv['data_mtrx'] = matrix
ro.globalenv['batch'] = batch
#print(matrix.shape)
#print(len(batch))
if verbose:
print("kBET estimation")
#k0 = len(batch) if len(batch) < 50 else 'NULL'
ro.globalenv['knn_graph'] = knn
ro.globalenv['k0'] = k0
batch_estimate = ro.r(f"batch.estimate <- kBET(data_mtrx, batch, knn=knn_graph, k0=k0, plot=FALSE, do.pca=FALSE, heuristic=FALSE, adapt=FALSE, verbose={str(verbose).upper()})")
anndata2ri.deactivate()
try:
ro.r("batch.estimate$average.pval")[0]
except rpy2.rinterface_lib.embedded.RRuntimeError:
return np.nan
else:
return ro.r("batch.estimate$average.pval")[0]
def kbet(adata, batch_key, label_key, embed='X_pca', type_ = None,
hvg=False, subsample=0.5, heuristic=False, verbose=False):
"""
Compare the effect before and after integration
params:
matrix: matrix from adata to calculate on
return:
pd.DataFrame with kBET p-values per cluster for batch
"""
checkAdata(adata)
checkBatch(batch_key, adata.obs)
checkBatch(label_key, adata.obs)
#compute connectivities for non-knn type data integrations
#and increase neighborhoods for knn type data integrations
if type_ =='haveneighbor':
adata_tmp = adata
print('neighbor have already obtained!')
elif type_ != 'knn':
adata_tmp = sc.pp.neighbors(adata, n_neighbors = 50, use_rep=embed, copy=True)
else:
#check if pre-computed neighbours are stored in input file
adata_tmp = adata.copy()
if 'diffusion_connectivities' not in adata.uns['neighbors']:
if verbose:
print(f"Compute: Diffusion neighbours.")
adata_tmp = diffusion_conn(adata, min_k = 50, copy = True)
adata_tmp.uns['neighbors']['connectivities'] = adata_tmp.uns['neighbors']['diffusion_connectivities']
if verbose:
print(f"batch: {batch_key}")
#set upper bound for k0
size_max = 2**31 - 1
kBET_scores = {'cluster': [], 'kBET': []}
for clus in adata_tmp.obs[label_key].unique():
adata_sub = adata_tmp[adata_tmp.obs[label_key] == clus,:].copy()
#check if neighborhood size too small or only one batch in subset
if np.logical_or(adata_sub.n_obs < 10,
len(np.unique(adata_sub.obs[batch_key]))==1):
print(f"{clus} consists of a single batch or is too small. Skip.")
score = np.nan
else:
quarter_mean = np.floor(np.mean(adata_sub.obs[batch_key].value_counts())/4).astype('int')
k0 = np.min([70, np.max([10, quarter_mean])])
#check k0 for reasonability
if (k0*adata_sub.n_obs) >=size_max:
k0 = np.floor(size_max/adata_sub.n_obs).astype('int')
matrix = np.zeros(shape=(adata_sub.n_obs, k0+1))
if verbose:
print(f"Use {k0} nearest neighbors.")
n_comp, labs = connected_components(adata_sub.uns['neighbors']['connectivities'],
connection='strong')
if n_comp > 1:
#check the number of components where kBET can be computed upon
comp_size = pd.value_counts(labs)
#check which components are small
comp_size_thresh = 3*k0
idx_nonan = np.flatnonzero(np.in1d(labs,
comp_size[comp_size>=comp_size_thresh].index))
#check if 75% of all cells can be used for kBET run
if len(idx_nonan)/len(labs) >= 0.75:
#create another subset of components, assume they are not visited in a diffusion process
adata_sub_sub = adata_sub[idx_nonan,:].copy()
nn_index_tmp = np.empty(shape=(adata_sub.n_obs, k0))
nn_index_tmp[:] = np.nan
nn_index_tmp[idx_nonan] = diffusion_nn(adata_sub_sub, k=k0).astype('float')
#need to check neighbors (k0 or k0-1) as input?
score = kBET_single(
matrix=matrix,
batch=adata_sub.obs[batch_key],
knn = nn_index_tmp+1, #nn_index in python is 0-based and 1-based in R
subsample=subsample,
verbose=verbose,
heuristic=False,
k0 = k0,
type_ = type_
)
else:
#if there are too many too small connected components, set kBET score to 1
#(i.e. 100% rejection)
score = 1
else: #a single component to compute kBET on
#need to check neighbors (k0 or k0-1) as input?
nn_index_tmp = diffusion_nn(adata_sub, k=k0).astype('float')
score = kBET_single(
matrix=matrix,
batch=adata_sub.obs[batch_key],
knn = nn_index_tmp+1, #nn_index in python is 0-based and 1-based in R
subsample=subsample,
verbose=verbose,
heuristic=False,
k0 = k0,
type_ = type_
)
kBET_scores['cluster'].append(clus)
kBET_scores['kBET'].append(score)
kBET_scores = pd.DataFrame.from_dict(kBET_scores)
kBET_scores = kBET_scores.reset_index(drop=True)
return kBET_scores
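#illustrative usage (mirroring the evaluation scripts): assumes adata.obsm['embedding'] holds the
#integrated embedding and adata.obs contains 'batch' and 'real' columns
#kBET_score = kbet(adata, 'batch', 'real', embed='embedding')
#print(kBET_score['kBET'].mean())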
def nmi(adata, group1, group2, method="arithmetic", nmi_dir=None):
"""
Normalized mutual information NMI based on 2 different cluster assignments `group1` and `group2`
params:
adata: Anndata object
group1: column name of `adata.obs` or group assignment
group2: column name of `adata.obs` or group assignment
method: NMI implementation
'max': scikit method with `average_method='max'`
'min': scikit method with `average_method='min'`
'geometric': scikit method with `average_method='geometric'`
'arithmetic': scikit method with `average_method='arithmetic'`
'Lancichinetti': implementation by A. Lancichinetti 2009 et al.
'ONMI': implementation by Aaron F. McDaid et al. (https://github.com/aaronmcdaid/Overlapping-NMI) Hurley 2011
nmi_dir: directory of compiled C code if 'Lancichinetti' or 'ONMI' are specified as `method`. Compilation should be done as specified in the corresponding README.
return:
normalized mutual information (NMI)
"""
checkAdata(adata)
if isinstance(group1, str):
checkBatch(group1, adata.obs)
group1 = adata.obs[group1].tolist()
elif isinstance(group1, pd.Series):
group1 = group1.tolist()
if isinstance(group2, str):
checkBatch(group2, adata.obs)
group2 = adata.obs[group2].tolist()
elif isinstance(group2, pd.Series):
group2 = group2.tolist()
if len(group1) != len(group2):
raise ValueError(f'different lengths in group1 ({len(group1)}) and group2 ({len(group2)})')
# choose method
if method in ['max', 'min', 'geometric', 'arithmetic']:
nmi_value = sklearn.metrics.normalized_mutual_info_score(group1, group2, average_method=method)
elif method == "Lancichinetti":
nmi_value = nmi_Lanc(group1, group2, nmi_dir=nmi_dir)
elif method == "ONMI":
nmi_value = onmi(group1, group2, nmi_dir=nmi_dir)
else:
raise ValueError(f"Method {method} not valid")
return nmi_value
def ari(adata, group1, group2):
"""
params:
adata: anndata object
group1: ground-truth cluster assignments (e.g. cell type labels)
group2: "predicted" cluster assignments
The function is symmetric, so group1 and group2 can be switched
"""
checkAdata(adata)
if isinstance(group1, str):
checkBatch(group1, adata.obs)
group1 = adata.obs[group1].tolist()
elif isinstance(group1, pd.Series):
group1 = group1.tolist()
if isinstance(group2, str):
checkBatch(group2, adata.obs)
group2 = adata.obs[group2].tolist()
elif isinstance(group2, pd.Series):
group2 = group2.tolist()
if len(group1) != len(group2):
raise ValueError(f'different lengths in group1 ({len(group1)}) and group2 ({len(group2)})')
return sklearn.metrics.cluster.adjusted_rand_score(group1, group2)
def silhouette(adata, group_key, metric='euclidean', embed='X_pca', scale=True):
"""
    wrapper for the sklearn silhouette function; values range from [-1, 1] with 1 being an ideal fit, 0 indicating overlapping clusters and -1 indicating misclassified cells
"""
if embed not in adata.obsm.keys():
print(adata.obsm.keys())
raise KeyError(f'{embed} not in obsm')
asw = sklearn.metrics.silhouette_score(adata.obsm[embed], adata.obs[group_key], metric=metric)
if scale:
asw = (asw + 1)/2
return asw
### PC Regression
def pcr_comparison(adata_pre, adata_post, covariate, embed=None, n_comps=50, scale=True, verbose=False):
"""
Compare the effect before and after integration
Return either the difference of variance contribution before and after integration
or a score between 0 and 1 (`scaled=True`) with 0 if the variance contribution hasn't
changed. The larger the score, the more different the variance contributions are before
and after integration.
params:
adata_pre: uncorrected adata
adata_post: integrated adata
embed : if `embed=None`, use the full expression matrix (`adata.X`), otherwise
use the embedding provided in `adata_post.obsm[embed]`
scale: if True, return scaled score
return:
difference of R2Var value of PCR
"""
if embed == 'X_pca':
embed = None
pcr_before = pcr(adata_pre, covariate=covariate, recompute_pca=True,
n_comps=n_comps, verbose=verbose)
pcr_after = pcr(adata_post, covariate=covariate, embed=embed, recompute_pca=True,
n_comps=n_comps, verbose=verbose)
if scale:
score = (pcr_before - pcr_after)/pcr_before
if score < 0:
print("Variance contribution increased after integration!")
print("Setting PCR comparison score to 0.")
score = 0
return score
else:
return pcr_after - pcr_before
def pcr(adata, covariate, embed=None, n_comps=50, recompute_pca=True, verbose=False):
"""
PCR for Adata object
Checks whether to
+ compute PCA on embedding or expression data (set `embed` to name of embedding matrix e.g. `embed='X_emb'`)
+ use existing PCA (only if PCA entry exists)
+ recompute PCA on expression matrix (default)
params:
adata: Anndata object
embed : if `embed=None`, use the full expression matrix (`adata.X`), otherwise
use the embedding provided in `adata_post.obsm[embed]`
n_comps: number of PCs if PCA should be computed
covariate: key for adata.obs column to regress against
return:
R2Var of PCR
"""
checkAdata(adata)
checkBatch(covariate, adata.obs)
if verbose:
print(f"covariate: {covariate}")
batch = adata.obs[covariate]
# use embedding for PCA
if (embed is not None) and (embed in adata.obsm):
if verbose:
print(f"compute PCR on embedding n_comps: {n_comps}")
return pc_regression(adata.obsm[embed], batch, n_comps=n_comps)
# use existing PCA computation
elif (recompute_pca == False) and ('X_pca' in adata.obsm) and ('pca' in adata.uns):
if verbose:
print("using existing PCA")
return pc_regression(adata.obsm['X_pca'], batch, pca_var=adata.uns['pca']['variance'])
# recompute PCA
else:
if verbose:
print(f"compute PCA n_comps: {n_comps}")
return pc_regression(adata.X, batch, n_comps=n_comps)
def pc_regression(data, variable, pca_var=None, n_comps=50, svd_solver='arpack', verbose=False):
"""
params:
        data: expression or PCA matrix. Will be assumed to be PCA values, if pca_var is given
        variable: series or list of batch assignments
        n_comps: number of PCA components for computing PCA, only when pca_var is not given. If no pca_var is given and n_comps=None, compute PCA and don't reduce data
        pca_var: iterable of variances for `n_comps` components. If `pca_var` is not `None`, it is assumed that the matrix contains PCA values, else PCA is computed
    PCA is only computed, if variance contribution is not given (pca_var).
"""
if isinstance(data, (np.ndarray, sparse.csr_matrix)):
matrix = data
else:
raise TypeError(f'invalid type: {data.__class__} is not a numpy array or sparse matrix')
# perform PCA if no variance contributions are given
if pca_var is None:
if n_comps is None or n_comps > min(matrix.shape):
n_comps = min(matrix.shape)
if n_comps == min(matrix.shape):
svd_solver = 'full'
if verbose:
print("compute PCA")
pca = sc.tl.pca(matrix, n_comps=n_comps, use_highly_variable=False,
return_info=True, svd_solver=svd_solver, copy=True)
X_pca = pca[0].copy()
pca_var = pca[3].copy()
del pca
else:
X_pca = matrix
n_comps = matrix.shape[1]
## PC Regression
if verbose:
print("fit regression on PCs")
# handle categorical values
if pd.api.types.is_numeric_dtype(variable):
variable = np.array(variable).reshape(-1, 1)
else:
if verbose:
print("one-hot encode categorical values")
variable = pd.get_dummies(variable)
# fit linear model for n_comps PCs
r2 = []
for i in range(n_comps):
pc = X_pca[:, [i]]
lm = sklearn.linear_model.LinearRegression()
lm.fit(variable, pc)
r2_score = np.maximum(0,lm.score(variable, pc))
r2.append(r2_score)
Var = pca_var / sum(pca_var) * 100
R2Var = sum(r2*Var)/100
return R2Var | 20,538 | 37.390654 | 180 | py |
CBA | CBA-main/lung/ywb_function.py | import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
def color(value):
digit = list(map(str, range(10))) + list("ABCDEF")
if isinstance(value, tuple):
string = '#'
for i in value:
a1 = i // 16
a2 = i % 16
string += digit[a1] + digit[a2]
return string
elif isinstance(value, str):
a1 = digit.index(value[1]) * 16 + digit.index(value[2])
a2 = digit.index(value[3]) * 16 + digit.index(value[4])
a3 = digit.index(value[5]) * 16 + digit.index(value[6])
return (a1, a2, a3)
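#example usage: color((213,94,0)) returns '#D55E00', and color('#D55E00') returns (213, 94, 0)
#(note that the reverse direction expects upper-case hex digits)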
cluster_colors=[
color((213,94,0)),
color((0,114,178)),
color((204,121,167)),
color((0,158,115)),
color((86,180,233)),
color((230,159,0)),
color((240,228,66)),
color((0,0,0)),
'#D3D3D3',
'#FF00FF',
'#aec470',
'#b3ee3d',
'#de4726',
'#f69149',
'#f81919',
'#ff49b0',
'#f05556',
'#fadf0b',
'#f8c495',
'#ffc1c1',
'#ffc125',
'#ffc0cb',
'#ffbbff',
'#ffb90f',
'#ffb6c1',
'#ffb5c5',
'#ff83fa',
'#ff8c00',
'#ff4040',
'#ff3030',
'#ff34b3',
'#00fa9a',
'#ca4479',
'#eead0e',
'#ff1493',
'#0ab4e4',
'#1e6a87',
'#800080',
'#00e5ee',
'#c71585',
'#027fd0',
'#004dba',
'#0a9fb4',
'#004b71',
'#285528',
'#2f7449',
'#21b183',
'#3e4198',
'#4e14a6',
'#5dd73d',
'#64a44e',
'#6787d6',
'#6c6b6b',
'#6c6b6b',
'#7759a4',
'#78edff',
'#762a14',
'#9805cc',
'#9b067d',
'#af7efe',
'#a7623d']
def plot_tSNE_clusters(df_tSNE,labels,cluster_colors=None,s=6,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE.loc['tSNE1'], df_tSNE.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[i] for i in labels])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_batchclusters(df_tSNE1,df_tSNE2,labels1,labels2,cluster_colors=None,s=0.8,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE2.loc['tSNE1'], df_tSNE2.loc['tSNE2'],s=s,alpha=0.8,lw=0,c='#D3D3D3')
ax.scatter(df_tSNE1.loc['tSNE1'], df_tSNE1.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[1] for i in labels1])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_sepclusters(df_tSNE1,df_tSNE2,labels1,labels2,cluster_colors=None,s=0.8,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE2.loc['tSNE1'], df_tSNE2.loc['tSNE2'],s=s,alpha=0.8,lw=0,c='#D3D3D3')
ax.scatter(df_tSNE1.loc['tSNE1'], df_tSNE1.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[i] for i in labels1])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_cluster(df_tSNE,labels,cluster_colors=None,s=6,save=False,name=None):
index=[[] for i in range(np.max(labels)+1)]
for i in range(len(labels)):
index[int(labels[i])].append(i)
index=[i for i in index if i!=[]]
for i in range(len(np.unique(labels))):
color=np.array(labels)[index[i]][0]
fig,ax=plt.subplots()
ax.scatter(df_tSNE.loc['tSNE1'], df_tSNE.loc['tSNE2'],c='#D3D3D3',s=s,lw=0)
ax.scatter(df_tSNE.loc['tSNE1'].iloc[index[i]],df_tSNE.loc['tSNE2'].iloc[index[i]],c=[cluster_colors[k] for k in np.array(labels)[index[i]]],s=s,lw=0)
ax.axis('equal')
ax.set_axis_off()
if save == True:
plt.savefig('{}.eps'.format(name+str(color)), dpi=600,format='eps')
def gen_labels(df, model):
if str(type(model)).startswith("<class 'sklearn.cluster"):
cell_labels = dict(zip(df.columns, model.labels_))
label_cells = {}
for l in np.unique(model.labels_):
label_cells[l] = []
for i, label in enumerate(model.labels_):
label_cells[label].append(df.columns[i])
cellID = list(df.columns)
labels = list(model.labels_)
labels_a = model.labels_
elif type(model) == np.ndarray:
cell_labels = dict(zip(df.columns, model))
label_cells = {}
for l in np.unique(model):
label_cells[l] = []
for i, label in enumerate(model):
label_cells[label].append(df.columns[i])
cellID = list(df.columns)
labels = list(model)
labels_a = model
else:
print('Error wrong input type')
return cell_labels, label_cells, cellID, labels, labels_a
def heatmap(correlation_recluster_cell_final,choose_seriestype1,choose_seriestype2,save=False,name=''):
df=pd.DataFrame(correlation_recluster_cell_final)
labels1=np.array(choose_seriestype1)
labels2=np.array(choose_seriestype2)
cell_labels1,label_cells1,cellID1,labels1,labels_a1=gen_labels(df.T,np.array(labels1))
cell_labels2,label_cells2,cellID2,labels2,labels_a2=gen_labels(df,np.array(labels2))
optimal_order=np.unique(np.concatenate([labels1,labels2]))
cl,lc=gen_labels(df,np.array(labels2))[:2]
optimal_sort_cells=sum([lc[i] for i in np.unique(labels2)],[])
optimal_sort_labels=[cl[i] for i in optimal_sort_cells]
fig,axHM=plt.subplots(figsize=(9,5))
df_full=df.copy()
z=df_full.values
z=pd.DataFrame(z, index=df_full.index,columns=df_full.columns)
z=z.loc[:,optimal_sort_cells].values
im=axHM.pcolormesh(z,cmap='viridis',vmax=1)
plt.gca().invert_yaxis()
plt.xlim(xmax=len(labels2))
plt.xticks([])
plt.yticks([])
divider=make_axes_locatable(axHM)
axLabel1=divider.append_axes("top",.3,pad=0,sharex=axHM)
axLabel2=divider.append_axes("left",.3,pad=0,sharex=axHM)
counter2=Counter(labels2)
counter1=Counter(labels1)
pos2=0
pos1=0
for l in optimal_order:
axLabel1.barh(y=0,left=pos2,width=counter2[l],color=cluster_colors[l],linewidth=0.5,edgecolor=cluster_colors[l])
pos2+=counter2[l]
optimal_order=np.flipud(optimal_order)
for l in optimal_order:
axLabel2.bar(x=0,bottom=pos1,height=counter1[l],color=cluster_colors[l],linewidth=50,edgecolor=cluster_colors[l])
pos1+=counter1[l]
axLabel1.set_xlim(xmax=len(labels2))
axLabel1.axis('off')
axLabel2.set_ylim(ymax=len(labels1))
axLabel2.axis('off')
cax=fig.add_axes([.91,0.13,0.01,0.22])
colorbar=fig.colorbar(im,cax=cax,ticks=[0,1])
colorbar.set_ticklabels(['0','max'])
plt.savefig('{}.jpg'.format(name),dpi=600,format='jpg') | 7,512 | 33.782407 | 158 | py |
CBA | CBA-main/lung/lung_main.py | """
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import kBET
import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
import sklearn.metrics as sm
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from imblearn.over_sampling import SMOTE,ADASYN
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import dendrogram, linkage
we_use=[1]
RAWseries1=pd.read_csv('RAWlung_'+str(we_use[0])+'.csv',header=None)[1:].values.astype('single')
choose_seriestype1=pd.read_csv('reallung_'+str(we_use[0])+'.csv',header=None)[1:].values
row1=pd.read_csv('rowgenelung_'+str(we_use[0])+'.csv',header=None)[1:].values
csv_data=pd.read_csv("Lung-countsFACS.csv",header=None)
cellname=csv_data.iloc[0][1:]
csv_data=csv_data[1:]
csv_df=pd.DataFrame(csv_data)
row2=csv_df[0].values
RAWseries2=csv_df.drop(labels=0,axis=1).values.astype('int')
batch2dict=pd.read_csv('annotations_FACS.csv',header=None)[1:]
dictbatch=pd.DataFrame(batch2dict[2].values,index=batch2dict[0].values)
choose_seriestype2=[]
for i in cellname:
if i in batch2dict[0].values:
choose_seriestype2.append(dictbatch.loc[i][0])
else:
choose_seriestype2.append('0')
choose_seriestype2=np.array(choose_seriestype2)
choose_seriestype2=np.reshape(choose_seriestype2,[choose_seriestype2.shape[0],1])
cob_gene=[]
for i in row1:
if i in row2:
cob_gene.append(i)
line1=np.zeros(len(cob_gene))
line2=np.zeros(len(cob_gene))
index=0
for i in cob_gene:
line1[index]=np.where(row1==i[0])[0][0]
line2[index]=np.where(row2==i[0])[0][0]
index+=1
RAWseries1=RAWseries1[line1.astype('int'),:]
RAWseries2=RAWseries2[line2.astype('int'),:]
fromname='lung'+str(we_use[0])
Alldata=np.concatenate([RAWseries1.T,RAWseries2.T])
Alllabel=np.concatenate([choose_seriestype1,choose_seriestype2])
Allbatch=np.concatenate([np.zeros(choose_seriestype1.shape[0]),np.zeros(choose_seriestype2.shape[0])+1])
for i in np.unique(Alllabel):
print(i,(choose_seriestype1==i).sum(),(choose_seriestype2==i).sum())
chosen_cluster=['269',
'268',
'275',
'277',
'265',
'287',
'266',
'273',
'282',
'B cell',
'T cell',
'dendritic cell',
'endothelial cell',
'stromal cell'
]
chosen_index=np.arange(Alllabel.shape[0])
for i in range(Alllabel.shape[0]):
if Alllabel[i] in chosen_cluster:
chosen_index[i]=1
else:
chosen_index[i]=0
Alldata=Alldata[chosen_index==1,:]
Allbatch=Allbatch[chosen_index==1]
Alllabel=Alllabel[chosen_index==1]
###############################################################################
Numlabel=np.zeros(Alllabel.shape[0])
cluster_index2={'269':0,
'268':1,
'275':2,
'277':3,
'265':3,
'287':3,
'266':4,
'273':4,
'282':4,
'B cell':0,
'T cell':1,
'dendritic cell':2,
'endothelial cell':3,
'stromal cell':4
}
for i in range(Alllabel.shape[0]):
Numlabel[i]=cluster_index2[Alllabel[i][0]]
###############################################################################
choose_seriestype1=Numlabel[Allbatch==0][Numlabel[Allbatch==0].argsort()].astype('int')
choose_seriestype2=Numlabel[Allbatch==1][Numlabel[Allbatch==1].argsort()].astype('int')
###############################################################################
min_cells=100
pca_dim=15
minnumberofcluster=10000000000
clusternumber=1
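#the four parameters above play the same role as in the pancreas script:
#min_cells removes genes expressed in fewer than min_cells cells,
#pca_dim is the number of principal components kept after PCA,
#minnumberofcluster is the threshold above which a Louvain cluster would be re-clustered
#(the huge value used here effectively disables that re-clustering step),
#and clusternumber is the number of neighbor clusters used in the cluster matching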
###############################################################################
anndata=sc.AnnData(pd.DataFrame(Alldata))
sc.pp.filter_genes(anndata,min_cells=min_cells)
sc.pp.normalize_per_cell(anndata,counts_per_cell_after=1e4)
sc.pp.log1p(anndata)
sc.pp.highly_variable_genes(anndata)
sc.pl.highly_variable_genes(anndata)
anndata=anndata[:,anndata.var['highly_variable']]
sc.pl.highest_expr_genes(anndata,n_top=20)
sc.tl.pca(anndata,n_comps=100,svd_solver='arpack')
sc.pl.pca(anndata)
sc.pl.pca_variance_ratio(anndata,log=True,n_pcs=100,save=[True,'pancreas'])
Alldata_aft=anndata.obsm['X_pca'][:,0:pca_dim]
Alldata_aft=preprocessing.StandardScaler().fit_transform(Alldata_aft)
Alldata_aft=preprocessing.MinMaxScaler().fit_transform(Alldata_aft)
PCAseries1=Alldata_aft[Allbatch==0,:][Numlabel[Allbatch==0].argsort()]
PCAseries2=Alldata_aft[Allbatch==1,:][Numlabel[Allbatch==1].argsort()]
choose_seriestype1=Numlabel[Allbatch==0][Numlabel[Allbatch==0].argsort()].astype('int')
choose_seriestype2=Numlabel[Allbatch==1][Numlabel[Allbatch==1].argsort()].astype('int')
###############################################################################
cluster_series1=sc.AnnData(PCAseries1)
cluster_series2=sc.AnnData(PCAseries2)
sc.pp.neighbors(cluster_series1,n_pcs=0)
sc.pp.neighbors(cluster_series2,n_pcs=0)
sc.tl.umap(cluster_series1)
sc.tl.umap(cluster_series2)
sc.tl.louvain(cluster_series1,resolution=0.5)
sc.pl.umap(cluster_series1,color='louvain',size=30)
sc.tl.louvain(cluster_series2,resolution=0.5)
sc.pl.umap(cluster_series2,color='louvain',size=30)
cluster1=np.array(list(map(int,cluster_series1.obs['louvain'])))
cluster2=np.array(list(map(int,cluster_series2.obs['louvain'])))
###############################################################################
recluster1=np.zeros(cluster1.shape[0])
recluster2=np.zeros(cluster2.shape[0])
palsecluster1=cluster1
count_cluster1=pd.value_counts(cluster_series1.obs['louvain'])
for i in range(1000000000000000):
if count_cluster1.max()<minnumberofcluster:
break
else:
print(count_cluster1.max())
recluster1=np.zeros(cluster1.shape[0])
recluster1_number=0
for i in np.unique(palsecluster1):
index=palsecluster1==i
if index.sum()<minnumberofcluster:
thisrecluster=np.zeros(index.sum())
recluster1[index]=thisrecluster+recluster1_number
recluster1_number=len(np.unique(recluster1))
else:
data=PCAseries1[index]
anndata=sc.AnnData(data)
sc.pp.neighbors(anndata,n_pcs=0)
sc.tl.louvain(anndata)
thisrecluster=np.array(list(map(int,anndata.obs['louvain'])))
recluster1[index]=thisrecluster+recluster1_number
recluster1_number=len(np.unique(recluster1))
palsecluster1=recluster1.astype('int')
count_cluster1=pd.value_counts(palsecluster1)
palsecluster2=cluster2
count_cluster2=pd.value_counts(cluster_series2.obs['louvain'])
for i in range(1000000000000000):
if count_cluster2.max()<minnumberofcluster:
break
else:
print(count_cluster2.max())
recluster2=np.zeros(cluster2.shape[0])
recluster2_number=0
for i in np.unique(palsecluster2):
index=palsecluster2==i
if index.sum()<minnumberofcluster:
thisrecluster=np.zeros(index.sum())
recluster2[index]=thisrecluster+recluster2_number
recluster2_number=len(np.unique(recluster2))
else:
data=PCAseries2[index]
anndata=sc.AnnData(data)
sc.pp.neighbors(anndata,n_pcs=0)
sc.tl.louvain(anndata)
thisrecluster=np.array(list(map(int,anndata.obs['louvain'])))
recluster2[index]=thisrecluster+recluster2_number
recluster2_number=len(np.unique(recluster2))
palsecluster2=recluster2.astype('int')
count_cluster2=pd.value_counts(palsecluster2)
recluster1=palsecluster1
recluster2=palsecluster2
###############################################################################
series1=sc.AnnData(PCAseries1)
series2=sc.AnnData(PCAseries2)
sc.pp.neighbors(series1,n_pcs=0)
sc.pp.neighbors(series2,n_pcs=0)
sc.tl.umap(series1)
sc.tl.umap(series2)
df1=pd.DataFrame(choose_seriestype1)
df1=pd.Series(np.reshape(df1.values,df1.values.shape[0]), dtype="category")
series1.obs['real']=df1.values
df2=pd.DataFrame(choose_seriestype2)
df2=pd.Series(np.reshape(df2.values,df2.values.shape[0]), dtype="category")
series2.obs['real']=df2.values
sc.pl.umap(series1,color='real',size=30)
sc.pl.umap(series2,color='real',size=30)
df1=pd.DataFrame(recluster1.astype('int'))
df1=pd.Series(np.reshape(df1.values,df1.values.shape[0]), dtype="category")
series1.obs['recluster']=df1.values
df2=pd.DataFrame(recluster2.astype('int'))
df2=pd.Series(np.reshape(df2.values,df2.values.shape[0]), dtype="category")
series2.obs['recluster']=df2.values
sc.pl.umap(series1,color='recluster',size=30)
sc.pl.umap(series2,color='recluster',size=30)
###############################################################################
def dis(P,Q,distance_method):
if distance_method==0:
return np.sqrt(np.sum(np.square(P-Q)))
if distance_method==1:
return 1-(np.multiply(P,Q).sum()/(np.sqrt(np.sum(np.square(P)))*np.sqrt(np.sum(np.square(Q)))))
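#as read from the matching loop below, the cosine distance (distance_method=1) is used
#for the cluster-level similarity and the Euclidean distance (distance_method=0) for
#the cell-level distances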
###############################################################################
if len(np.unique(recluster1))<=len(np.unique(recluster2)):
a=PCAseries1
PCAseries1=PCAseries2
PCAseries2=a
b=choose_seriestype1
choose_seriestype1=choose_seriestype2
choose_seriestype2=b
c=cluster1
cluster1=cluster2
cluster2=c
d=recluster1
recluster1=recluster2
recluster2=d
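#the swap above only changes which dataset is called batch 1 internally, so that the
#batch with more re-clusters is the one whose re-clusters get matched, one by one, to
#their nearest re-cluster in the other batch in the loop below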
###############################################################################
correlation_recluster=np.zeros([len(np.unique(recluster1)),len(np.unique(recluster2))])
correlation_recluster_cell=np.zeros([recluster1.shape[0],recluster2.shape[0]])
for i in range(len(np.unique(recluster1))):
for j in range(len(np.unique(recluster2))):
print(i,j)
index_series1=np.where(recluster1==i)[0]
index_series2=np.where(recluster2==j)[0]
cell_series1=PCAseries1[index_series1,:]
cell_series2=PCAseries2[index_series2,:]
mean1=0
for iq in range(cell_series1.shape[0]):
for jq in range(cell_series2.shape[0]):
mean1+=dis(cell_series1[iq,:],cell_series2[jq,:],1)
correlation_recluster[i,j]=mean1/(cell_series1.shape[0]*cell_series2.shape[0])
for ii in range(cell_series1.shape[0]):
for jj in range(cell_series2.shape[0]):
mean2=dis(cell_series1[ii,:],cell_series2[jj,:],0)
correlation_recluster_cell[index_series1[ii],index_series2[jj]]=mean2
plt.imshow(correlation_recluster)
plt.imshow(correlation_recluster_cell)
correlation_recluster_div=-np.log10(correlation_recluster)
correlation_recluster_cell_div=-np.log10(correlation_recluster_cell)
correlation_recluster_norm=(correlation_recluster_div-correlation_recluster_div.min())/(correlation_recluster_div.max()-correlation_recluster_div.min())
correlation_recluster_cell_norm=(correlation_recluster_cell_div-correlation_recluster_cell_div.min())/(correlation_recluster_cell_div.max()-correlation_recluster_cell_div.min())
plt.imshow(correlation_recluster_norm)
plt.imshow(correlation_recluster_cell_norm)
###############################################################################
correlation_recluster_select=np.zeros(correlation_recluster_norm.shape)
recluster_mid=np.zeros(recluster1.shape)
for kk in range(correlation_recluster_norm.shape[0]):
ind=np.sort(correlation_recluster_norm[kk,:])
select=correlation_recluster_norm[kk,:]<ind[-clusternumber]
select=(select==False)
recluster_mid[recluster1==kk]+=int(np.where(select==True)[0])
correlation_recluster_select[kk,:]=correlation_recluster_norm[kk,:]*select
plt.imshow(correlation_recluster_select)
correlation_recluster_cell_final=correlation_recluster_cell*0
for i in range(correlation_recluster_cell_norm.shape[0]):
for j in range(correlation_recluster_cell_norm.shape[1]):
label1=recluster1[i]
label2=recluster2[j]
mean1=correlation_recluster_select[label1,label2]
mean2=correlation_recluster_cell_norm[i,j]
if mean1==0:
correlation_recluster_cell_final[i,j]=0
else:
correlation_recluster_cell_final[i,j]=mean2
plt.imshow(correlation_recluster_select)
plt.imshow(correlation_recluster_cell_final)
recluster1=recluster_mid.astype('int')
sort_correlation_recluster_cell_final=correlation_recluster_cell_final[recluster1.argsort(),:]
sort_correlation_recluster_cell_final=sort_correlation_recluster_cell_final[:,recluster2.argsort()]
###############################################################################
heatmap(correlation_recluster_cell_final,choose_seriestype1,choose_seriestype2,save=False,name='pancreasmatrix')
################################################################################
x_input1=np.zeros([PCAseries1.shape[0],PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]+recluster2.max()+1])
x_input2=np.zeros([PCAseries2.shape[0],PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]+recluster2.max()+1])
for i in range(PCAseries1.shape[0]):
print(i)
x_input1[i,0:PCAseries1.shape[1]]=PCAseries1[i,:]
x_input1[i,PCAseries1.shape[1]:PCAseries1.shape[1]+PCAseries1.shape[0]]=K.utils.np_utils.to_categorical(i,PCAseries1.shape[0])
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]=correlation_recluster_cell_final[i,:]
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]:]=K.utils.np_utils.to_categorical(recluster1[i],recluster2.max()+1)
for j in range(PCAseries2.shape[0]):
print(j)
x_input2[j,0:PCAseries2.shape[1]]=PCAseries2[j,:]
x_input2[j,PCAseries2.shape[1]:PCAseries2.shape[1]+PCAseries2.shape[0]]=K.utils.np_utils.to_categorical(j,PCAseries2.shape[0])
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]:PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]]=correlation_recluster_cell_final[:,j]
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]:]=K.utils.np_utils.to_categorical(recluster2[j],recluster2.max()+1)
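#each row of x_input1/x_input2 concatenates four blocks:
#[PCA features | one-hot cell index | correlation to every cell of the other batch | one-hot re-cluster label]
#a quick read-only sanity check of the expected row width (illustrative only, prints booleans):
print('x_input1 width ok:',x_input1.shape[1]==PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]+recluster2.max()+1)
print('x_input2 width ok:',x_input2.shape[1]==PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]+recluster2.max()+1)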
###############################################################################
x_input1_new=x_input1
recluster1_new=recluster1
x_input2_new=x_input2
recluster2_new=recluster2
###############################################################################
if x_input1_new.shape[0]>=x_input2_new.shape[0]:
x_test1=x_input1_new
y_test1=recluster1_new
y_testreal1=choose_seriestype1
repeat_num=int(np.ceil(x_input1_new.shape[0]/x_input2_new.shape[0]))
x_test2=np.tile(x_input2_new,(repeat_num,1))
y_test2=np.tile(recluster2_new,repeat_num)
y_testreal2=np.tile(choose_seriestype2,repeat_num)
x_test2=x_test2[0:x_test1.shape[0],:]
y_test2=y_test2[0:x_test1.shape[0]]
y_testreal2=y_testreal2[0:x_test1.shape[0]]
elif x_input1_new.shape[0]<x_input2_new.shape[0]:
x_test2=x_input2_new
y_test2=recluster2_new
y_testreal2=choose_seriestype2
repeat_num=int(np.ceil(x_input2_new.shape[0]/x_input1_new.shape[0]))
x_test1=np.tile(x_input1_new,(repeat_num,1))
y_test1=np.tile(recluster1_new,repeat_num)
y_testreal1=np.tile(choose_seriestype1,repeat_num)
x_test1=x_test1[0:x_test2.shape[0],:]
y_test1=y_test1[0:x_test2.shape[0]]
y_testreal1=y_testreal1[0:x_test2.shape[0]]
###############################################################################
def choose_info(x,info_number):
return x[:,0:info_number]
def choose_index(x,info_number,x_samplenumber):
return x[:,info_number:info_number+x_samplenumber]
def choose_corrlation(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber:info_number+x_samplenumber+cor_number]
def choose_relabel(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber+cor_number:]
def slic(input_):
return input_[:,0]
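#the Lambda helpers above undo that concatenation inside the network:
#choose_info keeps the PCA block, choose_index the one-hot cell index,
#choose_corrlation the correlation block to the other batch, and
#choose_relabel the one-hot re-cluster label block (slic simply takes the first column)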
###############################################################################
activation='relu'
info_number=PCAseries1.shape[1]
layer=PCAseries1.shape[1]
layer2=layer
input1=K.Input(shape=(x_test1.shape[1],))#line1 species1
input2=K.Input(shape=(x_test2.shape[1],))#line1 species2
input3=K.Input(shape=(x_test1.shape[1],))#line2 species1
input4=K.Input(shape=(x_test2.shape[1],))#line2 species2
Data1=Lambda(choose_info,arguments={'info_number':info_number})(input1)
Data2=Lambda(choose_info,arguments={'info_number':info_number})(input2)
Data3=Lambda(choose_info,arguments={'info_number':info_number})(input3)
Data4=Lambda(choose_info,arguments={'info_number':info_number})(input4)
Index1=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input1)
Index2=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input2)
Index3=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input3)
Index4=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input4)
Cor1=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Cor2=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Cor3=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Cor4=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
Relabel1=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Relabel2=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Relabel3=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Relabel4=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
x_concat1=layers.concatenate([Data1,Data3])#batch1
x_concat2=layers.concatenate([Data2,Data4])#batch2
x1=layers.Dense(layer2,activation=activation)(Data1)
x2=layers.Dense(layer2,activation=activation)(Data2)
x3=layers.Dense(layer2,activation=activation)(Data3)
x4=layers.Dense(layer2,activation=activation)(Data4)
x1=layers.BatchNormalization()(x1)
x2=layers.BatchNormalization()(x2)
x3=layers.BatchNormalization()(x3)
x4=layers.BatchNormalization()(x4)
x1_mid1=layers.Dense(layer2,activation=activation)(layers.concatenate([x1,x2]))
x2_mid1=layers.Dense(layer2,activation=activation)(layers.concatenate([x1,x2]))
x1_mid2=layers.Dense(layer2,activation=activation)(layers.concatenate([x3,x4]))
x2_mid2=layers.Dense(layer2,activation=activation)(layers.concatenate([x3,x4]))
x1_mid1=layers.BatchNormalization()(x1_mid1)
x2_mid1=layers.BatchNormalization()(x2_mid1)
x1_mid2=layers.BatchNormalization()(x1_mid2)
x2_mid2=layers.BatchNormalization()(x2_mid2)
layer_classify=layers.Dense(recluster2_new.max()+1,activation='relu')
y1=layer_classify(x1_mid1)
y2=layer_classify(x2_mid1)
y3=layer_classify(x1_mid2)
y4=layer_classify(x2_mid2)
x1=layers.concatenate([x1_mid1,x1_mid2])#batch1
x2=layers.concatenate([x2_mid1,x2_mid2])#batch2
output1=layers.Dense(2*layer,activation=activation)(x1)
output2=layers.Dense(2*layer,activation=activation)(x2)
output1=layers.BatchNormalization()(output1)
output2=layers.BatchNormalization()(output2)
def loss_weight(input_):
return tf.reduce_sum(tf.multiply(input_[0],input_[1]),axis=-1)
def MSE(input_):
return tf.reduce_mean(tf.square(input_[0]-input_[1]),axis=-1)
def multi_classification_loss(input_):
return tf.keras.losses.categorical_crossentropy(input_[0],input_[1])
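#what the loss helpers compute (descriptive only):
#MSE([a,b]) is the per-sample mean squared error between a and b, and
#loss_weight([u,v]) is the per-sample inner product sum(u*v); with one-hot or
#correlation vectors as inputs this picks out how strongly a given pair of cells is linked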
#loss1
AE_loss_1=Lambda(MSE)([output1,x_concat1])
AE_loss_2=Lambda(MSE)([output2,x_concat2])
#loss2
cls_loss_1=Lambda(MSE)([y1,Relabel1])
cls_loss_2=Lambda(MSE)([y2,Relabel2])
cls_loss_3=Lambda(MSE)([y3,Relabel3])
cls_loss_4=Lambda(MSE)([y4,Relabel4])
#loss3
interweight1=Lambda(loss_weight)([Index1,Cor2])
interweight4=Lambda(loss_weight)([Index3,Cor4])
interloss_1=Lambda(MSE)([x1_mid1,x2_mid1])
interloss_4=Lambda(MSE)([x1_mid2,x2_mid2])
interloss_1=layers.Multiply()([interweight1,interloss_1])
interloss_4=layers.Multiply()([interweight4,interloss_4])
#loss4
intraweight1=Lambda(loss_weight)([Relabel1,Relabel3])
intraweight2=Lambda(loss_weight)([Relabel2,Relabel4])
intraloss_1=Lambda(MSE)([x1_mid1,x1_mid2])
intraloss_2=Lambda(MSE)([x2_mid1,x2_mid2])
intraloss_1=layers.Multiply()([intraweight1,intraloss_1])
intraloss_2=layers.Multiply()([intraweight2,intraloss_2])
Loss1=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss1')([AE_loss_1,AE_loss_2])
Loss2=Lambda(lambda x:(x[0]*1+x[1]*1+x[2]*1+x[3]*1)/4,name='loss2')([cls_loss_1,cls_loss_2,cls_loss_3,cls_loss_4])
Loss3=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss3')([interloss_1,interloss_4])
Loss4=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss4')([intraloss_1,intraloss_2])
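#as read from the code above, the four loss terms are:
#Loss1 the autoencoder reconstruction error, Loss2 the agreement with the matched
#re-cluster labels, Loss3 the cross-batch alignment weighted by the cell-cell
#correlation, and Loss4 the within-batch consistency weighted by whether the two
#sampled cells share a re-cluster label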
###############################################################################
network_train=K.models.Model([input1,input2,input3,input4],
[Loss1,Loss2,Loss3,Loss4])
network_train.summary()
###############################################################################
intra_data1={}
inter_data1={}
for i in range(x_test1.shape[0]):
label_i=y_test1[i]
intra_data1[i]=np.where(y_test1==label_i)
inter_data1[i]=np.where(y_test1!=label_i)
intra_data2={}
inter_data2={}
for i in range(x_test2.shape[0]):
label_i=y_test2[i]
intra_data2[i]=np.where(y_test2==label_i)
inter_data2[i]=np.where(y_test2!=label_i)
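#intra_data*/inter_data* map each cell index to the indices of cells with the same or
#a different re-cluster label; the training loop below uses them to draw positive and
#negative pairs within each batch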
###############################################################################
batch_size=128
train_loss=[]
loss1=[]
loss2=[]
loss3=[]
loss4=[]
###############################################################################
iterations=1000000000
lr=1e-3
optimizer=K.optimizers.Adam(lr=lr)
loss_weights=[1,1,1,1]
network_train.compile(optimizer=optimizer,
loss=[lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred],
loss_weights=loss_weights)
for i in range(iterations):
x_input1_series1_train=np.zeros(x_test1.shape)
index0=np.zeros(x_input1_series1_train.shape[0])
x_input1_series2_train=np.zeros(x_test2.shape)
index1=np.zeros(x_input1_series2_train.shape[0])
x_input2_series1_train=np.zeros(x_test1.shape)
index2=np.zeros(x_input2_series1_train.shape[0])
x_input2_series2_train=np.zeros(x_test2.shape)
index3=np.zeros(x_input2_series2_train.shape[0])
for ii in range(x_test1.shape[0]):
index0[ii]=random.choice(range(x_test1.shape[0]))
rand1=random.random()
in_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]>0)[0]
out_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]<=0)[0]
if rand1>=0.5:
index1[ii]=random.choice(in_rand1)
elif rand1<0.5:
index1[ii]=random.choice(out_rand1)
rand2=random.random()
if rand2>=0.5:
index2[ii]=random.choice(intra_data1[index0[ii]][0])
elif rand2<0.5:
index2[ii]=random.choice(inter_data1[index0[ii]][0])
rand3=random.random()
if rand3>=0.5:
index3[ii]=random.choice(intra_data2[index1[ii]][0])
elif rand3<0.5:
index3[ii]=random.choice(inter_data2[index1[ii]][0])
train1=x_test1[index0.astype('int'),:]
train2=x_test2[index1.astype('int'),:]
train3=x_test1[index2.astype('int'),:]
train4=x_test2[index3.astype('int'),:]
Train=network_train.fit([train1,train2,train3,train4],
[np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1])],
batch_size=batch_size,shuffle=True)
train_loss.append(Train.history['loss'][:][0])
loss1.append(Train.history['loss1_loss'][:][0]*loss_weights[0])
loss2.append(Train.history['loss2_loss'][:][0]*loss_weights[1])
loss3.append(Train.history['loss3_loss'][:][0]*loss_weights[2])
loss4.append(Train.history['loss4_loss'][:][0]*loss_weights[3])
print(i,'loss=',
Train.history['loss'][:][0],
Train.history['loss1_loss'][:][0]*loss_weights[0],
Train.history['loss2_loss'][:][0]*loss_weights[1],
Train.history['loss3_loss'][:][0]*loss_weights[2],
Train.history['loss4_loss'][:][0]*loss_weights[3])
if i>100:
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
        plt.ylim(0,max(max(train_loss[i-100:]),max(loss1[i-100:]),max(loss2[i-100:]),max(loss3[i-100:]),max(loss4[i-100:])))
plt.xlim(i-100,i)
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
else:
        plt.plot(train_loss[:])
        plt.plot(loss1[:])
        plt.plot(loss2[:])
        plt.plot(loss3[:])
        plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
###############################################################################
network_train.load_weights('lungweight.h5')
network_predict=K.models.Model([input1,input2,input3,input4],
[x1_mid1,x2_mid1,x1_mid2,x2_mid2])
[low_dim1,low_dim2,low_dim3,low_dim4]=network_predict.predict([x_test1,x_test2,x_test1,x_test2])
low_dim1=low_dim1[0:x_input1.shape[0]]
low_dim2=low_dim2[0:x_input2.shape[0]]
low_dim3=low_dim3[0:x_input1.shape[0]]
low_dim4=low_dim4[0:x_input2.shape[0]]
y_real_no1=y_testreal1[0:x_input1.shape[0]]
y_recluster_no1=recluster1[0:x_input1.shape[0]]
y_real_no2=y_testreal2[0:x_input2.shape[0]]
y_recluster_no2=recluster2[0:x_input2.shape[0]]
total_real_type=np.concatenate([y_real_no1,y_real_no2])
total_recluster_type=np.concatenate([y_recluster_no1,y_recluster_no2])
###############################################################################
series1=sc.AnnData(low_dim1)
series2=sc.AnnData(low_dim2)
mergedata=series1.concatenate(series2)
mergedata.obsm['NN']=mergedata.X
sc.pp.neighbors(mergedata,n_pcs=0)
sc.tl.louvain(mergedata)
sc.tl.leiden(mergedata)
sc.tl.umap(mergedata)
df=pd.DataFrame(total_real_type.astype('int'))
df=pd.Series(np.reshape(df.values,df.values.shape[0]), dtype="category")
mergedata.obs['real']=df.values
sc.pl.umap(mergedata,color='louvain',size=30)
sc.pl.umap(mergedata,color='leiden',size=30)
sc.pl.umap(mergedata,color='batch',size=30)
sc.pl.umap(mergedata,color='real',size=30)
type_louvain=mergedata.obs['louvain']
type_leiden=mergedata.obs['leiden']
type_batch=mergedata.obs['batch']
type_real=mergedata.obs['real']
###############################################################################
umapdata=pd.DataFrame(mergedata.obsm['X_umap'].T,index=['tSNE1','tSNE2'])
umapdata1=pd.DataFrame(mergedata.obsm['X_umap'][0:PCAseries1.shape[0],:].T,index=['tSNE1','tSNE2'])
umapdata2=pd.DataFrame(mergedata.obsm['X_umap'][PCAseries1.shape[0]:,:].T,index=['tSNE1','tSNE2'])
###############################################################################
plot_tSNE_batchclusters(umapdata1,umapdata2,choose_seriestype1,choose_seriestype2,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'batch1')
plot_tSNE_batchclusters(umapdata2,umapdata1,choose_seriestype2,choose_seriestype1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'batch2')
plot_tSNE_clusters(umapdata,list(map(int,type_batch)), cluster_colors=cluster_colors,save=False,name=fromname+'batch')
plot_tSNE_sepclusters(umapdata1,umapdata2,choose_seriestype1,choose_seriestype2,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label1')
plot_tSNE_sepclusters(umapdata2,umapdata1,choose_seriestype2,choose_seriestype1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label2')
plot_tSNE_clusters(umapdata,list(map(int,type_real)), cluster_colors=cluster_colors,save=False, name=fromname+'label')
#sio.savemat('lung_ourdata.mat',{'mergedata':mergedata.X,'umapdata':umapdata.values}) | 29,860 | 43.702096 | 177 | py |
CBA | CBA-main/pancreas/kBET.py | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 31 10:41:54 2021
@author: 17b90
"""
import numpy as np
import anndata
import scanpy as sc
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.sparse.csgraph import connected_components
#import networkx as nx
from scIB.utils import *
from scIB.preprocessing import score_cell_cycle
from scIB.clustering import opt_louvain
from scipy import sparse
from scipy.io import mmwrite
import sklearn
import sklearn.metrics
from time import time
import cProfile
from pstats import Stats
import memory_profiler
import itertools
import multiprocessing as multipro
import subprocess
import tempfile
import pathlib
from os import mkdir, path, remove, stat
import gc
import rpy2.rinterface_lib.callbacks
import logging
rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR) # Ignore R warning messages
import rpy2.robjects as ro
import anndata2ri
def checkAdata(adata):
if type(adata) is not anndata.AnnData:
raise TypeError('Input is not a valid AnnData object')
def checkBatch(batch, obs, verbose=False):
if batch not in obs:
raise ValueError(f'column {batch} is not in obs')
elif verbose:
print(f'Object contains {obs[batch].nunique()} batches.')
def diffusion_conn(adata, min_k=50, copy=True, max_iterations=26):
'''
This function performs graph diffusion on the connectivities matrix until a
minimum number `min_k` of entries per row are non-zero.
Note:
    Due to self-loops, min_k-1 non-zero connectivities entries is actually the stopping
    criterion. This is equivalent to `sc.pp.neighbors`.
    Returns:
       If `copy=True`, a copy of the AnnData object with the diffusion-enhanced
       connectivities matrix stored in `adata.uns["neighbors"]["diffusion_connectivities"]`;
       otherwise the matrix itself.
'''
if 'neighbors' not in adata.uns:
raise ValueError('`neighbors` not in adata object. '
'Please compute a neighbourhood graph!')
if 'connectivities' not in adata.uns['neighbors']:
raise ValueError('`connectivities` not in `adata.uns["neighbors"]`. '
'Please pass an object with connectivities computed!')
T = adata.uns['neighbors']['connectivities']
#Normalize T with max row sum
# Note: This keeps the matrix symmetric and ensures |M| doesn't keep growing
T = sparse.diags(1/np.array([T.sum(1).max()]*T.shape[0]))*T
M = T
# Check for disconnected component
n_comp, labs = connected_components(adata.uns['neighbors']['connectivities'],
connection='strong')
if n_comp > 1:
tab = pd.value_counts(labs)
small_comps = tab.index[tab<min_k]
large_comp_mask = np.array(~pd.Series(labs).isin(small_comps))
else:
large_comp_mask = np.array([True]*M.shape[0])
T_agg = T
i = 2
while ((M[large_comp_mask,:][:,large_comp_mask]>0).sum(1).min() < min_k) and (i < max_iterations):
print(f'Adding diffusion to step {i}')
T_agg *= T
M += T_agg
i+=1
if (M[large_comp_mask,:][:,large_comp_mask]>0).sum(1).min() < min_k:
raise ValueError('could not create diffusion connectivities matrix'
f'with at least {min_k} non-zero entries in'
f'{max_iterations} iterations.\n Please increase the'
'value of max_iterations or reduce k_min.\n')
M.setdiag(0)
if copy:
adata_tmp = adata.copy()
adata_tmp.uns['neighbors'].update({'diffusion_connectivities': M})
return adata_tmp
else:
return M
def diffusion_nn(adata, k, max_iterations=26):
'''
This function generates a nearest neighbour list from a connectivities matrix
as supplied by BBKNN or Conos. This allows us to select a consistent number
of nearest neighbours across all methods.
Return:
`k_indices` a numpy.ndarray of the indices of the k-nearest neighbors.
'''
if 'neighbors' not in adata.uns:
raise ValueError('`neighbors` not in adata object. '
'Please compute a neighbourhood graph!')
if 'connectivities' not in adata.uns['neighbors']:
raise ValueError('`connectivities` not in `adata.uns["neighbors"]`. '
'Please pass an object with connectivities computed!')
T = adata.uns['neighbors']['connectivities']
# Row-normalize T
T = sparse.diags(1/T.sum(1).A.ravel())*T
T_agg = T**3
M = T+T**2+T_agg
i = 4
while ((M>0).sum(1).min() < (k+1)) and (i < max_iterations):
#note: k+1 is used as diag is non-zero (self-loops)
print(f'Adding diffusion to step {i}')
T_agg *= T
M += T_agg
i+=1
if (M>0).sum(1).min() < (k+1):
        raise ValueError(f'could not find {k} nearest neighbors in {max_iterations}'
                         ' diffusion steps.\n Please increase max_iterations or reduce'
                         ' k.\n')
M.setdiag(0)
k_indices = np.argpartition(M.A, -k, axis=1)[:, -k:]
return k_indices
def kBET_single(matrix, batch, type_ = None, k0 = 10, knn=None, subsample=0.5, heuristic=True, verbose=False):
"""
params:
        matrix: expression matrix (at the moment a PCA matrix, so do.pca is set to FALSE)
        batch: series or list of batch assignments
subsample: fraction to be subsampled. No subsampling if `subsample=None`
returns:
kBET p-value
"""
anndata2ri.activate()
ro.r("library(kBET)")
if verbose:
print("importing expression matrix")
ro.globalenv['data_mtrx'] = matrix
ro.globalenv['batch'] = batch
#print(matrix.shape)
#print(len(batch))
if verbose:
print("kBET estimation")
#k0 = len(batch) if len(batch) < 50 else 'NULL'
ro.globalenv['knn_graph'] = knn
ro.globalenv['k0'] = k0
batch_estimate = ro.r(f"batch.estimate <- kBET(data_mtrx, batch, knn=knn_graph, k0=k0, plot=FALSE, do.pca=FALSE, heuristic=FALSE, adapt=FALSE, verbose={str(verbose).upper()})")
anndata2ri.deactivate()
try:
ro.r("batch.estimate$average.pval")[0]
except rpy2.rinterface_lib.embedded.RRuntimeError:
return np.nan
else:
return ro.r("batch.estimate$average.pval")[0]
def kbet(adata, batch_key, label_key, embed='X_pca', type_ = None,
hvg=False, subsample=0.5, heuristic=False, verbose=False):
"""
    Compare the batch-mixing effect before and after integration.
    params:
        adata: AnnData object containing the (integrated) data
        batch_key: column of adata.obs holding the batch assignment
        label_key: column of adata.obs holding the cell-type/cluster labels
        embed: embedding in adata.obsm used to build the neighbourhood graph (default 'X_pca')
return:
pd.DataFrame with kBET p-values per cluster for batch
"""
checkAdata(adata)
checkBatch(batch_key, adata.obs)
checkBatch(label_key, adata.obs)
#compute connectivities for non-knn type data integrations
#and increase neighborhoods for knn type data integrations
if type_ =='haveneighbor':
adata_tmp = adata
print('neighbor have already obtained!')
elif type_ != 'knn':
adata_tmp = sc.pp.neighbors(adata, n_neighbors = 50, use_rep=embed, copy=True)
else:
#check if pre-computed neighbours are stored in input file
adata_tmp = adata.copy()
if 'diffusion_connectivities' not in adata.uns['neighbors']:
if verbose:
print(f"Compute: Diffusion neighbours.")
adata_tmp = diffusion_conn(adata, min_k = 50, copy = True)
adata_tmp.uns['neighbors']['connectivities'] = adata_tmp.uns['neighbors']['diffusion_connectivities']
if verbose:
print(f"batch: {batch_key}")
#set upper bound for k0
size_max = 2**31 - 1
kBET_scores = {'cluster': [], 'kBET': []}
for clus in adata_tmp.obs[label_key].unique():
adata_sub = adata_tmp[adata_tmp.obs[label_key] == clus,:].copy()
#check if neighborhood size too small or only one batch in subset
if np.logical_or(adata_sub.n_obs < 10,
len(np.unique(adata_sub.obs[batch_key]))==1):
print(f"{clus} consists of a single batch or is too small. Skip.")
score = np.nan
else:
quarter_mean = np.floor(np.mean(adata_sub.obs[batch_key].value_counts())/4).astype('int')
k0 = np.min([70, np.max([10, quarter_mean])])
#check k0 for reasonability
if (k0*adata_sub.n_obs) >=size_max:
k0 = np.floor(size_max/adata_sub.n_obs).astype('int')
matrix = np.zeros(shape=(adata_sub.n_obs, k0+1))
if verbose:
print(f"Use {k0} nearest neighbors.")
n_comp, labs = connected_components(adata_sub.uns['neighbors']['connectivities'],
connection='strong')
if n_comp > 1:
#check the number of components where kBET can be computed upon
comp_size = pd.value_counts(labs)
#check which components are small
comp_size_thresh = 3*k0
idx_nonan = np.flatnonzero(np.in1d(labs,
comp_size[comp_size>=comp_size_thresh].index))
#check if 75% of all cells can be used for kBET run
if len(idx_nonan)/len(labs) >= 0.75:
#create another subset of components, assume they are not visited in a diffusion process
adata_sub_sub = adata_sub[idx_nonan,:].copy()
nn_index_tmp = np.empty(shape=(adata_sub.n_obs, k0))
nn_index_tmp[:] = np.nan
nn_index_tmp[idx_nonan] = diffusion_nn(adata_sub_sub, k=k0).astype('float')
#need to check neighbors (k0 or k0-1) as input?
score = kBET_single(
matrix=matrix,
batch=adata_sub.obs[batch_key],
knn = nn_index_tmp+1, #nn_index in python is 0-based and 1-based in R
subsample=subsample,
verbose=verbose,
heuristic=False,
k0 = k0,
type_ = type_
)
else:
#if there are too many too small connected components, set kBET score to 1
#(i.e. 100% rejection)
score = 1
else: #a single component to compute kBET on
#need to check neighbors (k0 or k0-1) as input?
nn_index_tmp = diffusion_nn(adata_sub, k=k0).astype('float')
score = kBET_single(
matrix=matrix,
batch=adata_sub.obs[batch_key],
knn = nn_index_tmp+1, #nn_index in python is 0-based and 1-based in R
subsample=subsample,
verbose=verbose,
heuristic=False,
k0 = k0,
type_ = type_
)
kBET_scores['cluster'].append(clus)
kBET_scores['kBET'].append(score)
kBET_scores = pd.DataFrame.from_dict(kBET_scores)
kBET_scores = kBET_scores.reset_index(drop=True)
return kBET_scores
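# A minimal usage sketch (illustrative only; it assumes an AnnData object like the
# `mergedata` built in the *_main.py scripts, with 'batch' and 'real' columns in .obs
# and a neighbour graph already computed):
# scores = kbet(mergedata, batch_key='batch', label_key='real', type_='haveneighbor')
# print(scores)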
def nmi(adata, group1, group2, method="arithmetic", nmi_dir=None):
"""
Normalized mutual information NMI based on 2 different cluster assignments `group1` and `group2`
params:
adata: Anndata object
group1: column name of `adata.obs` or group assignment
group2: column name of `adata.obs` or group assignment
method: NMI implementation
'max': scikit method with `average_method='max'`
'min': scikit method with `average_method='min'`
'geometric': scikit method with `average_method='geometric'`
'arithmetic': scikit method with `average_method='arithmetic'`
'Lancichinetti': implementation by A. Lancichinetti 2009 et al.
'ONMI': implementation by Aaron F. McDaid et al. (https://github.com/aaronmcdaid/Overlapping-NMI) Hurley 2011
nmi_dir: directory of compiled C code if 'Lancichinetti' or 'ONMI' are specified as `method`. Compilation should be done as specified in the corresponding README.
return:
normalized mutual information (NMI)
"""
checkAdata(adata)
if isinstance(group1, str):
checkBatch(group1, adata.obs)
group1 = adata.obs[group1].tolist()
elif isinstance(group1, pd.Series):
group1 = group1.tolist()
if isinstance(group2, str):
checkBatch(group2, adata.obs)
group2 = adata.obs[group2].tolist()
elif isinstance(group2, pd.Series):
group2 = group2.tolist()
if len(group1) != len(group2):
raise ValueError(f'different lengths in group1 ({len(group1)}) and group2 ({len(group2)})')
# choose method
if method in ['max', 'min', 'geometric', 'arithmetic']:
nmi_value = sklearn.metrics.normalized_mutual_info_score(group1, group2, average_method=method)
elif method == "Lancichinetti":
nmi_value = nmi_Lanc(group1, group2, nmi_dir=nmi_dir)
elif method == "ONMI":
nmi_value = onmi(group1, group2, nmi_dir=nmi_dir)
else:
raise ValueError(f"Method {method} not valid")
return nmi_value
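# A minimal usage sketch (illustrative only, using the obs columns created in the
# *_main.py scripts):
# nmi_value = nmi(mergedata, 'louvain', 'real', method='arithmetic')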
def ari(adata, group1, group2):
"""
params:
adata: anndata object
group1: ground-truth cluster assignments (e.g. cell type labels)
group2: "predicted" cluster assignments
The function is symmetric, so group1 and group2 can be switched
"""
checkAdata(adata)
if isinstance(group1, str):
checkBatch(group1, adata.obs)
group1 = adata.obs[group1].tolist()
elif isinstance(group1, pd.Series):
group1 = group1.tolist()
if isinstance(group2, str):
checkBatch(group2, adata.obs)
group2 = adata.obs[group2].tolist()
elif isinstance(group2, pd.Series):
group2 = group2.tolist()
if len(group1) != len(group2):
raise ValueError(f'different lengths in group1 ({len(group1)}) and group2 ({len(group2)})')
return sklearn.metrics.cluster.adjusted_rand_score(group1, group2)
def silhouette(adata, group_key, metric='euclidean', embed='X_pca', scale=True):
"""
    Wrapper for the sklearn silhouette function; values range from [-1, 1] with 1 being an ideal fit, 0 indicating overlapping clusters and -1 indicating misclassified cells.
"""
if embed not in adata.obsm.keys():
print(adata.obsm.keys())
raise KeyError(f'{embed} not in obsm')
asw = sklearn.metrics.silhouette_score(adata.obsm[embed], adata.obs[group_key], metric=metric)
if scale:
asw = (asw + 1)/2
return asw
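# A minimal usage sketch (illustrative only; 'NN' is the embedding key that the
# *_main.py scripts store in mergedata.obsm):
# asw = silhouette(mergedata, group_key='real', embed='NN')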
### PC Regression
def pcr_comparison(adata_pre, adata_post, covariate, embed=None, n_comps=50, scale=True, verbose=False):
"""
Compare the effect before and after integration
Return either the difference of variance contribution before and after integration
    or a score between 0 and 1 (`scale=True`) with 0 if the variance contribution hasn't
changed. The larger the score, the more different the variance contributions are before
and after integration.
params:
adata_pre: uncorrected adata
adata_post: integrated adata
embed : if `embed=None`, use the full expression matrix (`adata.X`), otherwise
use the embedding provided in `adata_post.obsm[embed]`
scale: if True, return scaled score
return:
difference of R2Var value of PCR
"""
if embed == 'X_pca':
embed = None
pcr_before = pcr(adata_pre, covariate=covariate, recompute_pca=True,
n_comps=n_comps, verbose=verbose)
pcr_after = pcr(adata_post, covariate=covariate, embed=embed, recompute_pca=True,
n_comps=n_comps, verbose=verbose)
if scale:
score = (pcr_before - pcr_after)/pcr_before
if score < 0:
print("Variance contribution increased after integration!")
print("Setting PCR comparison score to 0.")
score = 0
return score
else:
return pcr_after - pcr_before
def pcr(adata, covariate, embed=None, n_comps=50, recompute_pca=True, verbose=False):
"""
PCR for Adata object
Checks whether to
+ compute PCA on embedding or expression data (set `embed` to name of embedding matrix e.g. `embed='X_emb'`)
+ use existing PCA (only if PCA entry exists)
+ recompute PCA on expression matrix (default)
params:
adata: Anndata object
embed : if `embed=None`, use the full expression matrix (`adata.X`), otherwise
use the embedding provided in `adata_post.obsm[embed]`
n_comps: number of PCs if PCA should be computed
covariate: key for adata.obs column to regress against
return:
R2Var of PCR
"""
checkAdata(adata)
checkBatch(covariate, adata.obs)
if verbose:
print(f"covariate: {covariate}")
batch = adata.obs[covariate]
# use embedding for PCA
if (embed is not None) and (embed in adata.obsm):
if verbose:
print(f"compute PCR on embedding n_comps: {n_comps}")
return pc_regression(adata.obsm[embed], batch, n_comps=n_comps)
# use existing PCA computation
elif (recompute_pca == False) and ('X_pca' in adata.obsm) and ('pca' in adata.uns):
if verbose:
print("using existing PCA")
return pc_regression(adata.obsm['X_pca'], batch, pca_var=adata.uns['pca']['variance'])
# recompute PCA
else:
if verbose:
print(f"compute PCA n_comps: {n_comps}")
return pc_regression(adata.X, batch, n_comps=n_comps)
def pc_regression(data, variable, pca_var=None, n_comps=50, svd_solver='arpack', verbose=False):
"""
params:
        data: expression or PCA matrix. Will be assumed to contain PCA values if pca_var is given
        variable: series or list of batch assignments
        n_comps: number of PCA components to compute, only used when pca_var is not given. If pca_var is None and n_comps=None, compute PCA without reducing the data
        pca_var: iterable of variances for `n_comps` components. If `pca_var` is not `None`, it is assumed that the matrix contains PCA values, else PCA is computed
        PCA is only computed if the variance contributions (pca_var) are not given.
"""
if isinstance(data, (np.ndarray, sparse.csr_matrix)):
matrix = data
else:
raise TypeError(f'invalid type: {data.__class__} is not a numpy array or sparse matrix')
# perform PCA if no variance contributions are given
if pca_var is None:
if n_comps is None or n_comps > min(matrix.shape):
n_comps = min(matrix.shape)
if n_comps == min(matrix.shape):
svd_solver = 'full'
if verbose:
print("compute PCA")
pca = sc.tl.pca(matrix, n_comps=n_comps, use_highly_variable=False,
return_info=True, svd_solver=svd_solver, copy=True)
X_pca = pca[0].copy()
pca_var = pca[3].copy()
del pca
else:
X_pca = matrix
n_comps = matrix.shape[1]
## PC Regression
if verbose:
print("fit regression on PCs")
# handle categorical values
if pd.api.types.is_numeric_dtype(variable):
variable = np.array(variable).reshape(-1, 1)
else:
if verbose:
print("one-hot encode categorical values")
variable = pd.get_dummies(variable)
# fit linear model for n_comps PCs
r2 = []
for i in range(n_comps):
pc = X_pca[:, [i]]
lm = sklearn.linear_model.LinearRegression()
lm.fit(variable, pc)
r2_score = np.maximum(0,lm.score(variable, pc))
r2.append(r2_score)
Var = pca_var / sum(pca_var) * 100
R2Var = sum(r2*Var)/100
return R2Var | 20,538 | 37.390654 | 180 | py |
CBA | CBA-main/pancreas/ywb_function.py | import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
def color(value):
digit = list(map(str, range(10))) + list("ABCDEF")
if isinstance(value, tuple):
string = '#'
for i in value:
a1 = i // 16
a2 = i % 16
string += digit[a1] + digit[a2]
return string
elif isinstance(value, str):
a1 = digit.index(value[1]) * 16 + digit.index(value[2])
a2 = digit.index(value[3]) * 16 + digit.index(value[4])
a3 = digit.index(value[5]) * 16 + digit.index(value[6])
return (a1, a2, a3)
cluster_colors=[
color((213,94,0)),
color((0,114,178)),
color((204,121,167)),
color((0,158,115)),
color((86,180,233)),
color((230,159,0)),
color((240,228,66)),
color((0,0,0)),
'#D3D3D3',
'#FF00FF',
'#aec470',
'#b3ee3d',
'#de4726',
'#f69149',
'#f81919',
'#ff49b0',
'#f05556',
'#fadf0b',
'#f8c495',
'#ffc1c1',
'#ffc125',
'#ffc0cb',
'#ffbbff',
'#ffb90f',
'#ffb6c1',
'#ffb5c5',
'#ff83fa',
'#ff8c00',
'#ff4040',
'#ff3030',
'#ff34b3',
'#00fa9a',
'#ca4479',
'#eead0e',
'#ff1493',
'#0ab4e4',
'#1e6a87',
'#800080',
'#00e5ee',
'#c71585',
'#027fd0',
'#004dba',
'#0a9fb4',
'#004b71',
'#285528',
'#2f7449',
'#21b183',
'#3e4198',
'#4e14a6',
'#5dd73d',
'#64a44e',
'#6787d6',
'#6c6b6b',
'#6c6b6b',
'#7759a4',
'#78edff',
'#762a14',
'#9805cc',
'#9b067d',
'#af7efe',
'#a7623d']
def plot_tSNE_clusters(df_tSNE,labels,cluster_colors=None,s=6,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE.loc['tSNE1'], df_tSNE.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[i] for i in labels])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_batchclusters(df_tSNE1,df_tSNE2,labels1,labels2,cluster_colors=None,s=0.8,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE2.loc['tSNE1'], df_tSNE2.loc['tSNE2'],s=s,alpha=0.8,lw=0,c='#D3D3D3')
ax.scatter(df_tSNE1.loc['tSNE1'], df_tSNE1.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[1] for i in labels1])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_sepclusters(df_tSNE1,df_tSNE2,labels1,labels2,cluster_colors=None,s=0.8,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE2.loc['tSNE1'], df_tSNE2.loc['tSNE2'],s=s,alpha=0.8,lw=0,c='#D3D3D3')
ax.scatter(df_tSNE1.loc['tSNE1'], df_tSNE1.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[i] for i in labels1])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_cluster(df_tSNE,labels,cluster_colors=None,s=6,save=False,name=None):
index=[[] for i in range(np.max(labels)+1)]
for i in range(len(labels)):
index[int(labels[i])].append(i)
index=[i for i in index if i!=[]]
for i in range(len(np.unique(labels))):
color=np.array(labels)[index[i]][0]
fig,ax=plt.subplots()
ax.scatter(df_tSNE.loc['tSNE1'], df_tSNE.loc['tSNE2'],c='#D3D3D3',s=s,lw=0)
ax.scatter(df_tSNE.loc['tSNE1'].iloc[index[i]],df_tSNE.loc['tSNE2'].iloc[index[i]],c=[cluster_colors[k] for k in np.array(labels)[index[i]]],s=s,lw=0)
ax.axis('equal')
ax.set_axis_off()
if save == True:
plt.savefig('{}.eps'.format(name+str(color)), dpi=600,format='eps')
def gen_labels(df, model):
if str(type(model)).startswith("<class 'sklearn.cluster"):
cell_labels = dict(zip(df.columns, model.labels_))
label_cells = {}
for l in np.unique(model.labels_):
label_cells[l] = []
for i, label in enumerate(model.labels_):
label_cells[label].append(df.columns[i])
cellID = list(df.columns)
labels = list(model.labels_)
labels_a = model.labels_
elif type(model) == np.ndarray:
cell_labels = dict(zip(df.columns, model))
label_cells = {}
for l in np.unique(model):
label_cells[l] = []
for i, label in enumerate(model):
label_cells[label].append(df.columns[i])
cellID = list(df.columns)
labels = list(model)
labels_a = model
else:
        print('Error: wrong input type')
return cell_labels, label_cells, cellID, labels, labels_a
def heatmap(correlation_recluster_cell_final,choose_seriestype1,choose_seriestype2,save=False,name=''):
df=pd.DataFrame(correlation_recluster_cell_final)
labels1=np.array(choose_seriestype1)
labels2=np.array(choose_seriestype2)
cell_labels1,label_cells1,cellID1,labels1,labels_a1=gen_labels(df.T,np.array(labels1))
cell_labels2,label_cells2,cellID2,labels2,labels_a2=gen_labels(df,np.array(labels2))
optimal_order=np.unique(np.concatenate([labels1,labels2]))
cl,lc=gen_labels(df,np.array(labels2))[:2]
optimal_sort_cells=sum([lc[i] for i in np.unique(labels2)],[])
optimal_sort_labels=[cl[i] for i in optimal_sort_cells]
fig,axHM=plt.subplots(figsize=(9,5))
df_full=df.copy()
z=df_full.values
z=pd.DataFrame(z, index=df_full.index,columns=df_full.columns)
z=z.loc[:,optimal_sort_cells].values
im=axHM.pcolormesh(z,cmap='viridis',vmax=1)
plt.gca().invert_yaxis()
plt.xlim(xmax=len(labels2))
plt.xticks([])
plt.yticks([])
divider=make_axes_locatable(axHM)
axLabel1=divider.append_axes("top",.3,pad=0,sharex=axHM)
axLabel2=divider.append_axes("left",.3,pad=0,sharex=axHM)
counter2=Counter(labels2)
counter1=Counter(labels1)
pos2=0
pos1=0
for l in optimal_order:
axLabel1.barh(y=0,left=pos2,width=counter2[l],color=cluster_colors[l],linewidth=0.5,edgecolor=cluster_colors[l])
pos2+=counter2[l]
optimal_order=np.flipud(optimal_order)
for l in optimal_order:
axLabel2.bar(x=0,bottom=pos1,height=counter1[l],color=cluster_colors[l],linewidth=50,edgecolor=cluster_colors[l])
pos1+=counter1[l]
axLabel1.set_xlim(xmax=len(labels2))
axLabel1.axis('off')
axLabel2.set_ylim(ymax=len(labels1))
axLabel2.axis('off')
cax=fig.add_axes([.91,0.13,0.01,0.22])
colorbar=fig.colorbar(im,cax=cax,ticks=[0,1])
colorbar.set_ticklabels(['0','max'])
plt.savefig('{}.jpg'.format(name),dpi=600,format='jpg') | 7,512 | 33.782407 | 158 | py |
CBA | CBA-main/pancreas/pancreas_main.py | """
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import kBET
import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
import sklearn.metrics as sm
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from imblearn.over_sampling import SMOTE,ADASYN
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import dendrogram, linkage
we_use=[1,2]#we try to integrate pancreas1 and pancreas2
#input the data
RAWseries1=pd.read_csv('RAWseries_'+str(we_use[0])+'.csv',header=None)[1:].values.astype('single')
RAWseries2=pd.read_csv('RAWseries_'+str(we_use[1])+'.csv',header=None)[1:].values.astype('single')
#input the label
choose_seriestype1=pd.read_csv('realseries_'+str(we_use[0])+'.csv',header=None)[1:].values
choose_seriestype2=pd.read_csv('realseries_'+str(we_use[1])+'.csv',header=None)[1:].values
#input the gene name
genename=pd.read_csv('pancreas_genename.csv',header=None)[1:][0].values
#this is our code name
fromname='pancreas'+str(we_use[0])+str(we_use[1])
#we choose some parameters
min_cells=50#remove some genes, expressed in less than 50 cells
pca_dim=50#the number of PCs, you can choose as you like
minnumberofcluster=300#this parameter is used for doing Louvain clustering again
#because the clusters obtained by Louvain are sometimes quite big, you can run Louvain again within each obtained cluster
#there is no strict rule: if you think the clusters are too big, you can do it, judged by yourself
#clusters with more than $minnumberofcluster$ cells will be clustered again to make them smaller
#I think this hardly influences the result, it just makes the clusters look nicer, so you can choose it!
clusternumber=1#the number of neighbor clusters used when doing the cluster matching, we choose one neighbor, but you can choose more
#merge them
Alldata=np.concatenate([RAWseries1.T,RAWseries2.T])
Alllabel=np.concatenate([choose_seriestype1,choose_seriestype2])
Allbatch=np.concatenate([np.zeros(choose_seriestype1.shape[0]),np.zeros(choose_seriestype2.shape[0])+1])
###############################################################################
#ok, we select some interesting cell types
chosen_cluster=['alpha','beta','ductal','acinar','delta','gamma','endothelial','epsilon']
chosen_index=np.arange(Alllabel.shape[0])
for i in range(Alllabel.shape[0]):
if Alllabel[i] in chosen_cluster:
chosen_index[i]=1
else:
chosen_index[i]=0
Alldata=Alldata[chosen_index==1,:]
Allbatch=Allbatch[chosen_index==1]
Alllabel=Alllabel[chosen_index==1]
###############################################################################
#and then, use numbers to replace the names of the cell types
Numlabel=np.zeros(Alllabel.shape[0])
cluster_index2={'alpha':0,'beta':1,'ductal':2,'acinar':3,'delta':4,'gamma':5,'endothelial':6,'epsilon':7}
for i in range(Alllabel.shape[0]):
Numlabel[i]=cluster_index2[Alllabel[i][0]]
###############################################################################
#use Scanpy!!!
anndata=sc.AnnData(pd.DataFrame(Alldata,columns=genename))
sc.pp.filter_genes(anndata,min_cells=min_cells)
sc.pp.normalize_per_cell(anndata,counts_per_cell_after=1e4)
sc.pp.log1p(anndata)
sc.pp.highly_variable_genes(anndata)
sc.pl.highly_variable_genes(anndata)
anndata=anndata[:,anndata.var['highly_variable']]
sc.pl.highest_expr_genes(anndata,n_top=20)
sc.tl.pca(anndata,n_comps=100,svd_solver='arpack')
sc.pl.pca(anndata)
sc.pl.pca_variance_ratio(anndata,log=True,n_pcs=100,save=[True,'pancreas'])
#after preprocessing, we rename these datasets
Alldata_aft=anndata.obsm['X_pca'][:,0:pca_dim]
#this prepares the data for deep learning training, the training is hard if you don't do that
Alldata_aft=preprocessing.StandardScaler().fit_transform(Alldata_aft)
Alldata_aft=preprocessing.MinMaxScaler().fit_transform(Alldata_aft)
PCAseries1=Alldata_aft[Allbatch==0,:][Numlabel[Allbatch==0].argsort()]
PCAseries2=Alldata_aft[Allbatch==1,:][Numlabel[Allbatch==1].argsort()]
choose_seriestype1=Numlabel[Allbatch==0][Numlabel[Allbatch==0].argsort()].astype('int')
choose_seriestype2=Numlabel[Allbatch==1][Numlabel[Allbatch==1].argsort()].astype('int')
###############################################################################
#do Louvain clustering
cluster_series1=sc.AnnData(PCAseries1)
cluster_series2=sc.AnnData(PCAseries2)
sc.pp.neighbors(cluster_series1,n_pcs=0)
sc.pp.neighbors(cluster_series2,n_pcs=0)
sc.tl.umap(cluster_series1)
sc.tl.umap(cluster_series2)
sc.tl.louvain(cluster_series1)
sc.tl.louvain(cluster_series2)
sc.pl.umap(cluster_series1,color='louvain',size=30)
sc.pl.umap(cluster_series2,color='louvain',size=30)
cluster1=np.array(list(map(int,cluster_series1.obs['louvain'])))
cluster2=np.array(list(map(int,cluster_series2.obs['louvain'])))
###############################################################################
#ok, as you like, you can run Louvain again inside each big cluster, or not
recluster1=np.zeros(cluster1.shape[0])
recluster2=np.zeros(cluster2.shape[0])
palsecluster1=cluster1
count_cluster1=pd.value_counts(cluster_series1.obs['louvain'])
for i in range(1000000000000000):#until there are no clusters with more than $minnumberofcluster$ cells
if count_cluster1.max()<minnumberofcluster:
break
else:
print(count_cluster1.max())
recluster1=np.zeros(cluster1.shape[0])
recluster1_number=0
for i in np.unique(palsecluster1):
index=palsecluster1==i
if index.sum()<minnumberofcluster:
thisrecluster=np.zeros(index.sum())
recluster1[index]=thisrecluster+recluster1_number
recluster1_number=len(np.unique(recluster1))
else:
data=PCAseries1[index]
anndata=sc.AnnData(data)
sc.pp.neighbors(anndata,n_pcs=0)
sc.tl.louvain(anndata)
thisrecluster=np.array(list(map(int,anndata.obs['louvain'])))
recluster1[index]=thisrecluster+recluster1_number
recluster1_number=len(np.unique(recluster1))
palsecluster1=recluster1.astype('int')
count_cluster1=pd.value_counts(palsecluster1)
palsecluster2=cluster2
count_cluster2=pd.value_counts(cluster_series2.obs['louvain'])
while True:#repeat until there are no clusters with more than minnumberofcluster cells
if count_cluster2.max()<minnumberofcluster:
break
else:
print(count_cluster2.max())
recluster2=np.zeros(cluster2.shape[0])
recluster2_number=0
for i in np.unique(palsecluster2):
index=palsecluster2==i
if index.sum()<minnumberofcluster:
thisrecluster=np.zeros(index.sum())
recluster2[index]=thisrecluster+recluster2_number
recluster2_number=len(np.unique(recluster2))
else:
data=PCAseries2[index]
anndata=sc.AnnData(data)
sc.pp.neighbors(anndata,n_pcs=0)
sc.tl.louvain(anndata)
thisrecluster=np.array(list(map(int,anndata.obs['louvain'])))
recluster2[index]=thisrecluster+recluster2_number
recluster2_number=len(np.unique(recluster2))
palsecluster2=recluster2.astype('int')
count_cluster2=pd.value_counts(palsecluster2)
recluster1=palsecluster1
recluster2=palsecluster2
###############################################################################
#show the Louvain results
series1=sc.AnnData(PCAseries1)
series2=sc.AnnData(PCAseries2)
sc.pp.neighbors(series1,n_pcs=0)
sc.pp.neighbors(series2,n_pcs=0)
sc.tl.umap(series1)
sc.tl.umap(series2)
df1=pd.DataFrame(choose_seriestype1)
df1=pd.Series(np.reshape(df1.values,df1.values.shape[0]), dtype="category")
series1.obs['real']=df1.values
df2=pd.DataFrame(choose_seriestype2)
df2=pd.Series(np.reshape(df2.values,df2.values.shape[0]), dtype="category")
series2.obs['real']=df2.values
sc.pl.umap(series1,color='real',size=30)
sc.pl.umap(series2,color='real',size=30)
df1=pd.DataFrame(recluster1.astype('int'))
df1=pd.Series(np.reshape(df1.values,df1.values.shape[0]), dtype="category")
series1.obs['recluster']=df1.values
df2=pd.DataFrame(recluster2.astype('int'))
df2=pd.Series(np.reshape(df2.values,df2.values.shape[0]), dtype="category")
series2.obs['recluster']=df2.values
sc.pl.umap(series1,color='recluster',size=30)
sc.pl.umap(series2,color='recluster',size=30)
###############################################################################
#distance metric used when comparing cells and selecting neighbour clusters
def dis(P,Q,distance_method):
    if distance_method==0:#Euclidean distance
        return np.sqrt(np.sum(np.square(P-Q)))
    if distance_method==1:#cosine distance
return 1-(np.multiply(P,Q).sum()/(np.sqrt(np.sum(np.square(P)))*np.sqrt(np.sum(np.square(Q)))))
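#quick worked example of the two metrics (toy vectors, not part of the pipeline):
#dis(np.array([1.,0.]),np.array([0.,1.]),0) = sqrt(2) ~ 1.414 (Euclidean)
#dis(np.array([1.,0.]),np.array([0.,1.]),1) = 1.0 (cosine distance; the vectors are orthogonal)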
###############################################################################
#swap the two batches if necessary, so that from here on batch 1 has no more clusters than batch 2
if len(np.unique(recluster1))>=len(np.unique(recluster2)):
a=PCAseries1
PCAseries1=PCAseries2
PCAseries2=a
b=choose_seriestype1
choose_seriestype1=choose_seriestype2
choose_seriestype2=b
c=cluster1
cluster1=cluster2
cluster2=c
d=recluster1
recluster1=recluster2
recluster2=d
###############################################################################
#ok, let's calculate the similarity of cells/clusters
correlation_recluster=np.zeros([len(np.unique(recluster1)),len(np.unique(recluster2))])
correlation_recluster_cell=np.zeros([recluster1.shape[0],recluster2.shape[0]])
for i in range(len(np.unique(recluster1))):
for j in range(len(np.unique(recluster2))):
print(i,j)
index_series1=np.where(recluster1==i)[0]
index_series2=np.where(recluster2==j)[0]
cell_series1=PCAseries1[index_series1,:]
cell_series2=PCAseries2[index_series2,:]
mean1=0
for iq in range(cell_series1.shape[0]):
for jq in range(cell_series2.shape[0]):
mean1+=dis(cell_series1[iq,:],cell_series2[jq,:],1)
correlation_recluster[i,j]=mean1/(cell_series1.shape[0]*cell_series2.shape[0])
for ii in range(cell_series1.shape[0]):
for jj in range(cell_series2.shape[0]):
mean2=dis(cell_series1[ii,:],cell_series2[jj,:],0)
correlation_recluster_cell[index_series1[ii],index_series2[jj]]=mean2
plt.imshow(correlation_recluster)
plt.imshow(correlation_recluster_cell)
correlation_recluster_div=-np.log10(correlation_recluster)
correlation_recluster_cell_div=-np.log10(correlation_recluster_cell)
correlation_recluster_norm=(correlation_recluster_div-correlation_recluster_div.min())/(correlation_recluster_div.max()-correlation_recluster_div.min())
correlation_recluster_cell_norm=(correlation_recluster_cell_div-correlation_recluster_cell_div.min())/(correlation_recluster_cell_div.max()-correlation_recluster_cell_div.min())
#show them
plt.imshow(correlation_recluster_norm)
plt.imshow(correlation_recluster_cell_norm)
###############################################################################
#keep only the best-matching neighbour cluster(s) for each cluster, then build the final cell-level matching matrix
correlation_recluster_select=np.zeros(correlation_recluster_norm.shape)
recluster_mid=np.zeros(recluster1.shape)
for kk in range(correlation_recluster_norm.shape[0]):
ind=np.sort(correlation_recluster_norm[kk,:])
select=correlation_recluster_norm[kk,:]<ind[-clusternumber]
select=(select==False)
recluster_mid[recluster1==kk]+=int(np.where(select==True)[0])
correlation_recluster_select[kk,:]=correlation_recluster_norm[kk,:]*select
plt.imshow(correlation_recluster_select)
correlation_recluster_cell_final=correlation_recluster_cell*0
for i in range(correlation_recluster_cell_norm.shape[0]):
for j in range(correlation_recluster_cell_norm.shape[1]):
label1=recluster1[i]
label2=recluster2[j]
mean1=correlation_recluster_select[label1,label2]
mean2=correlation_recluster_cell_norm[i,j]
if mean1==0:
correlation_recluster_cell_final[i,j]=0
else:
correlation_recluster_cell_final[i,j]=mean2
plt.imshow(correlation_recluster_select)
plt.imshow(correlation_recluster_cell_final)
recluster1=recluster_mid.astype('int')
sort_correlation_recluster_cell_final=correlation_recluster_cell_final[recluster1.argsort(),:]
sort_correlation_recluster_cell_final=sort_correlation_recluster_cell_final[:,recluster2.argsort()]
###############################################################################
#heatmap
heatmap(correlation_recluster_cell_final,choose_seriestype1,choose_seriestype2,save=False,name='pancreasmatrix')
heatmap(sort_correlation_recluster_cell_final,np.sort(recluster1)+9,np.sort(recluster2)+9,save=False,name='ourpancreasmatrix')
###############################################################################
#ok, I use Keras; cells in each training batch are drawn randomly, so I don't know how to pair cells by their similarity
#or attach the cell-cell distances to those pairs directly, so I encode that information into the input vectors designed below
#it wastes some time and memory, and it's not easy or clear for readers, but it works!
x_input1=np.zeros([PCAseries1.shape[0],PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]+recluster2.max()+1])
x_input2=np.zeros([PCAseries2.shape[0],PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]+recluster2.max()+1])
for i in range(PCAseries1.shape[0]):
print(i)
x_input1[i,0:PCAseries1.shape[1]]=PCAseries1[i,:]
x_input1[i,PCAseries1.shape[1]:PCAseries1.shape[1]+PCAseries1.shape[0]]=K.utils.np_utils.to_categorical(i,PCAseries1.shape[0])
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]=correlation_recluster_cell_final[i,:]
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]:]=K.utils.np_utils.to_categorical(recluster1[i],recluster2.max()+1)
for j in range(PCAseries2.shape[0]):
print(j)
x_input2[j,0:PCAseries2.shape[1]]=PCAseries2[j,:]
x_input2[j,PCAseries2.shape[1]:PCAseries2.shape[1]+PCAseries2.shape[0]]=K.utils.np_utils.to_categorical(j,PCAseries2.shape[0])
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]:PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]]=correlation_recluster_cell_final[:,j]
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]:]=K.utils.np_utils.to_categorical(recluster2[j],recluster2.max()+1)
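#for clarity, each row of x_input1/x_input2 is the concatenation
#[PCA features | one-hot index of the cell within its batch | similarity row against every cell of the other batch | one-hot cluster label],
#so the total width is pca_dim + n_cells(batch1) + n_cells(batch2) + (recluster2.max()+1)
print(x_input1.shape,x_input2.shape)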
###############################################################################
#interesting, the two batches must contain the same number of cells, so the smaller batch is tiled (its cells are copied again and again) and then truncated to match the larger one
if x_input1.shape[0]>=x_input2.shape[0]:
x_test1=x_input1
y_test1=recluster1
y_testreal1=choose_seriestype1
repeat_num=int(np.ceil(x_input1.shape[0]/x_input2.shape[0]))
x_test2=np.tile(x_input2,(repeat_num,1))
y_test2=np.tile(recluster2,repeat_num)
y_testreal2=np.tile(choose_seriestype2,repeat_num)
x_test2=x_test2[0:x_test1.shape[0],:]
y_test2=y_test2[0:x_test1.shape[0]]
y_testreal2=y_testreal2[0:x_test1.shape[0]]
elif x_input1.shape[0]<x_input2.shape[0]:
x_test2=x_input2
y_test2=recluster2
y_testreal2=choose_seriestype2
repeat_num=int(np.ceil(x_input2.shape[0]/x_input1.shape[0]))
x_test1=np.tile(x_input1,(repeat_num,1))
y_test1=np.tile(recluster1,repeat_num)
y_testreal1=np.tile(choose_seriestype1,repeat_num)
x_test1=x_test1[0:x_test2.shape[0],:]
y_test1=y_test1[0:x_test2.shape[0]]
y_testreal1=y_testreal1[0:x_test2.shape[0]]
###############################################################################
def choose_info(x,info_number):
return x[:,0:info_number]
def choose_index(x,info_number,x_samplenumber):
return x[:,info_number:info_number+x_samplenumber]
def choose_corrlation(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber:info_number+x_samplenumber+cor_number]
def choose_relabel(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber+cor_number:]
def slic(input_):
return input_[:,0]
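#these Lambda helpers simply slice the concatenated input vector back apart:
#choose_info -> the PCA block, choose_index -> the one-hot cell index, choose_corrlation -> the
#cross-batch similarity row, choose_relabel -> the one-hot cluster label (slic does not appear to be used below)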
###############################################################################
activation='relu'
info_number=PCAseries1.shape[1]
layer=PCAseries1.shape[1]
input1=K.Input(shape=(x_test1.shape[1],))#line1 batch1
input2=K.Input(shape=(x_test2.shape[1],))#line1 batch2
input3=K.Input(shape=(x_test1.shape[1],))#line2 batch1
input4=K.Input(shape=(x_test2.shape[1],))#line2 batch2
Data1=Lambda(choose_info,arguments={'info_number':info_number})(input1)
Data2=Lambda(choose_info,arguments={'info_number':info_number})(input2)
Data3=Lambda(choose_info,arguments={'info_number':info_number})(input3)
Data4=Lambda(choose_info,arguments={'info_number':info_number})(input4)
Index1=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input1)
Index2=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input2)
Index3=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input3)
Index4=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input4)
Cor1=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Cor2=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Cor3=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Cor4=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
Relabel1=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Relabel2=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Relabel3=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Relabel4=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
x_concat1=layers.concatenate([Data1,Data3])#batch1
x_concat2=layers.concatenate([Data2,Data4])#batch2
x1=layers.Dense(layer,activation=activation)(Data1)
x2=layers.Dense(layer,activation=activation)(Data2)
x3=layers.Dense(layer,activation=activation)(Data3)
x4=layers.Dense(layer,activation=activation)(Data4)
x1=layers.BatchNormalization()(x1)
x2=layers.BatchNormalization()(x2)
x3=layers.BatchNormalization()(x3)
x4=layers.BatchNormalization()(x4)
x1_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x2_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x1_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x2_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x1_mid1=layers.BatchNormalization()(x1_mid1)
x2_mid1=layers.BatchNormalization()(x2_mid1)
x1_mid2=layers.BatchNormalization()(x1_mid2)
x2_mid2=layers.BatchNormalization()(x2_mid2)
layer_classify=layers.Dense(recluster2.max()+1,activation='relu')
y1=layer_classify(x1_mid1)
y2=layer_classify(x2_mid1)
y3=layer_classify(x1_mid2)
y4=layer_classify(x2_mid2)
x1=layers.concatenate([x1_mid1,x1_mid2])#batch1
x2=layers.concatenate([x2_mid1,x2_mid2])#batch2
output1=layers.Dense(2*layer,activation=activation)(x1)
output2=layers.Dense(2*layer,activation=activation)(x2)
output1=layers.BatchNormalization()(output1)
output2=layers.BatchNormalization()(output2)
def loss_weight(input_):
return tf.reduce_sum(tf.multiply(input_[0],input_[1]),axis=-1)
def MSE(input_):
return tf.reduce_mean(tf.square(input_[0]-input_[1]),axis=-1)
def multi_classification_loss(input_):
return tf.keras.losses.categorical_crossentropy(input_[0],input_[1])
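#how these custom losses are wired: loss_weight takes a one-hot vector and a similarity row and returns their
#dot product, i.e. it picks out the similarity value of the cell pair in that training line
#(e.g. sum(one_hot(i)*correlation_row_of_cell_j) = correlation_recluster_cell_final[i,j]);
#MSE is a plain mean squared error along the feature axis; multi_classification_loss is defined but the
#classification terms below actually use MSE against the one-hot labels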
AE_loss_1=Lambda(MSE)([output1,x_concat1])
AE_loss_2=Lambda(MSE)([output2,x_concat2])
cls_loss_1=Lambda(MSE)([y1,Relabel1])
cls_loss_2=Lambda(MSE)([y2,Relabel2])
cls_loss_3=Lambda(MSE)([y3,Relabel3])
cls_loss_4=Lambda(MSE)([y4,Relabel4])
interweight1=Lambda(loss_weight)([Index1,Cor2])
interweight4=Lambda(loss_weight)([Index3,Cor4])
interloss_1=Lambda(MSE)([x1_mid1,x2_mid1])
interloss_4=Lambda(MSE)([x1_mid2,x2_mid2])
interloss_1=layers.Multiply()([interweight1,interloss_1])
interloss_4=layers.Multiply()([interweight4,interloss_4])
intraweight1=Lambda(loss_weight)([Relabel1,Relabel3])
intraweight2=Lambda(loss_weight)([Relabel2,Relabel4])
intraloss_1=Lambda(MSE)([x1_mid1,x1_mid2])
intraloss_2=Lambda(MSE)([x2_mid1,x2_mid2])
intraloss_1=layers.Multiply()([intraweight1,intraloss_1])
intraloss_2=layers.Multiply()([intraweight2,intraloss_2])
Loss1=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss1')([AE_loss_1,AE_loss_2])
Loss2=Lambda(lambda x:(x[0]*1+x[1]*1+x[2]*1+x[3]*1)/4,name='loss2')([cls_loss_1,cls_loss_2,cls_loss_3,cls_loss_4])
Loss3=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss3')([interloss_1,interloss_4])
Loss4=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss4')([intraloss_1,intraloss_2])
###############################################################################
network_train=K.models.Model([input1,input2,input3,input4],[Loss1,Loss2,Loss3,Loss4])
network_train.summary()
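#summary of the four loss heads (all computed inside the graph by the Lambda layers above):
#Loss1: autoencoder reconstruction, output1/output2 vs the concatenated PCA inputs of each batch
#Loss2: "classification" of each of the four input cells against its (matched) cluster label, as MSE to the one-hot label
#Loss3: inter-batch alignment, MSE between the embeddings of the paired batch-1 and batch-2 cells, weighted by their cross-batch similarity
#Loss4: intra-batch consistency, MSE between the embeddings of two cells from the same batch, weighted by 1 if they share a cluster label and 0 otherwise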
###############################################################################
intra_data1={}
inter_data1={}
for i in range(x_test1.shape[0]):
label_i=y_test1[i]
intra_data1[i]=np.where(y_test1==label_i)
inter_data1[i]=np.where(y_test1!=label_i)
intra_data2={}
inter_data2={}
for i in range(x_test2.shape[0]):
label_i=y_test2[i]
intra_data2[i]=np.where(y_test2==label_i)
inter_data2[i]=np.where(y_test2!=label_i)
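#intra_data*/inter_data* map every cell index to the indices of cells with the same / a different cluster label
#in its batch; the training loop below uses them to draw the second line of each pair as a positive
#(same-cluster) or negative (different-cluster) partner with probability 0.5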
###############################################################################
batch_size=256
train_loss=[]
loss1=[]
loss2=[]
loss3=[]
loss4=[]
###############################################################################
iterations=10000000
lr=1e-4
optimizer=K.optimizers.Adam(lr=lr)
loss_weights=[1,1,1,1]
#these four loss terms do not converge at the same speed, and I don't know a clean way to resolve that
#so I use a crude strategy: if one term becomes too small, stop training, enlarge its weight and train again
#I think you can train this model better...or maybe you can teach me how to auto-balance the weights, thank you!
network_train.compile(optimizer=optimizer,
loss=[lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred],
loss_weights=loss_weights)
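#the model outputs are already the loss values (built with the Lambda layers), so each keras loss above is just
#the identity on y_pred, and the targets passed to fit() below are dummy arrays of zeros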
for i in range(iterations):
x_input1_series1_train=np.zeros(x_test1.shape)
index0=np.zeros(x_input1_series1_train.shape[0])
x_input1_series2_train=np.zeros(x_test2.shape)
index1=np.zeros(x_input1_series2_train.shape[0])
x_input2_series1_train=np.zeros(x_test1.shape)
index2=np.zeros(x_input2_series1_train.shape[0])
x_input2_series2_train=np.zeros(x_test2.shape)
index3=np.zeros(x_input2_series2_train.shape[0])
for ii in range(x_test1.shape[0]):
index0[ii]=random.choice(range(x_test1.shape[0]))
rand1=random.random()
in_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]>0)[0]
out_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]<=0)[0]
if rand1>=0.5:
index1[ii]=random.choice(in_rand1)
elif rand1<0.5:
index1[ii]=random.choice(out_rand1)
rand2=random.random()
if rand2>=0.5:
index2[ii]=random.choice(intra_data1[index0[ii]][0])
elif rand2<0.5:
index2[ii]=random.choice(inter_data1[index0[ii]][0])
rand3=random.random()
if rand3>=0.5:
index3[ii]=random.choice(intra_data2[index1[ii]][0])
elif rand3<0.5:
index3[ii]=random.choice(inter_data2[index1[ii]][0])
train1=x_test1[index0.astype('int'),:]
train2=x_test2[index1.astype('int'),:]
train3=x_test1[index2.astype('int'),:]
train4=x_test2[index3.astype('int'),:]
Train=network_train.fit([train1,train2,train3,train4],
[np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1])],
batch_size=batch_size,shuffle=True)
train_loss.append(Train.history['loss'][:][0])
loss1.append(Train.history['loss1_loss'][:][0]*loss_weights[0])
loss2.append(Train.history['loss2_loss'][:][0]*loss_weights[1])
loss3.append(Train.history['loss3_loss'][:][0]*loss_weights[2])
loss4.append(Train.history['loss4_loss'][:][0]*loss_weights[3])
print(i,'loss=',
Train.history['loss'][:][0],
Train.history['loss1_loss'][:][0]*loss_weights[0],
Train.history['loss2_loss'][:][0]*loss_weights[1],
Train.history['loss3_loss'][:][0]*loss_weights[2],
Train.history['loss4_loss'][:][0]*loss_weights[3])
if i>500:
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.ylim(0,max(max(train_loss[i-500:],loss1[i-500:],loss2[i-500:],loss3[i-500:],loss4[i-500:])))
plt.xlim(i-500,i)
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
else:
        plt.plot(train_loss[:])
        plt.plot(loss1[:])
        plt.plot(loss2[:])
        plt.plot(loss3[:])
        plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
###############################################################################
network_train.load_weights('pancreas42.h5')#load previously saved weights; note that this overwrites whatever the training loop above just learned
network_predict=K.models.Model([input1,input2,input3,input4],[x1_mid1,x2_mid1,x1_mid2,x2_mid2])
[low_dim1,low_dim2,low_dim3,low_dim4]=network_predict.predict([x_test1,x_test2,x_test1,x_test2])
low_dim1=low_dim1[0:x_input1.shape[0]]
low_dim2=low_dim2[0:x_input2.shape[0]]
low_dim3=low_dim3[0:x_input1.shape[0]]
low_dim4=low_dim4[0:x_input2.shape[0]]
low_dim1=np.concatenate([low_dim1,low_dim3],axis=1)
low_dim2=np.concatenate([low_dim2,low_dim4],axis=1)
y_real_no1=y_testreal1[0:x_input1.shape[0]]
y_recluster_no1=recluster1[0:x_input1.shape[0]]
y_real_no2=y_testreal2[0:x_input2.shape[0]]
y_recluster_no2=recluster2[0:x_input2.shape[0]]
total_real_type=np.concatenate([y_real_no1,y_real_no2])
total_recluster_type=np.concatenate([y_recluster_no1,y_recluster_no2])
###############################################################################
series1=sc.AnnData(low_dim1)
series2=sc.AnnData(low_dim2)
mergedata=series1.concatenate(series2)
mergedata.obsm['NN']=mergedata.X
sc.pp.neighbors(mergedata,n_pcs=0)
sc.tl.louvain(mergedata)
sc.tl.leiden(mergedata)
sc.tl.umap(mergedata)
df=pd.DataFrame(total_real_type.astype('int'))
df=pd.Series(np.reshape(df.values,df.values.shape[0]), dtype="category")
mergedata.obs['real']=df.values
sc.pl.umap(mergedata,color='louvain',size=30)
sc.pl.umap(mergedata,color='leiden',size=30)
sc.pl.umap(mergedata,color='batch',size=30)
sc.pl.umap(mergedata,color='real',size=30)
type_louvain=mergedata.obs['louvain']
type_leiden=mergedata.obs['leiden']
type_batch=mergedata.obs['batch']
type_real=mergedata.obs['real']
###############################################################################
umapdata=pd.DataFrame(mergedata.obsm['X_umap'].T,index=['tSNE1','tSNE2'])#the rows are named tSNE1/tSNE2 only because the plotting helpers expect these names; the coordinates are UMAP
umapdata1=pd.DataFrame(mergedata.obsm['X_umap'][0:PCAseries1.shape[0],:].T,index=['tSNE1','tSNE2'])
umapdata2=pd.DataFrame(mergedata.obsm['X_umap'][PCAseries1.shape[0]:,:].T,index=['tSNE1','tSNE2'])
###############################################################################
plot_tSNE_batchclusters(umapdata1,umapdata2,choose_seriestype1,choose_seriestype2,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'batch1')
plot_tSNE_batchclusters(umapdata2,umapdata1,choose_seriestype2,choose_seriestype1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'batch2')
plot_tSNE_clusters(umapdata,list(map(int,type_batch)), cluster_colors=cluster_colors,save=False,name=fromname+'batch')
plot_tSNE_sepclusters(umapdata1,umapdata2,choose_seriestype1,choose_seriestype2,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label1')
plot_tSNE_sepclusters(umapdata2,umapdata1,choose_seriestype2,choose_seriestype1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label2')
plot_tSNE_clusters(umapdata,list(map(int,type_real)), cluster_colors=cluster_colors,save=False, name=fromname+'label')
#sio.savemat('pancreas_ourdata.mat',{'mergedata':mergedata.X,'umapdata':umapdata.values})#before saving, check whether the two batches were swapped earlier; if so, swap them back yourself!!! | 30,362 | 46.815748 | 185 | py |
CBA | CBA-main/species/kBET.py | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 31 10:41:54 2021
@author: 17b90
"""
import numpy as np
import anndata
import scanpy as sc
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#import networkx as nx
from scIB.utils import *
from scIB.preprocessing import score_cell_cycle
from scIB.clustering import opt_louvain
from scipy import sparse
from scipy.sparse.csgraph import connected_components
from scipy.io import mmwrite
import sklearn
import sklearn.linear_model#needed by pc_regression below
import sklearn.metrics
from time import time
import cProfile
from pstats import Stats
import memory_profiler
import itertools
import multiprocessing as multipro
import subprocess
import tempfile
import pathlib
from os import mkdir, path, remove, stat
import gc
import rpy2.rinterface_lib.callbacks
import logging
rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR) # Ignore R warning messages
import rpy2.robjects as ro
import anndata2ri
def checkAdata(adata):
if type(adata) is not anndata.AnnData:
raise TypeError('Input is not a valid AnnData object')
def checkBatch(batch, obs, verbose=False):
if batch not in obs:
raise ValueError(f'column {batch} is not in obs')
elif verbose:
print(f'Object contains {obs[batch].nunique()} batches.')
def diffusion_conn(adata, min_k=50, copy=True, max_iterations=26):
'''
This function performs graph diffusion on the connectivities matrix until a
minimum number `min_k` of entries per row are non-zero.
Note:
    Due to self-loops, min_k-1 non-zero connectivities entries is actually the stopping
criterion. This is equivalent to `sc.pp.neighbors`.
Returns:
       The diffusion-enhanced connectivities matrix, or (if `copy=True`) a copy of the AnnData
       object with the diffusion-enhanced connectivities matrix stored in
       `adata.uns["neighbors"]["diffusion_connectivities"]`
'''
if 'neighbors' not in adata.uns:
raise ValueError('`neighbors` not in adata object. '
'Please compute a neighbourhood graph!')
if 'connectivities' not in adata.uns['neighbors']:
raise ValueError('`connectivities` not in `adata.uns["neighbors"]`. '
'Please pass an object with connectivities computed!')
T = adata.uns['neighbors']['connectivities']
#Normalize T with max row sum
# Note: This keeps the matrix symmetric and ensures |M| doesn't keep growing
T = sparse.diags(1/np.array([T.sum(1).max()]*T.shape[0]))*T
M = T
# Check for disconnected component
n_comp, labs = connected_components(adata.uns['neighbors']['connectivities'],
connection='strong')
if n_comp > 1:
tab = pd.value_counts(labs)
small_comps = tab.index[tab<min_k]
large_comp_mask = np.array(~pd.Series(labs).isin(small_comps))
else:
large_comp_mask = np.array([True]*M.shape[0])
T_agg = T
i = 2
while ((M[large_comp_mask,:][:,large_comp_mask]>0).sum(1).min() < min_k) and (i < max_iterations):
print(f'Adding diffusion to step {i}')
T_agg *= T
M += T_agg
i+=1
if (M[large_comp_mask,:][:,large_comp_mask]>0).sum(1).min() < min_k:
        raise ValueError('could not create diffusion connectivities matrix '
                         f'with at least {min_k} non-zero entries in '
                         f'{max_iterations} iterations.\n Please increase the '
                         'value of max_iterations or reduce min_k.\n')
M.setdiag(0)
if copy:
adata_tmp = adata.copy()
adata_tmp.uns['neighbors'].update({'diffusion_connectivities': M})
return adata_tmp
else:
return M
def diffusion_nn(adata, k, max_iterations=26):
'''
This function generates a nearest neighbour list from a connectivities matrix
as supplied by BBKNN or Conos. This allows us to select a consistent number
of nearest neighbours across all methods.
Return:
`k_indices` a numpy.ndarray of the indices of the k-nearest neighbors.
'''
if 'neighbors' not in adata.uns:
raise ValueError('`neighbors` not in adata object. '
'Please compute a neighbourhood graph!')
if 'connectivities' not in adata.uns['neighbors']:
raise ValueError('`connectivities` not in `adata.uns["neighbors"]`. '
'Please pass an object with connectivities computed!')
T = adata.uns['neighbors']['connectivities']
# Row-normalize T
T = sparse.diags(1/T.sum(1).A.ravel())*T
T_agg = T**3
M = T+T**2+T_agg
i = 4
while ((M>0).sum(1).min() < (k+1)) and (i < max_iterations):
#note: k+1 is used as diag is non-zero (self-loops)
print(f'Adding diffusion to step {i}')
T_agg *= T
M += T_agg
i+=1
if (M>0).sum(1).min() < (k+1):
        #NeighborsError is not defined in this module, so raise a plain ValueError instead
        raise ValueError(f'could not find {k} nearest neighbors in {max_iterations} '
                         'diffusion steps.\n Please increase max_iterations or reduce'
                         ' k.\n')
M.setdiag(0)
k_indices = np.argpartition(M.A, -k, axis=1)[:, -k:]
return k_indices
def kBET_single(matrix, batch, type_ = None, k0 = 10, knn=None, subsample=0.5, heuristic=True, verbose=False):
"""
params:
        matrix: expression matrix (at the moment a PCA matrix, so do.pca is set to FALSE)
        batch: series or list of batch assignments
subsample: fraction to be subsampled. No subsampling if `subsample=None`
returns:
kBET p-value
"""
anndata2ri.activate()
ro.r("library(kBET)")
if verbose:
print("importing expression matrix")
ro.globalenv['data_mtrx'] = matrix
ro.globalenv['batch'] = batch
#print(matrix.shape)
#print(len(batch))
if verbose:
print("kBET estimation")
#k0 = len(batch) if len(batch) < 50 else 'NULL'
ro.globalenv['knn_graph'] = knn
ro.globalenv['k0'] = k0
batch_estimate = ro.r(f"batch.estimate <- kBET(data_mtrx, batch, knn=knn_graph, k0=k0, plot=FALSE, do.pca=FALSE, heuristic=FALSE, adapt=FALSE, verbose={str(verbose).upper()})")
anndata2ri.deactivate()
try:
ro.r("batch.estimate$average.pval")[0]
except rpy2.rinterface_lib.embedded.RRuntimeError:
return np.nan
else:
return ro.r("batch.estimate$average.pval")[0]
def kbet(adata, batch_key, label_key, embed='X_pca', type_ = None,
hvg=False, subsample=0.5, heuristic=False, verbose=False):
"""
    Run kBET separately on every cluster defined by `label_key` and report how well the batches mix
    params:
        adata: AnnData object; the representation given by `embed` (or a precomputed neighbour graph) is used
return:
pd.DataFrame with kBET p-values per cluster for batch
"""
checkAdata(adata)
checkBatch(batch_key, adata.obs)
checkBatch(label_key, adata.obs)
#compute connectivities for non-knn type data integrations
#and increase neighborhoods for knn type data integrations
if type_ =='haveneighbor':
adata_tmp = adata
print('neighbor have already obtained!')
elif type_ != 'knn':
adata_tmp = sc.pp.neighbors(adata, n_neighbors = 50, use_rep=embed, copy=True)
else:
#check if pre-computed neighbours are stored in input file
adata_tmp = adata.copy()
if 'diffusion_connectivities' not in adata.uns['neighbors']:
if verbose:
print(f"Compute: Diffusion neighbours.")
adata_tmp = diffusion_conn(adata, min_k = 50, copy = True)
adata_tmp.uns['neighbors']['connectivities'] = adata_tmp.uns['neighbors']['diffusion_connectivities']
if verbose:
print(f"batch: {batch_key}")
#set upper bound for k0
size_max = 2**31 - 1
kBET_scores = {'cluster': [], 'kBET': []}
for clus in adata_tmp.obs[label_key].unique():
adata_sub = adata_tmp[adata_tmp.obs[label_key] == clus,:].copy()
#check if neighborhood size too small or only one batch in subset
if np.logical_or(adata_sub.n_obs < 10,
len(np.unique(adata_sub.obs[batch_key]))==1):
print(f"{clus} consists of a single batch or is too small. Skip.")
score = np.nan
else:
quarter_mean = np.floor(np.mean(adata_sub.obs[batch_key].value_counts())/4).astype('int')
k0 = np.min([70, np.max([10, quarter_mean])])
#check k0 for reasonability
if (k0*adata_sub.n_obs) >=size_max:
k0 = np.floor(size_max/adata_sub.n_obs).astype('int')
matrix = np.zeros(shape=(adata_sub.n_obs, k0+1))
if verbose:
print(f"Use {k0} nearest neighbors.")
n_comp, labs = connected_components(adata_sub.uns['neighbors']['connectivities'],
connection='strong')
if n_comp > 1:
#check the number of components where kBET can be computed upon
comp_size = pd.value_counts(labs)
#check which components are small
comp_size_thresh = 3*k0
idx_nonan = np.flatnonzero(np.in1d(labs,
comp_size[comp_size>=comp_size_thresh].index))
#check if 75% of all cells can be used for kBET run
if len(idx_nonan)/len(labs) >= 0.75:
#create another subset of components, assume they are not visited in a diffusion process
adata_sub_sub = adata_sub[idx_nonan,:].copy()
nn_index_tmp = np.empty(shape=(adata_sub.n_obs, k0))
nn_index_tmp[:] = np.nan
nn_index_tmp[idx_nonan] = diffusion_nn(adata_sub_sub, k=k0).astype('float')
#need to check neighbors (k0 or k0-1) as input?
score = kBET_single(
matrix=matrix,
batch=adata_sub.obs[batch_key],
knn = nn_index_tmp+1, #nn_index in python is 0-based and 1-based in R
subsample=subsample,
verbose=verbose,
heuristic=False,
k0 = k0,
type_ = type_
)
else:
#if there are too many too small connected components, set kBET score to 1
#(i.e. 100% rejection)
score = 1
else: #a single component to compute kBET on
#need to check neighbors (k0 or k0-1) as input?
nn_index_tmp = diffusion_nn(adata_sub, k=k0).astype('float')
score = kBET_single(
matrix=matrix,
batch=adata_sub.obs[batch_key],
knn = nn_index_tmp+1, #nn_index in python is 0-based and 1-based in R
subsample=subsample,
verbose=verbose,
heuristic=False,
k0 = k0,
type_ = type_
)
kBET_scores['cluster'].append(clus)
kBET_scores['kBET'].append(score)
kBET_scores = pd.DataFrame.from_dict(kBET_scores)
kBET_scores = kBET_scores.reset_index(drop=True)
return kBET_scores
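#example usage (illustrative only; assumes an AnnData `adata` with obs columns 'batch' and 'celltype',
#a PCA stored in adata.obsm['X_pca'], and R with the kBET package available through rpy2):
#scores = kbet(adata, batch_key='batch', label_key='celltype', embed='X_pca')
#print(scores)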
def nmi(adata, group1, group2, method="arithmetic", nmi_dir=None):
"""
Normalized mutual information NMI based on 2 different cluster assignments `group1` and `group2`
params:
adata: Anndata object
group1: column name of `adata.obs` or group assignment
group2: column name of `adata.obs` or group assignment
method: NMI implementation
'max': scikit method with `average_method='max'`
'min': scikit method with `average_method='min'`
'geometric': scikit method with `average_method='geometric'`
'arithmetic': scikit method with `average_method='arithmetic'`
'Lancichinetti': implementation by A. Lancichinetti 2009 et al.
'ONMI': implementation by Aaron F. McDaid et al. (https://github.com/aaronmcdaid/Overlapping-NMI) Hurley 2011
nmi_dir: directory of compiled C code if 'Lancichinetti' or 'ONMI' are specified as `method`. Compilation should be done as specified in the corresponding README.
return:
normalized mutual information (NMI)
"""
checkAdata(adata)
if isinstance(group1, str):
checkBatch(group1, adata.obs)
group1 = adata.obs[group1].tolist()
elif isinstance(group1, pd.Series):
group1 = group1.tolist()
if isinstance(group2, str):
checkBatch(group2, adata.obs)
group2 = adata.obs[group2].tolist()
elif isinstance(group2, pd.Series):
group2 = group2.tolist()
if len(group1) != len(group2):
raise ValueError(f'different lengths in group1 ({len(group1)}) and group2 ({len(group2)})')
# choose method
if method in ['max', 'min', 'geometric', 'arithmetic']:
nmi_value = sklearn.metrics.normalized_mutual_info_score(group1, group2, average_method=method)
elif method == "Lancichinetti":
nmi_value = nmi_Lanc(group1, group2, nmi_dir=nmi_dir)
elif method == "ONMI":
nmi_value = onmi(group1, group2, nmi_dir=nmi_dir)
else:
raise ValueError(f"Method {method} not valid")
return nmi_value
def ari(adata, group1, group2):
"""
params:
adata: anndata object
group1: ground-truth cluster assignments (e.g. cell type labels)
group2: "predicted" cluster assignments
The function is symmetric, so group1 and group2 can be switched
"""
checkAdata(adata)
if isinstance(group1, str):
checkBatch(group1, adata.obs)
group1 = adata.obs[group1].tolist()
elif isinstance(group1, pd.Series):
group1 = group1.tolist()
if isinstance(group2, str):
checkBatch(group2, adata.obs)
group2 = adata.obs[group2].tolist()
elif isinstance(group2, pd.Series):
group2 = group2.tolist()
if len(group1) != len(group2):
raise ValueError(f'different lengths in group1 ({len(group1)}) and group2 ({len(group2)})')
return sklearn.metrics.cluster.adjusted_rand_score(group1, group2)
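#example usage (illustrative only; assumes `adata.obs` contains a 'louvain' clustering and a 'celltype' column):
#nmi_val = nmi(adata, 'louvain', 'celltype', method='arithmetic')
#ari_val = ari(adata, 'louvain', 'celltype')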
def silhouette(adata, group_key, metric='euclidean', embed='X_pca', scale=True):
"""
    Wrapper for the sklearn silhouette score; values range from [-1, 1] with 1 being an ideal fit, 0 indicating overlapping clusters and -1 indicating misclassified cells (rescaled to [0, 1] when `scale=True`)
"""
if embed not in adata.obsm.keys():
print(adata.obsm.keys())
raise KeyError(f'{embed} not in obsm')
asw = sklearn.metrics.silhouette_score(adata.obsm[embed], adata.obs[group_key], metric=metric)
if scale:
asw = (asw + 1)/2
return asw
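#example usage (illustrative only; assumes `adata.obsm` contains 'X_pca' and `adata.obs` a 'celltype' column):
#asw = silhouette(adata, group_key='celltype', embed='X_pca')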
### PC Regression
def pcr_comparison(adata_pre, adata_post, covariate, embed=None, n_comps=50, scale=True, verbose=False):
"""
Compare the effect before and after integration
Return either the difference of variance contribution before and after integration
    or a score between 0 and 1 (`scale=True`) with 0 if the variance contribution hasn't
changed. The larger the score, the more different the variance contributions are before
and after integration.
params:
adata_pre: uncorrected adata
adata_post: integrated adata
embed : if `embed=None`, use the full expression matrix (`adata.X`), otherwise
use the embedding provided in `adata_post.obsm[embed]`
scale: if True, return scaled score
return:
difference of R2Var value of PCR
"""
if embed == 'X_pca':
embed = None
pcr_before = pcr(adata_pre, covariate=covariate, recompute_pca=True,
n_comps=n_comps, verbose=verbose)
pcr_after = pcr(adata_post, covariate=covariate, embed=embed, recompute_pca=True,
n_comps=n_comps, verbose=verbose)
if scale:
score = (pcr_before - pcr_after)/pcr_before
if score < 0:
print("Variance contribution increased after integration!")
print("Setting PCR comparison score to 0.")
score = 0
return score
else:
return pcr_after - pcr_before
def pcr(adata, covariate, embed=None, n_comps=50, recompute_pca=True, verbose=False):
"""
PCR for Adata object
Checks whether to
+ compute PCA on embedding or expression data (set `embed` to name of embedding matrix e.g. `embed='X_emb'`)
+ use existing PCA (only if PCA entry exists)
+ recompute PCA on expression matrix (default)
params:
adata: Anndata object
embed : if `embed=None`, use the full expression matrix (`adata.X`), otherwise
use the embedding provided in `adata_post.obsm[embed]`
n_comps: number of PCs if PCA should be computed
covariate: key for adata.obs column to regress against
return:
R2Var of PCR
"""
checkAdata(adata)
checkBatch(covariate, adata.obs)
if verbose:
print(f"covariate: {covariate}")
batch = adata.obs[covariate]
# use embedding for PCA
if (embed is not None) and (embed in adata.obsm):
if verbose:
print(f"compute PCR on embedding n_comps: {n_comps}")
return pc_regression(adata.obsm[embed], batch, n_comps=n_comps)
# use existing PCA computation
elif (recompute_pca == False) and ('X_pca' in adata.obsm) and ('pca' in adata.uns):
if verbose:
print("using existing PCA")
return pc_regression(adata.obsm['X_pca'], batch, pca_var=adata.uns['pca']['variance'])
# recompute PCA
else:
if verbose:
print(f"compute PCA n_comps: {n_comps}")
return pc_regression(adata.X, batch, n_comps=n_comps)
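#example usage (illustrative only; `adata_pre` is the unintegrated and `adata_post` the integrated object,
#with the integrated embedding stored in adata_post.obsm['X_emb']):
#score = pcr_comparison(adata_pre, adata_post, covariate='batch', embed='X_emb', n_comps=50, scale=True)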
def pc_regression(data, variable, pca_var=None, n_comps=50, svd_solver='arpack', verbose=False):
"""
params:
        data: expression or PCA matrix. Will be assumed to contain PCA values if pca_var is given
        variable: series or list of batch assignments
        n_comps: number of PCA components for computing PCA, only used when pca_var is not given. If no pca_var is given and n_comps=None, compute PCA and don't reduce the data
        pca_var: iterable of variances for `n_comps` components. If `pca_var` is not `None`, it is assumed that the matrix contains PCA values, else PCA is computed
    PCA is only computed if the variance contributions (pca_var) are not given.
"""
if isinstance(data, (np.ndarray, sparse.csr_matrix)):
matrix = data
else:
raise TypeError(f'invalid type: {data.__class__} is not a numpy array or sparse matrix')
# perform PCA if no variance contributions are given
if pca_var is None:
if n_comps is None or n_comps > min(matrix.shape):
n_comps = min(matrix.shape)
if n_comps == min(matrix.shape):
svd_solver = 'full'
if verbose:
print("compute PCA")
pca = sc.tl.pca(matrix, n_comps=n_comps, use_highly_variable=False,
return_info=True, svd_solver=svd_solver, copy=True)
X_pca = pca[0].copy()
pca_var = pca[3].copy()
del pca
else:
X_pca = matrix
n_comps = matrix.shape[1]
## PC Regression
if verbose:
print("fit regression on PCs")
# handle categorical values
if pd.api.types.is_numeric_dtype(variable):
variable = np.array(variable).reshape(-1, 1)
else:
if verbose:
print("one-hot encode categorical values")
variable = pd.get_dummies(variable)
# fit linear model for n_comps PCs
r2 = []
for i in range(n_comps):
pc = X_pca[:, [i]]
lm = sklearn.linear_model.LinearRegression()
lm.fit(variable, pc)
r2_score = np.maximum(0,lm.score(variable, pc))
r2.append(r2_score)
Var = pca_var / sum(pca_var) * 100
R2Var = sum(r2*Var)/100
return R2Var | 20,538 | 37.390654 | 180 | py |
CBA | CBA-main/species/ywb_function.py | import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
def color(value):
digit = list(map(str, range(10))) + list("ABCDEF")
if isinstance(value, tuple):
string = '#'
for i in value:
a1 = i // 16
a2 = i % 16
string += digit[a1] + digit[a2]
return string
elif isinstance(value, str):
a1 = digit.index(value[1]) * 16 + digit.index(value[2])
a2 = digit.index(value[3]) * 16 + digit.index(value[4])
a3 = digit.index(value[5]) * 16 + digit.index(value[6])
return (a1, a2, a3)
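#quick worked example: color((213,94,0)) returns '#D55E00' and color('#D55E00') returns (213, 94, 0)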
cluster_colors=[
color((213,94,0)),
color((0,114,178)),
color((204,121,167)),
color((0,158,115)),
color((86,180,233)),
color((230,159,0)),
color((240,228,66)),
color((0,0,0)),
'#D3D3D3',
'#FF00FF',
'#aec470',
'#b3ee3d',
'#de4726',
'#f69149',
'#f81919',
'#ff49b0',
'#f05556',
'#fadf0b',
'#f8c495',
'#ffc1c1',
'#ffc125',
'#ffc0cb',
'#ffbbff',
'#ffb90f',
'#ffb6c1',
'#ffb5c5',
'#ff83fa',
'#ff8c00',
'#ff4040',
'#ff3030',
'#ff34b3',
'#00fa9a',
'#ca4479',
'#eead0e',
'#ff1493',
'#0ab4e4',
'#1e6a87',
'#800080',
'#00e5ee',
'#c71585',
'#027fd0',
'#004dba',
'#0a9fb4',
'#004b71',
'#285528',
'#2f7449',
'#21b183',
'#3e4198',
'#4e14a6',
'#5dd73d',
'#64a44e',
'#6787d6',
'#6c6b6b',
'#6c6b6b',
'#7759a4',
'#78edff',
'#762a14',
'#9805cc',
'#9b067d',
'#af7efe',
'#a7623d']
def plot_tSNE_clusters(df_tSNE,labels,cluster_colors=None,s=6,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE.loc['tSNE1'], df_tSNE.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[i] for i in labels])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_batchclusters(df_tSNE1,df_tSNE2,labels1,labels2,cluster_colors=None,s=0.8,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE2.loc['tSNE1'], df_tSNE2.loc['tSNE2'],s=s,alpha=0.8,lw=0,c='#D3D3D3')
ax.scatter(df_tSNE1.loc['tSNE1'], df_tSNE1.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[1] for i in labels1])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_sepclusters(df_tSNE1,df_tSNE2,labels1,labels2,cluster_colors=None,s=0.8,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE2.loc['tSNE1'], df_tSNE2.loc['tSNE2'],s=s,alpha=0.8,lw=0,c='#D3D3D3')
ax.scatter(df_tSNE1.loc['tSNE1'], df_tSNE1.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[i] for i in labels1])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_cluster(df_tSNE,labels,cluster_colors=None,s=6,save=False,name=None):
index=[[] for i in range(np.max(labels)+1)]
for i in range(len(labels)):
index[int(labels[i])].append(i)
index=[i for i in index if i!=[]]
for i in range(len(np.unique(labels))):
color=np.array(labels)[index[i]][0]
fig,ax=plt.subplots()
ax.scatter(df_tSNE.loc['tSNE1'], df_tSNE.loc['tSNE2'],c='#D3D3D3',s=s,lw=0)
ax.scatter(df_tSNE.loc['tSNE1'].iloc[index[i]],df_tSNE.loc['tSNE2'].iloc[index[i]],c=[cluster_colors[k] for k in np.array(labels)[index[i]]],s=s,lw=0)
ax.axis('equal')
ax.set_axis_off()
if save == True:
plt.savefig('{}.eps'.format(name+str(color)), dpi=600,format='eps')
def gen_labels(df, model):
if str(type(model)).startswith("<class 'sklearn.cluster"):
cell_labels = dict(zip(df.columns, model.labels_))
label_cells = {}
for l in np.unique(model.labels_):
label_cells[l] = []
for i, label in enumerate(model.labels_):
label_cells[label].append(df.columns[i])
cellID = list(df.columns)
labels = list(model.labels_)
labels_a = model.labels_
elif type(model) == np.ndarray:
cell_labels = dict(zip(df.columns, model))
label_cells = {}
for l in np.unique(model):
label_cells[l] = []
for i, label in enumerate(model):
label_cells[label].append(df.columns[i])
cellID = list(df.columns)
labels = list(model)
labels_a = model
else:
print('Error wrong input type')
return cell_labels, label_cells, cellID, labels, labels_a
def heatmap(correlation_recluster_cell_final,choose_seriestype1,choose_seriestype2,save=False,name=''):
df=pd.DataFrame(correlation_recluster_cell_final)
labels1=np.array(choose_seriestype1)
labels2=np.array(choose_seriestype2)
cell_labels1,label_cells1,cellID1,labels1,labels_a1=gen_labels(df.T,np.array(labels1))
cell_labels2,label_cells2,cellID2,labels2,labels_a2=gen_labels(df,np.array(labels2))
optimal_order=np.unique(np.concatenate([labels1,labels2]))
cl,lc=gen_labels(df,np.array(labels2))[:2]
optimal_sort_cells=sum([lc[i] for i in np.unique(labels2)],[])
optimal_sort_labels=[cl[i] for i in optimal_sort_cells]
fig,axHM=plt.subplots(figsize=(9,5))
df_full=df.copy()
z=df_full.values
z=pd.DataFrame(z, index=df_full.index,columns=df_full.columns)
z=z.loc[:,optimal_sort_cells].values
im=axHM.pcolormesh(z,cmap='viridis',vmax=1)
plt.gca().invert_yaxis()
plt.xlim(xmax=len(labels2))
plt.xticks([])
plt.yticks([])
divider=make_axes_locatable(axHM)
axLabel1=divider.append_axes("top",.3,pad=0,sharex=axHM)
axLabel2=divider.append_axes("left",.3,pad=0,sharex=axHM)
counter2=Counter(labels2)
counter1=Counter(labels1)
pos2=0
pos1=0
for l in optimal_order:
axLabel1.barh(y=0,left=pos2,width=counter2[l],color=cluster_colors[l],linewidth=0.5,edgecolor=cluster_colors[l])
pos2+=counter2[l]
optimal_order=np.flipud(optimal_order)
for l in optimal_order:
axLabel2.bar(x=0,bottom=pos1,height=counter1[l],color=cluster_colors[l],linewidth=50,edgecolor=cluster_colors[l])
pos1+=counter1[l]
axLabel1.set_xlim(xmax=len(labels2))
axLabel1.axis('off')
axLabel2.set_ylim(ymax=len(labels1))
axLabel2.axis('off')
cax=fig.add_axes([.91,0.13,0.01,0.22])
colorbar=fig.colorbar(im,cax=cax,ticks=[0,1])
colorbar.set_ticklabels(['0','max'])
plt.savefig('{}.jpg'.format(name),dpi=600,format='jpg') | 7,512 | 33.782407 | 158 | py |
CBA | CBA-main/species/species_main.py | """
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import kBET
import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
import sklearn.metrics as sm
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from imblearn.over_sampling import SMOTE,ADASYN
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import dendrogram, linkage
#input the data
H_acc=sc.read_mtx('GSE127774_ACC_H_matrix.mtx')
C_acc=sc.read_mtx('GSE127774_ACC_C_matrix.mtx')
H_acc_data=scipy.sparse.csr_matrix(H_acc.X, dtype=np.int8).toarray()
C_acc_data=scipy.sparse.csr_matrix(C_acc.X, dtype=np.int8).toarray()
H_acc_gene=pd.read_csv('GSE127774_ACC_H_genes.csv', header=None)
H_acc_data=pd.DataFrame(data=H_acc_data, index=H_acc_gene[0].values).astype(float)
C_acc_gene=pd.read_csv('GSE127774_ACC_C_genes.csv', header=None)
C_acc_data=pd.DataFrame(data=C_acc_data, index=C_acc_gene[0].values).astype(float)
human_chimpanzee_genecouple=pd.read_csv('human_chimpanzee.csv', header=None)
row=[]
for i in range(human_chimpanzee_genecouple.shape[0]):
if (human_chimpanzee_genecouple.values==human_chimpanzee_genecouple.loc[i][0]).sum()>=2 or (human_chimpanzee_genecouple.values==human_chimpanzee_genecouple.loc[i][1]).sum()>=2:
human_chimpanzee_genecouple.loc[i][0]='0'
human_chimpanzee_genecouple.loc[i][1]='0'
row.append(i)
human_chimpanzee_genecouple_new=human_chimpanzee_genecouple.drop(row)
human_chimpanzee_genecouple_new=pd.DataFrame(human_chimpanzee_genecouple_new.values)
series1=H_acc_data#the human ACC expression matrix loaded above (the name human_expressionlevel was never defined in this script)
series2=C_acc_data#the chimpanzee ACC expression matrix loaded above (the name chimpanzee_expressionlevel was never defined)
gene_couple=human_chimpanzee_genecouple_new
series1_gene=gene_couple[0][1:].values
series2_gene=gene_couple[1][1:].values
#remove homologous gene pairs for which the gene is missing from either species' expression matrix
series1_gene='hg38____'+series1_gene
series2_gene='panTro5_'+series2_gene
series1_gene=list(series1_gene)
series2_gene=list(series2_gene)
for i in range(len(series1_gene)):
if series1_gene[i] not in list(series1.index) or series2_gene[i] not in list(series2.index):
series1_gene[i]=0
series2_gene[i]=0
series1_gene=list(filter(lambda x:x!=0,series1_gene))
series2_gene=list(filter(lambda x:x!=0,series2_gene))
#only choose these genes
series1_choose=series1.loc[series1_gene]
series2_choose=series2.loc[series2_gene]
series1_ann=sc.AnnData((series1_choose.values).T,obs=pd.DataFrame(series1_choose.columns), var=pd.DataFrame(series1_choose.index))
series2_ann=sc.AnnData((series2_choose.values).T,obs=pd.DataFrame(series2_choose.columns), var=pd.DataFrame(series2_choose.index))
RAWseries1=series1_ann.X.T
RAWseries2=series2_ann.X.T
fromname='humanchimpanzee'
pca_dim=20#the number of PCs
clusternumber=1
###############################################################################
anndata1=sc.AnnData(RAWseries1.T)
celluse=np.arange(0,anndata1.shape[0])
anndata1.obs['usecell']=celluse
sc.pp.filter_cells(anndata1,min_genes=20)#we want to keep only a subset of the human cells: my laptop is not powerful, training on this many cells is hard, and the memory is not enough
anndata2=sc.AnnData(RAWseries2.T)
celluse=np.arange(0,anndata2.shape[0])
anndata2.obs['usecell']=celluse
sc.pp.filter_cells(anndata2,min_genes=20)
anndata=anndata1.concatenate(anndata2)
sc.pp.filter_genes(anndata,min_cells=50)
sc.pp.normalize_per_cell(anndata,counts_per_cell_after=1e4)
sc.pp.log1p(anndata)
sc.pp.highly_variable_genes(anndata)
sc.pl.highly_variable_genes(anndata)
anndata=anndata[:,anndata.var['highly_variable']]
sc.tl.pca(anndata,n_comps=pca_dim)
Obtainseries1=(anndata.obsm['X_pca'])[anndata.obs['batch']=='0',:]
Obtainseries2=(anndata.obsm['X_pca'])[anndata.obs['batch']=='1',:]
Obtainseries1=sc.AnnData(Obtainseries1)
Obtainseries2=sc.AnnData(Obtainseries2)
sc.pp.neighbors(Obtainseries1,n_pcs=0)
sc.tl.umap(Obtainseries1)
sc.tl.louvain(Obtainseries1,resolution=1)
sc.pl.umap(Obtainseries1,color='louvain',size=30)
sc.pp.neighbors(Obtainseries2,n_pcs=0)
sc.tl.umap(Obtainseries2)
sc.tl.louvain(Obtainseries2,resolution=1)
sc.pl.umap(Obtainseries2,color='louvain',size=30)
PCAseries1=Obtainseries1.X
PCAseries2=Obtainseries2.X
###############################################################################
recluster1=np.array(list(map(int,Obtainseries1.obs['louvain'])))
recluster2=np.array(list(map(int,Obtainseries2.obs['louvain'])))
###############################################################################
#for i in range(len(np.unique(recluster1))):
# print((np.where(recluster1==i))[0].shape[0])
#for i in range(len(np.unique(recluster2))):
# print((np.where(recluster2==i))[0].shape[0])
#
##for the first batch
#number_cluster1=len(np.unique(recluster1))
#series1_data=np.zeros([0,PCAseries1.shape[1]])
#series1_index=np.zeros([0])
#recluster1plus=np.zeros([0])
#alpha=3#because of the memory limitation of my laptop, I have to retain only 1/3 of the human cells, so I preserve 1/3 of the cells in each louvain cluster; this step is also unsupervised
#for i in range(number_cluster1):
# index=np.where(recluster1==i)[0]
# random.shuffle(index)
# series1_data=np.concatenate([series1_data,(PCAseries1)[index[0::alpha]]])
# series1_index=np.concatenate([series1_index,index[0::alpha]])
# recluster1plus=np.concatenate([recluster1plus,np.zeros([index[0::alpha].shape[0]])+i])
#
##for the second batch
#number_cluster2=len(np.unique(recluster2))
#series2_data=np.zeros([0,PCAseries2.shape[1]])
#series2_index=np.zeros([0])
#recluster2plus=np.zeros([0])
#beta=1#fortunately, we could retain all chimp cells!!!!!
#for i in range(number_cluster2):
# index=np.where(recluster2==i)[0]
# random.shuffle(index)
# series2_data=np.concatenate([series2_data,(PCAseries2)[index[0::beta]]])
# series2_index=np.concatenate([series2_index,index[0::beta]])
# recluster2plus=np.concatenate([recluster2plus,np.zeros([index[0::beta].shape[0]])+i])
#
#sio.savemat('series1_index.mat',{'series1_index':series1_index})
#sio.savemat('series2_index.mat',{'series2_index':series2_index})
#these are the indices of the cells I used (generated by the commented-out subsampling code above)
series1_index=sio.loadmat('series1_index.mat')['series1_index'][0].astype('int')
series2_index=sio.loadmat('series2_index.mat')['series2_index'][0].astype('int')
PCAseries1=PCAseries1[series1_index]
PCAseries2=PCAseries2[series2_index]
recluster1=recluster1[series1_index]
recluster2=recluster2[series2_index]
recluster1=recluster1.astype('int')
recluster2=recluster2.astype('int')
print(recluster1.shape[0])
print(recluster2.shape[0])
###############################################################################
def dis(P,Q,distance_method):
if distance_method==0:
return np.sqrt(np.sum(np.square(P-Q)))
if distance_method==1:
return 1-(np.multiply(P,Q).sum()/(np.sqrt(np.sum(np.square(P)))*np.sqrt(np.sum(np.square(Q)))))
###############################################################################
change=0#flag recording whether the two batches are swapped below, so they can be swapped back later
if len(np.unique(recluster1))<=len(np.unique(recluster2)):
PCAseries1plus=PCAseries2
PCAseries2plus=PCAseries1
recluster1plus=recluster2
recluster2plus=recluster1
change=1
else:
PCAseries1plus=PCAseries1
PCAseries2plus=PCAseries2
recluster1plus=recluster1
recluster2plus=recluster2
###############################################################################
#ok, let's calculate the similarity of cells/clusters
correlation_recluster=np.zeros([len(np.unique(recluster1plus)),len(np.unique(recluster2plus))])
correlation_recluster_cell=np.zeros([recluster1plus.shape[0],recluster2plus.shape[0]])
for i in range(len(np.unique(recluster1plus))):
for j in range(len(np.unique(recluster2plus))):
print(i,j)
index_series1=np.where(recluster1plus==i)[0]
index_series2=np.where(recluster2plus==j)[0]
cell_series1=PCAseries1plus[index_series1,:]
cell_series2=PCAseries2plus[index_series2,:]
mean1=0
for iq in range(cell_series1.shape[0]):
for jq in range(cell_series2.shape[0]):
mean1+=dis(cell_series1[iq,:],cell_series2[jq,:],1)
correlation_recluster[i,j]=mean1/(cell_series1.shape[0]*cell_series2.shape[0])
for ii in range(cell_series1.shape[0]):
for jj in range(cell_series2.shape[0]):
mean2=dis(cell_series1[ii,:],cell_series2[jj,:],0)
correlation_recluster_cell[index_series1[ii],index_series2[jj]]=mean2+0.00001
plt.imshow(correlation_recluster)
plt.imshow(correlation_recluster_cell)
correlation_recluster_div=-np.log10(correlation_recluster)
correlation_recluster_cell_div=-np.log10(correlation_recluster_cell)
correlation_recluster_norm=(correlation_recluster_div-correlation_recluster_div.min())/(correlation_recluster_div.max()-correlation_recluster_div.min())
correlation_recluster_cell_norm=(correlation_recluster_cell_div-correlation_recluster_cell_div.min())/(correlation_recluster_cell_div.max()-correlation_recluster_cell_div.min())
plt.imshow(correlation_recluster_norm)
plt.imshow(correlation_recluster_cell_norm)
###############################################################################
#keep only the best-matching neighbour cluster(s) for each cluster, then build the final cell-level matching matrix
correlation_recluster_select=np.zeros(correlation_recluster_norm.shape)
recluster_mid=np.zeros(recluster1plus.shape)
for kk in range(correlation_recluster_norm.shape[0]):
ind=np.sort(correlation_recluster_norm[kk,:])
select=correlation_recluster_norm[kk,:]<ind[-clusternumber]
select=(select==False)
recluster_mid[recluster1plus==kk]+=int(np.where(select==True)[0])
correlation_recluster_select[kk,:]=correlation_recluster_norm[kk,:]*select
plt.imshow(correlation_recluster_select)
correlation_recluster_cell_final=correlation_recluster_cell*0
for i in range(correlation_recluster_cell_norm.shape[0]):
for j in range(correlation_recluster_cell_norm.shape[1]):
label1=recluster1plus[i]
label2=recluster2plus[j]
mean1=correlation_recluster_select[label1,label2]
mean2=correlation_recluster_cell_norm[i,j]
if mean1==0:
correlation_recluster_cell_final[i,j]=0
else:
correlation_recluster_cell_final[i,j]=mean2
plt.imshow(correlation_recluster_select)
plt.imshow(correlation_recluster_cell_final)
recluster1plus=recluster_mid.astype('int')
np.unique(recluster1plus)
np.unique(recluster2plus)
sort_correlation_recluster_cell_final=correlation_recluster_cell_final[recluster1plus.argsort(),:]
sort_correlation_recluster_cell_final=sort_correlation_recluster_cell_final[:,recluster2plus.argsort()]
heatmap(sort_correlation_recluster_cell_final,recluster1plus,recluster2plus,save=True,name='speciesmatrix')
###############################################################################
if change==1:
PCAseries1=PCAseries2plus
PCAseries2=PCAseries1plus
recluster1=recluster2plus
recluster2=recluster1plus
else:
PCAseries1=PCAseries1plus
PCAseries2=PCAseries2plus
recluster1=recluster1plus
recluster2=recluster2plus
###############################################################################
Obtainseries1plus=sc.AnnData(PCAseries1)
Obtainseries2plus=sc.AnnData(PCAseries2)
sc.pp.neighbors(Obtainseries1plus,n_pcs=0)
sc.tl.umap(Obtainseries1plus)
df=pd.DataFrame(recluster1.astype('int'))
df=pd.Series(np.reshape(df.values,df.values.shape[0]), dtype="category")
Obtainseries1plus.obs['louvain']=df.values
sc.pl.umap(Obtainseries1plus,color='louvain',size=30)
umapdata1=pd.DataFrame(Obtainseries1plus.obsm['X_umap'].T,
index=['tSNE1','tSNE2'])
plot_tSNE_clusters(umapdata1,Obtainseries1plus.obs['louvain'],cluster_colors=cluster_colors,save=False, name=fromname+'louvain')
sc.pp.neighbors(Obtainseries2plus,n_pcs=0)
sc.tl.umap(Obtainseries2plus)
df=pd.DataFrame(recluster2.astype('int'))
df=pd.Series(np.reshape(df.values,df.values.shape[0]), dtype="category")
Obtainseries2plus.obs['louvain']=df.values
sc.pl.umap(Obtainseries2plus,color='louvain',size=30)
umapdata2=pd.DataFrame(Obtainseries2plus.obsm['X_umap'].T,
index=['tSNE1','tSNE2'])
plot_tSNE_clusters(umapdata2,Obtainseries2plus.obs['louvain'],cluster_colors=cluster_colors,save=False, name=fromname+'louvain')
###############################################################################
#Keras feeds cells to the network in a random order, so inside the model there is no direct way to
#pair a cell with its similarity scores or with its cluster label. As a workaround, every input row
#carries everything the model needs: the PCA features, a one-hot cell index, the cell-to-cell
#similarity row, and a one-hot cluster label. This costs memory and is clumsy to read, but it works.
PCAseries=np.concatenate([PCAseries1,PCAseries2])
PCAseries=preprocessing.StandardScaler().fit_transform(PCAseries)
PCAseries=preprocessing.MinMaxScaler().fit_transform(PCAseries)
PCAseries1=PCAseries[0:PCAseries1.shape[0]]
PCAseries2=PCAseries[PCAseries1.shape[0]:]
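#a sketch of the per-cell input row built below (n1/n2 = cells in batch 1/2, d = PCA dimension,
#c = max(recluster1.max(),recluster2.max())+1 cluster labels):
# columns [0,d) -> PCA features of the cell
# columns [d,d+n_self) -> one-hot index of the cell inside its own batch
# columns [d+n_self,d+n_self+n_other) -> its row/column of correlation_recluster_cell_final
# columns [d+n_self+n_other,end) -> one-hot recluster label (length c)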
x_input1=np.zeros([PCAseries1.shape[0],PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]+max(recluster1.max(),recluster2.max())+1])
x_input2=np.zeros([PCAseries2.shape[0],PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]+max(recluster1.max(),recluster2.max())+1])
for i in range(PCAseries1.shape[0]):
print(i)
x_input1[i,0:PCAseries1.shape[1]]=PCAseries1[i,:]
x_input1[i,PCAseries1.shape[1]:PCAseries1.shape[1]+PCAseries1.shape[0]]=K.utils.np_utils.to_categorical(i,PCAseries1.shape[0])
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]=correlation_recluster_cell_final[i,:]
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]:]=K.utils.np_utils.to_categorical(recluster1[i],max(recluster1.max(),recluster2.max())+1)
for j in range(PCAseries2.shape[0]):
print(j)
x_input2[j,0:PCAseries2.shape[1]]=PCAseries2[j,:]
x_input2[j,PCAseries2.shape[1]:PCAseries2.shape[1]+PCAseries2.shape[0]]=K.utils.np_utils.to_categorical(j,PCAseries2.shape[0])
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]:PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]]=correlation_recluster_cell_final[:,j]
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]:]=K.utils.np_utils.to_categorical(recluster2[j],max(recluster1.max(),recluster2.max())+1)
###############################################################################
#the two batches must contain the same number of cells for pairwise training, so the smaller batch is tiled (repeated) and then truncated to match the larger one
if x_input1.shape[0]>=x_input2.shape[0]:
x_test1=x_input1
y_test1=recluster1
y_testreal1=choose_seriestype1
repeat_num=int(np.ceil(x_input1.shape[0]/x_input2.shape[0]))
x_test2=np.tile(x_input2,(repeat_num,1))
y_test2=np.tile(recluster2,repeat_num)
y_testreal2=np.tile(choose_seriestype2,repeat_num)
x_test2=x_test2[0:x_test1.shape[0],:]
y_test2=y_test2[0:x_test1.shape[0]]
y_testreal2=y_testreal2[0:x_test1.shape[0]]
elif x_input1.shape[0]<x_input2.shape[0]:
x_test2=x_input2
y_test2=recluster2
y_testreal2=choose_seriestype2
repeat_num=int(np.ceil(x_input2.shape[0]/x_input1.shape[0]))
x_test1=np.tile(x_input1,(repeat_num,1))
y_test1=np.tile(recluster1,repeat_num)
y_testreal1=np.tile(choose_seriestype1,repeat_num)
x_test1=x_test1[0:x_test2.shape[0],:]
y_test1=y_test1[0:x_test2.shape[0]]
y_testreal1=y_testreal1[0:x_test2.shape[0]]
###############################################################################
def choose_info(x,info_number):
return x[:,0:info_number]
def choose_index(x,info_number,x_samplenumber):
return x[:,info_number:info_number+x_samplenumber]
def choose_corrlation(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber:info_number+x_samplenumber+cor_number]
def choose_relabel(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber+cor_number:]
def slic(input_):
return input_[:,0]
###############################################################################
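#rough shape of the network below: four inputs carry two cells from batch 1 (input1,input3) and
#two from batch 2 (input2,input4); Lambda layers split each row back into its blocks (Data = PCA
#features, Index = one-hot cell id, Cor = similarity row, Relabel = one-hot cluster label); each
#(batch1,batch2) pair is concatenated and encoded into the x*_mid* embeddings, a shared
#layer_classify head predicts the recluster label, and output1/output2 reconstruct the
#concatenated PCA features of the two cells from each batch (autoencoder-style)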
activation='relu'
info_number=PCAseries1.shape[1]
layer=PCAseries1.shape[1]
input1=K.Input(shape=(x_test1.shape[1],))#line1 species1
input2=K.Input(shape=(x_test2.shape[1],))#line1 species2
input3=K.Input(shape=(x_test1.shape[1],))#line2 species1
input4=K.Input(shape=(x_test2.shape[1],))#line2 species2
Data1=Lambda(choose_info,arguments={'info_number':info_number})(input1)
Data2=Lambda(choose_info,arguments={'info_number':info_number})(input2)
Data3=Lambda(choose_info,arguments={'info_number':info_number})(input3)
Data4=Lambda(choose_info,arguments={'info_number':info_number})(input4)
Index1=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input1)
Index2=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input2)
Index3=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input3)
Index4=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input4)
Cor1=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Cor2=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Cor3=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Cor4=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
Relabel1=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Relabel2=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Relabel3=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Relabel4=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
x_concat1=layers.concatenate([Data1,Data3])#batch1
x_concat2=layers.concatenate([Data2,Data4])#batch2
x1=layers.Dense(layer,activation=activation)(Data1)
x2=layers.Dense(layer,activation=activation)(Data2)
x3=layers.Dense(layer,activation=activation)(Data3)
x4=layers.Dense(layer,activation=activation)(Data4)
x1=layers.BatchNormalization()(x1)
x2=layers.BatchNormalization()(x2)
x3=layers.BatchNormalization()(x3)
x4=layers.BatchNormalization()(x4)
x1_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x2_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x1_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x2_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x1_mid1=layers.BatchNormalization()(x1_mid1)
x2_mid1=layers.BatchNormalization()(x2_mid1)
x1_mid2=layers.BatchNormalization()(x1_mid2)
x2_mid2=layers.BatchNormalization()(x2_mid2)
layer_classify=layers.Dense(max(recluster1.max(),recluster2.max())+1,activation='relu')
y1=layer_classify(x1_mid1)
y2=layer_classify(x2_mid1)
y3=layer_classify(x1_mid2)
y4=layer_classify(x2_mid2)
x1=layers.concatenate([x1_mid1,x1_mid2])#batch1
x2=layers.concatenate([x2_mid1,x2_mid2])#batch2
output1=layers.Dense(2*layer,activation=activation)(x1)
output2=layers.Dense(2*layer,activation=activation)(x2)
output1=layers.BatchNormalization()(output1)
output2=layers.BatchNormalization()(output2)
def loss_weight(input_):
return tf.reduce_sum(tf.multiply(input_[0],input_[1]),axis=-1)
def MSE(input_):
return tf.reduce_mean(tf.square(input_[0]-input_[1]),axis=-1)
def multi_classification_loss(input_):
return tf.keras.losses.categorical_crossentropy(input_[0],input_[1])
AE_loss_1=Lambda(MSE)([output1,x_concat1])
AE_loss_2=Lambda(MSE)([output2,x_concat2])
cls_loss_1=Lambda(MSE)([y1,Relabel1])
cls_loss_2=Lambda(MSE)([y2,Relabel2])
cls_loss_3=Lambda(MSE)([y3,Relabel3])
cls_loss_4=Lambda(MSE)([y4,Relabel4])
interweight1=Lambda(loss_weight)([Index1,Cor2])
interweight4=Lambda(loss_weight)([Index3,Cor4])
interloss_1=Lambda(MSE)([x1_mid1,x2_mid1])
interloss_4=Lambda(MSE)([x1_mid2,x2_mid2])
interloss_1=layers.Multiply()([interweight1,interloss_1])
interloss_4=layers.Multiply()([interweight4,interloss_4])
intraweight1=Lambda(loss_weight)([Relabel1,Relabel3])
intraweight2=Lambda(loss_weight)([Relabel2,Relabel4])
intraloss_1=Lambda(MSE)([x1_mid1,x1_mid2])
intraloss_2=Lambda(MSE)([x2_mid1,x2_mid2])
intraloss_1=layers.Multiply()([intraweight1,intraloss_1])
intraloss_2=layers.Multiply()([intraweight2,intraloss_2])
Loss1=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss1')([AE_loss_1,AE_loss_2])
Loss2=Lambda(lambda x:(x[0]*1+x[1]*1+x[2]*1+x[3]*1)/4,name='loss2')([cls_loss_1,cls_loss_2,cls_loss_3,cls_loss_4])
Loss3=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss3')([interloss_1,interloss_4])
Loss4=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss4')([intraloss_1,intraloss_2])
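#the four outputs above are the losses themselves, computed inside the graph with Lambda layers:
#Loss1 = autoencoder reconstruction loss, Loss2 = cluster-label loss (MSE against the one-hot
#relabel block), Loss3 = cross-batch alignment loss weighted by the similarity of each cell pair,
#Loss4 = within-batch consistency loss gated by whether the two cells share a label; the model is
#therefore compiled below with identity losses (lambda y_true,y_pred: y_pred) and dummy zero targets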
###############################################################################
network_train=K.models.Model([input1,input2,input3,input4],
[Loss1,Loss2,Loss3,Loss4])
network_train.summary()
###############################################################################
intra_data1={}
inter_data1={}
for i in range(x_test1.shape[0]):
label_i=y_test1[i]
intra_data1[i]=np.where(y_test1==label_i)
inter_data1[i]=np.where(y_test1!=label_i)
intra_data2={}
inter_data2={}
for i in range(x_test2.shape[0]):
label_i=y_test2[i]
intra_data2[i]=np.where(y_test2==label_i)
inter_data2[i]=np.where(y_test2!=label_i)
###############################################################################
batch_size=512
train_loss=[]
loss1=[]
loss2=[]
loss3=[]
loss4=[]
###############################################################################
iterations=1
lr=5e-3
optimizer=K.optimizers.Adam(lr=lr)
loss_weights=[1,1,1,1]
network_train.compile(optimizer=optimizer,
loss=[lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred],
loss_weights=loss_weights)
for i in range(iterations):
x_input1_series1_train=np.zeros(x_test1.shape)
index0=np.zeros(x_input1_series1_train.shape[0])
x_input1_series2_train=np.zeros(x_test2.shape)
index1=np.zeros(x_input1_series2_train.shape[0])
x_input2_series1_train=np.zeros(x_test1.shape)
index2=np.zeros(x_input2_series1_train.shape[0])
x_input2_series2_train=np.zeros(x_test2.shape)
index3=np.zeros(x_input2_series2_train.shape[0])
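    #sampling strategy for each anchor cell: index0 picks a random batch-1 cell; index1 picks a
    #batch-2 cell that is a likely match (similarity entry > 0) half of the time and a non-match
    #otherwise; index2/index3 pick same-cluster or different-cluster partners inside each batch
    #with probability 0.5, so the pairwise losses see both positive and negative pairs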
for ii in range(x_test1.shape[0]):
index0[ii]=random.choice(range(x_test1.shape[0]))
rand1=random.random()
in_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]>0)[0]
out_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]<=0)[0]
if rand1>=0.5:
index1[ii]=random.choice(in_rand1)
elif rand1<0.5:
index1[ii]=random.choice(out_rand1)
rand2=random.random()
if rand2>=0.5:
index2[ii]=random.choice(intra_data1[index0[ii]][0])
elif rand2<0.5:
index2[ii]=random.choice(inter_data1[index0[ii]][0])
rand3=random.random()
if rand3>=0.5:
index3[ii]=random.choice(intra_data2[index1[ii]][0])
elif rand3<0.5:
index3[ii]=random.choice(inter_data2[index1[ii]][0])
train1=x_test1[index0.astype('int'),:]
train2=x_test2[index1.astype('int'),:]
train3=x_test1[index2.astype('int'),:]
train4=x_test2[index3.astype('int'),:]
Train=network_train.fit([train1,train2,train3,train4],
[np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1])],
batch_size=batch_size,shuffle=True)
train_loss.append(Train.history['loss'][:][0])
loss1.append(Train.history['loss1_loss'][:][0]*loss_weights[0])
loss2.append(Train.history['loss2_loss'][:][0]*loss_weights[1])
loss3.append(Train.history['loss3_loss'][:][0]*loss_weights[2])
loss4.append(Train.history['loss4_loss'][:][0]*loss_weights[3])
print(i,'loss=',
Train.history['loss'][:][0],
Train.history['loss1_loss'][:][0]*loss_weights[0],
Train.history['loss2_loss'][:][0]*loss_weights[1],
Train.history['loss3_loss'][:][0]*loss_weights[2],
Train.history['loss4_loss'][:][0]*loss_weights[3])
if i>10:
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
        #y-limit: largest value of any loss curve over the last 10 epochs
        plt.ylim(0,max(max(train_loss[-10:]),max(loss1[-10:]),
                       max(loss2[-10:]),max(loss3[-10:]),
                       max(loss4[-10:])))
plt.xlim(len(train_loss)-10-10,len(train_loss))
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
else:
plt.plot(train_loss[10:])
plt.plot(loss1[10:])
plt.plot(loss2[10:])
plt.plot(loss3[10:])
plt.plot(loss4[10:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
###############################################################################
network_train.load_weights('speciesAC.h5')
network_predict=K.models.Model([input1,input2,input3,input4],
[x1_mid1,x2_mid1,x1_mid2,x2_mid2])
[low_dim1,low_dim2,low_dim3,low_dim4]=network_predict.predict([x_test1,x_test2,x_test1,x_test2])
low_dim1=low_dim1[0:x_input1.shape[0]]
low_dim2=low_dim2[0:x_input2.shape[0]]
low_dim3=low_dim3[0:x_input1.shape[0]]
low_dim4=low_dim4[0:x_input2.shape[0]]
y_recluster_no1=recluster1[0:x_input1.shape[0]]
y_recluster_no2=recluster2[0:x_input2.shape[0]]
###############################################################################
total_recluster_type=np.concatenate([y_recluster_no1,y_recluster_no2])
###############################################################################
series1=sc.AnnData(low_dim1)
series2=sc.AnnData(low_dim2)
mergedata=series1.concatenate(series2)
mergedata.obsm['NN']=mergedata.X
sc.pp.neighbors(mergedata,n_pcs=0)
sc.tl.louvain(mergedata)
sc.tl.leiden(mergedata)
sc.tl.umap(mergedata)
sc.pl.umap(mergedata,color='louvain',size=30)
sc.pl.umap(mergedata,color='leiden',size=30)
sc.pl.umap(mergedata,color='batch',size=30)
type_louvain=mergedata.obs['louvain']
type_leiden=mergedata.obs['leiden']
type_batch=mergedata.obs['batch']
###############################################################################
umapdata=pd.DataFrame(mergedata.obsm['X_umap'].T,index=['tSNE1','tSNE2'])
umapdata1=pd.DataFrame(mergedata.obsm['X_umap'][0:PCAseries1.shape[0],:].T,index=['tSNE1','tSNE2'])
umapdata2=pd.DataFrame(mergedata.obsm['X_umap'][PCAseries1.shape[0]:,:].T,index=['tSNE1','tSNE2'])
###############################################################################
fromname='一次审核之后的结果/figure/speciesCBA_'#output prefix; the leading folder name is Chinese for roughly "results after the first review"
plot_tSNE_sepclusters(umapdata1,umapdata2,y_recluster_noSMOTE1*0,y_recluster_noSMOTE2*0+1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label1')
plot_tSNE_sepclusters(umapdata2,umapdata1,y_recluster_noSMOTE2*0+1,y_recluster_noSMOTE1*0,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label2')
plot_tSNE_clusters(umapdata,list(map(int,np.concatenate([y_recluster_noSMOTE1*0,y_recluster_noSMOTE2*0+1]))),cluster_colors=cluster_colors,save=False, name=fromname+'batch')
plot_tSNE_clusters(umapdata,list(map(int,type_louvain)), cluster_colors=cluster_colors,save=False, name=fromname+'louvain') | 29,319 | 43.969325 | 198 | py |
ColBERT | ColBERT-master/setup.py | import setuptools
with open('README.md', 'r') as f:
long_description = f.read()
setuptools.setup(
name='ColBERT',
version='0.2.0',
author='Omar Khattab',
author_email='[email protected]',
description="Efficient and Effective Passage Search via Contextualized Late Interaction over BERT",
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/stanford-futuredata/ColBERT',
packages=setuptools.find_packages(),
python_requires='>=3.6',
)
| 538 | 28.944444 | 103 | py |
ColBERT | ColBERT-master/utility/supervision/triples.py | """
Example: --positives 5,50 1,1000 ~~> best-5 (in top-50) + best-1 (in top-1000)
"""
import os
import sys
import git
import tqdm
import ujson
import random
from argparse import ArgumentParser
from colbert.utils.utils import print_message, load_ranking, groupby_first_item, create_directory
from utility.utils.save_metadata import save_metadata
MAX_NUM_TRIPLES = 40_000_000
def sample_negatives(negatives, num_sampled, biased=None):
assert biased in [None, 100, 200], "NOTE: We bias 50% from the top-200 negatives, if there are twice or more."
num_sampled = min(len(negatives), num_sampled)
if biased and num_sampled < len(negatives):
assert num_sampled % 2 == 0, num_sampled
num_sampled_top100 = num_sampled // 2
num_sampled_rest = num_sampled - num_sampled_top100
oversampled, undersampled = negatives[:biased], negatives[biased:]
if len(oversampled) < len(undersampled):
return random.sample(oversampled, num_sampled_top100) + random.sample(undersampled, num_sampled_rest)
return random.sample(negatives, num_sampled)
def sample_for_query(qid, ranking, args_positives, depth, permissive, biased):
"""
Requires that the ranks are sorted per qid.
"""
positives, negatives, triples = [], [], []
for pid, rank, *_, label in ranking:
assert rank >= 1, f"ranks should start at 1 \t\t got rank = {rank}"
assert label in [0, 1]
if rank > depth:
break
if label:
take_this_positive = any(rank <= maxDepth and len(positives) < maxBest for maxBest, maxDepth in args_positives)
if take_this_positive:
positives.append((pid, 0))
elif permissive:
                positives.append((pid, rank))  # permissive positive: paired later with a few negatives ranked below it
else:
negatives.append(pid)
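    # For each positive: a strict positive (neg_start == 0) is paired with up to 100 sampled
    # negatives (optionally biased toward the top of the ranking), while a permissive positive
    # is paired with only a few negatives drawn from passages ranked below it.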
for pos, neg_start in positives:
num_sampled = 100 if neg_start == 0 else 5
negatives_ = negatives[neg_start:]
biased_ = biased if neg_start == 0 else None
for neg in sample_negatives(negatives_, num_sampled, biased=biased_):
triples.append((qid, pos, neg))
return triples
def main(args):
try:
rankings = load_ranking(args.ranking, types=[int, int, int, float, int])
except:
rankings = load_ranking(args.ranking, types=[int, int, int, int])
print_message("#> Group by QID")
qid2rankings = groupby_first_item(tqdm.tqdm(rankings))
Triples = []
NonEmptyQIDs = 0
for processing_idx, qid in enumerate(qid2rankings):
l = sample_for_query(qid, qid2rankings[qid], args.positives, args.depth, args.permissive, args.biased)
NonEmptyQIDs += (len(l) > 0)
Triples.extend(l)
if processing_idx % (10_000) == 0:
print_message(f"#> Done with {processing_idx+1} questions!\t\t "
                          f"{str(len(Triples) / 1000)}k triples for {NonEmptyQIDs} unique QIDs.")
print_message(f"#> Sub-sample the triples (if > {MAX_NUM_TRIPLES})..")
print_message(f"#> len(Triples) = {len(Triples)}")
if len(Triples) > MAX_NUM_TRIPLES:
Triples = random.sample(Triples, MAX_NUM_TRIPLES)
### Prepare the triples ###
print_message("#> Shuffling the triples...")
random.shuffle(Triples)
print_message("#> Writing {}M examples to file.".format(len(Triples) / 1000.0 / 1000.0))
with open(args.output, 'w') as f:
for example in Triples:
ujson.dump(example, f)
f.write('\n')
save_metadata(f'{args.output}.meta', args)
print('\n\n', args, '\n\n')
print(args.output)
print_message("#> Done.")
if __name__ == "__main__":
parser = ArgumentParser(description='Create training triples from ranked list.')
# Input / Output Arguments
parser.add_argument('--ranking', dest='ranking', required=True, type=str)
parser.add_argument('--output', dest='output', required=True, type=str)
# Weak Supervision Arguments.
parser.add_argument('--positives', dest='positives', required=True, nargs='+')
parser.add_argument('--depth', dest='depth', required=True, type=int) # for negatives
parser.add_argument('--permissive', dest='permissive', default=False, action='store_true')
# parser.add_argument('--biased', dest='biased', default=False, action='store_true')
parser.add_argument('--biased', dest='biased', default=None, type=int)
parser.add_argument('--seed', dest='seed', required=False, default=12345, type=int)
args = parser.parse_args()
random.seed(args.seed)
assert not os.path.exists(args.output), args.output
args.positives = [list(map(int, configuration.split(','))) for configuration in args.positives]
assert all(len(x) == 2 for x in args.positives)
assert all(maxBest <= maxDepth for maxBest, maxDepth in args.positives), args.positives
create_directory(os.path.dirname(args.output))
assert args.biased in [None, 100, 200]
main(args)
| 5,050 | 32.450331 | 123 | py |
ColBERT | ColBERT-master/utility/supervision/self_training.py | import os
import sys
import git
import tqdm
import ujson
import random
from argparse import ArgumentParser
from colbert.utils.utils import print_message, load_ranking, groupby_first_item
MAX_NUM_TRIPLES = 40_000_000
def sample_negatives(negatives, num_sampled, biased=False):
num_sampled = min(len(negatives), num_sampled)
if biased:
assert num_sampled % 2 == 0
num_sampled_top100 = num_sampled // 2
num_sampled_rest = num_sampled - num_sampled_top100
return random.sample(negatives[:100], num_sampled_top100) + random.sample(negatives[100:], num_sampled_rest)
return random.sample(negatives, num_sampled)
def sample_for_query(qid, ranking, npositives, depth_positive, depth_negative, cutoff_negative):
"""
Requires that the ranks are sorted per qid.
"""
assert npositives <= depth_positive < cutoff_negative < depth_negative
positives, negatives, triples = [], [], []
for pid, rank, *_ in ranking:
assert rank >= 1, f"ranks should start at 1 \t\t got rank = {rank}"
if rank > depth_negative:
break
if rank <= depth_positive:
positives.append(pid)
elif rank > cutoff_negative:
negatives.append(pid)
num_sampled = 100
for neg in sample_negatives(negatives, num_sampled):
positives_ = random.sample(positives, npositives)
positives_ = positives_[0] if npositives == 1 else positives_
triples.append((qid, positives_, neg))
return triples
def main(args):
rankings = load_ranking(args.ranking, types=[int, int, int, float, int])
print_message("#> Group by QID")
qid2rankings = groupby_first_item(tqdm.tqdm(rankings))
Triples = []
NonEmptyQIDs = 0
for processing_idx, qid in enumerate(qid2rankings):
l = sample_for_query(qid, qid2rankings[qid], args.positives, args.depth_positive, args.depth_negative, args.cutoff_negative)
NonEmptyQIDs += (len(l) > 0)
Triples.extend(l)
if processing_idx % (10_000) == 0:
print_message(f"#> Done with {processing_idx+1} questions!\t\t "
                          f"{str(len(Triples) / 1000)}k triples for {NonEmptyQIDs} unique QIDs.")
print_message(f"#> Sub-sample the triples (if > {MAX_NUM_TRIPLES})..")
print_message(f"#> len(Triples) = {len(Triples)}")
if len(Triples) > MAX_NUM_TRIPLES:
Triples = random.sample(Triples, MAX_NUM_TRIPLES)
### Prepare the triples ###
print_message("#> Shuffling the triples...")
random.shuffle(Triples)
print_message("#> Writing {}M examples to file.".format(len(Triples) / 1000.0 / 1000.0))
with open(args.output, 'w') as f:
for example in Triples:
ujson.dump(example, f)
f.write('\n')
with open(f'{args.output}.meta', 'w') as f:
args.cmd = ' '.join(sys.argv)
args.git_hash = git.Repo(search_parent_directories=True).head.object.hexsha
ujson.dump(args.__dict__, f, indent=4)
f.write('\n')
print('\n\n', args, '\n\n')
print(args.output)
print_message("#> Done.")
if __name__ == "__main__":
random.seed(12345)
parser = ArgumentParser(description='Create training triples from ranked list.')
# Input / Output Arguments
parser.add_argument('--ranking', dest='ranking', required=True, type=str)
parser.add_argument('--output', dest='output', required=True, type=str)
# Weak Supervision Arguments.
parser.add_argument('--positives', dest='positives', required=True, type=int)
parser.add_argument('--depth+', dest='depth_positive', required=True, type=int)
parser.add_argument('--depth-', dest='depth_negative', required=True, type=int)
parser.add_argument('--cutoff-', dest='cutoff_negative', required=True, type=int)
args = parser.parse_args()
assert not os.path.exists(args.output), args.output
main(args)
| 3,924 | 30.653226 | 132 | py |
ColBERT | ColBERT-master/utility/rankings/split_by_offset.py | """
Split the ranked lists produced after retrieval with a merged query set.
Assumes the k-th query set (1-indexed, in --names order) had its qids offset by k * gap before
retrieval: each line is routed to the matching split and the original qid is recovered as qid % gap.
"""
import os
import random
from argparse import ArgumentParser
def main(args):
output_paths = ['{}.{}'.format(args.ranking, split) for split in args.names]
assert all(not os.path.exists(path) for path in output_paths), output_paths
output_files = [open(path, 'w') for path in output_paths]
with open(args.ranking) as f:
for line in f:
qid, pid, rank, *other = line.strip().split('\t')
qid = int(qid)
split_output_path = output_files[qid // args.gap - 1]
qid = qid % args.gap
split_output_path.write('\t'.join([str(x) for x in [qid, pid, rank, *other]]) + '\n')
print(f.name)
_ = [f.close() for f in output_files]
print("#> Done!")
if __name__ == "__main__":
random.seed(12345)
parser = ArgumentParser(description='Subsample the dev set.')
parser.add_argument('--ranking', dest='ranking', required=True)
parser.add_argument('--names', dest='names', required=False, default=['train', 'dev', 'test'], type=str, nargs='+') # order matters!
parser.add_argument('--gap', dest='gap', required=False, default=1_000_000_000, type=int) # larger than any individual query set
args = parser.parse_args()
main(args)
| 1,334 | 28.666667 | 137 | py |
ColBERT | ColBERT-master/utility/rankings/dev_subsample.py | import os
import ujson
import random
from argparse import ArgumentParser
from colbert.utils.utils import print_message, create_directory, load_ranking, groupby_first_item
from utility.utils.qa_loaders import load_qas_
def main(args):
print_message("#> Loading all..")
qas = load_qas_(args.qas)
rankings = load_ranking(args.ranking)
qid2rankings = groupby_first_item(rankings)
print_message("#> Subsampling all..")
qas_sample = random.sample(qas, args.sample)
with open(args.output, 'w') as f:
for qid, *_ in qas_sample:
for items in qid2rankings[qid]:
items = [qid] + items
line = '\t'.join(map(str, items)) + '\n'
f.write(line)
print('\n\n')
print(args.output)
print("#> Done.")
if __name__ == "__main__":
random.seed(12345)
parser = ArgumentParser(description='Subsample the dev set.')
parser.add_argument('--qas', dest='qas', required=True, type=str)
parser.add_argument('--ranking', dest='ranking', required=True)
parser.add_argument('--output', dest='output', required=True)
parser.add_argument('--sample', dest='sample', default=1500, type=int)
args = parser.parse_args()
assert not os.path.exists(args.output), args.output
create_directory(os.path.dirname(args.output))
main(args)
| 1,350 | 27.145833 | 97 | py |
ColBERT | ColBERT-master/utility/rankings/merge.py | """
Merge two or more ranking files, ordering each query's passages by score.
"""
import os
import tqdm
from argparse import ArgumentParser
from collections import defaultdict
from colbert.utils.utils import print_message, file_tqdm
def main(args):
Rankings = defaultdict(list)
for path in args.input:
print_message(f"#> Loading the rankings in {path} ..")
with open(path) as f:
for line in file_tqdm(f):
qid, pid, rank, score = line.strip().split('\t')
qid, pid, rank = map(int, [qid, pid, rank])
score = float(score)
Rankings[qid].append((score, rank, pid))
with open(args.output, 'w') as f:
print_message(f"#> Writing the output rankings to {args.output} ..")
for qid in tqdm.tqdm(Rankings):
ranking = sorted(Rankings[qid], reverse=True)
for rank, (score, original_rank, pid) in enumerate(ranking):
rank = rank + 1 # 1-indexed
if (args.depth > 0) and (rank > args.depth):
break
line = [qid, pid, rank, score]
line = '\t'.join(map(str, line)) + '\n'
f.write(line)
if __name__ == "__main__":
parser = ArgumentParser(description="merge_rankings.")
# Input Arguments.
parser.add_argument('--input', dest='input', required=True, nargs='+')
parser.add_argument('--output', dest='output', required=True, type=str)
parser.add_argument('--depth', dest='depth', required=True, type=int)
args = parser.parse_args()
assert not os.path.exists(args.output), args.output
main(args)
| 1,640 | 27.293103 | 76 | py |
ColBERT | ColBERT-master/utility/rankings/tune.py | import os
import ujson
import random
from argparse import ArgumentParser
from colbert.utils.utils import print_message, create_directory
from utility.utils.save_metadata import save_metadata
def main(args):
AllMetrics = {}
Scores = {}
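    # args.metric is a dotted key path into each metrics JSON (e.g., "success.20" reads
    # metrics["success"]["20"]); the run with the highest value wins and its checkpoint
    # path is written to args.output.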
for path in args.paths:
with open(path) as f:
metric = ujson.load(f)
AllMetrics[path] = metric
for k in args.metric:
metric = metric[k]
assert type(metric) is float
Scores[path] = metric
MaxKey = max(Scores, key=Scores.get)
MaxCKPT = int(MaxKey.split('/')[-2].split('.')[-1])
MaxARGS = os.path.join(os.path.dirname(MaxKey), 'logs', 'args.json')
with open(MaxARGS) as f:
logs = ujson.load(f)
MaxCHECKPOINT = logs['checkpoint']
assert MaxCHECKPOINT.endswith(f'colbert-{MaxCKPT}.dnn'), (MaxCHECKPOINT, MaxCKPT)
with open(args.output, 'w') as f:
f.write(MaxCHECKPOINT)
args.Scores = Scores
args.AllMetrics = AllMetrics
save_metadata(f'{args.output}.meta', args)
print('\n\n', args, '\n\n')
print(args.output)
print_message("#> Done.")
if __name__ == "__main__":
random.seed(12345)
parser = ArgumentParser(description='.')
# Input / Output Arguments
parser.add_argument('--metric', dest='metric', required=True, type=str) # e.g., success.20
parser.add_argument('--paths', dest='paths', required=True, type=str, nargs='+')
parser.add_argument('--output', dest='output', required=True, type=str)
args = parser.parse_args()
args.metric = args.metric.split('.')
assert not os.path.exists(args.output), args.output
create_directory(os.path.dirname(args.output))
main(args)
| 1,741 | 25 | 95 | py |
ColBERT | ColBERT-master/utility/rankings/split_by_queries.py | import os
import sys
import tqdm
import ujson
import random
from argparse import ArgumentParser
from collections import OrderedDict
from colbert.utils.utils import print_message, file_tqdm
def main(args):
qid_to_file_idx = {}
for qrels_idx, qrels in enumerate(args.all_queries):
with open(qrels) as f:
for line in f:
qid, *_ = line.strip().split('\t')
qid = int(qid)
assert qid_to_file_idx.get(qid, qrels_idx) == qrels_idx, (qid, qrels_idx)
qid_to_file_idx[qid] = qrels_idx
all_outputs_paths = [f'{args.ranking}.{idx}' for idx in range(len(args.all_queries))]
assert all(not os.path.exists(path) for path in all_outputs_paths)
all_outputs = [open(path, 'w') for path in all_outputs_paths]
with open(args.ranking) as f:
print_message(f"#> Loading ranked lists from {f.name} ..")
last_file_idx = -1
for line in file_tqdm(f):
qid, *_ = line.strip().split('\t')
file_idx = qid_to_file_idx[int(qid)]
if file_idx != last_file_idx:
print_message(f"#> Switched to file #{file_idx} at {all_outputs[file_idx].name}")
last_file_idx = file_idx
all_outputs[file_idx].write(line)
print()
for f in all_outputs:
print(f.name)
f.close()
print("#> Done!")
if __name__ == "__main__":
random.seed(12345)
parser = ArgumentParser(description='.')
# Input Arguments
parser.add_argument('--ranking', dest='ranking', required=True, type=str)
parser.add_argument('--all-queries', dest='all_queries', required=True, type=str, nargs='+')
args = parser.parse_args()
main(args)
| 1,736 | 24.544118 | 97 | py |
ColBERT | ColBERT-master/utility/preprocess/docs2passages.py | """
Divide a document collection into N-word/token passage spans (with wrap-around for last passage).
"""
import os
import math
import ujson
import random
from multiprocessing import Pool
from argparse import ArgumentParser
from colbert.utils.utils import print_message
Format1 = 'docid,text' # MS MARCO Passages
Format2 = 'docid,text,title' # DPR Wikipedia
Format3 = 'docid,url,title,text' # MS MARCO Documents
def process_page(inp):
"""
Wraps around if we split: make sure last passage isn't too short.
This is meant to be similar to the DPR preprocessing.
"""
(nwords, overlap, tokenizer), (title_idx, docid, title, url, content) = inp
if tokenizer is None:
words = content.split()
else:
words = tokenizer.tokenize(content)
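    # Wrap-around: the token list is doubled so the final window can run past the end of the
    # document and borrow tokens from the beginning, keeping every passage exactly nwords long
    # (e.g., 250 tokens with nwords=100, overlap=0 yield passages [0:100], [100:200], and
    # [200:250]+[0:50] instead of a short 50-token tail).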
words_ = (words + words) if len(words) > nwords else words
passages = [words_[offset:offset + nwords] for offset in range(0, len(words) - overlap, nwords - overlap)]
assert all(len(psg) in [len(words), nwords] for psg in passages), (list(map(len, passages)), len(words))
if tokenizer is None:
passages = [' '.join(psg) for psg in passages]
else:
passages = [' '.join(psg).replace(' ##', '') for psg in passages]
if title_idx % 100000 == 0:
print("#> ", title_idx, '\t\t\t', title)
for p in passages:
print("$$$ ", '\t\t', p)
print()
print()
print()
print()
return (docid, title, url, passages)
def main(args):
random.seed(12345)
print_message("#> Starting...")
letter = 'w' if not args.use_wordpiece else 't'
output_path = f'{args.input}.{letter}{args.nwords}_{args.overlap}'
assert not os.path.exists(output_path)
RawCollection = []
Collection = []
NumIllFormattedLines = 0
with open(args.input) as f:
for line_idx, line in enumerate(f):
if line_idx % (100*1000) == 0:
print(line_idx, end=' ')
title, url = None, None
try:
line = line.strip().split('\t')
if args.format == Format1:
docid, doc = line
elif args.format == Format2:
docid, doc, title = line
elif args.format == Format3:
docid, url, title, doc = line
RawCollection.append((line_idx, docid, title, url, doc))
except:
NumIllFormattedLines += 1
if NumIllFormattedLines % 1000 == 0:
print(f'\n[{line_idx}] NumIllFormattedLines = {NumIllFormattedLines}\n')
print()
print_message("# of documents is", len(RawCollection), '\n')
p = Pool(args.nthreads)
print_message("#> Starting parallel processing...")
tokenizer = None
if args.use_wordpiece:
from transformers import BertTokenizerFast
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
process_page_params = [(args.nwords, args.overlap, tokenizer)] * len(RawCollection)
Collection = p.map(process_page, zip(process_page_params, RawCollection))
print_message(f"#> Writing to {output_path} ...")
with open(output_path, 'w') as f:
line_idx = 1
if args.format == Format1:
f.write('\t'.join(['id', 'text']) + '\n')
elif args.format == Format2:
f.write('\t'.join(['id', 'text', 'title']) + '\n')
elif args.format == Format3:
f.write('\t'.join(['id', 'text', 'title', 'docid']) + '\n')
for docid, title, url, passages in Collection:
for passage in passages:
if args.format == Format1:
f.write('\t'.join([str(line_idx), passage]) + '\n')
elif args.format == Format2:
f.write('\t'.join([str(line_idx), passage, title]) + '\n')
elif args.format == Format3:
f.write('\t'.join([str(line_idx), passage, title, docid]) + '\n')
line_idx += 1
if __name__ == "__main__":
parser = ArgumentParser(description="docs2passages.")
# Input Arguments.
parser.add_argument('--input', dest='input', required=True)
parser.add_argument('--format', dest='format', required=True, choices=[Format1, Format2, Format3])
# Output Arguments.
parser.add_argument('--use-wordpiece', dest='use_wordpiece', default=False, action='store_true')
parser.add_argument('--nwords', dest='nwords', default=100, type=int)
parser.add_argument('--overlap', dest='overlap', default=0, type=int)
# Other Arguments.
parser.add_argument('--nthreads', dest='nthreads', default=28, type=int)
args = parser.parse_args()
assert args.nwords in range(50, 500)
main(args)
| 4,790 | 30.94 | 110 | py |
ColBERT | ColBERT-master/utility/preprocess/queries_split.py | """
Divide a query set into two.
"""
import os
import math
import ujson
import random
from argparse import ArgumentParser
from collections import OrderedDict
from colbert.utils.utils import print_message
def main(args):
random.seed(12345)
"""
Load the queries
"""
Queries = OrderedDict()
print_message(f"#> Loading queries from {args.input}..")
with open(args.input) as f:
for line in f:
qid, query = line.strip().split('\t')
assert qid not in Queries
Queries[qid] = query
"""
Apply the splitting
"""
size_a = len(Queries) - args.holdout
size_b = args.holdout
size_a, size_b = max(size_a, size_b), min(size_a, size_b)
assert size_a > 0 and size_b > 0, (len(Queries), size_a, size_b)
print_message(f"#> Deterministically splitting the queries into ({size_a}, {size_b})-sized splits.")
keys = list(Queries.keys())
sample_b_indices = sorted(list(random.sample(range(len(keys)), size_b)))
sample_a_indices = sorted(list(set.difference(set(list(range(len(keys)))), set(sample_b_indices))))
assert len(sample_a_indices) == size_a
assert len(sample_b_indices) == size_b
sample_a = [keys[idx] for idx in sample_a_indices]
sample_b = [keys[idx] for idx in sample_b_indices]
"""
Write the output
"""
output_path_a = f'{args.input}.a'
output_path_b = f'{args.input}.b'
assert not os.path.exists(output_path_a), output_path_a
assert not os.path.exists(output_path_b), output_path_b
print_message(f"#> Writing the splits out to {output_path_a} and {output_path_b} ...")
for output_path, sample in [(output_path_a, sample_a), (output_path_b, sample_b)]:
with open(output_path, 'w') as f:
for qid in sample:
query = Queries[qid]
line = '\t'.join([qid, query]) + '\n'
f.write(line)
if __name__ == "__main__":
parser = ArgumentParser(description="queries_split.")
# Input Arguments.
parser.add_argument('--input', dest='input', required=True)
parser.add_argument('--holdout', dest='holdout', required=True, type=int)
args = parser.parse_args()
main(args)
| 2,225 | 26.146341 | 104 | py |
ColBERT | ColBERT-master/utility/preprocess/wikipedia_to_tsv.py | import os
import ujson
from argparse import ArgumentParser
from colbert.utils.utils import print_message
def main(args):
input_path = args.input
output_path = args.output
assert not os.path.exists(output_path), output_path
RawCollection = []
walk = [(dirpath, filenames) for dirpath, _, filenames in os.walk(input_path)]
walk = sorted(walk)
for dirpath, filenames in walk:
print_message(f"#> Visiting {dirpath}")
for filename in sorted(filenames):
assert 'wiki_' in filename, (dirpath, filename)
filename = os.path.join(dirpath, filename)
print_message(f"#> Opening {filename} --- so far collected {len(RawCollection)} pages/passages")
with open(filename) as f:
for line in f:
RawCollection.append(ujson.loads(line))
with open(output_path, 'w') as f:
#line = '\t'.join(map(str, ['id', 'text', 'title'])) + '\n'
#f.write(line)
PID = 1
for doc in RawCollection:
title, text = doc['title'], doc['text']
# Join sentences and clean text
text = ' '.join(text.split())
if args.keep_empty_pages or len(text) > 0:
line = '\t'.join(map(str, [PID, text, title])) + '\n'
f.write(line)
PID += 1
print_message("#> All done.")
if __name__ == "__main__":
    parser = ArgumentParser(description="wikipedia_to_tsv.")
# Input Arguments.
parser.add_argument('--input', dest='input', required=True)
parser.add_argument('--output', dest='output', required=True)
parser.add_argument('--keep-empty-pages', dest='keep_empty_pages', default=False, action='store_true')
args = parser.parse_args()
main(args)
| 1,793 | 27.03125 | 108 | py |
ColBERT | ColBERT-master/utility/evaluate/msmarco_passages.py | """
Evaluate MS MARCO Passages ranking.
"""
import os
import math
import tqdm
import ujson
import random
from argparse import ArgumentParser
from collections import defaultdict
from colbert.utils.utils import print_message, file_tqdm
def main(args):
qid2positives = defaultdict(list)
qid2ranking = defaultdict(list)
qid2mrr = {}
qid2recall = {depth: {} for depth in [50, 200, 1000]}
with open(args.qrels) as f:
print_message(f"#> Loading QRELs from {args.qrels} ..")
for line in file_tqdm(f):
qid, _, pid, label = map(int, line.strip().split())
assert label == 1
qid2positives[qid].append(pid)
with open(args.ranking) as f:
print_message(f"#> Loading ranked lists from {args.ranking} ..")
for line in file_tqdm(f):
qid, pid, rank, *score = line.strip().split('\t')
qid, pid, rank = int(qid), int(pid), int(rank)
if len(score) > 0:
assert len(score) == 1
score = float(score[0])
else:
score = None
qid2ranking[qid].append((rank, pid, score))
assert set.issubset(set(qid2ranking.keys()), set(qid2positives.keys()))
num_judged_queries = len(qid2positives)
num_ranked_queries = len(qid2ranking)
if num_judged_queries != num_ranked_queries:
print()
print_message("#> [WARNING] num_judged_queries != num_ranked_queries")
print_message(f"#> {num_judged_queries} != {num_ranked_queries}")
print()
print_message(f"#> Computing MRR@10 for {num_judged_queries} queries.")
for qid in tqdm.tqdm(qid2positives):
ranking = qid2ranking[qid]
positives = qid2positives[qid]
for rank, (_, pid, _) in enumerate(ranking):
rank = rank + 1 # 1-indexed
if pid in positives:
if rank <= 10:
qid2mrr[qid] = 1.0 / rank
break
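        # Recall@k: the fraction of this query's judged positives that appear in the top k
        # (each hit adds 1/len(positives) to the per-depth accumulator in qid2recall).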
for rank, (_, pid, _) in enumerate(ranking):
rank = rank + 1 # 1-indexed
if pid in positives:
for depth in qid2recall:
if rank <= depth:
qid2recall[depth][qid] = qid2recall[depth].get(qid, 0) + 1.0 / len(positives)
assert len(qid2mrr) <= num_ranked_queries, (len(qid2mrr), num_ranked_queries)
print()
mrr_10_sum = sum(qid2mrr.values())
print_message(f"#> MRR@10 = {mrr_10_sum / num_judged_queries}")
print_message(f"#> MRR@10 (only for ranked queries) = {mrr_10_sum / num_ranked_queries}")
print()
for depth in qid2recall:
assert len(qid2recall[depth]) <= num_ranked_queries, (len(qid2recall[depth]), num_ranked_queries)
print()
metric_sum = sum(qid2recall[depth].values())
print_message(f"#> Recall@{depth} = {metric_sum / num_judged_queries}")
print_message(f"#> Recall@{depth} (only for ranked queries) = {metric_sum / num_ranked_queries}")
print()
if args.annotate:
print_message(f"#> Writing annotations to {args.output} ..")
with open(args.output, 'w') as f:
for qid in tqdm.tqdm(qid2positives):
ranking = qid2ranking[qid]
positives = qid2positives[qid]
for rank, (_, pid, score) in enumerate(ranking):
rank = rank + 1 # 1-indexed
label = int(pid in positives)
line = [qid, pid, rank, score, label]
line = [x for x in line if x is not None]
line = '\t'.join(map(str, line)) + '\n'
f.write(line)
if __name__ == "__main__":
parser = ArgumentParser(description="msmarco_passages.")
# Input Arguments.
parser.add_argument('--qrels', dest='qrels', required=True, type=str)
parser.add_argument('--ranking', dest='ranking', required=True, type=str)
parser.add_argument('--annotate', dest='annotate', default=False, action='store_true')
args = parser.parse_args()
if args.annotate:
args.output = f'{args.ranking}.annotated'
assert not os.path.exists(args.output), args.output
main(args)
| 4,193 | 32.023622 | 105 | py |
ColBERT | ColBERT-master/utility/evaluate/annotate_EM.py | import os
import sys
import git
import tqdm
import ujson
import random
from argparse import ArgumentParser
from multiprocessing import Pool
from colbert.utils.utils import print_message, load_ranking, groupby_first_item
from utility.utils.qa_loaders import load_qas_, load_collection_
from utility.utils.save_metadata import format_metadata, get_metadata
from utility.evaluate.annotate_EM_helpers import *
# TODO: Tokenize passages in advance, especially if the ranked list is long! This requires slight changes to the has_answer input.
def main(args):
qas = load_qas_(args.qas)
collection = load_collection_(args.collection, retain_titles=True)
rankings = load_ranking(args.ranking)
parallel_pool = Pool(30)
print_message('#> Tokenize the answers in the Q&As in parallel...')
qas = list(parallel_pool.map(tokenize_all_answers, qas))
qid2answers = {qid: tok_answers for qid, _, tok_answers in qas}
assert len(qas) == len(qid2answers), (len(qas), len(qid2answers))
print_message('#> Lookup passages from PIDs...')
expanded_rankings = [(qid, pid, rank, collection[pid], qid2answers[qid])
for qid, pid, rank, *_ in rankings]
print_message('#> Assign labels in parallel...')
labeled_rankings = list(parallel_pool.map(assign_label_to_passage, enumerate(expanded_rankings)))
# Dump output.
print_message("#> Dumping output to", args.output, "...")
qid2rankings = groupby_first_item(labeled_rankings)
num_judged_queries, num_ranked_queries = check_sizes(qid2answers, qid2rankings)
# Evaluation metrics and depths.
success, counts = compute_and_write_labels(args.output, qid2answers, qid2rankings)
# Dump metrics.
with open(args.output_metrics, 'w') as f:
d = {'num_ranked_queries': num_ranked_queries, 'num_judged_queries': num_judged_queries}
extra = '__WARNING' if num_judged_queries != num_ranked_queries else ''
d[f'success{extra}'] = {k: v / num_judged_queries for k, v in success.items()}
d[f'counts{extra}'] = {k: v / num_judged_queries for k, v in counts.items()}
d['arguments'] = get_metadata(args)
f.write(format_metadata(d) + '\n')
print('\n\n')
print(args.output)
print(args.output_metrics)
print("#> Done\n")
if __name__ == "__main__":
random.seed(12345)
parser = ArgumentParser(description='.')
# Input / Output Arguments
parser.add_argument('--qas', dest='qas', required=True, type=str)
parser.add_argument('--collection', dest='collection', required=True, type=str)
parser.add_argument('--ranking', dest='ranking', required=True, type=str)
args = parser.parse_args()
args.output = f'{args.ranking}.annotated'
args.output_metrics = f'{args.ranking}.annotated.metrics'
assert not os.path.exists(args.output), args.output
main(args)
| 2,883 | 34.170732 | 133 | py |
ColBERT | ColBERT-master/utility/evaluate/annotate_EM_helpers.py | from colbert.utils.utils import print_message
from utility.utils.dpr import DPR_normalize, has_answer
def tokenize_all_answers(args):
qid, question, answers = args
return qid, question, [DPR_normalize(ans) for ans in answers]
def assign_label_to_passage(args):
idx, (qid, pid, rank, passage, tokenized_answers) = args
if idx % (1*1000*1000) == 0:
print(idx)
return qid, pid, rank, has_answer(tokenized_answers, passage)
def check_sizes(qid2answers, qid2rankings):
num_judged_queries = len(qid2answers)
num_ranked_queries = len(qid2rankings)
print_message('num_judged_queries =', num_judged_queries)
print_message('num_ranked_queries =', num_ranked_queries)
if num_judged_queries != num_ranked_queries:
assert num_ranked_queries <= num_judged_queries
print('\n\n')
print_message('[WARNING] num_judged_queries != num_ranked_queries')
print('\n\n')
return num_judged_queries, num_ranked_queries
def compute_and_write_labels(output_path, qid2answers, qid2rankings):
cutoffs = [1, 5, 10, 20, 30, 50, 100, 1000, 'all']
success = {cutoff: 0.0 for cutoff in cutoffs}
counts = {cutoff: 0.0 for cutoff in cutoffs}
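    # success@k counts queries with at least one answer-bearing passage in the top k, while
    # counts@k accumulates how many answer-bearing passages appear in the top k
    # ('all' uses the full ranked list); both are normalized by the caller.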
with open(output_path, 'w') as f:
for qid in qid2answers:
if qid not in qid2rankings:
continue
            prev_rank = 0  # ranks should start at one (i.e., not zero)
labels = []
for pid, rank, label in qid2rankings[qid]:
assert rank == prev_rank+1, (qid, pid, (prev_rank, rank))
prev_rank = rank
labels.append(label)
line = '\t'.join(map(str, [qid, pid, rank, int(label)])) + '\n'
f.write(line)
for cutoff in cutoffs:
if cutoff != 'all':
success[cutoff] += sum(labels[:cutoff]) > 0
counts[cutoff] += sum(labels[:cutoff])
else:
success[cutoff] += sum(labels) > 0
counts[cutoff] += sum(labels)
return success, counts
# def dump_metrics(f, nqueries, cutoffs, success, counts):
# for cutoff in cutoffs:
# success_log = "#> P@{} = {}".format(cutoff, success[cutoff] / nqueries)
# counts_log = "#> D@{} = {}".format(cutoff, counts[cutoff] / nqueries)
# print('\n'.join([success_log, counts_log]) + '\n')
# f.write('\n'.join([success_log, counts_log]) + '\n\n')
| 2,495 | 32.28 | 81 | py |
ColBERT | ColBERT-master/utility/utils/save_metadata.py | import os
import sys
import git
import time
import copy
import ujson
import socket
def get_metadata(args):
args = copy.deepcopy(args)
args.hostname = socket.gethostname()
args.git_branch = git.Repo(search_parent_directories=True).active_branch.name
args.git_hash = git.Repo(search_parent_directories=True).head.object.hexsha
args.git_commit_datetime = str(git.Repo(search_parent_directories=True).head.object.committed_datetime)
args.current_datetime = time.strftime('%b %d, %Y ; %l:%M%p %Z (%z)')
args.cmd = ' '.join(sys.argv)
try:
args.input_arguments = copy.deepcopy(args.input_arguments.__dict__)
except:
args.input_arguments = None
return dict(args.__dict__)
def format_metadata(metadata):
assert type(metadata) == dict
return ujson.dumps(metadata, indent=4)
def save_metadata(path, args):
assert not os.path.exists(path), path
with open(path, 'w') as output_metadata:
data = get_metadata(args)
output_metadata.write(format_metadata(data) + '\n')
return data
| 1,068 | 24.452381 | 107 | py |
ColBERT | ColBERT-master/utility/utils/dpr.py | """
Source: DPR Implementation from Facebook Research
https://github.com/facebookresearch/DPR/tree/master/dpr
"""
import copy
import logging
import string
import spacy
import regex
import unicodedata
logger = logging.getLogger(__name__)
class Tokens(object):
"""A class to represent a list of tokenized text."""
TEXT = 0
TEXT_WS = 1
SPAN = 2
POS = 3
LEMMA = 4
NER = 5
def __init__(self, data, annotators, opts=None):
self.data = data
self.annotators = annotators
self.opts = opts or {}
def __len__(self):
"""The number of tokens."""
return len(self.data)
def slice(self, i=None, j=None):
"""Return a view of the list of tokens from [i, j)."""
new_tokens = copy.copy(self)
new_tokens.data = self.data[i: j]
return new_tokens
def untokenize(self):
"""Returns the original text (with whitespace reinserted)."""
return ''.join([t[self.TEXT_WS] for t in self.data]).strip()
def words(self, uncased=False):
"""Returns a list of the text of each token
Args:
uncased: lower cases text
"""
if uncased:
return [t[self.TEXT].lower() for t in self.data]
else:
return [t[self.TEXT] for t in self.data]
def offsets(self):
"""Returns a list of [start, end) character offsets of each token."""
return [t[self.SPAN] for t in self.data]
def pos(self):
"""Returns a list of part-of-speech tags of each token.
Returns None if this annotation was not included.
"""
if 'pos' not in self.annotators:
return None
return [t[self.POS] for t in self.data]
def lemmas(self):
"""Returns a list of the lemmatized text of each token.
Returns None if this annotation was not included.
"""
if 'lemma' not in self.annotators:
return None
return [t[self.LEMMA] for t in self.data]
def entities(self):
"""Returns a list of named-entity-recognition tags of each token.
Returns None if this annotation was not included.
"""
if 'ner' not in self.annotators:
return None
return [t[self.NER] for t in self.data]
def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
"""Returns a list of all ngrams from length 1 to n.
Args:
n: upper limit of ngram length
uncased: lower cases text
filter_fn: user function that takes in an ngram list and returns
True or False to keep or not keep the ngram
            as_strings: return each ngram as a string instead of a list
"""
def _skip(gram):
if not filter_fn:
return False
return filter_fn(gram)
words = self.words(uncased)
ngrams = [(s, e + 1)
for s in range(len(words))
for e in range(s, min(s + n, len(words)))
if not _skip(words[s:e + 1])]
# Concatenate into strings
if as_strings:
ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]
return ngrams
def entity_groups(self):
"""Group consecutive entity tokens with the same NER tag."""
entities = self.entities()
if not entities:
return None
non_ent = self.opts.get('non_ent', 'O')
groups = []
idx = 0
while idx < len(entities):
ner_tag = entities[idx]
# Check for entity tag
if ner_tag != non_ent:
# Chomp the sequence
start = idx
while (idx < len(entities) and entities[idx] == ner_tag):
idx += 1
groups.append((self.slice(start, idx).untokenize(), ner_tag))
else:
idx += 1
return groups
class Tokenizer(object):
"""Base tokenizer class.
Tokenizers implement tokenize, which should return a Tokens class.
"""
def tokenize(self, text):
raise NotImplementedError
def shutdown(self):
pass
def __del__(self):
self.shutdown()
class SimpleTokenizer(Tokenizer):
ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
NON_WS = r'[^\p{Z}\p{C}]'
def __init__(self, **kwargs):
"""
Args:
annotators: None or empty set (only tokenizes).
"""
self._regexp = regex.compile(
'(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
if len(kwargs.get('annotators', {})) > 0:
logger.warning('%s only tokenizes! Skipping annotators: %s' %
(type(self).__name__, kwargs.get('annotators')))
self.annotators = set()
def tokenize(self, text):
data = []
matches = [m for m in self._regexp.finditer(text)]
for i in range(len(matches)):
# Get text
token = matches[i].group()
# Get whitespace
span = matches[i].span()
start_ws = span[0]
if i + 1 < len(matches):
end_ws = matches[i + 1].span()[0]
else:
end_ws = span[1]
# Format data
data.append((
token,
text[start_ws: end_ws],
span,
))
return Tokens(data, self.annotators)
def has_answer(tokenized_answers, text):
text = DPR_normalize(text)
for single_answer in tokenized_answers:
for i in range(0, len(text) - len(single_answer) + 1):
if single_answer == text[i: i + len(single_answer)]:
return True
return False
def locate_answers(tokenized_answers, text):
"""
Returns each occurrence of an answer as (offset, endpos) in terms of *characters*.
"""
tokenized_text = DPR_tokenize(text)
occurrences = []
text_words, text_word_positions = tokenized_text.words(uncased=True), tokenized_text.offsets()
answers_words = [ans.words(uncased=True) for ans in tokenized_answers]
for single_answer in answers_words:
for i in range(0, len(text_words) - len(single_answer) + 1):
if single_answer == text_words[i: i + len(single_answer)]:
(offset, _), (_, endpos) = text_word_positions[i], text_word_positions[i+len(single_answer)-1]
occurrences.append((offset, endpos))
return occurrences
STokenizer = SimpleTokenizer()
def DPR_tokenize(text):
return STokenizer.tokenize(unicodedata.normalize('NFD', text))
def DPR_normalize(text):
return DPR_tokenize(text).words(uncased=True)
# Source: https://github.com/shmsw25/qa-hard-em/blob/master/prepro_util.py
def strip_accents(text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
| 7,071 | 28.714286 | 110 | py |
ColBERT | ColBERT-master/utility/utils/qa_loaders.py | import os
import ujson
from collections import defaultdict
from colbert.utils.utils import print_message, file_tqdm
def load_collection_(path, retain_titles):
with open(path) as f:
collection = []
for line in file_tqdm(f):
_, passage, title = line.strip().split('\t')
if retain_titles:
passage = title + ' | ' + passage
collection.append(passage)
return collection
def load_qas_(path):
print_message("#> Loading the reference QAs from", path)
triples = []
with open(path) as f:
for line in f:
qa = ujson.loads(line)
triples.append((qa['qid'], qa['question'], qa['answers']))
return triples
| 726 | 20.382353 | 70 | py |
ColBERT | ColBERT-master/colbert/test.py | import os
import random
from colbert.utils.parser import Arguments
from colbert.utils.runs import Run
from colbert.evaluation.loaders import load_colbert, load_topK, load_qrels
from colbert.evaluation.loaders import load_queries, load_topK_pids, load_collection
from colbert.evaluation.ranking import evaluate
from colbert.evaluation.metrics import evaluate_recall
def main():
random.seed(12345)
parser = Arguments(description='Exhaustive (slow, not index-based) evaluation of re-ranking with ColBERT.')
parser.add_model_parameters()
parser.add_model_inference_parameters()
parser.add_reranking_input()
parser.add_argument('--depth', dest='depth', required=False, default=None, type=int)
args = parser.parse()
with Run.context():
args.colbert, args.checkpoint = load_colbert(args)
args.qrels = load_qrels(args.qrels)
if args.collection or args.queries:
assert args.collection and args.queries
args.queries = load_queries(args.queries)
args.collection = load_collection(args.collection)
args.topK_pids, args.qrels = load_topK_pids(args.topK, args.qrels)
else:
args.queries, args.topK_docs, args.topK_pids = load_topK(args.topK)
assert (not args.shortcircuit) or args.qrels, \
"Short-circuiting (i.e., applying minimal computation to queries with no positives in the re-ranked set) " \
"can only be applied if qrels is provided."
evaluate_recall(args.qrels, args.queries, args.topK_pids)
evaluate(args)
if __name__ == "__main__":
main()
| 1,627 | 31.56 | 120 | py |
ColBERT | ColBERT-master/colbert/index_faiss.py | import os
import random
import math
from colbert.utils.runs import Run
from colbert.utils.parser import Arguments
from colbert.indexing.faiss import index_faiss
from colbert.indexing.loaders import load_doclens
def main():
random.seed(12345)
parser = Arguments(description='Faiss indexing for end-to-end retrieval with ColBERT.')
parser.add_index_use_input()
parser.add_argument('--sample', dest='sample', default=None, type=float)
parser.add_argument('--slices', dest='slices', default=1, type=int)
args = parser.parse()
assert args.slices >= 1
assert args.sample is None or (0.0 < args.sample < 1.0), args.sample
with Run.context():
args.index_path = os.path.join(args.index_root, args.index_name)
assert os.path.exists(args.index_path), args.index_path
num_embeddings = sum(load_doclens(args.index_path))
print("#> num_embeddings =", num_embeddings)
if args.partitions is None:
args.partitions = 1 << math.ceil(math.log2(8 * math.sqrt(num_embeddings)))
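            # Default heuristic: the next power of two above 8 * sqrt(num_embeddings); e.g., with
            # ~100M embeddings, sqrt = 10,000 -> x8 = 80,000 -> 2**ceil(log2(80,000)) = 131,072 partitions.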
print('\n\n')
Run.warn("You did not specify --partitions!")
Run.warn("Default computation chooses", args.partitions,
"partitions (for {} embeddings)".format(num_embeddings))
print('\n\n')
index_faiss(args)
if __name__ == "__main__":
main()
| 1,377 | 30.318182 | 91 | py |
ColBERT | ColBERT-master/colbert/retrieve.py | import os
import random
from colbert.utils.parser import Arguments
from colbert.utils.runs import Run
from colbert.evaluation.loaders import load_colbert, load_qrels, load_queries
from colbert.indexing.faiss import get_faiss_index_name
from colbert.ranking.retrieval import retrieve
from colbert.ranking.batch_retrieval import batch_retrieve
def main():
random.seed(12345)
parser = Arguments(description='End-to-end retrieval and ranking with ColBERT.')
parser.add_model_parameters()
parser.add_model_inference_parameters()
parser.add_ranking_input()
parser.add_retrieval_input()
parser.add_argument('--faiss_name', dest='faiss_name', default=None, type=str)
parser.add_argument('--faiss_depth', dest='faiss_depth', default=1024, type=int)
parser.add_argument('--part-range', dest='part_range', default=None, type=str)
parser.add_argument('--batch', dest='batch', default=False, action='store_true')
parser.add_argument('--depth', dest='depth', default=1000, type=int)
args = parser.parse()
args.depth = args.depth if args.depth > 0 else None
if args.part_range:
part_offset, part_endpos = map(int, args.part_range.split('..'))
args.part_range = range(part_offset, part_endpos)
with Run.context():
args.colbert, args.checkpoint = load_colbert(args)
args.qrels = load_qrels(args.qrels)
args.queries = load_queries(args.queries)
args.index_path = os.path.join(args.index_root, args.index_name)
if args.faiss_name is not None:
args.faiss_index_path = os.path.join(args.index_path, args.faiss_name)
else:
args.faiss_index_path = os.path.join(args.index_path, get_faiss_index_name(args))
if args.batch:
batch_retrieve(args)
else:
retrieve(args)
if __name__ == "__main__":
main()
| 1,882 | 32.035088 | 93 | py |
ColBERT | ColBERT-master/colbert/parameters.py | import torch
DEVICE = torch.device("cuda")
SAVED_CHECKPOINTS = [32*1000, 100*1000, 150*1000, 200*1000, 300*1000, 400*1000]
SAVED_CHECKPOINTS += [10*1000, 20*1000, 30*1000, 40*1000, 50*1000, 60*1000, 70*1000, 80*1000, 90*1000]
SAVED_CHECKPOINTS += [25*1000, 50*1000, 75*1000]
SAVED_CHECKPOINTS = set(SAVED_CHECKPOINTS)
| 321 | 31.2 | 102 | py |