Compare commits
No commits in common. "5848e22b1ef23fdab9da6a543942319038de88c5" and "fc0ccb1458840bf5b8116ceff00a96191a1c1609" have entirely different histories.
5848e22b1e...fc0ccb1458
@@ -8,12 +8,11 @@ from utils.logger import write_to_record_file
 from utils.data import ImageObservationsDB
 from parser import parse_args
 from env import REVERIENavBatch
-from agent import NavGPTAgent, RandomAgent
+from agent import NavGPTAgent

-def build_dataset(args, data_limit=100):
+def build_dataset(args):

     feat_db = ImageObservationsDB(args.obs_dir, args.obs_summary_dir, args.obj_dir)
-    print(feat_db)

     dataset_class = REVERIENavBatch

@@ -26,7 +25,7 @@ def build_dataset(args, data_limit=100):
         )
         val_env = dataset_class(
             feat_db, val_instr_data, args.connectivity_dir, args.navigable_dir,
-            batch_size=args.batch_size, seed=args.seed, name=split, data_limit=data_limit
+            batch_size=args.batch_size, seed=args.seed, name=split,
         ) # evaluation using all objects
         val_envs[split] = val_env

@@ -35,7 +34,7 @@ def build_dataset(args, data_limit=100):

 def valid(args, val_envs):

-    agent = RandomAgent(next(iter(val_envs.values())), args)
+    agent = NavGPTAgent(next(iter(val_envs.values())), args)

     with open(os.path.join(args.log_dir, 'validation_args.json'), 'w') as outf:
         json.dump(vars(args), outf, indent=4)
@@ -96,7 +95,7 @@ def valid_from_file(args, val_envs):
 def main():
     args = parse_args()

-    val_envs = build_dataset(args, data_limit=100)
+    val_envs = build_dataset(args)

     if args.valid_file is not None:
         valid_from_file(args, val_envs)
nav_src/agent.py
@@ -5,7 +5,6 @@ import re
 import warnings
 import numpy as np
 from typing import Any, Callable, List, NamedTuple, Optional, Sequence, Tuple, Dict, Union
-import random

 from env import REVERIENavBatch
 from argparse import Namespace
@@ -46,11 +45,6 @@ FINAL_ANSWER_ACTION = "Final Answer:"
 EXCEPTION_TOOL_NAME = "_Exception"
 MAX_SCRATCHPAD_LENGTH = 7000

-FINAL_STOP_POINT = ""
-SUCCESS = 0
-TEMP_STEPS_COUNTER = 0
-STEPS_COUNTER = 0
-
 MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
     "Invalid Format: Missing 'Action:' after 'Thought:"
 )
@@ -69,19 +63,16 @@ class NavGPTOutputParser(AgentOutputParser):
         return FORMAT_INSTRUCTIONS

     def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
-        global STEPS_COUNTER
-        global TEMP_STEPS_COUNTER
-        global SUCCESS
-        # includes_answer = FINAL_ANSWER_ACTION in text
+        includes_answer = FINAL_ANSWER_ACTION in text
         regex = (
             r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*\"?([a-fA-F0-9]{32})\"?"
         )
         action_match = re.search(regex, text, re.DOTALL)
         if action_match:
-            # if includes_answer:
-            #     raise OutputParserException(
-            #         f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
-            #     )
+            if includes_answer:
+                raise OutputParserException(
+                    f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
+                )
             action = action_match.group(1).strip()
             action_input = action_match.group(2)
             tool_input = action_input.strip(" ")
@@ -89,36 +80,12 @@ class NavGPTOutputParser(AgentOutputParser):
             if tool_input.startswith("SELECT ") is False:
                 tool_input = tool_input.strip('"')

-            print("TEXT:", text)
-            print("ACTION: ", action_input)
-            print(f"MY FINAL_STOP_POINT = {FINAL_STOP_POINT}")
-
-            TEMP_STEPS_COUNTER += 1
-            print(f"TEMP_STEPS_COUNT = {TEMP_STEPS_COUNTER}")
-            print(f"STEPS_COUNT = {STEPS_COUNTER}")
-            print(f"SUCCESS = {SUCCESS}")
-
-
-
-            if FINAL_STOP_POINT in text:
-                STEPS_COUNTER += TEMP_STEPS_COUNTER
-                SUCCESS += 1
-                TEMP_STEPS_COUNTER = 0
-                print(f"TEMP_STEPS_COUNT = {TEMP_STEPS_COUNTER}")
-                print(f"STEPS_COUNT = {STEPS_COUNTER}")
-                print(f"SUCCESS = {SUCCESS}")
-
-                return AgentFinish(
-                    {"output": action_input}, text
-                )

             return AgentAction(action, tool_input, text)
-        '''
         elif includes_answer:
             return AgentFinish(
                 {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
             )
-        '''

         if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
             raise OutputParserException(
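Note: the parse() hunks above hinge on the action regex, which pulls the tool name from "Action:" and a 32-character hex viewpoint ID from "Action Input:". A minimal runnable sketch of that extraction (the sample LLM text and viewpoint ID below are invented for illustration, not taken from the repo):

import re

# Same pattern as in NavGPTOutputParser.parse(): tool name, then a 32-hex viewpoint ID.
regex = (
    r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*\"?([a-fA-F0-9]{32})\"?"
)

# Hypothetical LLM output for demonstration only.
text = (
    "Thought: The bedroom should be through the door on the right.\n"
    "Action: action_maker\n"
    'Action Input: "0ab1c2d3e4f5a6b7c8d9e0f1a2b3c4d5"'
)

match = re.search(regex, text, re.DOTALL)
if match:
    action = match.group(1).strip()   # -> "action_maker"
    viewpoint_id = match.group(2)     # -> "0ab1c2d3e4f5a6b7c8d9e0f1a2b3c4d5"
    print(action, viewpoint_id)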
@@ -230,14 +197,13 @@ class NavGPTAgent(BaseAgent):

         self.output_parser = NavGPTOutputParser()
         self.agent_executor = self.create_vln_agent()
+        print("AGENT_EXECUTOR: ", type(self.agent_executor))

-        '''
         plan_prompt = PromptTemplate(
             template=PLANNER_PROMPT,
             input_variables=["instruction"],
         )
         self.plan_chain = LLMChain(llm=self.llm, prompt=plan_prompt)
-        '''

     def parse_action(self, llm_output: str) -> Tuple[str, str]:
         regex = r"(.*?)Final Answer:[\s]*(.*)"
@@ -623,8 +589,6 @@ class NavGPTAgent(BaseAgent):
         # We will be here
         tools = [self.action_maker]
         print(tools)
-        print("TOOL NAME: ", ", ".join([tool.name for tool in tools]))
-        print("TOOL DESCRIPTION: ", [f"{tool.name}: {tool.description}" for tool in tools])
         prompt = PromptTemplate(
             template=VLN_GPT4_PROMPT if self.config.llm_model_name == 'gpt-4' else VLN_GPT35_PROMPT,
             input_variables=["action_plan", "init_observation", "agent_scratchpad"],
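Note: the hunk above only selects between the GPT-4 and GPT-3.5 templates. As a rough sketch of the template-filling step, assuming a LangChain-style PromptTemplate (exact import path depends on the installed LangChain version; the demo template and values are invented, not the repo's VLN prompts):

from langchain.prompts import PromptTemplate

# Tiny stand-in for VLN_GPT35_PROMPT so the slot names stay visible.
demo_template = (
    "Action Plan: {action_plan}\n"
    "Initial Observation: {init_observation}\n"
    "{agent_scratchpad}"
)

prompt = PromptTemplate(
    template=demo_template,
    input_variables=["action_plan", "init_observation", "agent_scratchpad"],
)

# The agent executor fills these slots each step before calling the LLM.
print(prompt.format(
    action_plan="walk to the bedroom and stop by the lamp",
    init_observation="a hallway with two doors within 3 meters",
    agent_scratchpad="",
))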
@@ -698,16 +662,6 @@ class NavGPTAgent(BaseAgent):
         else:
             obs = self.env._get_obs()

-        global FINAL_STOP_POINT
-        global TEMP_STEPS_COUNTER
-
-        FINAL_STOP_POINT = obs[0]['stop']
-
-        if TEMP_STEPS_COUNTER != 0:
-            TEMP_STEPS_COUNTER = 0
-
-        print(f"HAVE SET FINAL_STOP_POINT = {FINAL_STOP_POINT}")
-
         print(len(obs))

         print(obs[0].keys())
@@ -816,115 +770,4 @@ class NavGPTAgent(BaseAgent):
             self.traj[i]['llm_thought'].append(thought)
             self.traj[i]['llm_observation'].append(observation)

-        return self.traj
-
-class RandomAgent(BaseAgent):
-    def __init__(
-        self,
-        env: REVERIENavBatch,
-        config: Namespace):
-        """
-        Initialize the LLM Navigation Agent.
-
-        Args:
-            env: The Matterport3D environment.
-            config: The configuration.
-        """
-        super().__init__(env)
-        self.config = config
-
-
-    def init_trajecotry(self, obs: List[dict]):
-        """Initialize the trajectory with the given observation."""
-        # Record the navigation path
-        self.traj = [{
-            'instr_id': ob['instr_id'],
-            'path': [[ob['start']]],
-            'details': [],
-        } for ob in obs]
-        # Record the history of actions taken
-
-
-    def make_equiv_action(self, actions: List[str]) -> str:
-        """
-        Interface between Panoramic view and Egocentric view
-        Take in the next viewpoint ID and move the agent to that viewpoint
-        return the turned angle and new observation
-        """
-        def normalize_angle(angle):
-            while angle > 180:
-                angle -= 360
-            while angle <= -180:
-                angle += 360
-            return angle
-
-        def angle_to_left_right(angle):
-            return f"left {-angle:.2f}" if angle < 0 else f"right {angle:.2f}"
-
-        # Get current agent facing angle
-        cur_obs = self.env._get_obs()[0]
-        cur_heading = np.rad2deg(cur_obs['heading'])
-        # Make the action
-        new_obs = self.env.step(actions)[0]
-        new_heading = np.rad2deg(new_obs['heading'])
-        # Record the trajectory
-        self.traj[0]['path'].append(self.env.env.sims[0].gmap.bfs_shortest_path(cur_obs['viewpoint'], actions[0])[1:])
-        # Calculate the turned angle
-        turned_angle = new_heading - cur_heading
-        # Generate action description
-        cur_heading = angle_to_left_right(normalize_angle(cur_heading))
-        new_heading = angle_to_left_right(normalize_angle(new_heading))
-        action_description = f'Turn heading direction {turned_angle:.2f} degrees from {cur_heading} to {new_heading}.'
-        return action_description, new_obs
-
-    def rollout(self, reset=True):
-        if reset: # Reset env
-            obs = self.env.reset()
-        else:
-            obs = self.env._get_obs()
-
-        global FINAL_STOP_POINT
-        global TEMP_STEPS_COUNTER
-        global STEPS_COUNTER
-        global SUCCESS
-
-        FINAL_STOP_POINT = obs[0]['stop']
-
-        if TEMP_STEPS_COUNTER != 0:
-            TEMP_STEPS_COUNTER = 0
-
-        print("=="*20)
-
-        # Initialize the trajectory
-        self.init_trajecotry(obs)
-
-        for iteration in range(self.config.max_iterations):
-            next_point = None
-            print(obs[0].keys())
-            print(obs[0]['viewpoint'])
-            for i, init_ob in enumerate(obs):
-                navigable = [ k for k, v in init_ob['candidate'].items() ]
-                next_point = random.choice(navigable)
-                print(next_point)
-                turned_angle, obs = self.make_equiv_action([next_point])
-                obs = [obs]
-
-            print(f"TEMP_STEPS_COUNTER={TEMP_STEPS_COUNTER}")
-            print(f"STEPS_COUNTER={STEPS_COUNTER}")
-            TEMP_STEPS_COUNTER += 1
-
-            if next_point == FINAL_STOP_POINT:
-                print(" SUCCESS")
-                STEPS_COUNTER += TEMP_STEPS_COUNTER
-                SUCCESS += 1
-                TEMP_STEPS_COUNTER = 0
-                break
-
-        print(f"FINAL_STOP_POINT={FINAL_STOP_POINT}")
-        print(f"SUCCESS={SUCCESS}")
-        print(f"TEMP_STEPS_COUNTER={TEMP_STEPS_COUNTER}")
-        print(f"STEPS_COUNTER={STEPS_COUNTER}")
-
-
-
         return self.traj
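Note: the deleted RandomAgent above converts raw headings into a turn description via normalize_angle and angle_to_left_right. Restated outside the diff as a small runnable sketch (the heading values are made-up examples):

def normalize_angle(angle):
    # Map any angle in degrees into (-180, 180].
    while angle > 180:
        angle -= 360
    while angle <= -180:
        angle += 360
    return angle

def angle_to_left_right(angle):
    return f"left {-angle:.2f}" if angle < 0 else f"right {angle:.2f}"

# Example: the agent was facing 350 degrees and now faces 30 degrees.
cur_heading, new_heading = 350.0, 30.0
turned_angle = new_heading - cur_heading                        # raw difference, -320.00 here
cur_desc = angle_to_left_right(normalize_angle(cur_heading))    # "left 10.00"
new_desc = angle_to_left_right(normalize_angle(new_heading))    # "right 30.00"
print(f"Turn heading direction {turned_angle:.2f} degrees from {cur_desc} to {new_desc}.")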
@@ -14,6 +14,10 @@ class BaseAgent(object):
             output.append({'instr_id': k, 'trajectory': v['path']})
             if detailed_output:
                 output[-1]['details'] = v['details']
+                output[-1]['action_plan'] = v['action_plan']
+                output[-1]['llm_output'] = v['llm_output']
+                output[-1]['llm_thought'] = v['llm_thought']
+                output[-1]['llm_observation'] = v['llm_observation']
         return output

     def rollout(self, **args):
@@ -141,7 +141,7 @@ class REVERIENavBatch(object):

     def __init__(
         self, view_db, instr_data, connectivity_dir, navigable_dir,
-        batch_size=1, seed=0, name=None, data_limit=100
+        batch_size=1, seed=0, name=None,
     ):
         self.env = EnvBatch(navigable_dir, feat_db=view_db, batch_size=batch_size)
         self.data = instr_data
@@ -157,8 +157,6 @@ class REVERIENavBatch(object):
         random.seed(self.seed)
         random.shuffle(self.data)

-        self.data = self.data[:data_limit]
-
         self.ix = 0
         self._load_nav_graphs()

@@ -265,7 +263,7 @@ class REVERIENavBatch(object):
         stops = [item['stop'] for item in self.batch]
         starts = [item['start'] for item in self.batch]
         targets = [item['target'] for item in self.batch]
-        self.env.newEpisodes(scanIds, starts, headings, stops, starts, targets)
+        self.env.newEpisodes(scanIds, viewpointIds, headings, stops, starts, targets)
         return self._get_obs()

     def step(self, next_viewpoint_IDs):
@@ -18,7 +18,7 @@ def parse_args():
     # parser.add_argument('--llm_model_name', type=str, default='gpt-4', help='llm model name')
     # parser.add_argument('--llm_model_name', type=str, default='LlaMA-2-13b', help='llm model name')
     parser.add_argument('--batch_size', type=int, default=1)
-    parser.add_argument('--max_iterations', type=int, default=25)
+    parser.add_argument('--max_iterations', type=int, default=10)

     # General config
     parser.add_argument('--iters', type=int, default=10, help='number of iterations to run')
@@ -248,11 +248,11 @@ VLN_GPT35_PROMPT = """As an intelligent embodied agent, you will navigate an ind

 You will receive a trajectory instruction at the start and will have access to step history (your Thought, Action, Action Input and Obeservation after the Begin! sign) and current viewpoint observation (including scene descriptions, objects, and navigable directions/distances within 3 meters) during navigation. Orientations range from -180 to 180 degrees, with 0 being forward, right 90 rightward, right/left 180 backward, and left 90 leftward.

-Explore the environment and don't stay at the original point. Keep Walking! Reach within 3 meters of the instructed destination, and if it's visible but no objects are detected, move closer.
+Explore the environment while avoiding revisiting viewpoints by comparing current and previously visited IDs. Reach within 3 meters of the instructed destination, and if it's visible but no objects are detected, move closer.

-If you find the object but I haven't said you can stop. You cannot say you have finished the task! Keep exploring the nearby area.
-continue by considering your location and the next viewpoint based on the instruction, using the action_maker tool.
+At each step, determine if you've reached the destination.
+If yes, stop and output 'Final Answer: Finished!'.
+If not, continue by considering your location and the next viewpoint based on the instruction, using the action_maker tool.
 Show your reasoning in the Thought section.

 Follow the given format and use provided tools.
@@ -266,8 +266,11 @@ Instruction: the instruction describing the whole trajectory
 Initial Observation: the initial observation of the environment
 Thought: you should always think about what to do next and why
 Action: the action to take, must be one of the tools [{tool_names}]
-Action Input: "Viewpoint ID" but do not stay in the original viewpoint
+Action Input: "Viewpoint ID"
 Observation: the result of the action
+... (this Thought/Action/Action Input/Observation can repeat N times)
+Thought: I have reached the destination, I can stop.
+Final Answer: Finished!
 ----

 Begin!
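For illustration only (a hypothetical exchange, not output from the repo; the viewpoint ID is invented and merely matches the 32-hex pattern the parser expects), one step under the new prompt format could look like:

Thought: The instruction points toward the bedroom; the viewpoint on the right leads that way.
Action: action_maker
Action Input: "0ab1c2d3e4f5a6b7c8d9e0f1a2b3c4d5"
Observation: A bedroom with a bed and a nightstand; navigable viewpoints within 3 meters are listed.
Thought: I have reached the destination, I can stop.
Final Answer: Finished!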