Unverified commit 0cceabfc authored by Yiming Shi, committed by GitHub

Merge branch 'master' into move_to_keraslayers_fasterrcnn_fpn_keras_feature_extractor

parents 17821c0d 39ee0ac9
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configs for stanford navigation environment.
Base config for stanford navigation enviornment.
"""
import numpy as np
import src.utils as utils
import datasets.nav_env as nav_env


def nav_env_base_config():
  """Returns the base config for Stanford navigation environment.

  Returns:
    Base config for Stanford navigation environment.
  """
  robot = utils.Foo(radius=15,
                    base=10,
                    height=140,
                    sensor_height=120,
                    camera_elevation_degree=-15)

  env = utils.Foo(padding=10,
                  resolution=5,
                  num_point_threshold=2,
                  valid_min=-10,
                  valid_max=200,
                  n_samples_per_face=200)

  camera_param = utils.Foo(width=225,
                           height=225,
                           z_near=0.05,
                           z_far=20.0,
                           fov=60.,
                           modalities=['rgb'],
                           img_channels=3)

  data_augment = utils.Foo(lr_flip=0,
                           delta_angle=0.5,
                           delta_xy=4,
                           relight=True,
                           relight_fast=False,
                           structured=False)  # if True, uses the same perturb for the whole episode.

  outputs = utils.Foo(images=True,
                      rel_goal_loc=False,
                      loc_on_map=True,
                      gt_dist_to_goal=True,
                      ego_maps=False,
                      ego_goal_imgs=False,
                      egomotion=False,
                      visit_count=False,
                      analytical_counts=False,
                      node_ids=True,
                      readout_maps=False)

  # class_map_names=['board', 'chair', 'door', 'sofa', 'table']
  class_map_names = ['chair', 'door', 'table']
  semantic_task = utils.Foo(class_map_names=class_map_names,
                            pix_distance=16,
                            sampling='uniform')

  # Time per iteration for CMP is 0.82 seconds per episode, with 3.4s overhead
  # per batch.
  task_params = utils.Foo(max_dist=32,
                          step_size=8,
                          num_steps=40,
                          num_actions=4,
                          batch_size=4,
                          building_seed=0,
                          num_goals=1,
                          img_height=None,
                          img_width=None,
                          img_channels=None,
                          modalities=None,
                          outputs=outputs,
                          map_scales=[1.],
                          map_crop_sizes=[64],
                          rel_goal_loc_dim=4,
                          base_class='Building',
                          task='map+plan',
                          n_ori=4,
                          type='room_to_room_many',
                          data_augment=data_augment,
                          room_regex='^((?!hallway).)*$',
                          toy_problem=False,
                          map_channels=1,
                          gt_coverage=False,
                          input_type='maps',
                          full_information=False,
                          aux_delta_thetas=[],
                          semantic_task=semantic_task,
                          num_history_frames=0,
                          node_ids_dim=1,
                          perturbs_dim=4,
                          map_resize_method='linear_noantialiasing',
                          readout_maps_channels=1,
                          readout_maps_scales=[],
                          readout_maps_crop_sizes=[],
                          n_views=1,
                          reward_time_penalty=0.1,
                          reward_at_goal=1.,
                          discount_factor=0.99,
                          rejection_sampling_M=100,
                          min_dist=None)

  navtask_args = utils.Foo(
      building_names=['area1_gates_wingA_floor1_westpart'],
      env_class=nav_env.VisualNavigationEnv,
      robot=robot,
      task_params=task_params,
      env=env,
      camera_param=camera_param,
      cache_rooms=True)
  return navtask_args
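The image-related fields of `task_params` are deliberately left as `None` in the base config; a derived config is expected to fill them in before use. A minimal sketch of that pattern (illustration only, not part of the original file), using only fields defined in `nav_env_base_config()` above:

```python
navtask = nav_env_base_config()
# Fill the unset image fields from the camera parameters; utils.Foo is the
# repo's simple attribute container, so plain attribute assignment works.
navtask.task_params.modalities = navtask.camera_param.modalities
navtask.task_params.img_height = navtask.camera_param.height
navtask.task_params.img_width = navtask.camera_param.width
navtask.task_params.img_channels = navtask.camera_param.img_channels
```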
### Pre-Trained Models
We provide the following pre-trained models:

Config Name | Checkpoint | Mean Dist. | 50%ile Dist. | 75%ile Dist. | Success (%) |
:-: | :-: | :-: | :-: | :-: | :-: |
cmp.lmap_Msc.clip5.sbpd_d_r2r | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_d_r2r.tar) | 4.79 | 0 | 1 | 78.9 |
cmp.lmap_Msc.clip5.sbpd_rgb_r2r | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_rgb_r2r.tar) | 7.74 | 0 | 14 | 62.4 |
cmp.lmap_Msc.clip5.sbpd_d_ST | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_d_ST.tar) | 10.67 | 9 | 19 | 39.7 |
cmp.lmap_Msc.clip5.sbpd_rgb_ST | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_rgb_ST.tar) | 11.27 | 10 | 19 | 35.6 |
cmp.lmap_Msc.clip5.sbpd_d_r2r_h0_64_80 | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_d_r2r_h0_64_80.tar) | 11.6 | 0 | 19 | 66.9 |
bl.v2.noclip.sbpd_d_r2r | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_d_r2r.tar) | 5.90 | 0 | 6 | 71.2 |
bl.v2.noclip.sbpd_rgb_r2r | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_rgb_r2r.tar) | 10.21 | 1 | 21 | 53.4 |
bl.v2.noclip.sbpd_d_ST | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_d_ST.tar) | 13.29 | 14 | 23 | 28.0 |
bl.v2.noclip.sbpd_rgb_ST | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_rgb_ST.tar) | 13.37 | 13 | 20 | 24.2 |
bl.v2.noclip.sbpd_d_r2r_h0_64_80 | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_d_r2r_h0_64_80.tar) | 15.30 | 0 | 29 | 57.9 |
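Each checkpoint above is a plain tarball. A minimal Python 3 sketch for fetching and unpacking one (only the URLs come from the table; the extraction directory is an arbitrary choice):

```python
import tarfile
import urllib.request

# Any config name from the table works; the URL pattern is the same for all.
name = 'cmp.lmap_Msc.clip5.sbpd_d_r2r'
url = ('http://download.tensorflow.org/models/cognitive_mapping_and_planning/'
       '2017_04_16/' + name + '.tar')
path, _ = urllib.request.urlretrieve(url, name + '.tar')
with tarfile.open(path) as tar:
  tar.extractall('pretrained')  # Checkpoint files land under ./pretrained/.
```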
10c10
< from OpenGL import platform, constant, arrays
---
> from OpenGL import platform, constant, arrays, contextdata
249a250
> from OpenGL._bytes import _NULL_8_BYTE
399c400
< array = ArrayDatatype.asArray( pointer, type )
---
> array = arrays.ArrayDatatype.asArray( pointer, type )
405c406
< ArrayDatatype.voidDataPointer( array )
---
> arrays.ArrayDatatype.voidDataPointer( array )
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
echo $VIRTUAL_ENV
patch $VIRTUAL_ENV/local/lib/python2.7/site-packages/OpenGL/GLES2/VERSION/GLES2_2_0.py patches/GLES2_2_0.py.patch
patch $VIRTUAL_ENV/local/lib/python2.7/site-packages/OpenGL/platform/ctypesloader.py patches/ctypesloader.py.patch
45c45,46
< return dllType( name, mode )
---
> print './' + name
> return dllType( './' + name, mode )
47,48c48,53
< err.args += (name,fullName)
< raise
---
> try:
> print name
> return dllType( name, mode )
> except:
> err.args += (name,fullName)
> raise
// This shader computes per-pixel depth (-z coordinate in the camera space, or
// orthogonal distance to the camera plane). The result is multiplied by the
// `kFixedPointFraction` constant and is encoded to RGB channels as an integer
// (R being the least significant byte).
#ifdef GL_ES
#ifdef GL_FRAGMENT_PRECISION_HIGH
precision highp float;
#else
precision mediump float;
#endif
#endif
const float kFixedPointFraction = 1000.0;
varying float vDepth;
void main(void) {
  float d = vDepth;
  // Encode the depth to RGB.
  d *= (kFixedPointFraction / 255.0);
  gl_FragColor.r = mod(d, 1.0);
  d = (d - gl_FragColor.r) / 255.0;
  gl_FragColor.g = mod(d, 1.0);
  d = (d - gl_FragColor.g) / 255.0;
  gl_FragColor.b = mod(d, 1.0);
  gl_FragColor.a = 1.0;
}
uniform mat4 uViewMatrix;
uniform mat4 uProjectionMatrix;
attribute vec3 aPosition;
varying float vDepth;
void main(void) {
  vec4 worldPosition = vec4(aPosition, 1.0);
  vec4 viewPosition = uViewMatrix * worldPosition;
  gl_Position = uProjectionMatrix * viewPosition;
  // Orthogonal depth is simply -z in the camera space.
  vDepth = -viewPosition.z;
}
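The encoding in the fragment shader above is easy to invert on the CPU: the three byte channels form a base-255 integer (R being the least significant digit), scaled by `kFixedPointFraction`. A minimal NumPy sketch of a matching decoder (an illustration, not code from this repo), assuming the framebuffer is read back as a uint8 RGB image:

```python
import numpy as np

K_FIXED_POINT_FRACTION = 1000.0  # Must match the shader constant.

def decode_depth(rgb):
  """Inverts the shader's fixed-point RGB depth encoding.

  Args:
    rgb: uint8 array of shape (H, W, 3); R is the least significant byte.

  Returns:
    float32 depth map in the original units (-z in camera space).
  """
  rgb = rgb.astype(np.float32)
  # In the shader: d = vDepth * K / 255, then R = round(255 * frac(d)),
  # G = floor(d) mod 255, B = floor(floor(d) / 255) mod 255, so
  # vDepth = (R + 255*G + 255**2*B) / K up to quantization error.
  d = rgb[..., 0] + 255.0 * rgb[..., 1] + 255.0**2 * rgb[..., 2]
  return d / K_FIXED_POINT_FRACTION
```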
precision highp float;
varying vec4 vColor;
varying vec2 vTextureCoord;
uniform sampler2D uTexture;
void main(void) {
  vec4 color = vColor;
  // Note: the interpolated vertex color is immediately overwritten by the
  // texture lookup, so only the texture determines the fragment color here.
  color = texture2D(uTexture, vTextureCoord);
  gl_FragColor = color;
}
uniform mat4 uViewMatrix;
uniform mat4 uProjectionMatrix;
uniform vec4 uColor;
attribute vec4 aColor;
attribute vec3 aPosition;
attribute vec2 aTextureCoord;
varying vec4 vColor;
varying vec2 vTextureCoord;
void main(void) {
  vec4 worldPosition = vec4(aPosition, 1.0);
  gl_Position = uProjectionMatrix * (uViewMatrix * worldPosition);
  vColor = aColor * uColor;
  vTextureCoord = aTextureCoord;
}