Skip to content

Commit 4b2da8f

Browse files
author
The TensorFlow Datasets Authors
committed
Automated documentation update.
PiperOrigin-RevId: 670284563
1 parent 27e8df9 commit 4b2da8f

File tree

67 files changed

+746
-631
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

67 files changed

+746
-631
lines changed

docs/catalog/_toc.yaml

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -610,6 +610,9 @@ toc:
610610
title: databricks_dolly
611611
- path: /datasets/catalog/dices
612612
title: dices
613+
- path: /datasets/catalog/dolma
614+
status: nightly
615+
title: dolma
613616
- path: /datasets/catalog/e2e_cleaned
614617
title: e2e_cleaned
615618
- path: /datasets/catalog/irc_disentanglement
@@ -668,6 +671,9 @@ toc:
668671
title: bot_adversarial_dialogue
669672
- path: /datasets/catalog/dices
670673
title: dices
674+
- path: /datasets/catalog/dolma
675+
status: nightly
676+
title: dolma
671677
- path: /datasets/catalog/e2e_cleaned
672678
title: e2e_cleaned
673679
- path: /datasets/catalog/imdb_reviews
@@ -1358,6 +1364,9 @@ toc:
13581364
title: dices
13591365
- path: /datasets/catalog/doc_nli
13601366
title: doc_nli
1367+
- path: /datasets/catalog/dolma
1368+
status: nightly
1369+
title: dolma
13611370
- path: /datasets/catalog/dolphin_number_word
13621371
title: dolphin_number_word
13631372
- path: /datasets/catalog/drop

docs/catalog/asu_table_top_converted_externally_to_rlds.md

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -48,31 +48,31 @@ FeaturesDict({
4848
'file_path': Text(shape=(), dtype=string),
4949
}),
5050
'steps': Dataset({
51-
'action': Tensor(shape=(7,), dtype=float32),
52-
'action_delta': Tensor(shape=(7,), dtype=float32),
51+
'action': Tensor(shape=(7,), dtype=float32, description=Robot action, consists of [7x joint velocities, 2x gripper velocities, 1x terminate episode].),
52+
'action_delta': Tensor(shape=(7,), dtype=float32, description=Robot delta action, consists of [7x joint velocities, 2x gripper velocities, 1x terminate episode].),
5353
'action_inst': Text(shape=(), dtype=string),
54-
'discount': Scalar(shape=(), dtype=float32),
54+
'discount': Scalar(shape=(), dtype=float32, description=Discount if provided, default to 1.),
5555
'goal_object': Text(shape=(), dtype=string),
5656
'ground_truth_states': FeaturesDict({
57-
'EE': Tensor(shape=(6,), dtype=float32),
58-
'bottle': Tensor(shape=(6,), dtype=float32),
59-
'bread': Tensor(shape=(6,), dtype=float32),
60-
'coke': Tensor(shape=(6,), dtype=float32),
61-
'cube': Tensor(shape=(6,), dtype=float32),
62-
'milk': Tensor(shape=(6,), dtype=float32),
63-
'pepsi': Tensor(shape=(6,), dtype=float32),
57+
'EE': Tensor(shape=(6,), dtype=float32, description=xyzrpy),
58+
'bottle': Tensor(shape=(6,), dtype=float32, description=xyzrpy),
59+
'bread': Tensor(shape=(6,), dtype=float32, description=xyzrpy),
60+
'coke': Tensor(shape=(6,), dtype=float32, description=xyzrpy),
61+
'cube': Tensor(shape=(6,), dtype=float32, description=xyzrpy),
62+
'milk': Tensor(shape=(6,), dtype=float32, description=xyzrpy),
63+
'pepsi': Tensor(shape=(6,), dtype=float32, description=xyzrpy),
6464
}),
6565
'is_first': bool,
6666
'is_last': bool,
6767
'is_terminal': bool,
68-
'language_embedding': Tensor(shape=(512,), dtype=float32),
68+
'language_embedding': Tensor(shape=(512,), dtype=float32, description=Kona language embedding. See https://tfhub.dev/google/universal-sentence-encoder-large/5),
6969
'language_instruction': Text(shape=(), dtype=string),
7070
'observation': FeaturesDict({
71-
'image': Image(shape=(224, 224, 3), dtype=uint8),
72-
'state': Tensor(shape=(7,), dtype=float32),
73-
'state_vel': Tensor(shape=(7,), dtype=float32),
71+
'image': Image(shape=(224, 224, 3), dtype=uint8, description=Main camera RGB observation.),
72+
'state': Tensor(shape=(7,), dtype=float32, description=Robot state, consists of [6x robot joint angles, 1x gripper position].),
73+
'state_vel': Tensor(shape=(7,), dtype=float32, description=Robot joint velocity, consists of [6x robot joint angles, 1x gripper position].),
7474
}),
75-
'reward': Scalar(shape=(), dtype=float32),
75+
'reward': Scalar(shape=(), dtype=float32, description=Reward if provided, 1 on final step for demos.),
7676
}),
7777
})
7878
```

docs/catalog/austin_buds_dataset_converted_externally_to_rlds.md

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -48,19 +48,19 @@ FeaturesDict({
4848
'file_path': Text(shape=(), dtype=string),
4949
}),
5050
'steps': Dataset({
51-
'action': Tensor(shape=(7,), dtype=float32),
52-
'discount': Scalar(shape=(), dtype=float32),
51+
'action': Tensor(shape=(7,), dtype=float32, description=Robot action, consists of [6x end effector delta pose, 1x gripper position].),
52+
'discount': Scalar(shape=(), dtype=float32, description=Discount if provided, default to 1.),
5353
'is_first': bool,
5454
'is_last': bool,
5555
'is_terminal': bool,
56-
'language_embedding': Tensor(shape=(512,), dtype=float32),
56+
'language_embedding': Tensor(shape=(512,), dtype=float32, description=Kona language embedding. See https://tfhub.dev/google/universal-sentence-encoder-large/5),
5757
'language_instruction': Text(shape=(), dtype=string),
5858
'observation': FeaturesDict({
59-
'image': Image(shape=(128, 128, 3), dtype=uint8),
60-
'state': Tensor(shape=(24,), dtype=float32),
61-
'wrist_image': Image(shape=(128, 128, 3), dtype=uint8),
59+
'image': Image(shape=(128, 128, 3), dtype=uint8, description=Main camera RGB observation.),
60+
'state': Tensor(shape=(24,), dtype=float32, description=Robot state, consists of [7x robot joint angles, 1x gripper position, 16x robot end-effector homogeneous matrix].),
61+
'wrist_image': Image(shape=(128, 128, 3), dtype=uint8, description=Wrist camera RGB observation.),
6262
}),
63-
'reward': Scalar(shape=(), dtype=float32),
63+
'reward': Scalar(shape=(), dtype=float32, description=Reward if provided, 1 on final step for demos.),
6464
}),
6565
})
6666
```

docs/catalog/austin_sailor_dataset_converted_externally_to_rlds.md

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -48,22 +48,22 @@ FeaturesDict({
4848
'file_path': Text(shape=(), dtype=string),
4949
}),
5050
'steps': Dataset({
51-
'action': Tensor(shape=(7,), dtype=float32),
52-
'discount': Scalar(shape=(), dtype=float32),
51+
'action': Tensor(shape=(7,), dtype=float32, description=Robot action, consists of [3x ee relative pos, 3x ee relative rotation, 1x gripper action].),
52+
'discount': Scalar(shape=(), dtype=float32, description=Discount if provided, default to 1.),
5353
'is_first': bool,
5454
'is_last': bool,
5555
'is_terminal': bool,
56-
'language_embedding': Tensor(shape=(512,), dtype=float32),
56+
'language_embedding': Tensor(shape=(512,), dtype=float32, description=Kona language embedding. See https://tfhub.dev/google/universal-sentence-encoder-large/5),
5757
'language_instruction': Text(shape=(), dtype=string),
5858
'observation': FeaturesDict({
59-
'image': Image(shape=(128, 128, 3), dtype=uint8),
60-
'state': Tensor(shape=(8,), dtype=float32),
61-
'state_ee': Tensor(shape=(16,), dtype=float32),
62-
'state_gripper': Tensor(shape=(1,), dtype=float32),
63-
'state_joint': Tensor(shape=(7,), dtype=float32),
64-
'wrist_image': Image(shape=(128, 128, 3), dtype=uint8),
59+
'image': Image(shape=(128, 128, 3), dtype=uint8, description=Main camera RGB observation.),
60+
'state': Tensor(shape=(8,), dtype=float32, description=Default robot state, consists of [3x robot ee pos, 3x ee quat, 1x gripper state].),
61+
'state_ee': Tensor(shape=(16,), dtype=float32, description=End-effector state, represented as 4x4 homogeneous transformation matrix of ee pose.),
62+
'state_gripper': Tensor(shape=(1,), dtype=float32, description=Robot gripper opening width. Ranges between ~0 (closed) to ~0.077 (open)),
63+
'state_joint': Tensor(shape=(7,), dtype=float32, description=Robot 7-dof joint information (not used in original SAILOR dataset).),
64+
'wrist_image': Image(shape=(128, 128, 3), dtype=uint8, description=Wrist camera RGB observation.),
6565
}),
66-
'reward': Scalar(shape=(), dtype=float32),
66+
'reward': Scalar(shape=(), dtype=float32, description=True on last step of the episode.),
6767
}),
6868
})
6969
```

docs/catalog/austin_sirius_dataset_converted_externally_to_rlds.md

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -48,24 +48,24 @@ FeaturesDict({
4848
'file_path': Text(shape=(), dtype=string),
4949
}),
5050
'steps': Dataset({
51-
'action': Tensor(shape=(7,), dtype=float32),
52-
'action_mode': Tensor(shape=(1,), dtype=float32),
53-
'discount': Scalar(shape=(), dtype=float32),
54-
'intv_label': Tensor(shape=(1,), dtype=float32),
51+
'action': Tensor(shape=(7,), dtype=float32, description=Robot action, consists of [3x ee relative pos, 3x ee relative rotation, 1x gripper action].),
52+
'action_mode': Tensor(shape=(1,), dtype=float32, description=Type of interaction. -1: initial human demonstration. 1: intervention. 0: autonomous robot execution (includes pre-intervention class)),
53+
'discount': Scalar(shape=(), dtype=float32, description=Discount if provided, default to 1.),
54+
'intv_label': Tensor(shape=(1,), dtype=float32, description=Same as action_modes, except 15 timesteps preceding intervention are labeled as -10.),
5555
'is_first': bool,
5656
'is_last': bool,
5757
'is_terminal': bool,
58-
'language_embedding': Tensor(shape=(512,), dtype=float32),
58+
'language_embedding': Tensor(shape=(512,), dtype=float32, description=Kona language embedding. See https://tfhub.dev/google/universal-sentence-encoder-large/5),
5959
'language_instruction': Text(shape=(), dtype=string),
6060
'observation': FeaturesDict({
61-
'image': Image(shape=(84, 84, 3), dtype=uint8),
62-
'state': Tensor(shape=(8,), dtype=float32),
63-
'state_ee': Tensor(shape=(16,), dtype=float32),
64-
'state_gripper': Tensor(shape=(1,), dtype=float32),
65-
'state_joint': Tensor(shape=(7,), dtype=float32),
66-
'wrist_image': Image(shape=(84, 84, 3), dtype=uint8),
61+
'image': Image(shape=(84, 84, 3), dtype=uint8, description=Main camera RGB observation.),
62+
'state': Tensor(shape=(8,), dtype=float32, description=Default robot state, consists of [7x robot joint state, 1x gripper state].),
63+
'state_ee': Tensor(shape=(16,), dtype=float32, description=End-effector state, represented as 4x4 homogeneous transformation matrix of ee pose.),
64+
'state_gripper': Tensor(shape=(1,), dtype=float32, description=Robot gripper opening width. Ranges between ~0 (closed) to ~0.077 (open)),
65+
'state_joint': Tensor(shape=(7,), dtype=float32, description=Robot 7-dof joint information.),
66+
'wrist_image': Image(shape=(84, 84, 3), dtype=uint8, description=Wrist camera RGB observation.),
6767
}),
68-
'reward': Scalar(shape=(), dtype=float32),
68+
'reward': Scalar(shape=(), dtype=float32, description=Reward if provided, 1 on final step for demos.),
6969
}),
7070
})
7171
```

docs/catalog/bc_z.md

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -47,23 +47,23 @@ Split | Examples
4747
FeaturesDict({
4848
'steps': Dataset({
4949
'action': FeaturesDict({
50-
'future/axis_angle_residual': Tensor(shape=(30,), dtype=float32),
51-
'future/target_close': Tensor(shape=(10,), dtype=int64),
52-
'future/xyz_residual': Tensor(shape=(30,), dtype=float32),
50+
'future/axis_angle_residual': Tensor(shape=(30,), dtype=float32, description=The next 10 actions for the rotation. Each action is a 3D delta to add to the current axis angle.),
51+
'future/target_close': Tensor(shape=(10,), dtype=int64, description=The next 10 actions for the gripper. Each action is the value the gripper closure should be changed to (notably it is *not* a delta.)),
52+
'future/xyz_residual': Tensor(shape=(30,), dtype=float32, description=The next 10 actions for the positions. Each action is a 3D delta to add to current position.),
5353
}),
5454
'is_first': bool,
5555
'is_last': bool,
5656
'is_terminal': bool,
5757
'observation': FeaturesDict({
5858
'episode_success': float32,
59-
'image': Image(shape=(171, 213, 3), dtype=uint8),
60-
'natural_language_embedding': Tensor(shape=(512,), dtype=float32),
59+
'image': Image(shape=(171, 213, 3), dtype=uint8, description=Camera image of the robot, downsampled 3x),
60+
'natural_language_embedding': Tensor(shape=(512,), dtype=float32, description=An embedding of the task via Universal Sentence Encoder (https://tfhub.dev/google/universal-sentence-encoder/4)),
6161
'natural_language_instruction': string,
6262
'present/autonomous': int64,
63-
'present/axis_angle': Tensor(shape=(3,), dtype=float32),
63+
'present/axis_angle': Tensor(shape=(3,), dtype=float32, description=The current rotation of the end effector in axis-angle representation.),
6464
'present/intervention': int64,
65-
'present/sensed_close': Tensor(shape=(1,), dtype=float32),
66-
'present/xyz': Tensor(shape=(3,), dtype=float32),
65+
'present/sensed_close': Tensor(shape=(1,), dtype=float32, description=How much the gripper is currently closed. Scaled from 0 to 1, but not all values from 0 to 1 are reachable. The range in the data is about 0.2 to 1),
66+
'present/xyz': Tensor(shape=(3,), dtype=float32, description=The current position of the end effector in axis-angle representation, in robot frame),
6767
'sequence_length': int64,
6868
}),
6969
'reward': Scalar(shape=(), dtype=float32),

docs/catalog/berkeley_autolab_ur5.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -48,9 +48,9 @@ FeaturesDict({
4848
'steps': Dataset({
4949
'action': FeaturesDict({
5050
'gripper_closedness_action': float32,
51-
'rotation_delta': Tensor(shape=(3,), dtype=float32),
51+
'rotation_delta': Tensor(shape=(3,), dtype=float32, description=Delta change in roll, pitch, yaw.),
5252
'terminate_episode': float32,
53-
'world_vector': Tensor(shape=(3,), dtype=float32),
53+
'world_vector': Tensor(shape=(3,), dtype=float32, description=Delta change in XYZ.),
5454
}),
5555
'is_first': bool,
5656
'is_last': bool,
@@ -61,7 +61,7 @@ FeaturesDict({
6161
'image_with_depth': Image(shape=(480, 640, 1), dtype=float32),
6262
'natural_language_embedding': Tensor(shape=(512,), dtype=float32),
6363
'natural_language_instruction': string,
64-
'robot_state': Tensor(shape=(15,), dtype=float32),
64+
'robot_state': Tensor(shape=(15,), dtype=float32, description=Explanation of the robot state can be found at https://sites.google.com/corp/view/berkeley-ur5),
6565
}),
6666
'reward': Scalar(shape=(), dtype=float32),
6767
}),

docs/catalog/berkeley_cable_routing.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,9 +47,9 @@ Split | Examples
4747
FeaturesDict({
4848
'steps': Dataset({
4949
'action': FeaturesDict({
50-
'rotation_delta': Tensor(shape=(3,), dtype=float32),
50+
'rotation_delta': Tensor(shape=(3,), dtype=float32, description=Angular velocity about the z axis.),
5151
'terminate_episode': float32,
52-
'world_vector': Tensor(shape=(3,), dtype=float32),
52+
'world_vector': Tensor(shape=(3,), dtype=float32, description=Velocity in XYZ.),
5353
}),
5454
'is_first': bool,
5555
'is_last': bool,

docs/catalog/berkeley_fanuc_manipulation.md

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -48,20 +48,20 @@ FeaturesDict({
4848
'file_path': Text(shape=(), dtype=string),
4949
}),
5050
'steps': Dataset({
51-
'action': Tensor(shape=(6,), dtype=float32),
52-
'discount': Scalar(shape=(), dtype=float32),
51+
'action': Tensor(shape=(6,), dtype=float32, description=Robot action, consists of [dx, dy, dz] and [droll, dpitch, dyaw]),
52+
'discount': Scalar(shape=(), dtype=float32, description=Discount if provided, default to 1.),
5353
'is_first': bool,
5454
'is_last': bool,
5555
'is_terminal': bool,
56-
'language_embedding': Tensor(shape=(512,), dtype=float32),
56+
'language_embedding': Tensor(shape=(512,), dtype=float32, description=Kona language embedding. See https://tfhub.dev/google/universal-sentence-encoder-large/5),
5757
'language_instruction': Text(shape=(), dtype=string),
5858
'observation': FeaturesDict({
59-
'end_effector_state': Tensor(shape=(7,), dtype=float32),
60-
'image': Image(shape=(224, 224, 3), dtype=uint8),
61-
'state': Tensor(shape=(13,), dtype=float32),
62-
'wrist_image': Image(shape=(224, 224, 3), dtype=uint8),
59+
'end_effector_state': Tensor(shape=(7,), dtype=float32, description=Robot gripper end effector state, consists of [x, y, z] and 4x quaternion),
60+
'image': Image(shape=(224, 224, 3), dtype=uint8, description=Main camera RGB observation.),
61+
'state': Tensor(shape=(13,), dtype=float32, description=Robot joints state, consists of [6x robot joint angles, 1x gripper open status, 6x robot joint velocities].),
62+
'wrist_image': Image(shape=(224, 224, 3), dtype=uint8, description=Wrist camera RGB observation.),
6363
}),
64-
'reward': Scalar(shape=(), dtype=float32),
64+
'reward': Scalar(shape=(), dtype=float32, description=Reward if provided, 1 on final step for demos.),
6565
}),
6666
})
6767
```

docs/catalog/berkeley_gnm_cory_hall.md

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -48,21 +48,21 @@ FeaturesDict({
4848
'file_path': Text(shape=(), dtype=string),
4949
}),
5050
'steps': Dataset({
51-
'action': Tensor(shape=(2,), dtype=float64),
52-
'action_angle': Tensor(shape=(3,), dtype=float64),
53-
'discount': Scalar(shape=(), dtype=float64),
51+
'action': Tensor(shape=(2,), dtype=float64, description=Robot action, consists of 2x position),
52+
'action_angle': Tensor(shape=(3,), dtype=float64, description=Robot action, consists of 2x position, 1x yaw),
53+
'discount': Scalar(shape=(), dtype=float64, description=Discount if provided, default to 1.),
5454
'is_first': bool,
5555
'is_last': bool,
5656
'is_terminal': bool,
57-
'language_embedding': Tensor(shape=(512,), dtype=float32),
57+
'language_embedding': Tensor(shape=(512,), dtype=float32, description=Kona language embedding. See https://tfhub.dev/google/universal-sentence-encoder-large/5),
5858
'language_instruction': Text(shape=(), dtype=string),
5959
'observation': FeaturesDict({
60-
'image': Image(shape=(64, 85, 3), dtype=uint8),
61-
'position': Tensor(shape=(2,), dtype=float64),
62-
'state': Tensor(shape=(3,), dtype=float64),
63-
'yaw': Tensor(shape=(1,), dtype=float64),
60+
'image': Image(shape=(64, 85, 3), dtype=uint8, description=Main camera RGB observation.),
61+
'position': Tensor(shape=(2,), dtype=float64, description=Robot position),
62+
'state': Tensor(shape=(3,), dtype=float64, description=Robot state, consists of [2x position, 1x yaw]),
63+
'yaw': Tensor(shape=(1,), dtype=float64, description=Robot yaw),
6464
}),
65-
'reward': Scalar(shape=(), dtype=float64),
65+
'reward': Scalar(shape=(), dtype=float64, description=Reward if provided, 1 on final step for demos.),
6666
}),
6767
})
6868
```

0 commit comments

Comments (0)