haixuantao committed on
Commit
ad32c3d
1 Parent(s): 157706c

Upload 35 files

Browse files
.gitattributes CHANGED
@@ -53,3 +53,18 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ v0.0.1/videos/cam_bottom_episode_1.mp4 filter=lfs diff=lfs merge=lfs -text
57
+ v0.0.1/videos/cam_bottom_episode_2.mp4 filter=lfs diff=lfs merge=lfs -text
58
+ v0.0.1/videos/cam_bottom_episode_3.mp4 filter=lfs diff=lfs merge=lfs -text
59
+ v0.0.1/videos/cam_bottom_episode_4.mp4 filter=lfs diff=lfs merge=lfs -text
60
+ v0.0.1/videos/cam_left_wrist_episode_1.mp4 filter=lfs diff=lfs merge=lfs -text
61
+ v0.0.1/videos/cam_left_wrist_episode_2.mp4 filter=lfs diff=lfs merge=lfs -text
62
+ v0.0.1/videos/cam_left_wrist_episode_4.mp4 filter=lfs diff=lfs merge=lfs -text
63
+ v0.0.1/videos/cam_right_wrist_episode_1.mp4 filter=lfs diff=lfs merge=lfs -text
64
+ v0.0.1/videos/cam_right_wrist_episode_2.mp4 filter=lfs diff=lfs merge=lfs -text
65
+ v0.0.1/videos/cam_right_wrist_episode_3.mp4 filter=lfs diff=lfs merge=lfs -text
66
+ v0.0.1/videos/cam_right_wrist_episode_4.mp4 filter=lfs diff=lfs merge=lfs -text
67
+ v0.0.1/videos/cam_up_episode_1.mp4 filter=lfs diff=lfs merge=lfs -text
68
+ v0.0.1/videos/cam_up_episode_2.mp4 filter=lfs diff=lfs merge=lfs -text
69
+ v0.0.1/videos/cam_up_episode_3.mp4 filter=lfs diff=lfs merge=lfs -text
70
+ v0.0.1/videos/cam_up_episode_4.mp4 filter=lfs diff=lfs merge=lfs -text
v0.0.1/aloha-client/Cargo.toml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [package]
2
+ name = "aloha-client"
3
+ version = "0.1.0"
4
+ edition = "2021"
5
+
6
+ # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7
+
8
+ [dependencies]
9
+
10
+ serialport = "4.2.0"
11
+ rustypot = { git = "https://github.com/haixuanTao/rustypot" }
12
+ tokio = { version = "1.37.0", features = ["full"] }
13
+ eyre = "0.6.12"
14
+ dora-node-api = "0.3.3"
v0.0.1/aloha-client/src/main.rs ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ use dora_node_api::{
2
+ arrow::array::UInt32Array, dora_core::config::DataId, DoraNode, Event, IntoArrow,
3
+ };
4
+ use eyre::Result;
5
+ use rustypot::{device::xm, DynamixelSerialIO};
6
+ use std::time::Duration;
7
+
8
+ fn main() -> Result<()> {
9
+ let (mut node, mut events) = DoraNode::init_from_env()?;
10
+ let mut puppet_serial_port = serialport::new("/dev/ttyDXL_puppet_right", 1_000_000)
11
+ .timeout(Duration::from_millis(20))
12
+ .open()
13
+ .expect("Failed to open port");
14
+ let io = DynamixelSerialIO::v2();
15
+ xm::sync_write_torque_enable(
16
+ &io,
17
+ puppet_serial_port.as_mut(),
18
+ &[1, 2, 3, 4, 5, 6, 7, 8, 9],
19
+ &[1; 9],
20
+ )
21
+ .expect("Communication error");
22
+
23
+ while let Some(Event::Input {
24
+ id,
25
+ metadata: _,
26
+ data,
27
+ }) = events.recv()
28
+ {
29
+ match id.as_str() {
30
+ "puppet_goal_position" => {
31
+ let buffer: UInt32Array = data.to_data().into();
32
+ let target: &[u32] = buffer.values();
33
+ xm::sync_write_goal_position(
34
+ &io,
35
+ puppet_serial_port.as_mut(),
36
+ &[1, 2, 3, 4, 5, 6, 7, 8, 9],
37
+ &target,
38
+ )
39
+ .expect("Communication error");
40
+ }
41
+ "tick" => {
42
+ let pos = xm::sync_read_present_position(
43
+ &io,
44
+ puppet_serial_port.as_mut(),
45
+ &[1, 2, 3, 4, 5, 6, 7, 8, 9],
46
+ )
47
+ .expect("Communication error");
48
+ node.send_output(
49
+ DataId::from("puppet_position".to_owned()),
50
+ Default::default(),
51
+ pos.into_arrow(),
52
+ )?;
53
+ }
54
+ _ => todo!(),
55
+ };
56
+ }
57
+
58
+ Ok(())
59
+ }
v0.0.1/aloha-teleop/Cargo.toml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [package]
2
+ name = "aloha-teleop"
3
+ version = "0.1.0"
4
+ edition = "2021"
5
+
6
+ # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7
+
8
+ [dependencies]
9
+
10
+ serialport = "4.2.0"
11
+ rustypot = { git = "https://github.com/haixuanTao/rustypot" }
12
+ tokio = { version = "1.37.0", features = ["full"] }
13
+ eyre = "0.6.12"
14
+ clap = { version = "4.5.4", features = ["derive"] }
15
+ dora-node-api = "0.3.3"
v0.0.1/aloha-teleop/src/main.rs ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ use dora_node_api::{dora_core::config::DataId, DoraNode, IntoArrow, MetadataParameters};
2
+ use eyre::{Context, Result};
3
+ use rustypot::{device::xm, DynamixelSerialIO};
4
+ use serialport::SerialPort;
5
+ use std::{
6
+ sync::mpsc,
7
+ time::{Duration, Instant},
8
+ };
9
+
10
+ static MAX_MASTER_GRIPER: u32 = 2554;
11
+ static MAX_PUPPET_GRIPER: u32 = 3145;
12
+
13
+ static MIN_MASTER_GRIPER: u32 = 1965;
14
+ static MIN_PUPPET_GRIPER: u32 = 2500;
15
+ use clap::Parser;
16
+
17
+ /// Simple aloha teleop program for recording data
18
+ #[derive(Parser, Debug)]
19
+ #[command(version, about, long_about = None)]
20
+ struct Args {
21
+ #[arg(short, long, default_value = "/dev/ttyDXL_master_right")]
22
+ master_path: String,
23
+
24
+ #[arg(short, long, default_value = "/dev/ttyDXL_puppet_right")]
25
+ puppet_path: String,
26
+
27
+ #[arg(long, default_value = "1000000")]
28
+ master_baudrate: u32,
29
+
30
+ #[arg(long, default_value = "1000000")]
31
+ puppet_baudrate: u32,
32
+ }
33
+
34
+ enum State {
35
+ Position(Vec<u32>),
36
+ GoalPosition(Vec<u32>),
37
+ }
38
+
39
+ fn main_multithreaded(
40
+ io: DynamixelSerialIO,
41
+ mut master_serial_port: Box<dyn SerialPort>,
42
+ mut puppet_serial_port: Box<dyn SerialPort>,
43
+ ) -> Result<()> {
44
+ let (tx, rx) = mpsc::channel();
45
+ let (tx_dora, rx_dora) = mpsc::channel();
46
+ let tx_dora_read = tx_dora.clone();
47
+ std::thread::spawn(move || loop {
48
+ let now = Instant::now();
49
+ let pos = xm::sync_read_present_position(
50
+ &io,
51
+ master_serial_port.as_mut(),
52
+ &[1, 2, 3, 4, 5, 6, 7, 8, 9],
53
+ )
54
+ .expect("Read Communication error");
55
+ tx.send((now, pos.clone())).unwrap();
56
+ tx_dora_read.send(State::Position(pos)).unwrap();
57
+ });
58
+
59
+ let io = DynamixelSerialIO::v2();
60
+ let join = std::thread::spawn(move || {
61
+ while let Ok((_now, pos)) = rx.recv() {
62
+ // Compute linear interpolation for gripper as input and output range missmatch
63
+ let gripper = (pos[8] - MIN_MASTER_GRIPER) * (MAX_PUPPET_GRIPER - MIN_PUPPET_GRIPER)
64
+ / (MAX_MASTER_GRIPER - MIN_MASTER_GRIPER)
65
+ + MIN_PUPPET_GRIPER;
66
+ let mut target = pos;
67
+ target[8] = gripper;
68
+ xm::sync_write_goal_position(
69
+ &io,
70
+ puppet_serial_port.as_mut(),
71
+ &[1, 2, 3, 4, 5, 6, 7, 8, 9],
72
+ &target,
73
+ )
74
+ .expect("Write Communication error");
75
+ // println!("elapsed time: {:?}", now.elapsed());
76
+ tx_dora.send(State::GoalPosition(target)).unwrap();
77
+ }
78
+ });
79
+
80
+ if std::env::var("DORA_NODE_CONFIG").is_ok() {
81
+ let (mut node, mut events) = DoraNode::init_from_env()?;
82
+ while let Ok(target) = rx_dora.recv() {
83
+ let parameters = MetadataParameters::default();
84
+ match target {
85
+ State::Position(pos) => {
86
+ let output = DataId::from("puppet_goal_position".to_owned());
87
+ node.send_output(output.clone(), parameters, pos.into_arrow())?;
88
+ }
89
+ State::GoalPosition(pos) => {
90
+ let output = DataId::from("puppet_state".to_owned());
91
+ node.send_output(output.clone(), parameters, pos.into_arrow())?;
92
+ }
93
+ }
94
+ if events.recv_timeout(Duration::from_nanos(100)).is_none() {
95
+ println!("Events channel finished");
96
+ break;
97
+ }
98
+ }
99
+ } else {
100
+ join.join().unwrap();
101
+ };
102
+ Ok(())
103
+ }
104
+
105
+ fn main() -> Result<()> {
106
+ let args = Args::parse();
107
+ let master_serial_port = serialport::new(args.master_path, args.master_baudrate)
108
+ .timeout(Duration::from_millis(2))
109
+ .open()
110
+ .context("Failed to open port")?;
111
+ let mut puppet_serial_port = serialport::new(args.puppet_path, args.puppet_baudrate)
112
+ .timeout(Duration::from_millis(2))
113
+ .open()
114
+ .context("Failed to open port")?;
115
+ let io = DynamixelSerialIO::v2();
116
+ xm::sync_write_torque_enable(
117
+ &io,
118
+ puppet_serial_port.as_mut(),
119
+ &[1, 2, 3, 4, 5, 6, 7, 8, 9],
120
+ &[1; 9],
121
+ )
122
+ .expect("Communication error");
123
+
124
+ main_multithreaded(io, master_serial_port, puppet_serial_port)?;
125
+ Ok(())
126
+ }
v0.0.1/compress_image.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dora import Node
2
+ import shutil
3
+
4
+ FPS = 50
5
+
6
+ frame_id = 0
7
+
8
+ node = Node()
9
+
10
+ for event in node:
11
+ if event["TYPE"] != "INPUT":
12
+ break
13
+
14
+ result = {"path": f"videos/{fname}", "timestamp": frame_id / FPS}
15
+
16
+ frame_id += 1
17
+
18
+
19
+ tmp_imgs_dir = out_dir / "tmp_images"
20
+ save_images_concurrently(imgs_array, tmp_imgs_dir)
21
+
22
+ # encode images to a mp4 video
23
+ fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
24
+ video_path = "out" / "videos" / fname
25
+ encode_video_frames(tmp_imgs_dir, video_path, FPS)
26
+
27
+ # clean temporary images directory
28
+ shutil.rmtree(tmp_imgs_dir)
v0.0.1/episode_1.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb0cb717b54bda0a3460241096e9b5e974e9da44a89722959693c5a347af69d2
3
+ size 42285
v0.0.1/episode_2.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0350bb61ff20b4c9d4b67e1293b1c45ec82942fadbefaf51a8d29b3998cdbe66
3
+ size 36604
v0.0.1/episode_3.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c37d6ac89906fd6f313641ad371923c0f9044e45463deeb274b9003ad46eab5
3
+ size 33102
v0.0.1/episode_4.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61ce815a1d8a9907f6b0935884f25a8c1359835669e8598424f50610433884e4
3
+ size 70380
v0.0.1/keyboard_node.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pynput import keyboard
2
+ from pynput.keyboard import Key, Events
3
+ import pyarrow as pa
4
+ from dora import Node
5
+
6
+
7
+ node = Node()
8
+ buffer_text = ""
9
+ space = False
10
+ submitted_text = []
11
+ cursor = 1
12
+ with keyboard.Events() as events:
13
+ while True:
14
+ event = events.get(0.1)
15
+ if event is not None and isinstance(event, Events.Press):
16
+ if event.key == Key.space and space == False:
17
+ node.send_output("space", pa.array([cursor]))
18
+ space = True
19
+
20
+ elif event is not None and isinstance(event, Events.Release):
21
+ if event.key == Key.space:
22
+ node.send_output("space", pa.array([0]))
23
+ cursor += 1
24
+ space = False
25
+
26
+ if node.next(0.001) is None:
27
+ break
28
+ # 1
v0.0.1/lerobot_webcam_saver.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Dora node that dumps camera frames to per-episode PNG directories and
encodes each finished episode into an mp4 with ffmpeg (lerobot-style layout).

Inputs:
- `record_episode`: integer episode number; 0 means "not recording".
- `image`: a flat BGR frame of CAMERA_HEIGHT x CAMERA_WIDTH x 3 bytes.
Output:
- `saved_image`: {path, timestamp} record for each frame written.
"""

import os
import time
import numpy as np
import cv2
import pyarrow as pa
from pathlib import Path
from dora import Node
import subprocess

node = Node()

# Name used in file/directory names; set per camera in the dataflow yaml
# (e.g. "cam_left_wrist").
CAMERA_NAME = os.getenv("CAMERA_NAME", "camera")
CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480
FPS = 50

i = 0  # frame index within the current episode
episode = 0  # 0 means "not recording"
# Fix: the initial directory name used an extra "cam_" prefix
# (f"cam_{CAMERA_NAME}_...") while every later assignment uses
# f"{CAMERA_NAME}_..."; with CAMERA_NAME values like "cam_left_wrist" the old
# form produced "cam_cam_left_wrist...". Use the same format everywhere.
out_dir = "videos" / Path(f"{CAMERA_NAME}_episode_{episode}")

for event in node:
    event_type = event["type"]
    if event_type == "INPUT":
        if event["id"] == "record_episode":
            record_episode = event["value"].to_numpy()[0]
            print(f"Recording episode {record_episode}", flush=True)
            # Episode just ended: encode the dumped frames into a video.
            if episode != 0 and record_episode == 0:
                out_dir = "videos" / Path(f"{CAMERA_NAME}_episode_{episode}")
                fname = f"{CAMERA_NAME}_episode_{episode}.mp4"
                video_path = Path("videos") / fname
                # Encode asynchronously so frame handling is not blocked.
                ffmpeg_cmd = (
                    f"ffmpeg -r {FPS} "
                    "-f image2 "
                    "-loglevel error "
                    f"-i {str(out_dir / 'frame_%06d.png')} "
                    "-vcodec libx264 "
                    "-g 2 "
                    "-pix_fmt yuv444p "
                    f"{str(video_path)}"
                )
                print(ffmpeg_cmd, flush=True)
                subprocess.Popen(ffmpeg_cmd.split(" "), start_new_session=True)
                episode = record_episode

            # Episode just started: create the frame directory.
            elif episode == 0 and record_episode != 0:
                episode = record_episode
                out_dir = "videos" / Path(f"{CAMERA_NAME}_episode_{episode}")
                out_dir.mkdir(parents=True, exist_ok=True)
                i = 0
            else:
                continue

        elif event["id"] == "image":
            # Only record image when in episode.
            # Episode 0 is for not recording periods.
            if episode == 0:
                continue

            fname = f"{CAMERA_NAME}_episode_{episode}.mp4"
            node.send_output(
                "saved_image",
                pa.array([{"path": f"videos/{fname}", "timestamp": i / FPS}]),
                event["metadata"],
            )
            image = event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
            path = str(out_dir / f"frame_{i:06d}.png")
            cv2.imwrite(path, image)
            i += 1
v0.0.1/llm_op.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dora import DoraStatus
2
+ import pylcs
3
+ import os
4
+ import pyarrow as pa
5
+ from transformers import AutoModelForCausalLM, AutoTokenizer
6
+ import torch
7
+
8
+ import gc # garbage collect library
9
+ import re
10
+ import time
11
+
12
+ CHATGPT = False
13
+ MODEL_NAME_OR_PATH = "TheBloke/deepseek-coder-6.7B-instruct-GPTQ"
14
+
15
+ CODE_MODIFIER_TEMPLATE = """
16
+ ### Instruction
17
+ Respond with one block of modified code only in ```python block. No explaination.
18
+
19
+ ```python
20
+ {code}
21
+ ```
22
+
23
+ {user_message}
24
+
25
+ ### Response:
26
+ """
27
+
28
+
29
# Load the code-rewriting LLM once at module import.
# Fix: `device_map="auto"` already places the (GPTQ-quantized) weights via
# accelerate; chaining `.to("cuda:0")` on such a model is rejected/warned
# against by transformers, so the explicit move is dropped.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME_OR_PATH,
    device_map="auto",
    trust_remote_code=True,
    revision="main",
    max_length=1024,
)


tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True)
39
+
40
+
41
def extract_python_code_blocks(text):
    """Return the ```python fenced code blocks found in *text*.

    Falls back to an unterminated fence (block running to end of text), and
    finally to the whole text when no fence is present at all.
    """
    fenced = re.findall(r"```python\n(.*?)\n```", text, re.DOTALL)
    if fenced:
        return fenced

    # No closed fence: accept a block that runs to the end of the text.
    open_ended = re.findall(r"```python\n(.*?)(?:\n```|$)", text, re.DOTALL)
    if not open_ended:
        # No python fence at all: treat the whole text as the code block.
        return [text]

    # An unterminated match may have swallowed a trailing junk line; drop it.
    return [remove_last_line(open_ended[0])]
62
+
63
+
64
def remove_last_line(python_code):
    """Drop the final line of a Python source string.

    Splitting on "\\n" always yields at least one element, so the original's
    split/pop/join is equivalent to keeping everything before the last
    newline — and returning "" when there is no newline at all.
    """
    head, _sep, _last = python_code.rpartition("\n")
    return head
78
+
79
+
80
def calculate_similarity(source, target):
    """Similarity score in [0, 1] between two strings.

    Uses edit distance normalized by the length of the longer string, so
    identical strings score 1 and completely different strings approach 0.
    """
    max_length = max(len(source), len(target))
    # Fix: two empty strings are identical, but the original divided by zero
    # in that case (max_length == 0).
    if max_length == 0:
        return 1.0
    edit_distance = pylcs.edit_distance(source, target)
    return 1 - (edit_distance / max_length)
90
+
91
+
92
def find_best_match_location(source_code, target_block):
    """Locate the span of `source_code` most similar to `target_block`.

    Scores every window of source lines at least as long as the target with
    `calculate_similarity` and returns the (start, end) character offsets of
    the best-scoring window.
    """
    source_lines = source_code.split("\n")
    target_lines = target_block.split("\n")

    best_score = 0
    best_first = 0
    best_last = -1

    # Try every line window [first, last) large enough to hold the target.
    for first in range(len(source_lines) - len(target_lines) + 1):
        for last in range(first + len(target_lines), len(source_lines) + 1):
            window = "\n".join(source_lines[first:last])
            score = calculate_similarity(window, target_block)
            if score > best_score:
                best_score = score
                best_first = first
                best_last = last

    # Convert line indices back to character offsets for replacement; the +1
    # skips the newline that precedes a non-initial start line.
    char_start_index = len("\n".join(source_lines[:best_first])) + (
        1 if best_first > 0 else 0
    )
    char_end_index = len("\n".join(source_lines[:best_last]))

    return char_start_index, char_end_index
121
+
122
+
123
def replace_code_in_source(source_code, replacement_block: str):
    """Splice `replacement_block` over the most similar region of `source_code`."""
    replacement_block = extract_python_code_blocks(replacement_block)[0]
    start_index, end_index = find_best_match_location(source_code, replacement_block)
    # NOTE(review): both offsets are non-negative lengths, so this guard can
    # never fail — even when no window matched, the splice still happens;
    # confirm that is the intended behavior.
    if start_index != -1 and end_index != -1:
        prefix = source_code[:start_index]
        suffix = source_code[end_index:]
        return prefix + replacement_block + suffix
    return source_code
137
+
138
+
139
class Operator:
    """Dora operator that rewrites `policy.py` in place from a natural-language
    instruction, using the module-level LLM and tokenizer."""

    def __init__(self) -> None:
        # Kept for parity with the other operators; not read in this class.
        self.policy_init = False

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        """On a `text` input, ask the LLM for a modified policy and write it back."""
        global model, tokenizer
        if dora_event["type"] == "INPUT" and dora_event["id"] == "text":
            user_message = dora_event["value"][0].as_py()

            # policy.py lives next to this file.
            policy_path = os.path.dirname(__file__) + "/policy.py"

            with open(policy_path, "r", encoding="utf8") as f:
                code = f.read()

            start_llm = time.time()
            output = self.ask_llm(
                CODE_MODIFIER_TEMPLATE.format(code=code, user_message=user_message)
            )

            # Merge the generated block into the existing source.
            source_code = replace_code_in_source(code, output)
            print("response time:", time.time() - start_llm, flush=True)

            print("response: ", output, flush=True)
            with open(policy_path, "w") as file:
                file.write(source_code)

            # Release GPU memory held by generation buffers.
            gc.collect()
            torch.cuda.empty_cache()

        return DoraStatus.CONTINUE

    def ask_llm(self, prompt):
        """Run one generation pass and return only the newly generated text."""
        tokenized = tokenizer(prompt, return_tensors="pt")
        input_ids = tokenized.input_ids.cuda()

        # Pass the attention mask explicitly (avoids HF warnings).
        attention_mask = tokenized.attention_mask.cuda()

        output = model.generate(
            inputs=input_ids,
            temperature=0.7,
            do_sample=True,
            top_p=0.95,
            top_k=40,
            max_new_tokens=512,
            attention_mask=attention_mask,
            eos_token_id=tokenizer.eos_token_id,
        )

        # Decode the full sequence, then strip the echoed prompt prefix.
        return tokenizer.decode(output[0], skip_special_tokens=True)[len(prompt) :]
204
+
205
+
206
if __name__ == "__main__":
    # Smoke test: run one synthetic "text" input through the operator,
    # targeting the policy.py that sits next to this file.
    op = Operator()

    policy_path = os.path.dirname(__file__) + "/policy.py"
    with open(policy_path, "r", encoding="utf8") as f:
        raw = f.read()

    op.on_event(
        {
            "type": "INPUT",
            "id": "text",
            "value": pa.array(
                [
                    {
                        "path": policy_path,
                        "user_message": "When I say suit up, get the hat and the get the food.",
                    },
                ]
            ),
            "metadata": [],
        },
        print,
    )
v0.0.1/plot_node.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import cv2
3
+ from dora import Node
4
+
5
+
6
+ IMAGE_WIDTH = int(os.getenv("IMAGE_WIDTH", "1280"))
7
+ IMAGE_HEIGHT = int(os.getenv("IMAGE_HEIGHT", "720"))
8
+ FONT = cv2.FONT_HERSHEY_SIMPLEX
9
+
10
+
11
+ node = Node()
12
+
13
+ joint = None
14
+ text = None
15
+
16
+ for event in node:
17
+ if event["type"] == "INPUT":
18
+ dora_id = event["id"]
19
+
20
+ if dora_id == "position":
21
+ joint = event["value"].to_numpy()
22
+ if "text" in dora_id:
23
+ text = event["value"][0].as_py()
24
+
25
+ if dora_id == "image":
26
+ image = (
27
+ event["value"].to_numpy().reshape((IMAGE_HEIGHT, IMAGE_WIDTH, 3)).copy()
28
+ )
29
+ if text is not None:
30
+ cv2.putText(
31
+ image,
32
+ f"Speech: {text}",
33
+ (20, 40),
34
+ FONT,
35
+ 0.5,
36
+ (190, 250, 0),
37
+ 2,
38
+ )
39
+
40
+ if joint is not None:
41
+ cv2.putText(
42
+ image,
43
+ f"pos: {joint}",
44
+ (20, 20),
45
+ FONT,
46
+ 0.5,
47
+ (190, 250, 100),
48
+ 2,
49
+ )
50
+
51
+ cv2.imshow("frame", image)
52
+ if cv2.waitKey(1) & 0xFF == ord("q"):
53
+ break
v0.0.1/policy.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pyarrow as pa
2
+ from dora import DoraStatus
3
+
4
+
5
class Operator:
    """Minimal policy stub: listens for speech transcripts and will eventually
    map them to one of the known high-level actions."""

    def __init__(self):
        # High-level actions this policy knows about.
        self.actions = ["get_food", "get_hat"]

    def on_event(self, event: dict, send_output) -> DoraStatus:
        """Handle one dora event; always keeps the dataflow running."""
        if event["type"] == "INPUT" and event["id"] == "speech":
            # Lower-cased transcript of the spoken command.
            text: str = event["value"][0].as_py().lower()
            # send_output("action", pa.array([""]))
        return DoraStatus.CONTINUE
v0.0.1/realsense_node.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Dora node that publishes color frames from an Intel RealSense camera."""
import pyrealsense2 as rs
import numpy as np
from dora import Node
import pyarrow as pa
import os
import cv2

IMAGE_WIDTH = int(os.getenv("IMAGE_WIDTH", "1280"))
IMAGE_HEIGHT = int(os.getenv("IMAGE_HEIGHT", "720"))
# Serial number of the camera to open; set per node in the dataflow yaml.
CAMERA_ID = os.getenv("CAMERA_ID")

# Configure a single BGR color stream at 15 fps.
pipe = rs.pipeline()
config = rs.config()
config.enable_device(CAMERA_ID)
config.enable_stream(rs.stream.color, IMAGE_WIDTH, IMAGE_HEIGHT, rs.format.bgr8, 15)
profile = pipe.start(config)

node = Node()

# Each dora event (e.g. a timer tick) triggers one frame grab + publish.
for event in node:
    frames = pipe.wait_for_frames()
    frame = np.asanyarray(frames.get_color_frame().get_data())
    node.send_output("image", pa.array(frame.ravel()))
    cv2.imshow(CAMERA_ID, frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
v0.0.1/record_2arms_teleop.yml ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ nodes:
2
+ # - id: teleop_left
3
+ # custom:
4
+ # source: cargo
5
+ # args: run --release -p aloha-teleop -- --master-path /dev/ttyDXL_master_left --puppet-path /dev/ttyDXL_puppet_left
6
+ # outputs:
7
+ # - puppet_goal_position
8
+ # - puppet_state
9
+
10
+ - id: teleop_right
11
+ custom:
12
+ source: cargo
13
+ args: run --release -p aloha-teleop -- --master-path /dev/ttyDXL_master_right --puppet-path /dev/ttyDXL_puppet_right
14
+ inputs:
15
+ heartbeat: dora/timer/millis/20
16
+ outputs:
17
+ - puppet_goal_position
18
+ - puppet_state
19
+
20
+ - id: dora-record
21
+ custom:
22
+ build: cargo install --git https://github.com/dora-rs/dora dora-record
23
+ source: dora-record
24
+ inputs:
25
+ # puppet_left_goal_position: teleop_left/puppet_goal_position
26
+ action: teleop_right/puppet_goal_position
27
+ # puppet_left_state: teleop_left/puppet_state
28
+ state: teleop_right/puppet_state
29
+ record_episode: keyboard/space
30
+ cam_left_wrist: cam_saver_left_wrist/saved_image
31
+ cam_right_wrist: cam_saver_right_wrist/saved_image
32
+ cam_bottom: cam_saver_bottom/saved_image
33
+ cam_high: cam_saver_high/saved_image
34
+
35
+ - id: cam_left_wrist
36
+ custom:
37
+ source: ../nodes/webcam.py
38
+ inputs:
39
+ tick: dora/timer/millis/20
40
+ outputs:
41
+ - image
42
+ envs:
43
+ CAMERA_INDEX: 2
44
+
45
+ - id: cam_right_wrist
46
+ custom:
47
+ source: ../nodes/webcam.py
48
+ inputs:
49
+ tick: dora/timer/millis/20
50
+ outputs:
51
+ - image
52
+ envs:
53
+ CAMERA_INDEX: 22
54
+
55
+ - id: cam_bottom
56
+ custom:
57
+ source: ../nodes/webcam.py
58
+ inputs:
59
+ tick: dora/timer/millis/20
60
+ outputs:
61
+ - image
62
+ envs:
63
+ CAMERA_INDEX: 8
64
+
65
+ - id: cam_high
66
+ custom:
67
+ source: ../nodes/webcam.py
68
+ inputs:
69
+ tick: dora/timer/millis/20
70
+ outputs:
71
+ - image
72
+ envs:
73
+ CAMERA_INDEX: 14
74
+
75
+ - id: keyboard
76
+ custom:
77
+ source: ../nodes/keyboard_node.py
78
+ inputs:
79
+ heartbeat: dora/timer/millis/20
80
+ outputs:
81
+ - space
82
+
83
+ - id: cam_saver_left_wrist
84
+ custom:
85
+ source: ../nodes/lerobot_webcam_saver.py
86
+ inputs:
87
+ image: cam_left_wrist/image
88
+ record_episode: keyboard/space
89
+ outputs:
90
+ - saved_image
91
+ envs:
92
+ CAMERA_NAME: cam_left_wrist
93
+
94
+ - id: cam_saver_right_wrist
95
+ custom:
96
+ source: ../nodes/lerobot_webcam_saver.py
97
+ inputs:
98
+ image: cam_right_wrist/image
99
+ record_episode: keyboard/space
100
+ outputs:
101
+ - saved_image
102
+ envs:
103
+ CAMERA_NAME: cam_right_wrist
104
+
105
+ - id: cam_saver_bottom
106
+ custom:
107
+ source: ../nodes/lerobot_webcam_saver.py
108
+ inputs:
109
+ image: cam_bottom/image
110
+ record_episode: keyboard/space
111
+ outputs:
112
+ - saved_image
113
+ envs:
114
+ CAMERA_NAME: cam_bottom
115
+
116
+ - id: cam_saver_high
117
+ custom:
118
+ source: ../nodes/lerobot_webcam_saver.py
119
+ inputs:
120
+ image: cam_up/image
121
+ record_episode: keyboard/space
122
+ outputs:
123
+ - saved_image
124
+ envs:
125
+ CAMERA_NAME: cam_high
v0.0.1/replay.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Dora node that replays recorded joint trajectories from parquet files,
sleeping between rows to reproduce the original timing."""
from dora import Node
import pandas as pd
import pyarrow as pa
import time


# Dataflow output carrying the joint goal positions to replay.
TOPIC = "puppet_goal_position"


if __name__ == "__main__":
    node = Node()

    for event in node:
        if event["type"] == "INPUT":
            # Each input value names an episode (parquet file stem) to replay.
            for action in event["value"]:
                print(action, flush=True)
                action = action.as_py()

                df = pd.read_parquet(action + ".parquet")

                current_time = df["timestamp_utc"].iloc[0]
                for _index, row in df.iterrows():
                    # Fix: the original used `.microseconds`, which is only
                    # the sub-second *component* of the Timedelta — any gap of
                    # one second or more was silently truncated. Use the full
                    # delta via `total_seconds()` instead.
                    delta_seconds = (
                        row["timestamp_utc"] - current_time
                    ).total_seconds()
                    current_time = row["timestamp_utc"]
                    time.sleep(max(delta_seconds, 0.0))
                    node.send_output(TOPIC, pa.array(row[TOPIC], type=pa.uint32()))
v0.0.1/videos/cam_bottom_episode_1.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e4f0bcfd515557c5f2bab4c3ab9017259e0bee53472219707a226007194cf30
3
+ size 3158732
v0.0.1/videos/cam_bottom_episode_2.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e10c84b3ef013fbff8e9c57ab11f0a70caf32be719aacc7c1cd5cf9968b79521
3
+ size 2678292
v0.0.1/videos/cam_bottom_episode_3.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be545dfb0cc4e44e3b0ff1d5332afb55dc7db71331f0a7feac289ab2802af877
3
+ size 2198902
v0.0.1/videos/cam_bottom_episode_4.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf203cf3af951a0750c000dcacfe5e5e9b60df0d35bc9c3a08e97ccfd42051ab
3
+ size 6130834
v0.0.1/videos/cam_left_wrist_episode_1.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc61cb434dd1e11ce5e9a4c620d847c1d15a02119ec358731989193ce2d180f6
3
+ size 2300015
v0.0.1/videos/cam_left_wrist_episode_2.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e804bc3ae0802ff2e6bc8664f595e39cb7f14806fde7a0e81ea7fc1d0752cb29
3
+ size 1133727
v0.0.1/videos/cam_left_wrist_episode_3.mp4 ADDED
Binary file (922 kB). View file
 
v0.0.1/videos/cam_left_wrist_episode_4.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4cb5314c5cf9ba0c99d44fb23d0dc7703eb153962c94666fb797f9806940199
3
+ size 2517780
v0.0.1/videos/cam_right_wrist_episode_1.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44f7a972481d14c291425d06edb6adcd58604a3f06aa7adeb0e1d48881b04a04
3
+ size 3803200
v0.0.1/videos/cam_right_wrist_episode_2.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b8c56eb557a192e5ca892e156102a9f5de80b52bd0281e34dbc105a9a8666c7
3
+ size 2467078
v0.0.1/videos/cam_right_wrist_episode_3.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:62a442a29ed41d0c94a815c79f4b89b580dd2e7c2344072f1894fe318e7a35c9
3
+ size 2297313
v0.0.1/videos/cam_right_wrist_episode_4.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33d95fb5203acc5a4b65896717db822c0836510998fa3f81d6fce72b491a081b
3
+ size 4973378
v0.0.1/videos/cam_up_episode_1.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ea20f9e369f5f26380403504d1701e3b2f1fface1ce8af88b7b8e950e4985be
3
+ size 3389350
v0.0.1/videos/cam_up_episode_2.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d2807059c207e75704445a58f70a22278f6398d6fd1b7ef2f59455cff531f57f
3
+ size 2830621
v0.0.1/videos/cam_up_episode_3.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b8e2672570044735cf32a28e9ad786701dca327118062002f7021290e3f32a5
3
+ size 2379028
v0.0.1/videos/cam_up_episode_4.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf09eaee125f7c244fdbd3a238d27e608ab5d87133186c3ab07876080460361b
3
+ size 6435591
v0.0.1/webcam.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Dora webcam node: grab one frame per input event and publish it flattened
on the `image` output; shows a placeholder frame when the camera is missing."""

import os
import time
import numpy as np
import cv2
import pyarrow as pa

from dora import Node

node = Node()

CAMERA_INDEX = int(os.getenv("CAMERA_INDEX", 0))
CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480
video_capture = cv2.VideoCapture(CAMERA_INDEX)
font = cv2.FONT_HERSHEY_SIMPLEX


for event in node:
    if event["type"] != "INPUT":
        continue
    ok, frame = video_capture.read()
    if not ok:
        # Publish a black banner frame instead of stopping the dataflow.
        frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8)
        cv2.putText(
            frame,
            "No Webcam was found at index %d" % (CAMERA_INDEX),
            (int(30), int(30)),
            font,
            0.75,
            (255, 255, 255),
            2,
            1,
        )
    node.send_output(
        "image",
        pa.array(frame.ravel()),
        event["metadata"],
    )
    cv2.imshow(str(CAMERA_INDEX), frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

video_capture.release()
v0.0.1/whisper_node.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Dora node: push-to-talk speech-to-text with OpenAI Whisper.

Holding right Alt records 5 s and publishes the transcript on `text_llm`;
right Ctrl records 3 s and publishes on `text_policy`.
"""
import pyarrow as pa
import whisper
from pynput import keyboard
from pynput.keyboard import Key, Events
from dora import Node

import torch
import numpy as np
import sounddevice as sd
import gc  # garbage collect library

# Fix/cleanup: the original imported `pyarrow as pa` twice; deduplicated.

model = whisper.load_model("base")

# Whisper expects 16 kHz mono audio.
SAMPLE_RATE = 16000

node = Node()


def get_text(duration) -> dict:
    """Record `duration` seconds from the microphone and transcribe them.

    Fix: the original annotation said `-> str`, but `model.transcribe`
    returns a result dict (the callers below read `result["text"]`).
    """
    # Blocking mono capture at Whisper's sample rate.
    audio_data = sd.rec(
        int(SAMPLE_RATE * duration),
        samplerate=SAMPLE_RATE,
        channels=1,
        dtype=np.int16,
        blocking=True,
    )

    # Normalize int16 PCM to float32 in [-1, 1).
    audio = audio_data.ravel().astype(np.float32) / 32768.0

    # Speech to text.
    audio = whisper.pad_or_trim(audio)
    return model.transcribe(audio, language="en")


# Poll the keyboard on every dora input event (push-to-talk hotkeys).
with keyboard.Events() as events:
    for dora_event in node:
        if dora_event["type"] == "INPUT":
            event = events.get(0.1)
            if (
                event is not None
                and (event.key == Key.alt_r or event.key == Key.ctrl_r)
                and isinstance(event, Events.Press)
            ):
                if event.key == Key.alt_r:
                    result = get_text(5)
                    node.send_output(
                        "text_llm", pa.array([result["text"]]), dora_event["metadata"]
                    )
                elif event.key == Key.ctrl_r:
                    result = get_text(3)
                    node.send_output(
                        "text_policy",
                        pa.array([result["text"]]),
                        dora_event["metadata"],
                    )