
Commit 5bb24fb

Add sample module ChannelDataPersister.
1 parent f3d04fe · commit 5bb24fb

3 files changed: +140 -2 lines changed

Cargo.toml

Lines changed: 3 additions & 2 deletions
@@ -1,8 +1,9 @@
 [workspace]
 
 members = [
-    "lightning",
-    "lightning-net-tokio",
+    "lightning",
+    "lightning-net-tokio",
+    "lightning-data-persister",
 ]
 
 # Our tests do actual crypo and lots of work, the tradeoff for -O1 is well worth it

lightning-data-persister/Cargo.toml

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
+[package]
+name = "lightning-data-persister"
+version = "0.0.1"
+authors = ["Valentine Wallace"]
+license = "Apache-2.0"
+edition = "2018"
+description = """
+Utilities to manage channel data persistence and retrieval.
+"""
+
+[dependencies]
+bitcoin = "0.23"
+lightning = { version = "0.0.11", path = "../lightning" }

lightning-data-persister/src/lib.rs

Lines changed: 124 additions & 0 deletions
@@ -0,0 +1,124 @@
+use lightning::ln::data_persister::ChannelDataPersister;
+use lightning::chain::transaction::OutPoint;
+use lightning::chain::keysinterface::ChannelKeys;
+use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr};
+use lightning::util::ser::{Writeable, Readable};
+use bitcoin::hash_types::{BlockHash, Txid};
+use bitcoin::hashes::hex::{ToHex, FromHex};
+use std::collections::HashMap;
+use std::fs;
+use std::io::{Error, ErrorKind, Cursor};
+use std::marker::PhantomData;
+
+/// LinuxPersister can persist channel data on disk on Linux machines, where
+/// each channel's data is stored in a file named after its outpoint.
+pub struct LinuxPersister<ChanSigner: ChannelKeys + Readable + Writeable> {
+	path_to_channel_data: String,
+	phantom: PhantomData<ChanSigner>, // TODO: is there a way around this?
+}
+
+impl<ChanSigner: ChannelKeys + Readable + Writeable> LinuxPersister<ChanSigner> {
+	/// Initialize a new LinuxPersister and set the path to the individual channels'
+	/// files.
+	pub fn new(path_to_channel_data: String) -> Self {
+		return Self {
+			path_to_channel_data,
+			phantom: PhantomData,
+		}
+	}
+
+	fn get_full_filepath(&self, funding_txo: OutPoint) -> String {
+		format!("{}/{}_{}", self.path_to_channel_data, funding_txo.txid.to_hex(), funding_txo.index)
+	}
+
+	fn write_channel_data(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChanSigner>) -> std::io::Result<()> {
+		// Do a crazy dance with lots of fsync()s to be overly cautious here...
+		// We never want to end up in a state where we've lost the old data, or end up using the
+		// old data on power loss after we've returned
+		// Note that this actually *isn't* enough (at least on Linux)! We need to fsync an fd with
+		// the containing dir, but Rust doesn't let us do that directly, sadly. TODO: Fix this with
+		// the libc crate!
+		let filename = self.get_full_filepath(funding_txo);
+		let tmp_filename = filename.clone() + ".tmp";
+
+		{
+			let mut f = fs::File::create(&tmp_filename)?;
+			monitor.write_for_disk(&mut f)?;
+			f.sync_all()?;
+		}
+		// We don't need to create a backup if didn't already have the file, but in any other case
+		// try to create the backup and expect failure on fs::copy() if eg there's a perms issue.
+		let need_bk = match fs::metadata(&filename) {
+			Ok(data) => {
+				if !data.is_file() { return Err(Error::new(ErrorKind::InvalidInput, "Filename given was not a file")); }
+				true
+			},
+			Err(e) => match e.kind() {
+				std::io::ErrorKind::NotFound => false,
+				_ => true,
+			}
+		};
+		let bk_filename = filename.clone() + ".bk";
+		if need_bk {
+			fs::copy(&filename, &bk_filename)?;
+			{
+				let f = fs::File::open(&bk_filename)?;
+				f.sync_all()?;
+			}
+		}
+		fs::rename(&tmp_filename, &filename)?;
+		{
+			let f = fs::File::open(&filename)?;
+			f.sync_all()?;
+		}
+		if need_bk {
+			fs::remove_file(&bk_filename)?;
+		}
+		Ok(())
+	}
+}
+
+impl<ChanSigner: ChannelKeys + Readable + Writeable + Send + Sync> ChannelDataPersister for LinuxPersister<ChanSigner> {
+	type Keys = ChanSigner;
+
+	fn persist_channel_data(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<Self::Keys>) -> Result<(), ChannelMonitorUpdateErr> {
+		match self.write_channel_data(funding_txo, monitor) {
+			Ok(_) => Ok(()),
+			Err(_) => Err(ChannelMonitorUpdateErr::TemporaryFailure)
+		}
+	}
+
+	fn update_channel_data(&self, funding_txo: OutPoint, _update: &ChannelMonitorUpdate, monitor: &ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr> {
+		match self.write_channel_data(funding_txo, monitor) {
+			Ok(_) => Ok(()),
+			Err(_) => Err(ChannelMonitorUpdateErr::TemporaryFailure)
+		}
+	}
+
+	fn load_channel_data(&self) -> Result<HashMap<OutPoint, ChannelMonitor<ChanSigner>>, ChannelMonitorUpdateErr> {
+		let mut res = HashMap::new();
+		for file_option in fs::read_dir(&self.path_to_channel_data).unwrap() {
+			let mut loaded = false;
+			let file = file_option.unwrap();
+			if let Some(filename) = file.file_name().to_str() {
+				if filename.is_ascii() && filename.len() > 65 {
+					if let Ok(txid) = Txid::from_hex(filename.split_at(64).0) {
+						if let Ok(index) = filename.split_at(65).1.split('.').next().unwrap().parse() {
+							if let Ok(contents) = fs::read(&file.path()) {
+								if let Ok((_, loaded_monitor)) = <(BlockHash, ChannelMonitor<ChanSigner>)>::read(&mut Cursor::new(&contents)) {
+									res.insert(OutPoint { txid, index }, loaded_monitor);
+									loaded = true;
+								}
+							}
+						}
+					}
+				}
+			}
+			if !loaded {
+				// TODO(val): this should prob error not just print something
+				println!("WARNING: Failed to read one of the channel monitor storage files! Check perms!");
+			}
+		}
+		Ok(res)
+	}
+}
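
For context, a minimal usage sketch follows; it is not part of this commit. It assumes InMemoryChannelKeys as the concrete ChannelKeys implementation and a storage directory that already exists; the path is a placeholder and error handling is elided.

use lightning::chain::keysinterface::InMemoryChannelKeys;
use lightning::ln::data_persister::ChannelDataPersister;
use lightning_data_persister::LinuxPersister;

fn main() {
	// Placeholder path for illustration; monitors are stored one per file,
	// named "<funding txid>_<output index>".
	let persister: LinuxPersister<InMemoryChannelKeys> =
		LinuxPersister::new("/tmp/channel_data".to_string());

	// On startup, read every previously persisted ChannelMonitor back off disk.
	match persister.load_channel_data() {
		Ok(monitors) => println!("loaded {} channel monitor(s)", monitors.len()),
		Err(_) => eprintln!("failed to load channel data; check the directory and its permissions"),
	}
}

As the TODO in write_channel_data notes, the rename is only durable once the containing directory is also fsync()ed; on Linux that can typically be done by opening the directory itself with fs::File::open and calling sync_all on the handle, or by dropping down to the libc crate as the comment suggests.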

0 commit comments
