Add new custom channels itest liquidity edge case for safe HTLC failure #1031

Merged · 4 commits · May 2, 2025
199 changes: 191 additions & 8 deletions itest/assets_test.go
@@ -1033,6 +1033,7 @@ func sendAssetKeySendPayment(t *testing.T, src, dst *HarnessNode, amt uint64,
DestCustomRecords: customRecords,
PaymentHash: hash[:],
TimeoutSeconds: int32(PaymentTimeout.Seconds()),
MaxParts: cfg.maxShards,
}

request := &tchrpc.SendPaymentRequest{
@@ -1090,7 +1091,7 @@ func sendKeySendPayment(t *testing.T, src, dst *HarnessNode,
stream, err := src.RouterClient.SendPaymentV2(ctxt, req)
require.NoError(t, err)

result, err := getPaymentResult(stream)
result, err := getFinalPaymentResult(stream)
require.NoError(t, err)
require.Equal(t, lnrpc.Payment_SUCCEEDED, result.Status)
}
@@ -1135,6 +1136,12 @@ func createAndPayNormalInvoice(t *testing.T, src, rfqPeer, dst *HarnessNode,
func payInvoiceWithSatoshi(t *testing.T, payer *HarnessNode,
invoice *lnrpc.AddInvoiceResponse, opts ...payOpt) {

payPayReqWithSatoshi(t, payer, invoice.PaymentRequest, opts...)
}
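
payInvoiceWithSatoshi is now a thin wrapper around the new payPayReqWithSatoshi helper defined below, so a test can also pay a raw BOLT 11 payment request string directly. A minimal usage sketch (illustrative only, not part of this diff; the charlie node handle and payReq string are assumptions):

	// Illustrative only: pay a raw payment request string with the new
	// helper; "charlie" and "payReq" are assumed to exist in the test.
	payPayReqWithSatoshi(t, charlie, payReq, withMaxShards(16))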

func payPayReqWithSatoshi(t *testing.T, payer *HarnessNode, payReq string,
opts ...payOpt) {

cfg := defaultPayConfig()
for _, opt := range opts {
opt(cfg)
@@ -1145,15 +1152,30 @@ func payInvoiceWithSatoshi(t *testing.T, payer *HarnessNode,
defer cancel()

sendReq := &routerrpc.SendPaymentRequest{
PaymentRequest: invoice.PaymentRequest,
TimeoutSeconds: int32(PaymentTimeout.Seconds()),
MaxShardSizeMsat: 80_000_000,
FeeLimitMsat: 1_000_000,
PaymentRequest: payReq,
TimeoutSeconds: int32(PaymentTimeout.Seconds()),
FeeLimitMsat: 1_000_000,
MaxParts: cfg.maxShards,
}

if cfg.smallShards {
sendReq.MaxShardSizeMsat = 80_000_000
}

stream, err := payer.RouterClient.SendPaymentV2(ctxt, sendReq)
require.NoError(t, err)

result, err := getPaymentResult(stream)
if cfg.payStatus == lnrpc.Payment_IN_FLIGHT {
t.Logf("Waiting for initial stream response...")
result, err := stream.Recv()
require.NoError(t, err)

require.Equal(t, cfg.payStatus, result.Status)

return
}

result, err := getFinalPaymentResult(stream)
if cfg.errSubStr != "" {
require.ErrorContains(t, err, cfg.errSubStr)
} else {
@@ -1212,6 +1234,7 @@ func payInvoiceWithSatoshiLastHop(t *testing.T, payer *HarnessNode,

type payConfig struct {
smallShards bool
maxShards uint32
errSubStr string
allowOverpay bool
feeLimit lnwire.MilliSatoshi
@@ -1240,6 +1263,12 @@ func withSmallShards() payOpt {
}
}

func withMaxShards(maxShards uint32) payOpt {
return func(c *payConfig) {
c.maxShards = maxShards
}
}
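
The new withMaxShards option caps the number of multi-path shards by setting MaxParts on the SendPaymentRequest, and it composes with the existing withSmallShards option. A minimal usage sketch (illustrative only, not part of this diff; dave and invoiceResp are assumptions):

	// Illustrative only: limit the payment to at most two shards of at
	// most 80k sats each; "dave" and "invoiceResp" are assumed to exist
	// in the calling test.
	payInvoiceWithSatoshi(
		t, dave, invoiceResp,
		withSmallShards(),
		withMaxShards(2),
	)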

func withPayErrSubStr(errSubStr string) payOpt {
return func(c *payConfig) {
c.errSubStr = errSubStr
@@ -1310,6 +1339,7 @@ func payInvoiceWithAssets(t *testing.T, payer, rfqPeer *HarnessNode,
TimeoutSeconds: int32(PaymentTimeout.Seconds()),
FeeLimitMsat: int64(cfg.feeLimit),
DestCustomRecords: cfg.destCustomRecords,
MaxParts: cfg.maxShards,
}

if cfg.smallShards {
@@ -1394,8 +1424,9 @@ func payInvoiceWithAssets(t *testing.T, payer, rfqPeer *HarnessNode,
}

type invoiceConfig struct {
errSubStr string
groupKey []byte
errSubStr string
groupKey []byte
routeHints []*lnrpc.RouteHint
}

func defaultInvoiceConfig() *invoiceConfig {
@@ -2292,6 +2323,158 @@ func macFromBytes(macBytes []byte) (grpc.DialOption, error) {
return grpc.WithPerRPCCredentials(cred), nil
}

func assertMinNumHtlcs(t *testing.T, node *HarnessNode, expected int) {
t.Helper()

ctxb := context.Background()

err := wait.NoError(func() error {
listChansRequest := &lnrpc.ListChannelsRequest{}
listChansResp, err := node.ListChannels(ctxb, listChansRequest)
if err != nil {
return err
}

var numHtlcs int
for _, channel := range listChansResp.Channels {
numHtlcs += len(channel.PendingHtlcs)
}

if numHtlcs < expected {
return fmt.Errorf("expected %v HTLCs, got %v, %v",
expected, numHtlcs,
toProtoJSON(t, listChansResp))
}

return nil
}, defaultTimeout)
require.NoError(t, err)
}
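
assertMinNumHtlcs polls ListChannels until the total number of pending HTLCs across all of the node's channels reaches at least the expected count, failing the test after defaultTimeout. A minimal usage sketch (illustrative only; the dave node handle is an assumption):

	// Illustrative only: wait until "dave" holds at least two pending
	// HTLCs across all of his channels.
	assertMinNumHtlcs(t, dave, 2)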

type subscribeEventsClient = routerrpc.Router_SubscribeHtlcEventsClient

type htlcEventConfig struct {
timeout time.Duration
numEvents int
withLinkFailure bool
withForwardFailure bool
withFailureDetail routerrpc.FailureDetail
}

func defaultHtlcEventConfig() *htlcEventConfig {
return &htlcEventConfig{
timeout: defaultTimeout,
}
}

type htlcEventOpt func(*htlcEventConfig)

func withTimeout(timeout time.Duration) htlcEventOpt {
return func(config *htlcEventConfig) {
config.timeout = timeout
}
}

func withNumEvents(numEvents int) htlcEventOpt {
return func(config *htlcEventConfig) {
config.numEvents = numEvents
}
}

func withLinkFailure(detail routerrpc.FailureDetail) htlcEventOpt {
return func(config *htlcEventConfig) {
config.withLinkFailure = true
config.withFailureDetail = detail
}
}

func withForwardFailure() htlcEventOpt {
return func(config *htlcEventConfig) {
config.withForwardFailure = true
}
}

func assertHtlcEvents(t *testing.T, c subscribeEventsClient,
opts ...htlcEventOpt) {

t.Helper()

cfg := defaultHtlcEventConfig()
for _, opt := range opts {
opt(cfg)
}

timeout := time.After(cfg.timeout)
events := make(chan *routerrpc.HtlcEvent)

go func() {
defer close(events)

for {
evt, err := c.Recv()
if err != nil {
t.Logf("Received HTLC event error: %v", err)
return
}

select {
case events <- evt:
case <-timeout:
t.Logf("Htlc event receive timeout")
return
}
}
}()

var numEvents int
for {
type (
linkFailEvent = *routerrpc.HtlcEvent_LinkFailEvent
forwardFailEvent = *routerrpc.HtlcEvent_ForwardFailEvent
)

select {
case evt, ok := <-events:
if !ok {
t.Fatalf("Htlc event stream closed")
return
}

if cfg.withLinkFailure {
linkEvent, ok := evt.Event.(linkFailEvent)
if !ok {
// We only count link failure events.
continue
}

if linkEvent.LinkFailEvent.FailureDetail !=
cfg.withFailureDetail {

continue
}
}

if cfg.withForwardFailure {
_, ok := evt.Event.(forwardFailEvent)
if !ok {
// We only count forward failure events.
continue
}
}

numEvents++

if numEvents == cfg.numEvents {
return
}

case <-timeout:
t.Fatalf("Htlc event receive timeout")
return
}
}
}
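
A usage sketch for assertHtlcEvents (illustrative only, not part of this diff): subscribe to a node's HTLC event stream through lnd's router RPC and wait for two link-failure events that carry the insufficient-balance failure detail. The dave node handle is an assumption of this sketch.

	// Illustrative only: "dave" is assumed to be a *HarnessNode in the
	// calling test.
	ctxb := context.Background()
	htlcStream, err := dave.RouterClient.SubscribeHtlcEvents(
		ctxb, &routerrpc.SubscribeHtlcEventsRequest{},
	)
	require.NoError(t, err)

	// Expect two link failures caused by insufficient balance.
	assertHtlcEvents(
		t, htlcStream,
		withNumEvents(2),
		withLinkFailure(routerrpc.FailureDetail_INSUFFICIENT_BALANCE),
	)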

func assertNumHtlcs(t *testing.T, node *HarnessNode, expected int) {
t.Helper()

4 changes: 2 additions & 2 deletions itest/litd_accounts_test.go
@@ -416,12 +416,12 @@ func payNode(invoiceCtx, paymentCtx context.Context, t *harnessTest,
stream, err := from.SendPaymentV2(paymentCtx, sendReq)
require.NoError(t.t, err)

result, err := getPaymentResult(stream)
result, err := getFinalPaymentResult(stream)
require.NoError(t.t, err)
require.Equal(t.t, result.Status, lnrpc.Payment_SUCCEEDED)
}

func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) (
func getFinalPaymentResult(stream routerrpc.Router_SendPaymentV2Client) (
*lnrpc.Payment, error) {

for {
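
The rest of the loop is truncated in this view. As a hedged sketch only (an assumption about the hidden body, not the PR's actual code), a helper named getFinalPaymentResult would typically drain the stream until the payment leaves the IN_FLIGHT state:

	// Illustrative only: keep receiving updates until a terminal
	// payment state is reached.
	for {
		payment, err := stream.Recv()
		if err != nil {
			return nil, err
		}

		if payment.Status != lnrpc.Payment_IN_FLIGHT {
			return payment, nil
		}
	}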