Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 23 additions & 2 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -616,7 +616,7 @@ jobs:
gather:
# A dummy task that depends on the full matrix of tests, and
# signals successful completion. Used for the PR status to pass
# before merging.
# before merging. Needs to run even if they failed!
name: CI completion
runs-on: ubuntu-22.04
needs:
Expand All @@ -626,7 +626,28 @@ jobs:
- integration-sanitizers
- min-btc-support
- check-flake
if: ${{ always() }}
steps:
- name: Complete
env:
JOB_NAMES: "INTEGRATION CHECK_UNITS VALGRIND SANITIZERS DOCS BTC FLAKE"
INTEGRATION: ${{ needs.integration.result }}
CHECK_UNITS: ${{ needs['check-units'].result }}
VALGRIND: ${{ needs['integration-valgrind'].result }}
SANITIZERS: ${{ needs['integration-sanitizers'].result }}
DOCS: ${{ needs['update-docs-examples'].result }}
BTC: ${{ needs['min-btc-support'].result }}
FLAKE: ${{ needs['check-flake'].result }}
run: |
echo CI completed successfully
failed=""
for name in $JOB_NAMES; do
result="${!name}"
echo "$name: $result"
if [[ "$result" != "success" ]]; then
failed="yes"
fi
done
if [[ "$failed" == "yes" ]]; then
echo "One or more required jobs failed"
exit 1
fi
12 changes: 6 additions & 6 deletions contrib/msggen/msggen/schema.json
Original file line number Diff line number Diff line change
Expand Up @@ -309,19 +309,19 @@
"The bias, positive being good and negative being bad (0 being no bias). Useful values are +/-1 through +/-10, though -100 through +100 are possible values."
]
},
"description": {
"type": "string",
"description": [
"Description/annotation to display in askrene-listlayers(7)"
]
},
"relative": {
"type": "boolean",
"added": "v25.05",
"default": false,
"description": [
"The bias will be added to the previous value."
]
},
"description": {
"type": "string",
"description": [
"Description/annotation to display in askrene-listlayers(7)"
]
}
}
},
Expand Down
12 changes: 6 additions & 6 deletions doc/schemas/askrene-bias-channel.json
Original file line number Diff line number Diff line change
Expand Up @@ -33,19 +33,19 @@
"The bias, positive being good and negative being bad (0 being no bias). Useful values are +/-1 through +/-10, though -100 through +100 are possible values."
]
},
"description": {
"type": "string",
"description": [
"Description/annotation to display in askrene-listlayers(7)"
]
},
"relative": {
"type": "boolean",
"added": "v25.05",
"default": false,
"description": [
"The bias will be added to the previous value."
]
},
"description": {
"type": "string",
"description": [
"Description/annotation to display in askrene-listlayers(7)"
]
}
}
},
Expand Down
6 changes: 6 additions & 0 deletions lightningd/connect_control.c
Original file line number Diff line number Diff line change
Expand Up @@ -236,6 +236,12 @@ static struct command_result *json_connect(struct command *cmd,
&peer->addr);
}

/* When a peer disconnects, we give subds time to clean themselves up
* (this lets connectd ensure they've seen the final messages). But
* now it's going to try to reconnect, we've gotta force them out. */
if (peer)
peer_channels_cleanup(peer);

subd_send_msg(cmd->ld->connectd,
take(towire_connectd_connect_to_peer(NULL, &id_addr.id, addr, true)));

Expand Down
22 changes: 10 additions & 12 deletions lightningd/peer_control.c
Original file line number Diff line number Diff line change
Expand Up @@ -177,16 +177,10 @@ void maybe_delete_peer(struct peer *peer)
delete_peer(peer);
}

static void peer_channels_cleanup(struct lightningd *ld,
const struct node_id *id)
void peer_channels_cleanup(struct peer *peer)
{
struct peer *peer;
struct channel *c, **channels;

peer = peer_by_id(ld, id);
if (!peer)
return;

/* Freeing channels can free peer, so gather first. */
channels = tal_arr(tmpctx, struct channel *, 0);
list_for_each(&peer->channels, c, list)
Expand Down Expand Up @@ -1726,11 +1720,6 @@ void peer_connected(struct lightningd *ld, const u8 *msg)
fatal("Connectd gave bad CONNECT_PEER_CONNECTED message %s",
tal_hex(msg, msg));

/* When a peer disconnects, we give subds time to clean themselves up
* (this lets connectd ensure they've seen the final messages). But
* now it's reconnected, we've gotta force them out. */
peer_channels_cleanup(ld, &id);

/* If we connected, and it's a normal address */
if (!hook_payload->incoming
&& hook_payload->addr.itype == ADDR_INTERNAL_WIREADDR
Expand All @@ -1743,6 +1732,15 @@ void peer_connected(struct lightningd *ld, const u8 *msg)
/* If we're already dealing with this peer, hand off to correct
* subdaemon. Otherwise, we'll hand to openingd to wait there. */
peer = peer_by_id(ld, &id);
if (peer) {
/* When a peer disconnects, we give subds time to clean themselves up
* (this lets connectd ensure they've seen the final messages). But
* now it's reconnected, we've gotta force them out. This might free
* the peer! */
peer_channels_cleanup(peer);
peer = peer_by_id(ld, &id);
}

if (!peer) {
/* If we connected to them, we know this is a good address. */
peer = new_peer(ld, 0, &id, &hook_payload->addr,
Expand Down
3 changes: 3 additions & 0 deletions lightningd/peer_control.h
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,9 @@ command_find_channel(struct command *cmd,
const char *buffer, const jsmntok_t *tok,
struct channel **channel);

/* We do this lazily, when reconnecting */
void peer_channels_cleanup(struct peer *peer);

/* Ancient (0.7.0 and before) releases could create invalid commitment txs! */
bool invalid_last_tx(const struct bitcoin_tx *tx);

Expand Down
2 changes: 1 addition & 1 deletion plugins/askrene/askrene.c
Original file line number Diff line number Diff line change
Expand Up @@ -1104,8 +1104,8 @@ static struct command_result *json_askrene_bias_channel(struct command *cmd,
p_req("layer", param_known_layer, &layer),
p_req("short_channel_id_dir", param_short_channel_id_dir, &scidd),
p_req("bias", param_s8_hundred, &bias),
p_opt_def("relative", param_bool, &relative, false),
p_opt("description", param_string, &description),
p_opt_def("relative", param_bool, &relative, false),
NULL))
return command_param_failed();
plugin_log(cmd->plugin, LOG_TRACE, "%s called: %.*s", __func__,
Expand Down
17 changes: 17 additions & 0 deletions tests/test_askrene.py
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,23 @@ def test_layers(node_factory):
with pytest.raises(RpcError, match="bias: should be a number between -100 and 100"):
l2.rpc.askrene_bias_channel('test_layers', '1x1x1/1', 101, "bigger bias")

# We can make them relative.
l2.rpc.askrene_bias_channel('test_layers', '1x1x1/1', 1, 'adding bias', True)
expect['biases'] = [{'short_channel_id_dir': '1x1x1/1', 'bias': -4, 'description': "adding bias"}]
listlayers = l2.rpc.askrene_listlayers('test_layers')
assert listlayers == {'layers': [expect]}

l2.rpc.askrene_bias_channel(layer='test_layers', short_channel_id_dir='1x1x1/1', bias=-1, relative=True)
expect['biases'] = [{'short_channel_id_dir': '1x1x1/1', 'bias': -5}]
listlayers = l2.rpc.askrene_listlayers('test_layers')
assert listlayers == {'layers': [expect]}

# They truncate on +/- 100 though:
l2.rpc.askrene_bias_channel('test_layers', '1x1x1/1', -99, None, True)
expect['biases'] = [{'short_channel_id_dir': '1x1x1/1', 'bias': -100}]
listlayers = l2.rpc.askrene_listlayers('test_layers')
assert listlayers == {'layers': [expect]}

# We can remove them.
l2.rpc.askrene_bias_channel('test_layers', '1x1x1/1', 0)
expect['biases'] = []
Expand Down
17 changes: 13 additions & 4 deletions tests/test_closing.py
Original file line number Diff line number Diff line change
Expand Up @@ -4187,9 +4187,18 @@ def test_anchorspend_using_to_remote(node_factory, bitcoind, anchors):
# Don't need l4 any more
l4.stop()

for n in (l1, l2, l3):
wait_for(lambda: n.rpc.listchannels() == {'channels': []})

# Now l1->l2<-l3 but push funds to l2 so it can forward.
node_factory.join_nodes([l1, l2], wait_for_announce=True)
node_factory.join_nodes([l3, l2], wait_for_announce=True)
node_factory.join_nodes([l1, l2])
node_factory.join_nodes([l3, l2])

# Make sure everyone knows about everyone else!
bitcoind.generate_block(5)
for n in (l1, l2, l3):
wait_for(lambda: len(n.rpc.listchannels()['channels']) == 4)

l3.rpc.pay(l2.rpc.invoice(200000000, 'test2', 'test2')['bolt11'])
wait_for(lambda: only_one(l2.rpc.listpeerchannels(l3.info['id'])['channels'])['htlcs'] == [])

Expand All @@ -4203,13 +4212,13 @@ def test_anchorspend_using_to_remote(node_factory, bitcoind, anchors):
# Give l2 a sense of urgency, by ensuring there's an HTLC in-channel
# when it needs to go onchain.
# Make sure HTLC expiry is what we expect!
l2.daemon.wait_for_log('Adding HTLC 0 amount=100000000msat cltv=128 gave CHANNEL_ERR_ADD_OK')
l2.daemon.wait_for_log('Adding HTLC 0 amount=100000000msat cltv=123 gave CHANNEL_ERR_ADD_OK')

# Kill l1 and l3, we just care about l2.
l3.stop()
l1.stop()

for block in range(117, 128):
for block in range(112, 123):
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])

Expand Down
2 changes: 1 addition & 1 deletion tests/test_misc.py
Original file line number Diff line number Diff line change
Expand Up @@ -3382,7 +3382,7 @@ def test_listforwards_and_listhtlcs(node_factory, bitcoind):
assert l2.rpc.wait('htlcs', 'deleted', 0)['deleted'] == 0

# 99 blocks is not enough for them to be deleted.
bitcoind.generate_block(97)
bitcoind.generate_block(97, wait_for_mempool=1)
assert l2.rpc.wait('htlcs', 'deleted', 0)['deleted'] == 0

# This will forget c23
Expand Down
Loading