Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[WFLY-11487]: Bump messaging schema version to 6.0
- Loading branch information
1 parent
75c223a
commit e501546
Showing
12 changed files
with
3,325 additions
and
9 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
660 changes: 660 additions & 0 deletions
660
.../src/main/java/org/wildfly/extension/messaging/activemq/MessagingSubsystemParser_6_0.java
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
1,009 changes: 1,009 additions & 0 deletions
1,009
messaging-activemq/src/main/resources/schema/wildfly-messaging-activemq_6_0.xsd
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
320 changes: 320 additions & 0 deletions
320
...ava/org/wildfly/extension/messaging/activemq/MessagingActiveMQSubsystem_6_0_TestCase.java
Large diffs are not rendered by default.
Oops, something went wrong.
672 changes: 672 additions & 0 deletions
672
...ng-activemq/src/test/resources/org/wildfly/extension/messaging/activemq/subsystem_6_0.xml
Large diffs are not rendered by default.
Oops, something went wrong.
92 changes: 92 additions & 0 deletions
92
...q/src/test/resources/org/wildfly/extension/messaging/activemq/subsystem_6_0_ha-policy.xml
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Original file line | Diff line number | Diff line change |
---|---|---|---|
@@ -0,0 +1,92 @@ | |||
<?xml version="1.0" encoding="UTF-8"?>
<!--
  Test configuration for the messaging-activemq 6.0 subsystem schema.
  Each <server> exercises exactly one HA policy variant (live-only,
  replication master/slave/colocated, shared-store master/slave/colocated).
  Attribute values use ${property:default} expressions so that both
  expression resolution and the defaults are covered by the parser test.

  NOTE(review): several property keys were copy-paste typos in the original
  (group-name attributes reading *.cluster-name properties, a slave timeout
  reading the master's property, and doubled "slave-slave" segments); they
  have been corrected so each attribute reads its own property key. The
  resolved default values are unchanged.
-->
<subsystem xmlns="urn:jboss:domain:messaging-activemq:6.0">
    <server name="ha-policy-live-only-scale-down-discovery-group">
        <live-only>
            <scale-down enabled="${scale-down.enabled:true}"
                        cluster-name="${scale-down.cluster-name:mycluster}"
                        group-name="${scale-down.group-name:mygroup}"
                        discovery-group="groupC"/>
        </live-only>
    </server>
    <server name="ha-policy-live-only-scale-down-connectors">
        <live-only>
            <scale-down enabled="${scale-down.enabled:true}"
                        cluster-name="${scale-down.cluster-name:mycluster}"
                        group-name="${scale-down.group-name:mygroup}"
                        connectors="netty in-vm"/>
        </live-only>
    </server>
    <server name="ha-policy-replication-master">
        <replication-master
                cluster-name="${replication-master.cluster-name:mycluster}"
                group-name="${replication-master.group-name:mygroup}"
                check-for-live-server="${replication-master.check-for-live-server:false}"
                initial-replication-sync-timeout="${replication-master.initial-replication-sync-timeout:1234}"/>
    </server>
    <server name="ha-policy-replication-slave">
        <replication-slave
                cluster-name="${replication-slave.cluster-name:mycluster}"
                group-name="${replication-slave.group-name:mygroup}"
                allow-failback="${replication-slave.allow-failback:true}"
                initial-replication-sync-timeout="${replication-slave.initial-replication-sync-timeout:1234}"
                restart-backup="${replication-slave.restart-backup:true}"
                max-saved-replicated-journal-size="${replication-slave.max-saved-replicated-journal-size:24}">
            <scale-down enabled="${replication-slave-scale-down.enabled:true}"
                        cluster-name="${replication-slave-scale-down.cluster-name:mycluster}"
                        group-name="${replication-slave-scale-down.group-name:mygroup}"
                        connectors="netty"/>
        </replication-slave>
    </server>
    <server name="ha-policy-replication-colocated">
        <replication-colocated request-backup="${replication-colocated.request-backup:false}"
                               backup-request-retries="${replication-colocated.backup-request-retries:-1}"
                               backup-request-retry-interval="${replication-colocated.backup-request-retry-interval:5098}"
                               max-backups="${replication-colocated.max-backups:5}"
                               backup-port-offset="${replication-colocated.backup-port-offset:500}"
                               excluded-connectors="netty">
            <master cluster-name="${replication-colocated-master.cluster-name:mycluster}"
                    group-name="${replication-colocated-master.group-name:mygroup}"
                    check-for-live-server="${replication-colocated-master.check-for-live-server:false}"/>
            <slave cluster-name="${replication-colocated-slave.cluster-name:mycluster}"
                   group-name="${replication-colocated-slave.group-name:mygroup}"
                   allow-failback="${replication-colocated-slave.allow-failback:true}"
                   initial-replication-sync-timeout="${replication-colocated-slave.initial-replication-sync-timeout:1234}"
                   restart-backup="${replication-colocated-slave.restart-backup:true}"
                   max-saved-replicated-journal-size="${replication-colocated-slave.max-saved-replicated-journal-size:24}">
                <scale-down enabled="${replication-colocated-slave-scale-down.enabled:true}"
                            cluster-name="${replication-colocated-slave-scale-down.cluster-name:mycluster}"
                            group-name="${replication-colocated-slave-scale-down.group-name:mygroup}"
                            connectors="netty"/>
            </slave>
        </replication-colocated>
    </server>
    <server name="ha-policy-shared-store-master">
        <shared-store-master failover-on-server-shutdown="${shared-store-master.failover-on-server-shutdown:true}"/>
    </server>
    <server name="ha-policy-shared-store-slave">
        <shared-store-slave allow-failback="${shared-store-slave.allow-failback:false}"
                            failover-on-server-shutdown="${shared-store-slave.failover-on-server-shutdown:true}"
                            restart-backup="${shared-store-slave.restart-backup:false}">
            <scale-down enabled="${shared-store-slave-scale-down.enabled:true}"
                        cluster-name="${shared-store-slave-scale-down.cluster-name:mycluster}"
                        group-name="${shared-store-slave-scale-down.group-name:mygroup}"
                        connectors="netty"/>
        </shared-store-slave>
    </server>
    <server name="ha-policy-shared-store-colocated">
        <shared-store-colocated request-backup="${shared-store-colocated.request-backup:false}"
                                backup-request-retries="${shared-store-colocated.backup-request-retries:-1}"
                                backup-request-retry-interval="${shared-store-colocated.backup-request-retry-interval:5098}"
                                max-backups="${shared-store-colocated.max-backups:5}"
                                backup-port-offset="${shared-store-colocated.backup-port-offset:500}">
            <master failover-on-server-shutdown="${shared-store-colocated-master.failover-on-server-shutdown:true}"/>
            <slave allow-failback="${shared-store-colocated-slave.allow-failback:false}"
                   failover-on-server-shutdown="${shared-store-colocated-slave.failover-on-server-shutdown:true}"
                   restart-backup="${shared-store-colocated-slave.restart-backup:false}">
                <scale-down enabled="${shared-store-colocated-slave-scale-down.enabled:true}"
                            cluster-name="${shared-store-colocated-slave-scale-down.cluster-name:mycluster}"
                            group-name="${shared-store-colocated-slave-scale-down.group-name:mygroup}"
                            connectors="netty"/>
            </slave>
        </shared-store-colocated>
    </server>
</subsystem>
151 changes: 151 additions & 0 deletions
151
...est/resources/org/wildfly/extension/messaging/activemq/subsystem_6_0_reject_transform.xml
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Original file line | Diff line number | Diff line change |
---|---|---|---|
@@ -0,0 +1,151 @@ | |||
<?xml version="1.0" encoding="UTF-8"?>
<!--
  Test configuration for the messaging-activemq 6.0 subsystem schema,
  used by transformer tests: it collects attributes/elements that must be
  rejected when transforming to older model versions (global-client pools,
  external JMS destinations, journal/jdbc settings, elytron security,
  credential-reference, deserialization lists, etc.).

  NOTE(review): the original had server-name="=foo" on both http-connectors;
  the leading "=" looked like a stray character and has been removed.
-->
<subsystem xmlns="urn:jboss:domain:messaging-activemq:6.0">
    <global-client thread-pool-max-size="${global.client.thread-pool-max-size:32}"
                   scheduled-thread-pool-max-size="${global.client.scheduled.thread-pool-max-size:54}"/>

    <http-connector name="client-http"
                    socket-binding="http"
                    endpoint="http"
                    server-name="foo">
        <param name="batch-delay" value="${batch.delay:50}"/>
    </http-connector>

    <remote-connector name="client-netty-throughput"
                      socket-binding="messaging-throughput">
        <param name="batch-delay" value="${batch.delay:50}"/>
    </remote-connector>

    <in-vm-connector name="in-vm-client"
                     server-id="${my.server-id:0}"/>

    <connector name="myconnector-client"
               factory-class="org.apache.activemq.artemis.core.remoting.impl.netty.NettyConnectorFactory">
        <param name="host" value="192.168.1.2"/>
        <param name="port" value="5445"/>
        <param name="key-store-path" value="path/to/server.jks"/>
        <param name="key-store-password" value="${VAULT::server-key::key-store-password::sharedKey}"/>
    </connector>

    <discovery-group name="client-discovery-group-1"
                     socket-binding="group-t-binding"/>

    <!-- NOTE(review): connectors="in-vm client" names two connectors, but only
         "in-vm-client" is declared above - possibly a typo for "in-vm-client";
         left as-is, confirm against the transformer test expectations. -->
    <connection-factory name="InVmConnectionFactory"
                        connectors="in-vm client"
                        entries="${connection-factory.entries.entry:java:/ClientConnectionFactory}"/>

    <pooled-connection-factory name="hornetq-ra-local"
                               transaction="local"
                               user="alice"
                               password="alicepassword"
                               use-auto-recovery="${use.auto.recovery:true}"
                               connectors="in-vm-client"
                               entries="java:/JmsLocal"/>

    <external-jms-queue name="testQueue"
                        entries="${jms-queue.entry:queue/client-test}"/>
    <external-jms-queue name="testQueue2"
                        entries="java:/global/queue/test java:/global/queue/client-test2"/>
    <external-jms-topic name="testTopic"
                        entries="${jms-topic.entry:topic/client-test}"/>

    <server name="default">

        <security elytron-domain="elytronDomain"/>
        <cluster user="testuser">
            <credential-reference store="cs1" alias="testuser"/>
        </cluster>
        <journal datasource="fooDS"
                 database="mysql"
                 jdbc-lock-expiration="891"
                 jdbc-lock-renew-period="892"
                 jdbc-network-timeout="4567"
                 messages-table="MY_MESSAGES"
                 bindings-table="MY_BINDINGS"
                 jms-bindings-table="MY_JMS_BINDINGS"
                 large-messages-table="MY_LARGE_MESSAGES"
                 node-manager-store-table="MY_NODE_MANAGER_STORE"
                 page-store-table="MY_PAGE_STORE"
                 global-max-disk-usage="70"
                 global-max-memory-size="100000"
                 disk-scan-period="10000"/>

        <replication-colocated>
            <master/>
        </replication-colocated>

        <address-setting name="#"
                         auto-create-queues="true"
                         auto-delete-queues="true"
                         auto-create-addresses="false"
                         auto-delete-addresses="false"/>

        <http-connector name="http"
                        socket-binding="http"
                        endpoint="http"
                        server-name="foo">
            <param name="batch-delay" value="${batch.delay:50}"/>
        </http-connector>

        <cluster-connection name="cc1"
                            address="${address:cc1-address}"
                            connector-name="netty"
                            producer-window-size="${producer.windows.size:5678}"
                            static-connectors="in-vm netty"/>

        <bridge name="bridge1"
                queue-name="${queue.name:coreQueueA}"
                forwarding-address="${forwarding.address:forwardingaddress1}"
                producer-window-size="${producer.windows.size:5678}"
                static-connectors="in-vm netty"
                user="${user:Brian}">
            <credential-reference clear-text="secret1"/>
        </bridge>
        <bridge name="bridge2"
                queue-name="${queue.name:coreQueueA}"
                forwarding-address="${forwarding.address:forwardingaddress1}"
                producer-window-size="${producer.windows.size:5678}"
                static-connectors="in-vm netty"
                user="${user:Brian}"
                password="${password:secret}">
        </bridge>

        <connection-factory name="otherConnectionFactory"
                            discovery-group="groupC"
                            entries="otherConnectionFactory"
                            deserialization-black-list="org.foo.Bar org.foo.Baz"
                            deserialization-white-list="org.bar.Bar org.baz.Baz"
                            initial-message-packet-size="${initial.message.packet.size:12345}"/>

        <pooled-connection-factory name="hornetq-ra-local"
                                   transaction="local"
                                   user="alice"
                                   password="alicepassword"
                                   connectors="in-vm"
                                   entries="java:/JmsLocal"
                                   statistics-enabled="true">
            <inbound-config
                    rebalance-connections="true"/>
            <outbound-config
                    allow-local-transactions="true"/>

        </pooled-connection-factory>
        <pooled-connection-factory name="pcf-with-credential-reference"
                                   entries="java:/JmsLocal2"
                                   connectors="in-vm"
                                   user="foo"
                                   deserialization-black-list="org.foo.Bar org.foo.Baz"
                                   deserialization-white-list="org.bar.Bar org.baz.Baz">
            <credential-reference clear-text="passwordOut!"/>
        </pooled-connection-factory>

        <broadcast-group name="groupT"
                         jgroups-channel="ee"
                         jgroups-cluster="activemq-cluster"/>

        <discovery-group name="groupU"
                         jgroups-channel="ee"
                         jgroups-cluster="activemq-cluster"/>
    </server>
    <server name="other">
        <replication-master/>
    </server>
</subsystem>
Oops, something went wrong.