Skip to content

Commit

Permalink
[WFLY-11487]: Bump messaging schema version to 6.0
Browse files Browse the repository at this point in the history
  • Loading branch information
TomasHofman authored and ehsavoie committed Dec 17, 2018
1 parent 75c223a commit e501546
Show file tree
Hide file tree
Showing 12 changed files with 3,325 additions and 9 deletions.
Expand Up @@ -178,14 +178,15 @@ public class MessagingExtension implements Extension {


static final String RESOURCE_NAME = MessagingExtension.class.getPackage().getName() + ".LocalDescriptions"; static final String RESOURCE_NAME = MessagingExtension.class.getPackage().getName() + ".LocalDescriptions";


protected static final ModelVersion VERSION_6_0_0 = ModelVersion.create(6, 0, 0);
protected static final ModelVersion VERSION_5_0_0 = ModelVersion.create(5, 0, 0); protected static final ModelVersion VERSION_5_0_0 = ModelVersion.create(5, 0, 0);
protected static final ModelVersion VERSION_4_0_0 = ModelVersion.create(4, 0, 0); protected static final ModelVersion VERSION_4_0_0 = ModelVersion.create(4, 0, 0);
protected static final ModelVersion VERSION_3_0_0 = ModelVersion.create(3, 0, 0); protected static final ModelVersion VERSION_3_0_0 = ModelVersion.create(3, 0, 0);
protected static final ModelVersion VERSION_2_0_0 = ModelVersion.create(2, 0, 0); protected static final ModelVersion VERSION_2_0_0 = ModelVersion.create(2, 0, 0);
protected static final ModelVersion VERSION_1_0_0 = ModelVersion.create(1, 0, 0); protected static final ModelVersion VERSION_1_0_0 = ModelVersion.create(1, 0, 0);
private static final ModelVersion CURRENT_MODEL_VERSION = VERSION_5_0_0; private static final ModelVersion CURRENT_MODEL_VERSION = VERSION_6_0_0;


private static final MessagingSubsystemParser_5_0 CURRENT_PARSER = new MessagingSubsystemParser_5_0(); private static final MessagingSubsystemParser_6_0 CURRENT_PARSER = new MessagingSubsystemParser_6_0();




public static ResourceDescriptionResolver getResourceDescriptionResolver(final String... keyPrefix) { public static ResourceDescriptionResolver getResourceDescriptionResolver(final String... keyPrefix) {
Expand Down Expand Up @@ -259,6 +260,7 @@ public void initializeParsers(ExtensionParsingContext context) {
context.setSubsystemXmlMapping(SUBSYSTEM_NAME, MessagingSubsystemParser_2_0.NAMESPACE, MessagingSubsystemParser_2_0::new); context.setSubsystemXmlMapping(SUBSYSTEM_NAME, MessagingSubsystemParser_2_0.NAMESPACE, MessagingSubsystemParser_2_0::new);
context.setSubsystemXmlMapping(SUBSYSTEM_NAME, MessagingSubsystemParser_3_0.NAMESPACE, MessagingSubsystemParser_3_0::new); context.setSubsystemXmlMapping(SUBSYSTEM_NAME, MessagingSubsystemParser_3_0.NAMESPACE, MessagingSubsystemParser_3_0::new);
context.setSubsystemXmlMapping(SUBSYSTEM_NAME, MessagingSubsystemParser_4_0.NAMESPACE, MessagingSubsystemParser_4_0::new); context.setSubsystemXmlMapping(SUBSYSTEM_NAME, MessagingSubsystemParser_4_0.NAMESPACE, MessagingSubsystemParser_4_0::new);
context.setSubsystemXmlMapping(SUBSYSTEM_NAME, MessagingSubsystemParser_5_0.NAMESPACE, CURRENT_PARSER); context.setSubsystemXmlMapping(SUBSYSTEM_NAME, MessagingSubsystemParser_5_0.NAMESPACE, MessagingSubsystemParser_5_0::new);
context.setSubsystemXmlMapping(SUBSYSTEM_NAME, MessagingSubsystemParser_6_0.NAMESPACE, CURRENT_PARSER);
} }
} }

Large diffs are not rendered by default.

Expand Up @@ -74,7 +74,7 @@ public void registerTransformers(SubsystemTransformerRegistration registration)
registerTransformers_EAP_7_0_0(builder.createBuilder(MessagingExtension.VERSION_2_0_0, MessagingExtension.VERSION_1_0_0)); registerTransformers_EAP_7_0_0(builder.createBuilder(MessagingExtension.VERSION_2_0_0, MessagingExtension.VERSION_1_0_0));


builder.buildAndRegister(registration, new ModelVersion[] { MessagingExtension.VERSION_1_0_0, MessagingExtension.VERSION_2_0_0, builder.buildAndRegister(registration, new ModelVersion[] { MessagingExtension.VERSION_1_0_0, MessagingExtension.VERSION_2_0_0,
MessagingExtension.VERSION_3_0_0, MessagingExtension.VERSION_4_0_0}); MessagingExtension.VERSION_3_0_0, MessagingExtension.VERSION_4_0_0, MessagingExtension.VERSION_5_0_0});
} }


private static void registerTransformers_WF_15(ResourceTransformationDescriptionBuilder subsystem) { private static void registerTransformers_WF_15(ResourceTransformationDescriptionBuilder subsystem) {
Expand Down
1,009 changes: 1,009 additions & 0 deletions messaging-activemq/src/main/resources/schema/wildfly-messaging-activemq_6_0.xsd

Large diffs are not rendered by default.

Expand Up @@ -3,7 +3,7 @@
<config> <config>
<!-- This is very different from the normal messaging setup, so duplicating the config is easier --> <!-- This is very different from the normal messaging setup, so duplicating the config is easier -->
<extension-module>org.wildfly.extension.messaging-activemq</extension-module> <extension-module>org.wildfly.extension.messaging-activemq</extension-module>
<subsystem xmlns="urn:jboss:domain:messaging-activemq:5.0"> <subsystem xmlns="urn:jboss:domain:messaging-activemq:6.0">
<server name="default" <server name="default"
persistence-enabled="true"> persistence-enabled="true">
<cluster password="${jboss.messaging.cluster.password:CHANGE ME!!}" /> <cluster password="${jboss.messaging.cluster.password:CHANGE ME!!}" />
Expand Down
Expand Up @@ -2,7 +2,7 @@
<!-- See src/resources/configuration/ReadMe.txt for how the configuration assembly works --> <!-- See src/resources/configuration/ReadMe.txt for how the configuration assembly works -->
<config default-supplement="default"> <config default-supplement="default">
<extension-module>org.wildfly.extension.messaging-activemq</extension-module> <extension-module>org.wildfly.extension.messaging-activemq</extension-module>
<subsystem xmlns="urn:jboss:domain:messaging-activemq:5.0"> <subsystem xmlns="urn:jboss:domain:messaging-activemq:6.0">


<server name="default"> <server name="default">


Expand Down
Expand Up @@ -102,10 +102,9 @@ protected Properties getResolvedProperties() {
return properties; return properties;
} }


@Test
@Override @Override
public void testSchemaOfSubsystemTemplates() throws Exception { protected KernelServices standardSubsystemTest(String configId, boolean compareXml) throws Exception {
super.testSchemaOfSubsystemTemplates(); return super.standardSubsystemTest(configId, false);
} }


///////////////////////////////////////// /////////////////////////////////////////
Expand Down

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -0,0 +1,92 @@
<!-- Test fixture for the messaging-activemq subsystem, schema version 6.0
     (urn:jboss:domain:messaging-activemq:6.0). Each <server> element exercises
     one HA-policy variant: live-only (scale-down via discovery group or static
     connectors), replication master/slave/colocated, and shared-store
     master/slave/colocated. Attribute values are written as resolvable
     ${property:default} expressions so expression support is tested as well.
     NOTE(review): several group-name expressions reuse a ".cluster-name"
     property key, and two scale-down keys contain a doubled "-slave-slave-"
     segment (flagged inline). These look like copy/paste slips in the property
     NAMES only; the :default values still resolve, so parsing is unaffected.
     Confirm before reusing these property keys elsewhere. -->
<subsystem xmlns="urn:jboss:domain:messaging-activemq:6.0">
    <server name="ha-policy-live-only-scale-down-discovery-group">
        <live-only>
            <scale-down enabled="${scale-down.enabled:true}"
                        cluster-name="${scale-down.cluster-name:mycluster}"
                        group-name="${scale-down.group-name:mygroup}"
                        discovery-group="groupC"/>
        </live-only>
    </server>
    <server name="ha-policy-live-only-scale-down-connectors">
        <live-only>
            <scale-down enabled="${scale-down.enabled:true}"
                        cluster-name="${scale-down.cluster-name:mycluster}"
                        group-name="${scale-down.group-name:mygroup}"
                        connectors="netty in-vm"/>
        </live-only>
    </server>
    <server name="ha-policy-replication-master">
        <!-- NOTE(review): group-name key below says ".cluster-name" - suspected typo in the property name -->
        <replication-master
                cluster-name="${replication-master.cluster-name:mycluster}"
                group-name="${replication-master.cluster-name:mygroup}"
                check-for-live-server="${replication-master.check-for-live-server:false}"
                initial-replication-sync-timeout="${replication-master.initial-replication-sync-timeout:1234}"/>
    </server>
    <server name="ha-policy-replication-slave">
        <!-- NOTE(review): group-name key uses ".cluster-name" and the
             initial-replication-sync-timeout key uses the "replication-master."
             prefix inside a replication-slave - suspected property-name typos -->
        <replication-slave
                cluster-name="${replication-slave.cluster-name:mycluster}"
                group-name="${replication-slave.cluster-name:mygroup}"
                allow-failback="${replication-slave.allow-failback:true}"
                initial-replication-sync-timeout="${replication-master.initial-replication-sync-timeout:1234}"
                restart-backup="${replication-slave.restart-backup:true}"
                max-saved-replicated-journal-size="${replication-slave.max-saved-replicated-journal-size:24}">
            <scale-down enabled="${replication-slave-scale-down.enabled:true}"
                        cluster-name="${replication-slave-scale-down.cluster-name:mycluster}"
                        group-name="${replication-slave-scale-down.group-name:mygroup}"
                        connectors="netty"/>
        </replication-slave>
    </server>
    <server name="ha-policy-replication-colocated">
        <!-- NOTE(review): both <master> and <slave> group-name keys below say
             ".cluster-name" - suspected property-name typos -->
        <replication-colocated request-backup="${replication-colocated.request-backup:false}"
                               backup-request-retries="${replication-colocated.backup-request-retries:-1}"
                               backup-request-retry-interval="${replication-colocated.backup-request-retry-interval:5098}"
                               max-backups="${replication-colocated.max-backups:5}"
                               backup-port-offset="${replication-colocated.backup-port-offset:500}"
                               excluded-connectors="netty">
            <master cluster-name="${replication-colocated-master.cluster-name:mycluster}"
                    group-name="${replication-colocated-master.cluster-name:mygroup}"
                    check-for-live-server="${replication-colocated-master.check-for-live-server:false}" />
            <slave cluster-name="${replication-colocated-slave.cluster-name:mycluster}"
                   group-name="${replication-colocated-slave.cluster-name:mygroup}"
                   allow-failback="${replication-colocated-slave.allow-failback:true}"
                   initial-replication-sync-timeout="${replication-colocated-slave.initial-replication-sync-timeout:1234}"
                   restart-backup="${replication-colocated-slave.restart-backup:true}"
                   max-saved-replicated-journal-size="${replication-colocated-slave.max-saved-replicated-journal-size:24}">
                <scale-down enabled="${replication-colocated-slave-scale-down.enabled:true}"
                            cluster-name="${replication-colocated-slave-scale-down.cluster-name:mycluster}"
                            group-name="${replication-colocated-slave-scale-down.group-name:mygroup}"
                            connectors="netty"/>
            </slave>
        </replication-colocated>
    </server>
    <server name="ha-policy-shared-store-master">
        <shared-store-master failover-on-server-shutdown="${shared-store-master.failover-on-server-shutdown:true}" />
    </server>
    <server name="ha-policy-shared-store-slave">
        <shared-store-slave allow-failback="${shared-store-slave.allow-failback:false}"
                            failover-on-server-shutdown="${shared-store-slave.failover-on-server-shutdown:true}"
                            restart-backup="${shared-store-slave.restart-backup:false}">
            <!-- NOTE(review): doubled "slave-slave" in the group-name key below - suspected typo -->
            <scale-down enabled="${shared-store-slave-scale-down.enabled:true}"
                        cluster-name="${shared-store-slave-scale-down.cluster-name:mycluster}"
                        group-name="${shared-store-slave-slave-scale-down.group-name:mygroup}"
                        connectors="netty" />
        </shared-store-slave>
    </server>
    <server name="ha-policy-shared-store-colocated">
        <shared-store-colocated request-backup="${shared-store-colocated.request-backup:false}"
                                backup-request-retries="${shared-store-colocated.backup-request-retries:-1}"
                                backup-request-retry-interval="${shared-store-colocated.backup-request-retry-interval:5098}"
                                max-backups="${shared-store-colocated.max-backups:5}"
                                backup-port-offset="${shared-store-colocated.backup-port-offset:500}">
            <master failover-on-server-shutdown="${shared-store-colocated-master.failover-on-server-shutdown:true}" />
            <slave allow-failback="${shared-store-colocated-slave.allow-failback:false}"
                   failover-on-server-shutdown="${shared-store-colocated-slave.failover-on-server-shutdown:true}"
                   restart-backup="${shared-store-colocated-slave.restart-backup:false}">
                <!-- NOTE(review): doubled "slave-slave" in the group-name key below - suspected typo -->
                <scale-down enabled="${shared-store-colocated-slave-scale-down.enabled:true}"
                            cluster-name="${shared-store-colocated-slave-scale-down.cluster-name:mycluster}"
                            group-name="${shared-store-colocated-slave-slave-scale-down.group-name:mygroup}"
                            connectors="netty" />
            </slave>
        </shared-store-colocated>
    </server>
</subsystem>
@@ -0,0 +1,151 @@
<!-- Test fixture for the messaging-activemq subsystem, schema version 6.0
     (urn:jboss:domain:messaging-activemq:6.0). Exercises the elements defined
     OUTSIDE a <server> (global-client, standalone connectors, discovery
     group, connection factories, external JMS destinations) plus a "default"
     server covering security, JDBC journal, address settings, cluster
     connections, bridges, (pooled) connection factories with
     credential-references, and JGroups broadcast/discovery groups.
     Attribute values are ${property:default} expressions where expression
     support is being tested.
     NOTE(review): server-name="=foo" (leading '=') and
     connectors="in-vm client" on InVmConnectionFactory (no connectors with
     those exact names are declared here) look odd - presumably deliberate
     parser-level test values; confirm against the parser test before
     "fixing" them. -->
<subsystem xmlns="urn:jboss:domain:messaging-activemq:6.0">
    <global-client thread-pool-max-size="${global.client.thread-pool-max-size:32}"
                   scheduled-thread-pool-max-size="${global.client.scheduled.thread-pool-max-size:54}" />

    <http-connector name="client-http"
                    socket-binding="http"
                    endpoint="http"
                    server-name="=foo">
        <param name="batch-delay" value="${batch.delay:50}"/>
    </http-connector>

    <remote-connector name="client-netty-throughput"
                      socket-binding="messaging-throughput">
        <param name="batch-delay" value="${batch.delay:50}"/>
    </remote-connector>

    <in-vm-connector name="in-vm-client"
                     server-id="${my.server-id:0}"/>

    <connector name="myconnector-client"
               factory-class="org.apache.activemq.artemis.core.remoting.impl.netty.NettyConnectorFactory">
        <param name="host" value="192.168.1.2"/>
        <param name="port" value="5445"/>
        <param name="key-store-path" value="path/to/server.jks"/>
        <param name="key-store-password" value="${VAULT::server-key::key-store-password::sharedKey}"/>
    </connector>

    <discovery-group name="client-discovery-group-1"
                     socket-binding="group-t-binding"/>

    <connection-factory name="InVmConnectionFactory"
                        connectors="in-vm client"
                        entries="${connection-factory.entries.entry:java:/ClientConnectionFactory}" />

    <pooled-connection-factory name="hornetq-ra-local"
                               transaction="local"
                               user="alice"
                               password="alicepassword"
                               use-auto-recovery="${use.auto.recovery:true}"
                               connectors="in-vm-client"
                               entries="java:/JmsLocal"/>

    <external-jms-queue name="testQueue"
                        entries="${jms-queue.entry:queue/client-test}"/>
    <external-jms-queue name="testQueue2"
                        entries="java:/global/queue/test java:/global/queue/client-test2"/>
    <external-jms-topic name="testTopic"
                        entries="${jms-topic.entry:topic/client-test}"/>

    <server name="default">

        <security elytron-domain="elytronDomain"/>
        <cluster user="testuser">
            <credential-reference store="cs1" alias="testuser"/>
        </cluster>
        <!-- JDBC journal: persistence delegated to the "fooDS" datasource,
             with every table name and timeout overridden from its default -->
        <journal datasource="fooDS"
                 database="mysql"
                 jdbc-lock-expiration="891"
                 jdbc-lock-renew-period="892"
                 jdbc-network-timeout="4567"
                 messages-table="MY_MESSAGES"
                 bindings-table="MY_BINDINGS"
                 jms-bindings-table="MY_JMS_BINDINGS"
                 large-messages-table="MY_LARGE_MESSAGES"
                 node-manager-store-table="MY_NODE_MANAGER_STORE"
                 page-store-table="MY_PAGE_STORE"
                 global-max-disk-usage="70"
                 global-max-memory-size="100000"
                 disk-scan-period="10000"/>

        <replication-colocated>
            <master />
        </replication-colocated>

        <address-setting name="#"
                         auto-create-queues="true"
                         auto-delete-queues="true"
                         auto-create-addresses="false"
                         auto-delete-addresses="false" />

        <http-connector name="http"
                        socket-binding="http"
                        endpoint="http"
                        server-name="=foo">
            <param name="batch-delay" value="${batch.delay:50}"/>
        </http-connector>

        <cluster-connection name="cc1"
                            address="${address:cc1-address}"
                            connector-name="netty"
                            producer-window-size="${producer.windows.size:5678}"
                            static-connectors="in-vm netty" />

        <!-- bridge1 authenticates via credential-reference, bridge2 via an
             inline password attribute - both forms are covered -->
        <bridge name="bridge1"
                queue-name="${queue.name:coreQueueA}"
                forwarding-address="${forwarding.address:forwardingaddress1}"
                producer-window-size="${producer.windows.size:5678}"
                static-connectors="in-vm netty"
                user="${user:Brian}">
            <credential-reference clear-text="secret1"/>
        </bridge>
        <bridge name="bridge2"
                queue-name="${queue.name:coreQueueA}"
                forwarding-address="${forwarding.address:forwardingaddress1}"
                producer-window-size="${producer.windows.size:5678}"
                static-connectors="in-vm netty"
                user="${user:Brian}"
                password="${password:secret}">
        </bridge>

        <connection-factory name="otherConnectionFactory"
                            discovery-group="groupC"
                            entries="otherConnectionFactory"
                            deserialization-black-list="org.foo.Bar org.foo.Baz"
                            deserialization-white-list="org.bar.Bar org.baz.Baz"
                            initial-message-packet-size="${initial.message.packet.size:12345}"/>

        <pooled-connection-factory name="hornetq-ra-local"
                                   transaction="local"
                                   user="alice"
                                   password="alicepassword"
                                   connectors="in-vm"
                                   entries="java:/JmsLocal"
                                   statistics-enabled="true">
            <inbound-config
                    rebalance-connections="true" />
            <outbound-config
                    allow-local-transactions="true" />

        </pooled-connection-factory>
        <pooled-connection-factory name="pcf-with-credential-reference"
                                   entries="java:/JmsLocal2"
                                   connectors="in-vm"
                                   user="foo"
                                   deserialization-black-list="org.foo.Bar org.foo.Baz"
                                   deserialization-white-list="org.bar.Bar org.baz.Baz">
            <credential-reference clear-text="passwordOut!"/>
        </pooled-connection-factory>

        <broadcast-group name="groupT"
                         jgroups-channel="ee"
                         jgroups-cluster="activemq-cluster"/>

        <discovery-group name="groupU"
                         jgroups-channel="ee"
                         jgroups-cluster="activemq-cluster"/>
    </server>
    <server name="other">
        <replication-master />
    </server>
</subsystem>

0 comments on commit e501546

Please sign in to comment.