diff --git a/grabdish/destroy-multicloud.sh b/grabdish/destroy-multicloud.sh index 6e2c76206..9bef2595e 100755 --- a/grabdish/destroy-multicloud.sh +++ b/grabdish/destroy-multicloud.sh @@ -7,5 +7,8 @@ echo Deleting the Verrazzano custom resource.... kubectl delete verrazzano example-verrazzano +echo Set verrazzano-managed=false istio-injection=disabled ... +kubectl label namespace msdataworkshop verrazzano-managed=false istio-injection=disabled --overwrite + echo Viewing the uninstall logs... kubectl logs -f $(kubectl get pod -l job-name=uninstall-example-verrazzano -o jsonpath="{.items[0].metadata.name}") \ No newline at end of file diff --git a/grabdish/frontend-helidon/src/main/resources/web/index.html b/grabdish/frontend-helidon/src/main/resources/web/index.html index 7460ff0ee..265ad34b3 100644 --- a/grabdish/frontend-helidon/src/main/resources/web/index.html +++ b/grabdish/frontend-helidon/src/main/resources/web/index.html @@ -144,6 +144,13 @@

Add Inventory Remove Inventory Get Inventory
+
+Transactional Exactly-Once Message Delivery Tests...
+ Crash order service after Order is inserted (before Order message is sent to Inventory service)
+ Crash Inventory service after Order message is received (before inventory for order is checked)
+ Crash Inventory service after inventory for order is checked (before Inventory status message is sent)
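These buttons arm crash points exposed by the services added in this diff. A condensed sketch of the pattern, with a hypothetical class name (the real code is in KafkaPostgressInventoryResource and KafkaPostgresOrderEventConsumer below): a GET endpoint sets a static flag, and the consumer loop calls System.exit(-1) at the matching step so Kafka redelivery can be observed when the pod restarts.

package io.helidon.data.examples;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Response;

@Path("/")
public class CrashPointSketch { // hypothetical name; the real endpoints live in the resource classes below

    static volatile boolean crashAfterOrderMessageReceived;

    @Path("/crashAfterOrderMessageReceived")
    @GET
    public Response armCrashPoint() {
        crashAfterOrderMessageReceived = true; // armed until the pod exits
        return Response.ok().entity("crash flag set").build();
    }

    // called from the Kafka consumer loop right after a record arrives
    static void maybeCrash() {
        if (crashAfterOrderMessageReceived) System.exit(-1); // simulate a mid-flight failure
    }
}

Because the flag is consulted at a single, well-defined point in the message flow, each button isolates one failure window in the order-to-inventory exchange.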
@@ -799,6 +806,82 @@

+ + + + + 4.0.0 + inventory-postgres-kafka + 0.0.1-SNAPSHOT + ${project.artifactId} + stateful microservices demo + + + io.helidon.applications + helidon-mp + 2.2.0 + + + + + true + true + true + libs + ${env.DOCKER_REGISTRY} + + + + + src/main/resources + true + + + + + + com.spotify + dockerfile-maven-plugin + 1.4.13 + + + default + + build + + + + + ${env.DOCKER_REGISTRY}/${project.artifactId} + 0.1 + + ${project.build.finalName}.jar + + + + + maven-dependency-plugin + + + copy-dependencies + prepare-package + + copy-dependencies + + + ${project.build.directory}/${dependenciesDirectory} + false + false + true + true + runtime + test + false + + + + + + maven-jar-plugin + + + + true + ${dependenciesDirectory} + io.helidon.microprofile.server.Main + + + + + + maven-resources-plugin + + + copy-resources + process-resources + + copy-resources + + + ${project.build.directory} + + + src/main/docker + true + + Dockerfile + + + + + + + + + + + + + + + + io.helidon + helidon-bom + 1.4.2 + pom + import + + + + + + + + io.helidon.microprofile.bundles + helidon-microprofile + + + org.jboss + jandex + runtime + true + + + jakarta.activation + jakarta.activation-api + runtime + + + com.fasterxml.jackson.core + jackson-databind + + + javax.json.bind + javax.json.bind-api + 1.0 + + + io.helidon.integrations.cdi + helidon-integrations-cdi-datasource-hikaricp + runtime + + + org.postgresql + postgresql + 42.2.0 + + + org.apache.kafka + kafka-clients + 2.8.0 + + + + + + + + + diff --git a/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/Inventory.java b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/Inventory.java new file mode 100644 index 000000000..355919e4b --- /dev/null +++ b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/Inventory.java @@ -0,0 +1,42 @@ +/* + + ** + ** Copyright (c) 2021 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + */ +package io.helidon.data.examples; + +public class Inventory { + + private String orderid; + private String itemid; + private String inventorylocation; + private String suggestiveSale; + + public Inventory() { + + } + + public Inventory(String orderid, String itemid, String inventorylocation, String suggestiveSale) { + this.orderid = orderid; + this.itemid = itemid; + this.inventorylocation = inventorylocation; + this.suggestiveSale = suggestiveSale; + } + + public String getOrderid() { + return orderid; + } + + public String getItemid() { + return itemid; + } + + public String getInventorylocation() { + return inventorylocation; + } + + public String getSuggestiveSale() { + return suggestiveSale; + } +} diff --git a/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/InventoryApplication.java b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/InventoryApplication.java new file mode 100644 index 000000000..1856a9832 --- /dev/null +++ b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/InventoryApplication.java @@ -0,0 +1,26 @@ +/* + + ** + ** Copyright (c) 2021 Oracle and/or its affiliates. 
+ ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + */ +package io.helidon.data.examples; + +import javax.enterprise.context.ApplicationScoped; +import javax.ws.rs.ApplicationPath; +import javax.ws.rs.core.Application; +import java.util.HashSet; +import java.util.Set; + +@ApplicationScoped +@ApplicationPath("/") +public class InventoryApplication extends Application { + + @Override + public Set> getClasses() { + Set> s = new HashSet>(); + s.add(KafkaPostgressInventoryResource.class); + return s; + } + +} diff --git a/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/JsonUtils.java b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/JsonUtils.java new file mode 100644 index 000000000..beca30217 --- /dev/null +++ b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/JsonUtils.java @@ -0,0 +1,70 @@ +/* + + ** + ** Copyright (c) 2021 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + */ +package io.helidon.data.examples; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; + +import java.io.IOException; + +public class JsonUtils { + private final ObjectMapper json; + public JsonUtils() { + json = new ObjectMapper(); + json.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false); + } + public static ObjectMapper json() { + return InstanceHolder.json.json; + } + public static T read(String src, Class valueType) { + try { + return json().readValue(src, valueType); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + public static T read(String src, TypeReference valueTypeRef) { + try { + return json().readValue(src, valueTypeRef); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + public static T read(byte[] src, Class valueType) { + try { + return json().readValue(src, valueType); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + public static T read(byte[] src, TypeReference valueTypeRef) { + try { + return json().readValue(src, valueTypeRef); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + public static String writeValueAsString(Object value) { + try { + return json().writeValueAsString(value); + } catch (JsonProcessingException e) { + throw new IllegalStateException(e); + } + } + public static byte[] writeValueAsBytes(Object value) { + try { + return json().writeValueAsBytes(value); + } catch (JsonProcessingException e) { + throw new IllegalStateException(e); + } + } + private static class InstanceHolder { + static final JsonUtils json = new JsonUtils(); + } +} \ No newline at end of file diff --git a/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/KafkaPostgresOrderEventConsumer.java b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/KafkaPostgresOrderEventConsumer.java new file mode 100644 index 000000000..2fd84e97d --- /dev/null +++ b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/KafkaPostgresOrderEventConsumer.java @@ -0,0 +1,151 @@ +/* + + ** + ** Copyright (c) 2021 Oracle and/or its affiliates. 
+ ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + */ +package io.helidon.data.examples; + +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; + +import java.sql.*; + + +import java.util.Properties; +import java.util.Arrays; + +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; + +public class KafkaPostgresOrderEventConsumer implements Runnable { + + + final static String orderTopicName = "order.topic"; + final static String inventoryTopicName = "inventory.topic"; + KafkaPostgressInventoryResource inventoryResource; + + public KafkaPostgresOrderEventConsumer(KafkaPostgressInventoryResource inventoryResource) { + this.inventoryResource = inventoryResource; + } + + @Override + public void run() { + System.out.println("Receive messages..."); + try { + listenForOrderEvents(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void listenForOrderEvents() { + String topicName = orderTopicName; + Properties props = new Properties(); + props.put("bootstrap.servers", "kafka-service:9092"); + props.put("group.id", "test"); + props.put("enable.auto.commit", "true"); + props.put("auto.commit.interval.ms", "1000"); + props.put("session.timeout.ms", "30000"); + props.put("key.deserializer", + "org.apache.kafka.common.serialization.StringDeserializer"); + props.put("value.deserializer", + "org.apache.kafka.common.serialization.StringDeserializer"); + System.out.println("KafkaPostgresOrderEventConsumer about to listen for messages..."); + KafkaConsumer consumer = new KafkaConsumer (props); + System.out.println("KafkaPostgresOrderEventConsumer consumer:" + consumer); + consumer.subscribe(Arrays.asList(topicName)); + System.out.println("Subscribed to topic " + topicName); + while (true) { + ConsumerRecords records = consumer.poll(100); + for (ConsumerRecord record : records) { + System.out.printf("KafkaPostgresOrderEventConsumer message offset = %d, key = %s, value = %s\n", + record.offset(), record.key(), record.value()); + String txt = record.value(); + System.out.println("KafkaPostgresOrderEventConsumer txt " + txt); + if (txt.indexOf("{") > -1) try { + Order order = JsonUtils.read(txt, Order.class); + System.out.print(" orderid:" + order.getOrderid()); + System.out.print(" itemid:" + order.getItemid()); + if (inventoryResource.crashAfterOrderMessageReceived) System.exit(-1); + updateDataAndSendEventOnInventory(order.getOrderid(), order.getItemid()); + if (inventoryResource.crashAfterOrderMessageProcessed) System.exit(-1); + } catch (Exception ex) { + System.out.printf("message did not contain order"); + ex.printStackTrace(); + } + } + } + } + + private void updateDataAndSendEventOnInventory( String orderid, String itemid) throws Exception { + String inventorylocation = evaluateInventory(itemid); + Inventory inventory = new Inventory(orderid, itemid, inventorylocation, "beer"); //static suggestiveSale - represents an additional service/event + String jsonString = JsonUtils.writeValueAsString(inventory); + System.out.println("send inventory status message... 
jsonString:" + jsonString ); + System.out.println("sendInsertAndSendOrderMessage........."); + String topicName = inventoryTopicName; + Properties props = new Properties(); + props.put("bootstrap.servers", "kafka-service:9092"); + props.put("acks", "all"); + props.put("retries", 0); + props.put("batch.size", 16384); + props.put("linger.ms", 1); + props.put("buffer.memory", 33554432); + props.put("key.serializer", + "org.apache.kafka.common.serialization.StringSerializer"); + props.put("value.serializer", + "org.apache.kafka.common.serialization.StringSerializer"); + Producer producer = new KafkaProducer + (props); + producer.send(new ProducerRecord(topicName, + "inventory", jsonString)); + System.out.println("KafkaPostgresOrderEventConsumer.Message sent successfully:" + jsonString); + producer.close(); + } + + private String evaluateInventory(String id) { + System.out.println("KafkaPostgresOrderEventConsumer postgresDataSource:" + inventoryResource.postgresDataSource); + System.out.println("KafkaPostgresOrderEventConsumer evaluateInventory for inventoryid:" + id); + String DECREMENT_BY_ID = + "update inventory set inventorycount = inventorycount - 1 where inventoryid = ? and inventorycount > 0 returning inventorylocation into ?"; +// try (CallableStatement st = inventoryResource.postgresDataSource.getConnection().prepareCall(DECREMENT_BY_ID)) { + try (PreparedStatement st = inventoryResource.postgresDataSource.getConnection().prepareStatement( + "select inventorycount, inventorylocation from inventory where inventoryid = ?" + )) { + st.setString(1, id); +// st.re.registerOutParameter(2, Types.VARCHAR); + ResultSet rs = st.executeQuery(); + rs.next(); + int inventoryCount = rs.getInt(1); + String inventorylocation = rs.getString(2); + rs.close(); + System.out.println("InventoryServiceOrderEventConsumer.updateDataAndSendEventOnInventory id {" + id + "} location {" + inventorylocation + "} inventoryCount:" + inventoryCount); + if (inventoryCount > 0) { + return inventorylocation; + } else { + return "inventorydoesnotexist"; + } + } catch (SQLException throwables) { + throwables.printStackTrace(); + } + return "unable to find inventory status"; + } + + private void createInventoryTable(Connection connection) throws SQLException { + System.out.println("KafkaPostgresOrderEventConsumer createInventoryTable"); + connection.prepareStatement( + "create table inventory ( inventoryid varchar(16) PRIMARY KEY NOT NULL, inventorylocation varchar(32), inventorycount integer CONSTRAINT positive_inventory CHECK (inventorycount >= 0) )").execute(); + } + + private void populateInventoryTable(Connection connection) throws SQLException { + System.out.println("KafkaPostgresOrderEventConsumer populateInventoryTable"); + connection.prepareStatement("insert into inventory values ('sushi', '1468 WEBSTER ST,San Francisco,CA', 0)").execute(); + connection.prepareStatement("insert into inventory values ('pizza', '1469 WEBSTER ST,San Francisco,CA', 0)").execute(); + connection.prepareStatement("insert into inventory values ('burger', '1470 WEBSTER ST,San Francisco,CA', 0)").execute(); + } + + +} diff --git a/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/KafkaPostgressInventoryResource.java b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/KafkaPostgressInventoryResource.java new file mode 100755 index 000000000..92ed5ee8a --- /dev/null +++ b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/KafkaPostgressInventoryResource.java @@ -0,0 +1,132 @@ 
+/* + + ** + ** Copyright (c) 2021 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + */ +package io.helidon.data.examples; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; + +import javax.enterprise.context.ApplicationScoped; +import javax.enterprise.context.Initialized; +import javax.enterprise.event.Observes; +import javax.inject.Inject; +import javax.inject.Named; +import javax.sql.DataSource; +import javax.ws.rs.*; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +@Path("/") +@ApplicationScoped +public class KafkaPostgressInventoryResource { + + + @Inject + @Named("postgresDataSource") + DataSource postgresDataSource; + + static boolean crashAfterOrderMessageReceived; + static boolean crashAfterOrderMessageProcessed; + + public void init(@Observes @Initialized(ApplicationScoped.class) Object init) throws SQLException { + System.out.println("InventoryResource.init KafkaPostgresOrderEventConsumer().testConnection() " + init); + listenForMessages(); + } + + public Response listenForMessages() { + new Thread(new KafkaPostgresOrderEventConsumer(this)).start(); + final Response returnValue = Response.ok() + .entity("now listening for messages...") + .build(); + return returnValue; + } + + @Path("/addInventory") + @GET + @Produces(MediaType.TEXT_PLAIN) + public Response addInventory(@QueryParam("itemid") String itemid) { + String response; + System.out.println("KafkaPostgressInventoryResource.addInventory itemid:" + itemid); + try { + Connection conn = postgresDataSource.getConnection(); + conn.createStatement().execute( + "UPDATE inventory SET inventorycount = inventorycount + 1 where inventoryid = '" + itemid + "'"); + response = getInventoryCount(itemid, conn); + } catch (SQLException ex) { + response = ex.getMessage(); + } + return Response.ok() + .entity(response) + .build(); + } + + @Path("/removeInventory") + @GET + @Produces(MediaType.TEXT_PLAIN) + public Response removeInventory(@QueryParam("itemid") String itemid) { + String response; + System.out.println("KafkaPostgressInventoryResource.removeInventory itemid:" + itemid); + try (Connection conn = postgresDataSource.getConnection()) { + conn.createStatement().execute( + "UPDATE inventory SET inventorycount = inventorycount - 1 where inventoryid = '" + itemid + "'"); + response = getInventoryCount(itemid, conn); + } catch (SQLException ex) { + response = ex.getMessage(); + } + return Response.ok() + .entity(response) + .build(); + } + + @Path("/getInventory") + @GET + @Produces(MediaType.TEXT_PLAIN) + public Response getInventoryCount(@QueryParam("itemid") String itemid) { + String response; + System.out.println("KafkaPostgressInventoryResource.getInventoryCount itemid:" + itemid); + try (Connection conn = postgresDataSource.getConnection()) { + response = getInventoryCount(itemid, conn); + } catch (SQLException ex) { + response = ex.getMessage(); + } + return Response.ok() + .entity(response) + .build(); + } + + private String getInventoryCount(String itemid, Connection conn) throws SQLException { + ResultSet resultSet = conn.createStatement().executeQuery( + "select inventorycount from inventory where inventoryid = '" + itemid + "'"); + int inventorycount; + if (resultSet.next()) { + inventorycount = resultSet.getInt("inventorycount"); + System.out.println("KafkaPostgressInventoryResource.getInventoryCount inventorycount:" + inventorycount); + } else inventorycount = 0; + 
conn.close(); + return "inventorycount for " + itemid + " is now " + inventorycount; + } + + @Path("/crashAfterOrderMessageReceived") + @GET + @Produces(MediaType.TEXT_PLAIN) + public Response crashAfterOrderMessageReceived() { + crashAfterOrderMessageReceived = true; + return Response.ok() + .entity("inventory crashAfterOrderMessageReceived set") + .build(); + } + + @Path("/crashAfterOrderMessageProcessed") + @GET + @Produces(MediaType.TEXT_PLAIN) + public Response crashAfterOrderMessageProcessed() { + crashAfterOrderMessageProcessed = true; + return Response.ok() + .entity("inventory crashAfterOrderMessageProcessed set") + .build(); + } +} diff --git a/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/Order.java b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/Order.java new file mode 100644 index 000000000..001270c5e --- /dev/null +++ b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/Order.java @@ -0,0 +1,75 @@ +/* + + ** + ** Copyright (c) 2021 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + */ +package io.helidon.data.examples; + +import javax.json.bind.annotation.JsonbProperty; + +public class Order { + private String orderid; + private String itemid; + private String deliverylocation; + @JsonbProperty(nillable = true) + private String status; + @JsonbProperty(nillable = true) + private String inventoryLocation; + @JsonbProperty(nillable = true) + private String suggestiveSale; + + public Order() { + } + + public Order(String orderId, String itemId, String deliverylocation) { + this.orderid = orderId; + this.itemid = itemId; + this.deliverylocation = deliverylocation; + } + + public Order(String orderId, String itemId, String deliverylocation, + String status, String inventoryLocation, String suggestiveSale) { + this.orderid = orderId; + this.itemid = itemId; + this.deliverylocation = deliverylocation; + this.status = status; + this.inventoryLocation = inventoryLocation; + this.suggestiveSale = suggestiveSale; + } + + public String getOrderid() { + return orderid; + } + + public String getItemid() { + return itemid; + } + + public String getDeliverylocation() { + return deliverylocation; + } + + public String getStatus() { + return status; + } + + public String getInventoryLocation() { + return inventoryLocation; + } + + public String getSuggestiveSale() { + return suggestiveSale; + } + + public String toString() { + String returnString = ""; + returnString+="
orderId = " + orderid; + returnString+="
itemid = " + itemid; + returnString+="
suggestiveSale = " + suggestiveSale; + returnString+="
inventoryLocation = " + inventoryLocation; + returnString+="
orderStatus = " + status; + returnString+="
deliveryLocation = " + deliverylocation; + return returnString; + } +} diff --git a/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/package-info.java b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/package-info.java new file mode 100755 index 000000000..41e577a26 --- /dev/null +++ b/grabdish/inventory-postgres-kafka/src/main/java/io/helidon/data/examples/package-info.java @@ -0,0 +1,12 @@ +/* + + ** + ** Copyright (c) 2021 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + */ + +/** + * Provides JAX-RS-related classes and interfaces for this example + * project. + */ +package io.helidon.data.examples; diff --git a/grabdish/inventory-postgres-kafka/src/main/resources/META-INF/beans.xml b/grabdish/inventory-postgres-kafka/src/main/resources/META-INF/beans.xml new file mode 100755 index 000000000..0c0b5fc54 --- /dev/null +++ b/grabdish/inventory-postgres-kafka/src/main/resources/META-INF/beans.xml @@ -0,0 +1,18 @@ + + + + + diff --git a/grabdish/inventory-postgres-kafka/src/main/resources/META-INF/microprofile-config.properties b/grabdish/inventory-postgres-kafka/src/main/resources/META-INF/microprofile-config.properties new file mode 100755 index 000000000..e5027ed7f --- /dev/null +++ b/grabdish/inventory-postgres-kafka/src/main/resources/META-INF/microprofile-config.properties @@ -0,0 +1,26 @@ + +## +## Copyright (c) 2021 Oracle and/or its affiliates. +## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + + +# DB properties +javax.sql.DataSource.postgresDataSource.dataSourceClassName = org.postgresql.ds.PGSimpleDataSource +javax.sql.DataSource.postgresDataSource.dataSource.url = jdbc:postgresql://postgres.msdataworkshop:5432/postgresdb +javax.sql.DataSource.postgresDataSource.dataSource.user = postgresadmin +javax.sql.DataSource.postgresDataSource.dataSource.password = admin123 + +# Microprofile server properties +server.port=8080 +server.host=0.0.0.0 + +# Microprofile Tracing Properties +tracing.service=inventory-postgres-kafka.msdataworkshop +tracing.protocol=http +tracing.host=jaeger-collector.msdataworkshop +tracing.port=14268 +tracing.path=/api/traces +tracing.propagation=b3 +tracing.log-spans=true +#tracing.sampler-type=const +tracing.sampler-param=1 \ No newline at end of file diff --git a/grabdish/inventory-postgres-kafka/src/main/resources/logging.properties b/grabdish/inventory-postgres-kafka/src/main/resources/logging.properties new file mode 100644 index 000000000..d38b23789 --- /dev/null +++ b/grabdish/inventory-postgres-kafka/src/main/resources/logging.properties @@ -0,0 +1,12 @@ +## Copyright (c) 2021 Oracle and/or its affiliates. +## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +handlers=java.util.logging.FileHandler, java.util.logging.ConsoleHandler +# Global default logging level. 
Can be overridden by specific handlers and loggers +.level=INFO + +org.apache.kafka.common.utils.AppInfoParser.level=SEVERE +org.apache.kafka.clients.level=WARNING +org.apache.kafka.clients.consumer.ConsumerConfig.level=SEVERE +org.apache.kafka.clients.producer.ProducerConfig.level=SEVERE +io.jaegertracing.internal.level=OFF \ No newline at end of file diff --git a/grabdish/inventory-postgres-kafka/undeploy-verrazzano.sh b/grabdish/inventory-postgres-kafka/undeploy-verrazzano.sh new file mode 100755 index 000000000..c7da1a53b --- /dev/null +++ b/grabdish/inventory-postgres-kafka/undeploy-verrazzano.sh @@ -0,0 +1,9 @@ +#!/bin/bash +## Copyright (c) 2021 Oracle and/or its affiliates. +## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + + +echo delete inventory-postgres-kafka OAM Component and ApplicationConfiguration + +kubectl delete applicationconfiguration inventory-postgres-kafka-appconf -n msdataworkshop +kubectl delete component inventory-postgres-kafka-component -n msdataworkshop diff --git a/grabdish/inventory-postgres-kafka/undeploy.sh b/grabdish/inventory-postgres-kafka/undeploy.sh new file mode 100755 index 000000000..7ffcde299 --- /dev/null +++ b/grabdish/inventory-postgres-kafka/undeploy.sh @@ -0,0 +1,8 @@ +#!/bin/bash +## Copyright (c) 2021 Oracle and/or its affiliates. +## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + + +echo delete inventory-postgres-kafka deployment... + +kubectl delete deployment inventory-postgres-kafka -n msdataworkshop diff --git a/grabdish/mongodb-kafka-postgres/docker-compose.yml b/grabdish/mongodb-kafka-postgres/docker-compose.yml new file mode 100755 index 000000000..71f93a9d2 --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/docker-compose.yml @@ -0,0 +1,16 @@ +version: '3' +services: + mongodb: + image: mongo + ports: + - "27017:27017" + volumes: + - "mongodata:/data/db" + networks: + - network1 + +volumes: + mongodata: + +networks: + network1: \ No newline at end of file diff --git a/grabdish/mongodb-kafka-postgres/install-kafka.sh b/grabdish/mongodb-kafka-postgres/install-kafka.sh new file mode 100755 index 000000000..a546710fb --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/install-kafka.sh @@ -0,0 +1,5 @@ +#!/bin/bash +## Copyright (c) 2021 Oracle and/or its affiliates. +## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +kubectl apply -f ./kafka-all.yaml -n msdataworkshop diff --git a/grabdish/mongodb-kafka-postgres/install-mongodb.sh b/grabdish/mongodb-kafka-postgres/install-mongodb.sh new file mode 100755 index 000000000..bc70c7dbf --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/install-mongodb.sh @@ -0,0 +1,8 @@ +#!/bin/bash +## Copyright (c) 2021 Oracle and/or its affiliates. +## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +kubectl create -f mongodata-persistentvolumeclaim.yaml -n msdataworkshop +kubectl create -f mongodb-deployment.yaml -n msdataworkshop +kubectl create -f mongodb-service.yaml -n msdataworkshop +kubectl expose deployment mongodb --type=LoadBalancer -n msdataworkshop diff --git a/grabdish/mongodb-kafka-postgres/install-postgres.sh b/grabdish/mongodb-kafka-postgres/install-postgres.sh new file mode 100755 index 000000000..03f9b5fff --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/install-postgres.sh @@ -0,0 +1,10 @@ +#!/bin/bash +## Copyright (c) 2021 Oracle and/or its affiliates.
+## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +kubectl create -f postgres-configmap.yaml -n msdataworkshop +kubectl create -f postgres-storage.yaml -n msdataworkshop +kubectl create -f postgres-deployment.yaml -n msdataworkshop +kubectl create -f postgres-service.yaml -n msdataworkshop +kubectl get svc postgres + diff --git a/grabdish/mongodb-kafka-postgres/kafka-all.yaml b/grabdish/mongodb-kafka-postgres/kafka-all.yaml new file mode 100755 index 000000000..6ebfbd518 --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/kafka-all.yaml @@ -0,0 +1,108 @@ +apiVersion: v1 +kind: Service +metadata: + name: zoo1 + labels: + app: zookeeper-1 +spec: + ports: + - name: client + port: 2181 + protocol: TCP + - name: follower + port: 2888 + protocol: TCP + - name: leader + port: 3888 + protocol: TCP + selector: + app: zookeeper-1 +--- +apiVersion: v1 +kind: Service +metadata: + name: kafka-service + labels: + name: kafka +spec: + ports: + - port: 9092 + name: kafka-port + protocol: TCP + selector: + app: kafka + id: "0" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper-deployment-1 +spec: + selector: + matchLabels: + app: zookeeper-1 + template: + metadata: + labels: + app: zookeeper-1 + spec: + containers: + - name: zoo1 + image: digitalwonderland/zookeeper + ports: + - containerPort: 2181 + env: + - name: ZOOKEEPER_ID + value: "1" + - name: ZOOKEEPER_SERVER_1 + value: zoo1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-broker0 +spec: + selector: + matchLabels: + app: kafka + template: + metadata: + labels: + app: kafka + id: "0" + spec: + containers: + - name: kafka + image: wurstmeister/kafka + ports: + - containerPort: 9092 + env: + - name: KAFKA_ADVERTISED_PORT + value: "9092" + - name: KAFKA_ADVERTISED_HOST_NAME + value: kafka-service + - name: KAFKA_ZOOKEEPER_CONNECT + value: zoo1:2181 + - name: KAFKA_BROKER_ID + value: "0" + - name: KAFKA_CREATE_TOPICS + value: sample.topic:1:1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-cat +spec: + selector: + matchLabels: + app: kafka-cat + template: + metadata: + labels: + app: kafka-cat + spec: + containers: + - name: kafka-cat + image: confluentinc/cp-kafkacat + command: ["/bin/sh"] + args: ["-c", "trap : TERM INT; sleep infinity & wait"] \ No newline at end of file diff --git a/grabdish/mongodb-kafka-postgres/mongodata-persistentvolumeclaim.yaml b/grabdish/mongodb-kafka-postgres/mongodata-persistentvolumeclaim.yaml new file mode 100755 index 000000000..564620064 --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/mongodata-persistentvolumeclaim.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: mongodata + name: mongodata +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +status: {} diff --git a/grabdish/mongodb-kafka-postgres/mongodb-deployment.yaml b/grabdish/mongodb-kafka-postgres/mongodb-deployment.yaml new file mode 100755 index 000000000..5c5af52e6 --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/mongodb-deployment.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert -f docker-compose.yml + kompose.version: 1.11.0 (39ad614) + creationTimestamp: null + labels: + io.kompose.service: mongodb + name: mongodb +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: mongodb + strategy: + type: Recreate + template: + metadata: + 
creationTimestamp: null + labels: + io.kompose.service: mongodb + spec: + containers: + - image: mongo + name: mongodb + ports: + - containerPort: 27017 + resources: {} + volumeMounts: + - mountPath: /data/db + name: mongodata + restartPolicy: Always + volumes: + - name: mongodata + persistentVolumeClaim: + claimName: mongodata +status: {} diff --git a/grabdish/mongodb-kafka-postgres/mongodb-service.yaml b/grabdish/mongodb-kafka-postgres/mongodb-service.yaml new file mode 100755 index 000000000..04a1b6bca --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/mongodb-service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert -f docker-compose.yml + kompose.version: 1.11.0 (39ad614) + creationTimestamp: null + labels: + io.kompose.service: mongodb + name: mongodb +spec: + type: LoadBalancer + ports: + - name: "27017" + port: 27017 + targetPort: 27017 + selector: + io.kompose.service: mongodb +status: + loadBalancer: {} diff --git a/grabdish/mongodb-kafka-postgres/postgres-configmap.yaml b/grabdish/mongodb-kafka-postgres/postgres-configmap.yaml new file mode 100755 index 000000000..c94b6ad16 --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/postgres-configmap.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-config + labels: + app: postgres +data: + POSTGRES_DB: postgresdb + POSTGRES_USER: postgresadmin + POSTGRES_PASSWORD: admin123 \ No newline at end of file diff --git a/grabdish/mongodb-kafka-postgres/postgres-deployment.yaml b/grabdish/mongodb-kafka-postgres/postgres-deployment.yaml new file mode 100755 index 000000000..d8f4e2600 --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/postgres-deployment.yaml @@ -0,0 +1,32 @@ +## Copyright (c) 2021 Oracle and/or its affiliates. 
+## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgres +spec: + replicas: 1 + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: postgres:10.4 + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 5432 + envFrom: + - configMapRef: + name: postgres-config + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: postgredb + volumes: + - name: postgredb + persistentVolumeClaim: + claimName: postgres-pv-claim diff --git a/grabdish/mongodb-kafka-postgres/postgres-service.yaml b/grabdish/mongodb-kafka-postgres/postgres-service.yaml new file mode 100755 index 000000000..901a21e50 --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/postgres-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: postgres + labels: + app: postgres +spec: + type: NodePort + ports: + - port: 5432 + selector: + app: postgres \ No newline at end of file diff --git a/grabdish/mongodb-kafka-postgres/postgres-storage.yaml b/grabdish/mongodb-kafka-postgres/postgres-storage.yaml new file mode 100755 index 000000000..36a8db28d --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/postgres-storage.yaml @@ -0,0 +1,29 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: postgres-pv-volume + labels: + type: local + app: postgres +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + hostPath: + path: "/mnt/data" +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: postgres-pv-claim + labels: + app: postgres +spec: + storageClassName: manual + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi \ No newline at end of file diff --git a/grabdish/mongodb-kafka-postgres/uninstall-all.sh b/grabdish/mongodb-kafka-postgres/uninstall-all.sh new file mode 100755 index 000000000..29c8531d8 --- /dev/null +++ b/grabdish/mongodb-kafka-postgres/uninstall-all.sh @@ -0,0 +1,5 @@ +#!/bin/bash +## Copyright (c) 2021 Oracle and/or its affiliates. 
+## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +kubectl delete -f ./kafka-all.yaml \ No newline at end of file diff --git a/grabdish/order-helidon/pom.xml b/grabdish/order-helidon/pom.xml index 77fdfc95e..a9ed2e910 100755 --- a/grabdish/order-helidon/pom.xml +++ b/grabdish/order-helidon/pom.xml @@ -206,11 +206,6 @@ com.fasterxml.jackson.core jackson-databind - - org.mongodb - mongo-java-driver - 3.12.8 - com.oracle.oci.sdk oci-java-sdk-vault diff --git a/grabdish/order-helidon/src/main/java/io/helidon/data/examples/MongoDBAccess.java b/grabdish/order-helidon/src/main/java/io/helidon/data/examples/MongoDBAccess.java deleted file mode 100644 index 643e03d0b..000000000 --- a/grabdish/order-helidon/src/main/java/io/helidon/data/examples/MongoDBAccess.java +++ /dev/null @@ -1,104 +0,0 @@ -package io.helidon.data.examples; - -import java.net.UnknownHostException; - -import com.mongodb.MongoClient; -import com.mongodb.MongoClientURI; -import com.mongodb.ServerAddress; - -import com.mongodb.client.MongoDatabase; -import com.mongodb.client.MongoCollection; - -import org.bson.Document; -import java.util.Arrays; -import com.mongodb.Block; - -import com.mongodb.client.MongoCursor; -import static com.mongodb.client.model.Filters.*; -import com.mongodb.client.result.DeleteResult; -import static com.mongodb.client.model.Updates.*; -import com.mongodb.client.result.UpdateResult; -import java.util.ArrayList; -import java.util.List; - -public class MongoDBAccess { - public void quicktest() { - // Create seed data - - List seedData = new ArrayList(); - - seedData.add(new Document("decade", "1970s") - .append("artist", "Debby Boone") - .append("song", "You Light Up My Life") - .append("weeksAtOne", 10) - ); - - seedData.add(new Document("decade", "1980s") - .append("artist", "Olivia Newton-John") - .append("song", "Physical") - .append("weeksAtOne", 10) - ); - - seedData.add(new Document("decade", "1990s") - .append("artist", "Mariah Carey") - .append("song", "One Sweet Day") - .append("weeksAtOne", 16) - ); - - // Standard URI format: mongodb://[dbuser:dbpassword@]host:port/dbname - - MongoClientURI uri = new MongoClientURI("mongodb://user:pass@host:port/db"); - MongoClient client = new MongoClient(uri); - MongoDatabase db = client.getDatabase(uri.getDatabase()); - - /* - * First we'll add a few songs. Nothing is required to create the - * songs collection; it is created automatically when we insert. - */ - - MongoCollection songs = db.getCollection("songs"); - - // Note that the insert method can take either an array or a document. - - songs.insertMany(seedData); - - /* - * Then we need to give Boyz II Men credit for their contribution to - * the hit "One Sweet Day". - */ - - Document updateQuery = new Document("song", "One Sweet Day"); - songs.updateOne(updateQuery, new Document("$set", new Document("artist", "Mariah Carey ft. Boyz II Men"))); - - /* - * Finally we run a query which returns all the hits that spent 10 - * or more weeks at number 1. - */ - - Document findQuery = new Document("weeksAtOne", new Document("$gte",10)); - Document orderBy = new Document("decade", 1); - - MongoCursor cursor = songs.find(findQuery).sort(orderBy).iterator(); - - try { - while (cursor.hasNext()) { - Document doc = cursor.next(); - System.out.println( - "In the " + doc.get("decade") + ", " + doc.get("song") + - " by " + doc.get("artist") + " topped the charts for " + - doc.get("weeksAtOne") + " straight weeks." 
- ); - } - } finally { - cursor.close(); - } - - // Since this is an example, we'll clean up after ourselves. - - songs.drop(); - - // Only close the connection when your app is terminating - - client.close(); -} -} \ No newline at end of file diff --git a/grabdish/order-helidon/src/main/java/io/helidon/data/examples/OrderResource.java b/grabdish/order-helidon/src/main/java/io/helidon/data/examples/OrderResource.java index 201c41b32..399e55eea 100755 --- a/grabdish/order-helidon/src/main/java/io/helidon/data/examples/OrderResource.java +++ b/grabdish/order-helidon/src/main/java/io/helidon/data/examples/OrderResource.java @@ -6,6 +6,7 @@ */ package io.helidon.data.examples; +import java.io.IOException; import java.sql.Connection; import java.sql.SQLException; import java.util.HashMap; diff --git a/grabdish/order-mongodb-kafka/Dockerfile b/grabdish/order-mongodb-kafka/Dockerfile new file mode 100644 index 000000000..d08f3798d --- /dev/null +++ b/grabdish/order-mongodb-kafka/Dockerfile @@ -0,0 +1,9 @@ +FROM openjdk:11-jre-slim + +ENTRYPOINT ["java", "-jar", "/usr/share/myservice/myservice.jar"] + +# Add Maven dependencies +ADD target/libs /usr/share/myservice/libs +# Add the service itself +ARG JAR_FILE +ADD target/${JAR_FILE} /usr/share/myservice/myservice.jar \ No newline at end of file diff --git a/grabdish/order-mongodb-kafka/build.sh b/grabdish/order-mongodb-kafka/build.sh new file mode 100755 index 000000000..6d4162f7c --- /dev/null +++ b/grabdish/order-mongodb-kafka/build.sh @@ -0,0 +1,27 @@ +#!/bin/bash +## Copyright (c) 2021 Oracle and/or its affiliates. +## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + + +SCRIPT_DIR=$(dirname $0) + +IMAGE_NAME=order-mongodb-kafka +IMAGE_VERSION=0.1 + +export DOCKER_REGISTRY=$(state_get DOCKER_REGISTRY) + +if [ -z "$DOCKER_REGISTRY" ]; then + echo "Error: DOCKER_REGISTRY env variable needs to be set!" + exit 1 +fi + +export IMAGE=${DOCKER_REGISTRY}/${IMAGE_NAME}:${IMAGE_VERSION} + +# mvn install +# mvn package docker:build +mvn package + +docker push $IMAGE +if [ $? -eq 0 ]; then + docker rmi ${IMAGE} +fi \ No newline at end of file diff --git a/grabdish/order-mongodb-kafka/deploy-verrazzano.sh b/grabdish/order-mongodb-kafka/deploy-verrazzano.sh new file mode 100755 index 000000000..cc27cf86f --- /dev/null +++ b/grabdish/order-mongodb-kafka/deploy-verrazzano.sh @@ -0,0 +1,31 @@ +#!/bin/bash +## Copyright (c) 2021 Oracle and/or its affiliates. 
+## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + + +SCRIPT_DIR=$(dirname $0) + +export DOCKER_REGISTRY="$(state_get DOCKER_REGISTRY)" +export ORDER_PDB_NAME="$(state_get ORDER_DB_NAME)" +export OCI_REGION="$(state_get OCI_REGION)" +export VAULT_SECRET_OCID="" + +echo create order-mongodb-kafka OAM Component and ApplicationConfiguration +export CURRENTTIME=$( date '+%F_%H:%M:%S' ) +echo CURRENTTIME is $CURRENTTIME ...this will be appended to generated deployment yaml + +cp order-mongodb-kafka-comp.yaml order-mongodb-kafka-comp-$CURRENTTIME.yaml + +#may hit sed incompat issue with mac +sed -i "s|%DOCKER_REGISTRY%|${DOCKER_REGISTRY}|g" order-mongodb-kafka-comp-$CURRENTTIME.yaml +sed -i "s|%ORDER_PDB_NAME%|${ORDER_PDB_NAME}|g" order-mongodb-kafka-comp-${CURRENTTIME}.yaml +sed -i "s|%OCI_REGION%|${OCI_REGION}|g" order-mongodb-kafka-comp-${CURRENTTIME}.yaml +sed -i "s|%VAULT_SECRET_OCID%|${VAULT_SECRET_OCID}|g" order-mongodb-kafka-comp-${CURRENTTIME}.yaml + +if [ -z "$1" ]; then + kubectl apply -f $SCRIPT_DIR/order-mongodb-kafka-comp-$CURRENTTIME.yaml + kubectl apply -f $SCRIPT_DIR/order-mongodb-kafka-app.yaml +else + kubectl apply -f <(istioctl kube-inject -f $SCRIPT_DIR/order-mongodb-kafka-comp-$CURRENTTIME.yaml) -n msdataworkshop +fi + diff --git a/grabdish/order-mongodb-kafka/deploy.sh b/grabdish/order-mongodb-kafka/deploy.sh new file mode 100755 index 000000000..d46a4af57 --- /dev/null +++ b/grabdish/order-mongodb-kafka/deploy.sh @@ -0,0 +1,31 @@ +#!/bin/bash +## Copyright (c) 2021 Oracle and/or its affiliates. +## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + + +SCRIPT_DIR=$(dirname $0) + +export DOCKER_REGISTRY="$(state_get DOCKER_REGISTRY)" +export ORDER_PDB_NAME="$(state_get ORDER_DB_NAME)" +export OCI_REGION="$(state_get OCI_REGION)" +export VAULT_SECRET_OCID="" + +echo create order-mongodb-kafka deployment and service... +export CURRENTTIME=$( date '+%F_%H:%M:%S' ) +echo CURRENTTIME is $CURRENTTIME ...this will be appended to generated deployment yaml + +cp order-mongodb-kafka-deployment.yaml order-mongodb-kafka-deployment-$CURRENTTIME.yaml + +#may hit sed incompat issue with mac +sed -i "s|%DOCKER_REGISTRY%|${DOCKER_REGISTRY}|g" order-mongodb-kafka-deployment-$CURRENTTIME.yaml +sed -i "s|%ORDER_PDB_NAME%|${ORDER_PDB_NAME}|g" order-mongodb-kafka-deployment-${CURRENTTIME}.yaml +sed -i "s|%OCI_REGION%|${OCI_REGION}|g" order-mongodb-kafka-deployment-${CURRENTTIME}.yaml +sed -i "s|%VAULT_SECRET_OCID%|${VAULT_SECRET_OCID}|g" order-mongodb-kafka-deployment-${CURRENTTIME}.yaml + +if [ -z "$1" ]; then + kubectl apply -f $SCRIPT_DIR/order-mongodb-kafka-deployment-$CURRENTTIME.yaml -n msdataworkshop +else + kubectl apply -f <(istioctl kube-inject -f $SCRIPT_DIR/order-mongodb-kafka-deployment-$CURRENTTIME.yaml) -n msdataworkshop +fi + +kubectl apply -f $SCRIPT_DIR/order-service.yaml -n msdataworkshop diff --git a/grabdish/order-mongodb-kafka/order-mongodb-kafka-app.yaml b/grabdish/order-mongodb-kafka/order-mongodb-kafka-app.yaml new file mode 100644 index 000000000..d4caf0926 --- /dev/null +++ b/grabdish/order-mongodb-kafka/order-mongodb-kafka-app.yaml @@ -0,0 +1,29 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+apiVersion: core.oam.dev/v1alpha2 +kind: ApplicationConfiguration +metadata: + name: order-mongodb-kafka-appconf + namespace: msdataworkshop + annotations: + version: v1.0.0 + description: "Order Helidon application" +spec: + components: + - componentName: order-mongodb-kafka-component + traits: + - trait: + apiVersion: oam.verrazzano.io/v1alpha1 + kind: MetricsTrait + spec: + scraper: verrazzano-system/vmi-system-prometheus-0 + - trait: + apiVersion: oam.verrazzano.io/v1alpha1 + kind: IngressTrait + metadata: + name: order-mongodb-kafka-ingress + spec: + rules: + - paths: + - path: "/" + pathType: Prefix \ No newline at end of file diff --git a/grabdish/order-mongodb-kafka/order-mongodb-kafka-comp.yaml b/grabdish/order-mongodb-kafka/order-mongodb-kafka-comp.yaml new file mode 100644 index 000000000..183dbb848 --- /dev/null +++ b/grabdish/order-mongodb-kafka/order-mongodb-kafka-comp.yaml @@ -0,0 +1,73 @@ +# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +apiVersion: core.oam.dev/v1alpha2 +kind: Component +metadata: + name: order-mongodb-kafka-component + namespace: msdataworkshop +spec: + workload: + apiVersion: oam.verrazzano.io/v1alpha1 + kind: VerrazzanoHelidonWorkload + metadata: + name: order-mongodb-kafka-workload + labels: + app: order + spec: + deploymentTemplate: + metadata: + name: order # this will be the deployment, pod, and service name +# every port on each container will be exposed (mapped with same port number) and name of that serviceport would be containername + - + portnumber + podSpec: +# containers: +# - name: hello-helidon-container +# image: "ghcr.io/verrazzano/example-helidon-greet-app-v1:0.1.12-1-20210409130027-707ecc4" +# ports: +# - containerPort: 8080 +# name: http +# spec: + containers: + - name: order + image: %DOCKER_REGISTRY%/order-mongodb-kafka:0.1 + imagePullPolicy: Always + env: + - name: oracle.ucp.jdbc.PoolDataSource.orderpdb.user + value: "ORDERUSER" + - name: oracle.ucp.jdbc.PoolDataSource.orderpdb.URL + value: "jdbc:oracle:thin:@%ORDER_PDB_NAME%_tp?TNS_ADMIN=/msdataworkshop/creds" + - name: orderqueuename + value: "orderqueue" + - name: inventoryqueuename + value: "inventoryqueue" + - name: OCI_REGION + value: "%OCI_REGION%" + - name: VAULT_SECRET_OCID + value: "%VAULT_SECRET_OCID%" + - name: dbpassword + valueFrom: + secretKeyRef: + name: dbuser + key: dbpassword + optional: true #not needed/used if using VAULT_SECRET_OCID exists + volumeMounts: + - name: creds + mountPath: /msdataworkshop/creds + ports: + - containerPort: 8080 + livenessProbe: + httpGet: #alternatives include exec with command, etc. + path: /health/live + port: 8080 + initialDelaySeconds: 220 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /health/ready + port: 8080 + initialDelaySeconds: 40 + periodSeconds: 3 + restartPolicy: Always + volumes: + - name: creds + secret: + secretName: db-wallet-secret \ No newline at end of file diff --git a/grabdish/order-mongodb-kafka/order-mongodb-kafka-deployment.yaml b/grabdish/order-mongodb-kafka/order-mongodb-kafka-deployment.yaml new file mode 100644 index 000000000..7905e7229 --- /dev/null +++ b/grabdish/order-mongodb-kafka/order-mongodb-kafka-deployment.yaml @@ -0,0 +1,64 @@ + +## +## Copyright (c) 2021 Oracle and/or its affiliates. 
+## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: order-mongodb-kafka +spec: + replicas: 1 + selector: + matchLabels: + app: order + template: + metadata: + labels: + app: order + version: helidon-mp + spec: + containers: + - name: order + image: %DOCKER_REGISTRY%/order-mongodb-kafka:0.1 + imagePullPolicy: Always + env: + - name: oracle.ucp.jdbc.PoolDataSource.orderpdb.user + value: "ORDERUSER" + - name: oracle.ucp.jdbc.PoolDataSource.orderpdb.URL + value: "jdbc:oracle:thin:@%ORDER_PDB_NAME%_tp?TNS_ADMIN=/msdataworkshop/creds" + - name: orderqueuename + value: "orderqueue" + - name: inventoryqueuename + value: "inventoryqueue" + - name: OCI_REGION + value: "%OCI_REGION%" + - name: VAULT_SECRET_OCID + value: "%VAULT_SECRET_OCID%" + - name: dbpassword + valueFrom: + secretKeyRef: + name: dbuser + key: dbpassword + optional: true #not needed/used if using VAULT_SECRET_OCID exists + volumeMounts: + - name: creds + mountPath: /msdataworkshop/creds + ports: + - containerPort: 8080 + livenessProbe: + httpGet: #alternatives include exec with command, etc. + path: /health/live + port: 8080 + initialDelaySeconds: 220 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /health/ready + port: 8080 + initialDelaySeconds: 40 + periodSeconds: 3 + restartPolicy: Always + volumes: + - name: creds + secret: + secretName: db-wallet-secret \ No newline at end of file diff --git a/grabdish/order-mongodb-kafka/order-service.yaml b/grabdish/order-mongodb-kafka/order-service.yaml new file mode 100644 index 000000000..768d1e32d --- /dev/null +++ b/grabdish/order-mongodb-kafka/order-service.yaml @@ -0,0 +1,17 @@ + +## +## Copyright (c) 2021 Oracle and/or its affiliates. 
+## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ +apiVersion: v1 +kind: Service +metadata: + name: order + labels: + app: order +spec: + type: NodePort + ports: + - port: 8080 + name: http + selector: + app: order diff --git a/grabdish/order-mongodb-kafka/pom.xml b/grabdish/order-mongodb-kafka/pom.xml new file mode 100755 index 000000000..25818fa5a --- /dev/null +++ b/grabdish/order-mongodb-kafka/pom.xml @@ -0,0 +1,188 @@ + + + + + + 4.0.0 + order-mongodb-kafka + 0.0.1-SNAPSHOT + ${project.artifactId} + stateful microservices demo + + + io.helidon.applications + helidon-mp + 2.2.0 + + + + + true + true + true + libs + ${env.DOCKER_REGISTRY} + + + + + src/main/resources + true + + + + + com.spotify + dockerfile-maven-plugin + 1.4.13 + + + default + + build + + + + + ${env.DOCKER_REGISTRY}/${project.artifactId} + 0.1 + + ${project.build.finalName}.jar + + + + + maven-dependency-plugin + + + copy-dependencies + prepare-package + + copy-dependencies + + + ${project.build.directory}/${dependenciesDirectory} + false + false + true + true + runtime + test + false + + + + + + maven-jar-plugin + + + + true + ${dependenciesDirectory} + io.helidon.microprofile.server.Main + + + + + + maven-resources-plugin + + + copy-resources + process-resources + + copy-resources + + + ${project.build.directory} + + + src/main/docker + true + + Dockerfile + + + + + + + + + + + + + io.helidon.microprofile.bundles + helidon-microprofile + + + org.jboss + jandex + runtime + true + + + jakarta.activation + jakarta.activation-api + runtime + + + + io.helidon.health + helidon-health + + + io.helidon.health + helidon-health-checks + + + + io.helidon.metrics + helidon-metrics + + + + io.helidon.tracing + helidon-tracing-jaeger + + + javax.json.bind + javax.json.bind-api + 1.0 + + + com.fasterxml.jackson.core + jackson-databind + + + org.mongodb + mongo-java-driver + 3.12.8 + + + org.apache.kafka + kafka-clients + 2.8.0 + + + + + + + + + + diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/Inventory.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/Inventory.java new file mode 100644 index 000000000..355919e4b --- /dev/null +++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/Inventory.java @@ -0,0 +1,42 @@ +/* + + ** + ** Copyright (c) 2021 Oracle and/or its affiliates. 
+ ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + */ +package io.helidon.data.examples; + +public class Inventory { + + private String orderid; + private String itemid; + private String inventorylocation; + private String suggestiveSale; + + public Inventory() { + + } + + public Inventory(String orderid, String itemid, String inventorylocation, String suggestiveSale) { + this.orderid = orderid; + this.itemid = itemid; + this.inventorylocation = inventorylocation; + this.suggestiveSale = suggestiveSale; + } + + public String getOrderid() { + return orderid; + } + + public String getItemid() { + return itemid; + } + + public String getInventorylocation() { + return inventorylocation; + } + + public String getSuggestiveSale() { + return suggestiveSale; + } +} diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/JsonUtils.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/JsonUtils.java new file mode 100644 index 000000000..beca30217 --- /dev/null +++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/JsonUtils.java @@ -0,0 +1,70 @@ +/* + + ** + ** Copyright (c) 2021 Oracle and/or its affiliates. + ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + */ +package io.helidon.data.examples; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; + +import java.io.IOException; + +public class JsonUtils { + private final ObjectMapper json; + public JsonUtils() { + json = new ObjectMapper(); + json.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false); + } + public static ObjectMapper json() { + return InstanceHolder.json.json; + } + public static T read(String src, Class valueType) { + try { + return json().readValue(src, valueType); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + public static T read(String src, TypeReference valueTypeRef) { + try { + return json().readValue(src, valueTypeRef); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + public static T read(byte[] src, Class valueType) { + try { + return json().readValue(src, valueType); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + public static T read(byte[] src, TypeReference valueTypeRef) { + try { + return json().readValue(src, valueTypeRef); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + public static String writeValueAsString(Object value) { + try { + return json().writeValueAsString(value); + } catch (JsonProcessingException e) { + throw new IllegalStateException(e); + } + } + public static byte[] writeValueAsBytes(Object value) { + try { + return json().writeValueAsBytes(value); + } catch (JsonProcessingException e) { + throw new IllegalStateException(e); + } + } + private static class InstanceHolder { + static final JsonUtils json = new JsonUtils(); + } +} \ No newline at end of file diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/KafkaMongoDBOrderProducer.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/KafkaMongoDBOrderProducer.java new file mode 100644 index 000000000..3339827dc --- /dev/null +++ 
diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/KafkaMongoDBOrderProducer.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/KafkaMongoDBOrderProducer.java
new file mode 100644
index 000000000..3339827dc
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/KafkaMongoDBOrderProducer.java
@@ -0,0 +1,95 @@
+package io.helidon.data.examples;
+
+import java.util.Properties;
+
+import com.mongodb.MongoClient;
+import com.mongodb.client.FindIterable;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.model.Filters;
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.bson.Document;
+
+import static io.helidon.data.examples.KafkaMongoOrderResource.crashAfterInsert;
+
+public class KafkaMongoDBOrderProducer {
+
+    public String updateDataAndSendEvent(String orderid, String itemid, String deliverylocation) throws Exception {
+        System.out.println("KafkaMongoDBOrderProducer.updateDataAndSendEvent orderid = " + orderid + ", itemid = " + itemid + ", deliverylocation = " + deliverylocation);
+        System.out.println("KafkaMongoDBOrderProducer.insert order into mongodb.........");
+        Order insertedOrder = insertOrderInMongoDB(orderid, itemid, deliverylocation);
+        if (crashAfterInsert) System.exit(-1); // test hook: exit after the insert, before the Kafka send
+        String jsonString = JsonUtils.writeValueAsString(insertedOrder);
+        System.out.println("send message to kafka.........");
+        String topicName = KafkaMongoOrderResource.orderTopicName;
+        Properties props = new Properties();
+        props.put("bootstrap.servers", "kafka-service:9092");
+        props.put("acks", "all");
+        props.put("retries", 0);
+        props.put("batch.size", 16384);
+        props.put("linger.ms", 1);
+        props.put("buffer.memory", 33554432);
+        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        Producer<String, String> producer = new KafkaProducer<String, String>(props);
+        producer.send(new ProducerRecord<String, String>(topicName, "order", jsonString));
+        System.out.println("KafkaMongoDBOrderProducer.Message sent successfully:" + jsonString);
+        producer.close();
+        return "end send messages";
+    }
+
+    public Order insertOrderInMongoDB(String orderid, String itemid, String deliverylocation) {
+        System.out.println("insertOrderInMongoDB orderid = " + orderid + ", itemid = " + itemid + ", deliverylocation = " + deliverylocation);
+        Order order = new Order(orderid, itemid, deliverylocation, "pending", "", "");
+        MongoClient mongoClient = KafkaMongoOrderEventConsumer.getMongoClient();
+        MongoCollection<Document> orders = KafkaMongoOrderEventConsumer.getDocumentMongoCollection(mongoClient);
+        orders.insertOne(new Document()
+                .append("orderid", orderid)
+                .append("itemid", itemid)
+                .append("deliverylocation", deliverylocation)
+                .append("status", "pending")
+        );
+        mongoClient.close();
+        return order;
+    }
+
+    public Order getOrderFromMongoDB(String orderId) {
+        System.out.println("KafkaMongoDBOrderProducer.getOrderFromMongoDB orderId:" + orderId);
+        MongoClient mongoClient = KafkaMongoOrderEventConsumer.getMongoClient();
+        MongoCollection<Document> orders = KafkaMongoOrderEventConsumer.getDocumentMongoCollection(mongoClient);
+        FindIterable<Document> documentFindIterable = orders.find(Filters.eq("orderid", orderId));
+        if (documentFindIterable == null) {
+            System.out.println("KafkaMongoDBOrderProducer.getOrderFromMongoDB no entry found for orderId:" + orderId);
+        }
+        for (Document doc : documentFindIterable) {
+            String inventorylocation = doc.getString("inventorylocation");
+            System.out.println("KafkaMongoDBOrderProducer.getOrderFromMongoDB inventorylocation:" + inventorylocation);
+            return new Order(orderId, doc.getString("itemid"), doc.getString("deliverylocation"), doc.getString("status"), inventorylocation, doc.getString("suggestiveSale"));
+        }
+        return new Order(orderId, "unknown", "unknown", "unknown", "", "");
+    }
+
+    public String deleteOrderFromMongoDB(String orderId) {
+        MongoClient mongoClient = KafkaMongoOrderEventConsumer.getMongoClient();
+        MongoCollection<Document> orders = KafkaMongoOrderEventConsumer.getDocumentMongoCollection(mongoClient);
+        orders.deleteOne(Filters.eq("orderid", orderId));
+        return "orderId:" + orderId + " deleted";
+    }
+
+    public Object dropOrderFromMongoDB() {
+        MongoClient mongoClient = KafkaMongoOrderEventConsumer.getMongoClient();
+        MongoCollection<Document> orders = KafkaMongoOrderEventConsumer.getDocumentMongoCollection(mongoClient);
+        orders.drop();
+        return "orders collection dropped";
+    }
+}
\ No newline at end of file
diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/KafkaMongoOrderEventConsumer.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/KafkaMongoOrderEventConsumer.java
new file mode 100644
index 000000000..a128eee13
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/KafkaMongoOrderEventConsumer.java
@@ -0,0 +1,105 @@
+package io.helidon.data.examples;
+
+import com.mongodb.MongoClient;
+import com.mongodb.MongoClientURI;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoDatabase;
+import org.apache.kafka.clients.admin.AdminClient;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.bson.Document;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Properties;
+
+public class KafkaMongoOrderEventConsumer implements Runnable {
+
+    Properties props = new Properties();
+    static MongoClientURI uri = new MongoClientURI("mongodb://orderuser:Welcome12345@mongodb:27017/orderdb");
+
+    static MongoClient getMongoClient() {
+        return new MongoClient(KafkaMongoOrderEventConsumer.uri);
+    }
+
+    static MongoCollection<Document> getDocumentMongoCollection(MongoClient mongoClient) {
+        MongoDatabase db = mongoClient.getDatabase(KafkaMongoOrderEventConsumer.uri.getDatabase());
+        return db.getCollection("orders");
+    }
+
+    @Override
+    public void run() {
+        // Standard URI format: mongodb://[dbuser:dbpassword@]host:port/dbname
+//        MongoClientURI uri = new MongoClientURI("mongodb://mongodb:27017");
+        uri = new MongoClientURI("mongodb://orderuser:Welcome12345@mongodb:27017/orderdb");
+        props.put("bootstrap.servers", "kafka-service:9092");
+        props.put("group.id", "test");
+        props.put("enable.auto.commit", "true");
+        props.put("auto.commit.interval.ms", "1000");
+        props.put("session.timeout.ms", "30000");
+        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        //todo potentially gate this for init
+//        createTopic(KafkaMongoOrderResource.orderTopicName);
+//        createTopic(KafkaMongoOrderResource.inventoryTopicName);
+        dolistenForMessages();
+    }
+
+    private void createTopic(String topicName) {
+        System.out.println("KafkaMongoOrderEventConsumer.createTopic creating " + topicName + "... ");
+        AdminClient adminClient = AdminClient.create(props);
+        NewTopic newTopic = new NewTopic(topicName, 1, (short) 1); // new NewTopic(topicName, numPartitions, replicationFactor)
+        List<NewTopic> newTopics = new ArrayList<NewTopic>();
+        newTopics.add(newTopic);
+        adminClient.createTopics(newTopics);
+        adminClient.close();
+    }
+
+    public void dolistenForMessages() {
+        System.out.println("KafkaMongoOrderEventConsumer about to listen for messages...");
+        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
+        System.out.println("KafkaMongoOrderEventConsumer consumer:" + consumer);
+        consumer.subscribe(Arrays.asList(KafkaMongoOrderResource.inventoryTopicName));
+        System.out.println("Subscribed to topic " + KafkaMongoOrderResource.inventoryTopicName);
+        while (true) {
+            ConsumerRecords<String, String> records = consumer.poll(100);
+            for (ConsumerRecord<String, String> record : records) {
+                System.out.printf("message offset = %d, key = %s, value = %s\n",
+                        record.offset(), record.key(), record.value());
+                String messageText = record.value();
+                if (messageText.indexOf("{") > -1) {
+                    Inventory inventory = JsonUtils.read(messageText, Inventory.class);
+                    String orderid = inventory.getOrderid();
+                    String itemid = inventory.getItemid();
+                    String inventorylocation = inventory.getInventorylocation();
+                    boolean isSuccessfulInventoryCheck = !(inventorylocation == null || inventorylocation.equals("")
+                            || inventorylocation.equals("inventorydoesnotexist")
+                            || inventorylocation.equals("none"));
+                    System.out.println("Update orderid:" + orderid + "(itemid:" + itemid + ") in MongoDB isSuccessfulInventoryCheck:" + isSuccessfulInventoryCheck);
+                    MongoClient mongoClient = getMongoClient();
+                    MongoCollection<Document> orders = getDocumentMongoCollection(mongoClient);
+                    Document updateQuery = new Document().append("orderid", orderid);
+                    if (isSuccessfulInventoryCheck) {
+                        orders.updateOne(updateQuery, new Document("$set", new Document("inventorylocation", inventorylocation)));
+//                        order.setStatus("success inventory exists");
+//                        order.setSuggestiveSale(inventory.getSuggestiveSale());
+                    } else {
+                        orders.updateOne(updateQuery, new Document("$set", new Document("inventorylocation", "failed inventory does not exist")));
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/KafkaMongoOrderResource.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/KafkaMongoOrderResource.java
new file mode 100755
index 000000000..1748f9069
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/KafkaMongoOrderResource.java
@@ -0,0 +1,276 @@
+/*
+
+ **
+ ** Copyright (c) 2021 Oracle and/or its affiliates.
+ ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+ */
+package io.helidon.data.examples;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.enterprise.context.ApplicationScoped;
+import javax.enterprise.context.Initialized;
+import javax.enterprise.event.Observes;
+import javax.inject.Inject;
+import javax.ws.rs.*;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+
+import org.eclipse.microprofile.metrics.annotation.Counted;
+import org.eclipse.microprofile.metrics.annotation.Timed;
+import org.eclipse.microprofile.openapi.annotations.Operation;
+import org.eclipse.microprofile.openapi.annotations.enums.SchemaType;
+import org.eclipse.microprofile.openapi.annotations.media.Content;
+import org.eclipse.microprofile.openapi.annotations.media.Schema;
+import org.eclipse.microprofile.openapi.annotations.parameters.Parameter;
+import org.eclipse.microprofile.openapi.annotations.responses.APIResponse;
+import org.eclipse.microprofile.openapi.annotations.responses.APIResponses;
+import org.eclipse.microprofile.opentracing.Traced;
+import io.opentracing.Tracer;
+import io.opentracing.Span;
+
+@Path("/")
+@ApplicationScoped
+@Traced
+public class KafkaMongoOrderResource {
+
+    @Inject
+    private Tracer tracer;
+
+    KafkaMongoDBOrderProducer orderServiceEventProducer = new KafkaMongoDBOrderProducer();
+    final static String orderTopicName = "order.topic";
+    final static String inventoryTopicName = "inventory.topic";
+    static boolean liveliness = true;
+    static boolean crashAfterInsert = false;
+    static boolean readiness = true;
+    private static String lastContainerStartTime;
+    Map<String, OrderDetail> cachedOrders = new HashMap<>();
+
+    @Path("/lastContainerStartTime")
+    @GET
+    @Produces(MediaType.TEXT_PLAIN)
+    public Response lastContainerStartTime() {
+        System.out.println("--->lastContainerStartTime...");
+        return Response.ok()
+                .entity("lastContainerStartTime = " + lastContainerStartTime)
+                .build();
+    }
+
+    public void init(@Observes @Initialized(ApplicationScoped.class) Object init) throws Exception {
+        System.out.println("KafkaMongoOrderResource.init " + init);
+        startEventConsumer();
+        lastContainerStartTime = new java.util.Date().toString();
+        System.out.println("____________________________________________________");
+        System.out.println("----------->KafkaMongoOrderResource (container) starting at: " + lastContainerStartTime);
+        System.out.println("____________________________________________________");
+    }
+
+    private void startEventConsumer() {
+        System.out.println("OrderResource.startEventConsumerIfNotStarted startEventConsumer...");
+        KafkaMongoOrderEventConsumer orderServiceEventConsumer = new KafkaMongoOrderEventConsumer();
+        new Thread(orderServiceEventConsumer).start();
+    }
+
+    @Operation(summary = "Places a new order",
+            description = "Orders a specific item for delivery to a location")
+    @APIResponses({
+            @APIResponse(
+                    responseCode = "200",
+                    description = "Confirmation of a successfully-placed order",
+                    content = @Content(mediaType = "text/plain")
+            ),
+            @APIResponse(
+                    responseCode = "500",
+                    description = "Error report of a failure to place an order",
+                    content = @Content(mediaType = "text/plain")
+            )
+    })
+    @Path("/placeOrder")
+    @GET
+    @Produces(MediaType.APPLICATION_JSON)
+    @Traced(operationName = "OrderResource.placeOrder")
+    @Timed(name = "placeOrder_timed") // records how long each invocation takes
+    @Counted(name = "placeOrder_counted") // counts the number of invocations
+    public Response placeOrder(
+            @Parameter(description = "The order ID for the order",
+                    required = true,
+                    example = "66",
+                    schema = @Schema(type = SchemaType.STRING))
+            @QueryParam("orderid") String orderid,
+
+            @Parameter(description = "The item ID of the item being ordered",
+                    required = true,
+                    example = "sushi",
+                    schema = @Schema(type = SchemaType.STRING))
+            @QueryParam("itemid") String itemid,
+
+            @Parameter(description = "Where the item should be delivered",
+                    required = true,
+                    example = "Home",
+                    schema = @Schema(type = SchemaType.STRING))
+            @QueryParam("deliverylocation") String deliverylocation) {
+        System.out.println("--->placeOrder... orderid:" + orderid + " itemid:" + itemid);
+        OrderDetail orderDetail = new OrderDetail();
+        orderDetail.setOrderId(orderid);
+        orderDetail.setItemId(itemid);
+        orderDetail.setOrderStatus("pending");
+        orderDetail.setDeliveryLocation(deliverylocation);
+        cachedOrders.put(orderid, orderDetail);
+
+        Span activeSpan = tracer.buildSpan("orderDetail").asChildOf(tracer.activeSpan()).start();
+        activeSpan.log("begin placing order"); // logs are for a specific moment or event within the span (in contrast to tags, which apply to the span regardless of time)
+        activeSpan.setTag("orderid", orderid); // tags are annotations of spans used to query, filter, and comprehend trace data
+        activeSpan.setTag("itemid", itemid);
+        activeSpan.setTag("db.user", "mongodb"); // https://github.com/opentracing/specification/blob/master/semantic_conventions.md
+        activeSpan.setBaggageItem("sagaid", "testsagaid" + orderid); // baggage is part of the SpanContext and carries data across process boundaries for access throughout the trace
+        activeSpan.setBaggageItem("orderid", orderid);
+
+        try {
+            System.out.println("--->updateDataAndSendEvent..."
+                    + orderServiceEventProducer.updateDataAndSendEvent(orderid, itemid, deliverylocation));
+        } catch (Exception e) {
+            e.printStackTrace();
+            return Response.serverError()
+                    .entity("orderid = " + orderid + " failed with exception:" + e.getCause())
+                    .build();
+        } finally {
+            activeSpan.log("end placing order");
+            activeSpan.finish();
+        }
+        return Response.ok()
+                .entity("orderid = " + orderid + " orderstatus = " + orderDetail.getOrderStatus() + " order placed")
+                .build();
+    }
+
+    @Operation(summary = "Displays an order",
+            description = "Displays a previously-placed order, reading it from MongoDB rather than the in-memory cache")
+    @APIResponses({
+            @APIResponse(
+                    responseCode = "200",
+                    description = "Previously-placed order",
+                    content = @Content(mediaType = "application/json",
+                            schema = @Schema(
+                                    implementation = Order.class
+                            ))
+            )
+    })
+    @Path("/showorder")
+    @GET
+    @Produces(MediaType.APPLICATION_JSON)
+    public Response showorder(
+            @Parameter(description = "The order ID for the order",
+                    required = true,
+                    example = "1",
+                    schema = @Schema(type = SchemaType.STRING))
+            @QueryParam("orderid") String orderId) {
+        System.out.println("--->showorder (via JSON/SODA query) for orderId:" + orderId);
+        try {
+            Order order = orderServiceEventProducer.getOrderFromMongoDB(orderId);
+            String returnJSON = JsonUtils.writeValueAsString(order);
+            System.out.println("OrderResource.showorder returnJSON:" + returnJSON);
+            return Response.ok()
+                    .entity(returnJSON)
+                    .build();
+        } catch (Exception e) {
+            e.printStackTrace();
+            return Response.serverError()
+                    .entity("showorder orderid = " + orderId + " failed with exception:" + e.toString())
+                    .build();
+        }
+    }
+
+    @Operation(summary = "Deletes an order",
+            description = "Deletes a previously-placed order")
+    @APIResponses({
+            @APIResponse(
+                    responseCode = "200",
+                    description = "Confirmation/result of the order deletion",
+                    content = @Content(mediaType = "text/plain")
+            )
+    })
+    @Path("/deleteorder")
+    @GET
+    @Produces(MediaType.TEXT_PLAIN)
+    public Response deleteorder(
+            @Parameter(description = "The order ID for the order",
+                    required = true,
+                    example = "1",
+                    schema = @Schema(type = SchemaType.STRING))
+            @QueryParam("orderid") String orderId) {
+        System.out.println("--->deleteorder for orderId:" + orderId);
+        String returnString = "orderId = " + orderId + "<br>";
+        try {
+            returnString += orderServiceEventProducer.deleteOrderFromMongoDB(orderId);
+            return Response.ok()
+                    .entity(returnString)
+                    .build();
+        } catch (Exception e) {
+            e.printStackTrace();
+            return Response.ok()
+                    .entity("orderid = " + orderId + " failed with exception:" + e.toString())
+                    .build();
+        }
+    }
+
+    @Operation(summary = "Deletes all orders",
+            description = "Deletes all previously-placed orders")
+    @APIResponses({
+            @APIResponse(
+                    responseCode = "200",
+                    description = "Confirmation/result of the order deletion",
+                    content = @Content(mediaType = "application/json")
+            )
+    })
+    @Path("/deleteallorders")
+    @GET
+    @Produces(MediaType.APPLICATION_JSON)
+    public Response deleteallorders() {
+        System.out.println("--->deleteallorders");
+        try {
+            return Response.ok()
+                    .entity(orderServiceEventProducer.dropOrderFromMongoDB())
+                    .build();
+        } catch (Exception e) {
+            e.printStackTrace();
+            return Response.ok()
+                    .entity("deleteallorders failed with exception:" + e.toString())
+                    .build();
+        }
+    }
+
+    @Path("/crashAfterInsert")
+    @GET
+    @Produces(MediaType.TEXT_PLAIN)
+    public Response crashAfterInsert() {
+        crashAfterInsert = true;
+        return Response.ok()
+                .entity("order crashAfterInsert set")
+                .build();
+    }
+
+    @Path("/ordersetlivenesstofalse")
+    @GET
+    @Produces(MediaType.TEXT_PLAIN)
+    public Response ordersetlivenesstofalse() {
+        liveliness = false;
+        return Response.ok()
+                .entity("order liveness set to false - OKE should restart the pod due to liveness probe")
+                .build();
+    }
+
+    @Path("/ordersetreadinesstofalse")
+    @GET
+    @Produces(MediaType.TEXT_PLAIN)
+    public Response ordersetreadinesstofalse() {
+        readiness = false;
+        return Response.ok()
+                .entity("order readiness set to false")
+                .build();
+    }
+
+}
\ No newline at end of file
diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/Order.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/Order.java
new file mode 100644
index 000000000..8bc3aceaf
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/Order.java
@@ -0,0 +1,99 @@
+/*
+
+ **
+ ** Copyright (c) 2021 Oracle and/or its affiliates.
+ ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+ */
+package io.helidon.data.examples;
+
+import javax.json.bind.annotation.JsonbProperty;
+
+public class Order {
+    private String orderid;
+    private String itemid;
+    private String deliverylocation;
+    @JsonbProperty(nillable = true)
+    private String status;
+    @JsonbProperty(nillable = true)
+    private String inventoryLocation;
+    @JsonbProperty(nillable = true)
+    private String suggestiveSale;
+
+    public Order() {
+    }
+
+    // OrderDetail is the cache object and Order is the JSON message and DB object, so we have this mapping, at least for now...
+    public Order(OrderDetail orderDetail) {
+        this(orderDetail.getOrderId(), orderDetail.getItemId(), orderDetail.getDeliveryLocation(),
+                orderDetail.getOrderStatus(), orderDetail.getInventoryLocation(), orderDetail.getSuggestiveSale());
+    }
+
+    public Order(String orderId, String itemId, String deliverylocation,
+                 String status, String inventoryLocation, String suggestiveSale) {
+        this.orderid = orderId;
+        this.itemid = itemId;
+        this.deliverylocation = deliverylocation;
+        this.status = status;
+        this.inventoryLocation = inventoryLocation;
+        this.suggestiveSale = suggestiveSale;
+    }
+
+    public String getOrderid() {
+        return orderid;
+    }
+
+    public String getItemid() {
+        return itemid;
+    }
+
+    public String getDeliverylocation() {
+        return deliverylocation;
+    }
+
+    public void setOrderid(String orderid) {
+        this.orderid = orderid;
+    }
+
+    public void setItemid(String itemid) {
+        this.itemid = itemid;
+    }
+
+    public void setDeliverylocation(String deliverylocation) {
+        this.deliverylocation = deliverylocation;
+    }
+
+    public void setStatus(String status) {
+        this.status = status;
+    }
+
+    public void setInventoryLocation(String inventoryLocation) {
+        this.inventoryLocation = inventoryLocation;
+    }
+
+    public void setSuggestiveSale(String suggestiveSale) {
+        this.suggestiveSale = suggestiveSale;
+    }
+
+    public String getStatus() {
+        return status;
+    }
+
+    public String getInventoryLocation() {
+        return inventoryLocation;
+    }
+
+    public String getSuggestiveSale() {
+        return suggestiveSale;
+    }
+
+    public String toString() {
+        String returnString = "";
+        returnString += "<br>orderId = " + orderid;
+        returnString += "<br>itemid = " + itemid;
+        returnString += "<br>suggestiveSale = " + suggestiveSale;
+        returnString += "<br>inventoryLocation = " + inventoryLocation;
+        returnString += "<br>orderStatus = " + status;
+        returnString += "<br>deliveryLocation = " + deliverylocation;
+        return returnString;
+    }
+}
diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderApplication.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderApplication.java
new file mode 100644
index 000000000..c861faa95
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderApplication.java
@@ -0,0 +1,33 @@
+/*
+
+ **
+ ** Copyright (c) 2021 Oracle and/or its affiliates.
+ ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+ */
+package io.helidon.data.examples;
+
+import org.eclipse.microprofile.openapi.annotations.OpenAPIDefinition;
+import org.eclipse.microprofile.openapi.annotations.info.Info;
+
+import javax.enterprise.context.ApplicationScoped;
+import javax.ws.rs.ApplicationPath;
+import javax.ws.rs.core.Application;
+import java.util.HashSet;
+import java.util.Set;
+
+@ApplicationScoped
+@ApplicationPath("/")
+@OpenAPIDefinition(
+        info = @Info(title = "GrabDish", description = "Order processing for GrabDish", version = "0.0.1")
+)
+public class OrderApplication extends Application {
+
+    @Override
+    public Set<Class<?>> getClasses() {
+        Set<Class<?>> s = new HashSet<Class<?>>();
+        s.add(KafkaMongoOrderResource.class);
+        return s;
+    }
+
+}
diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderDetail.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderDetail.java
new file mode 100644
index 000000000..b478dc664
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderDetail.java
@@ -0,0 +1,126 @@
+/*
+
+ **
+ ** Copyright (c) 2021 Oracle and/or its affiliates.
+ ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+ */
+package io.helidon.data.examples;
+
+import javax.json.bind.annotation.JsonbProperty;
+
+public class OrderDetail {
+
+    @JsonbProperty("orderId")
+    private String orderId;
+    @JsonbProperty(nillable = true) // todo nillable is not necessary
+    private String itemId;
+    @JsonbProperty(nillable = true)
+    private String suggestiveSaleItem = "";
+    @JsonbProperty(nillable = true)
+    private String suggestiveSale = "";
+    @JsonbProperty
+    private String inventoryLocationItem = "";
+    @JsonbProperty
+    private String inventoryLocation = "none";
+    @JsonbProperty
+    private String shippingEstimate = "none";
+    @JsonbProperty
+    private String shippingEstimateItem = "";
+    @JsonbProperty
+    private String orderStatus = "none";
+    @JsonbProperty
+    private String deliveryLocation = "none";
+
+    public String toString() {
+        String returnString = "";
+        returnString += "<br>orderId = " + orderId;
+        returnString += "<br>suggestiveSale = " + suggestiveSale;
+        returnString += "<br>inventoryLocation = " + inventoryLocation;
+        returnString += "<br>orderStatus = " + orderStatus;
+        returnString += "<br>deliveryLocation = " + deliveryLocation;
+        return returnString;
+    }
+
+    public String getOrderId() {
+        return orderId;
+    }
+
+    public void setOrderId(String orderId) {
+        this.orderId = orderId;
+    }
+
+    public String getItemId() {
+        return itemId;
+    }
+
+    public void setItemId(String itemId) {
+        this.itemId = itemId;
+    }
+
+    public String getSuggestiveSaleItem() {
+        return suggestiveSaleItem;
+    }
+
+    public void setSuggestiveSaleItem(String suggestiveSaleItem) {
+        this.suggestiveSaleItem = suggestiveSaleItem;
+    }
+
+    public String getSuggestiveSale() {
+        return suggestiveSale;
+    }
+
+    public void setSuggestiveSale(String suggestiveSale) {
+        this.suggestiveSale = suggestiveSale;
+    }
+
+    public String getInventoryLocationItem() {
+        return inventoryLocationItem;
+    }
+
+    public void setInventoryLocationItem(String inventoryLocationItem) {
+        this.inventoryLocationItem = inventoryLocationItem;
+    }
+
+    public String getInventoryLocation() {
+        return inventoryLocation;
+    }
+
+    public void setInventoryLocation(String inventoryLocation) {
+        this.inventoryLocation = inventoryLocation;
+    }
+
+    public String getShippingEstimate() {
+        return shippingEstimate;
+    }
+
+    public void setShippingEstimate(String shippingEstimate) {
+        this.shippingEstimate = shippingEstimate;
+    }
+
+    public String getShippingEstimateItem() {
+        return shippingEstimateItem;
+    }
+
+    public void setShippingEstimateItem(String shippingEstimateItem) {
+        this.shippingEstimateItem = shippingEstimateItem;
+    }
+
+    public String getOrderStatus() {
+        return orderStatus;
+    }
+
+    public void setOrderStatus(String orderStatus) {
+        this.orderStatus = orderStatus;
+    }
+
+    public void setDeliveryLocation(String deliverylocation) {
+        this.deliveryLocation = deliverylocation;
+    }
+
+    public String getDeliveryLocation() {
+        return deliveryLocation;
+    }
+
+}
+
diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderInit.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderInit.java
new file mode 100644
index 000000000..3717a1f50
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderInit.java
@@ -0,0 +1,21 @@
+/*
+
+ **
+ ** Copyright (c) 2021 Oracle and/or its affiliates.
+ ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+ */
+package io.helidon.data.examples;
+
+import javax.enterprise.context.ApplicationScoped;
+import javax.enterprise.context.Initialized;
+import javax.enterprise.event.Observes;
+
+@ApplicationScoped
+public class OrderInit {
+
+    public void init(@Observes @Initialized(ApplicationScoped.class) Object init) {
+        System.out.println("Order.init " + init);
+    }
+
+}
+
diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderServiceCPUStress.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderServiceCPUStress.java
new file mode 100644
index 000000000..921cb351c
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderServiceCPUStress.java
@@ -0,0 +1,37 @@
+/*
+
+ **
+ ** Copyright (c) 2021 Oracle and/or its affiliates.
+ ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+ */
+package io.helidon.data.examples;
+
+public class OrderServiceCPUStress {
+    boolean isStressOn = false;
+
+    public void start() {
+        isStressOn = true;
+        for (int thread = 0; thread < 10; thread++) {
+            new CPUStressThread().start();
+        }
+    }
+
+    public void stop() {
+        isStressOn = false;
+    }
+
+    private class CPUStressThread extends Thread {
+        public void run() {
+            try {
+                System.out.println("CPUStressThread.run isStressOn:" + isStressOn + " thread:" + Thread.currentThread());
+                // Busy-spin to burn CPU; sleep ~20ms whenever the clock lands on a
+                // 100ms boundary so the thread stays schedulable.
+                while (isStressOn) {
+                    if (System.currentTimeMillis() % 100 == 0) {
+                        Thread.sleep((long) Math.floor((.2) * 100));
+                    }
+                }
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+}
diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderServiceLivenessHealthCheck.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderServiceLivenessHealthCheck.java
new file mode 100644
index 000000000..b7128abb7
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderServiceLivenessHealthCheck.java
@@ -0,0 +1,37 @@
+/*
+
+ **
+ ** Copyright (c) 2021 Oracle and/or its affiliates.
+ ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+ */
+package io.helidon.data.examples;
+
+import org.eclipse.microprofile.health.HealthCheck;
+import org.eclipse.microprofile.health.HealthCheckResponse;
+import org.eclipse.microprofile.health.Liveness;
+
+import javax.enterprise.context.ApplicationScoped;
+import javax.inject.Inject;
+
+@Liveness
+@ApplicationScoped
+public class OrderServiceLivenessHealthCheck implements HealthCheck {
+
+    @Inject
+    public OrderServiceLivenessHealthCheck() {
+    }
+
+    @Override
+    public HealthCheckResponse call() {
+        if (!KafkaMongoOrderResource.liveliness) {
+            return HealthCheckResponse.named("OrderServerLivenessDown")
+                    .down()
+                    .withData("databaseconnections", "not live")
+                    .build();
+        } else return HealthCheckResponse.named("OrderServerLivenessUp")
+                .up()
+                .withData("databaseconnections", "live")
+                .build();
+    }
+}
+
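Both checks are picked up by MicroProfile Health, which Helidon MP serves at /health/live and /health/ready, so calling the ordersetlivenesstofalse endpoint above should flip the liveness payload. A probe-style client sketch, assuming a JAX-RS client implementation is available and the local URL is illustrative:

    package io.helidon.data.examples;

    import javax.ws.rs.client.Client;
    import javax.ws.rs.client.ClientBuilder;

    public class HealthProbeDemo {
        public static void main(String[] args) {
            Client client = ClientBuilder.newClient();
            try {
                // Expect a JSON payload containing OrderServerLivenessUp
                // (or ...Down after /ordersetlivenesstofalse has been called).
                String live = client.target("http://localhost:8080/health/live").request().get(String.class);
                System.out.println(live);
            } finally {
                client.close();
            }
        }
    }
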
diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderServiceReadinessHealthCheck.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderServiceReadinessHealthCheck.java
new file mode 100644
index 000000000..266114744
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/OrderServiceReadinessHealthCheck.java
@@ -0,0 +1,38 @@
+/*
+
+ **
+ ** Copyright (c) 2021 Oracle and/or its affiliates.
+ ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+ */
+package io.helidon.data.examples;
+
+import org.eclipse.microprofile.health.HealthCheck;
+import org.eclipse.microprofile.health.HealthCheckResponse;
+import org.eclipse.microprofile.health.Readiness;
+
+import javax.enterprise.context.ApplicationScoped;
+import javax.inject.Inject;
+
+@Readiness
+@ApplicationScoped
+public class OrderServiceReadinessHealthCheck implements HealthCheck {
+
+    @Inject
+    public OrderServiceReadinessHealthCheck() {
+    }
+
+    @Override
+    public HealthCheckResponse call() {
+        if (!KafkaMongoOrderResource.readiness) {
+            return HealthCheckResponse.named("OrderServerReadinessDown")
+                    .down()
+                    .withData("data-initialized", "not ready") // data initialized via eventsourcing, view query, etc.
+                    .withData("connections-created", "not ready")
+                    .build();
+        } else return HealthCheckResponse.named("OrderServerReadinessUp")
+                .up()
+                .withData("data-initialized", "ready") // data initialized via eventsourcing, view query, etc.
+                .withData("connections-created", "ready")
+                .build();
+    }
+}
diff --git a/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/package-info.java b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/package-info.java
new file mode 100644
index 000000000..41e577a26
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/java/io/helidon/data/examples/package-info.java
@@ -0,0 +1,12 @@
+/*
+
+ **
+ ** Copyright (c) 2021 Oracle and/or its affiliates.
+ ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+ */
+
+/**
+ * Provides JAX-RS-related classes and interfaces for this example
+ * project.
+ */
+package io.helidon.data.examples;
diff --git a/grabdish/order-mongodb-kafka/src/main/resources/META-INF/beans.xml b/grabdish/order-mongodb-kafka/src/main/resources/META-INF/beans.xml
new file mode 100755
index 000000000..dfde707e2
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/resources/META-INF/beans.xml
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+    Copyright (c) 2021 Oracle and/or its affiliates.
+    Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+-->
+<beans xmlns="http://xmlns.jcp.org/xml/ns/javaee"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xsi:schemaLocation="http://xmlns.jcp.org/xml/ns/javaee
+                           http://xmlns.jcp.org/xml/ns/javaee/beans_2_0.xsd"
+       bean-discovery-mode="annotated">
+</beans>
diff --git a/grabdish/order-mongodb-kafka/src/main/resources/META-INF/microprofile-config.properties b/grabdish/order-mongodb-kafka/src/main/resources/META-INF/microprofile-config.properties
new file mode 100755
index 000000000..76b8ea34b
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/resources/META-INF/microprofile-config.properties
@@ -0,0 +1,22 @@
+
+##
+## Copyright (c) 2021 Oracle and/or its affiliates.
+## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+
+# MicroProfile Tracing properties
+tracing.service=order.msdataworkshop
+tracing.protocol=http
+tracing.host=jaeger-collector.msdataworkshop
+tracing.port=14268
+tracing.path=/api/traces
+tracing.propagation=b3
+tracing.log-spans=true
+tracing.sampler-type=const
+tracing.sampler-param=1
+tracing.components.tracing.enabled=false
+
+
+# MicroProfile server properties
+server.port=8080
+server.host=0.0.0.0
diff --git a/grabdish/order-mongodb-kafka/src/main/resources/logging.properties b/grabdish/order-mongodb-kafka/src/main/resources/logging.properties
new file mode 100644
index 000000000..d38b23789
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/src/main/resources/logging.properties
@@ -0,0 +1,12 @@
+## Copyright (c) 2021 Oracle and/or its affiliates.
+## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+handlers=java.util.logging.FileHandler, java.util.logging.ConsoleHandler
+# Global default logging level. Can be overridden by specific handlers and loggers
+.level=INFO
+
+org.apache.kafka.common.utils.AppInfoParser.level=SEVERE
+org.apache.kafka.clients.level=WARNING
+org.apache.kafka.clients.consumer.ConsumerConfig.level=SEVERE
+org.apache.kafka.clients.producer.ProducerConfig.level=SEVERE
+io.jaegertracing.internal.level=OFF
\ No newline at end of file
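The properties above are plain MicroProfile Config sources, so any of them can be read programmatically. A minimal sketch using keys defined in microprofile-config.properties:

    import org.eclipse.microprofile.config.Config;
    import org.eclipse.microprofile.config.ConfigProvider;

    public class ConfigDemo {
        public static void main(String[] args) {
            Config config = ConfigProvider.getConfig();
            // Keys from microprofile-config.properties above.
            int port = config.getValue("server.port", Integer.class);
            String tracingService = config.getValue("tracing.service", String.class);
            System.out.println(tracingService + " on port " + port);
        }
    }
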
diff --git a/grabdish/order-mongodb-kafka/undeploy-verrazzano.sh b/grabdish/order-mongodb-kafka/undeploy-verrazzano.sh
new file mode 100755
index 000000000..3b399cbc9
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/undeploy-verrazzano.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+## Copyright (c) 2021 Oracle and/or its affiliates.
+## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+
+echo delete order-mongodb-kafka OAM Component and ApplicationConfiguration
+
+kubectl delete applicationconfiguration order-mongodb-kafka-appconf -n msdataworkshop
+kubectl delete component order-mongodb-kafka-component -n msdataworkshop
diff --git a/grabdish/order-mongodb-kafka/undeploy.sh b/grabdish/order-mongodb-kafka/undeploy.sh
new file mode 100755
index 000000000..0fea7f00a
--- /dev/null
+++ b/grabdish/order-mongodb-kafka/undeploy.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+## Copyright (c) 2021 Oracle and/or its affiliates.
+## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+echo delete order deployment and service...
+
+kubectl delete deployment order-mongodb-kafka -n msdataworkshop
+
+kubectl delete service order -n msdataworkshop
diff --git a/grabdish/setup-multicloud.sh b/grabdish/setup-multicloud.sh
index b38d532bd..b5a7444d9 100755
--- a/grabdish/setup-multicloud.sh
+++ b/grabdish/setup-multicloud.sh
@@ -47,12 +47,13 @@ kubectl wait \
 #  -o jsonpath="{.items[0].metadata.name}" \
 #  )
 
-echo Adding labels identifying the msdataworkshop namespace as managed by Verrazzano and enabled for Istio...
-kubectl label namespace msdataworkshop verrazzano-managed=true istio-injection=enabled
-
 echo Creating msdataworkshop namespace... If the namespace already exists there will be an error to that effect that can safely be ignored.
 kubectl create namespace msdataworkshop
 
+echo Adding labels identifying the msdataworkshop namespace as managed by Verrazzano and enabled for Istio...
+kubectl label namespace msdataworkshop verrazzano-managed=true istio-injection=enabled --overwrite
+
+
 echo Adding VerrazzanoProject
 #export CLUSTERS_NAME="$(state_get CLUSTER_NAME)" # eg cluster-cyxypetwerq, also notice the plural/CLUSTERS_NAME and singular/CLUSTER_NAME
 export CLUSTERS_NAME=$1
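Putting the pieces together, the crashAfterInsert endpoint plus the liveness probe enable the crash/recovery test that motivates this patch. One way a test client might drive it (a sketch: the service URL is illustrative, the placeOrder call is expected to fail while the pod exits, and a real test would wait for the restart before step 3):

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class CrashRecoveryTestSketch {
        public static void main(String[] args) throws Exception {
            HttpClient http = HttpClient.newHttpClient();
            String base = "http://localhost:8080"; // hypothetical address of the order service
            // 1. Arm the crash point: the next placeOrder exits after the MongoDB insert,
            //    before the order message reaches Kafka.
            get(http, base + "/crashAfterInsert");
            // 2. Place an order; this call fails because the service exits, and the
            //    liveness probe then lets Kubernetes restart the pod.
            try {
                get(http, base + "/placeOrder?orderid=66&itemid=sushi&deliverylocation=Home");
            } catch (Exception expected) {
                System.out.println("order service crashed as requested: " + expected);
            }
            // 3. After the restart, the inserted order should still be visible.
            get(http, base + "/showorder?orderid=66");
        }

        static void get(HttpClient http, String url) throws Exception {
            HttpResponse<String> response =
                    http.send(HttpRequest.newBuilder(URI.create(url)).build(), HttpResponse.BodyHandlers.ofString());
            System.out.println(url + " -> " + response.statusCode() + " " + response.body());
        }
    }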