Skip to content
This repository

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse code

Javadoc; document limitations

  • Loading branch information...
commit b167e4049c6553214d2da869b0d095c7f038d7db 1 parent a0e7a94
Sam Stokes authored September 29, 2011
11  pom.xml
@@ -21,4 +21,15 @@
21 21
       <version>2.6.1</version>
22 22
     </dependency>
23 23
   </dependencies>
  24
+  <build>
  25
+    <plugins>
  26
+      <plugin>
  27
+        <groupId>org.apache.maven.plugins</groupId>
  28
+        <artifactId>maven-javadoc-plugin</artifactId>
  29
+        <version>2.8</version>
  30
+        <configuration>
  31
+        </configuration>
  32
+      </plugin>
  33
+    </plugins>
  34
+  </build>
24 35
 </project>
52  src/main/java/com/rapportive/storm/spout/AMQPSpout.java
@@ -13,6 +13,40 @@
13 13
 import backtype.storm.topology.IRichSpout;
14 14
 import backtype.storm.topology.OutputFieldsDeclarer;
15 15
 
  16
+
  17
+/**
  18
+ * Spout to feed messages into Storm from an AMQP exchange.
  19
+ *
  20
+ * This should not currently be used where guaranteed message processing is
  21
+ * required, because of two limitations:
  22
+ *
  23
+ * <ol>
  24
+ * <li>
  25
+ * Uses a temporary queue to bind to the specified exchange when the topology
  26
+ * calls <tt>open()</tt> on the spout, so it will only receive messages
  27
+ * published to the exchange after the call to <tt>open()</tt>, and if the
  28
+ * spout worker restarts or the topology is killed, it will not receive any
  29
+ * messages published while the worker or topology is down.
  30
+ * </li>
  31
+ *
  32
+ * <li>
  33
+ * Currently auto-acks all consumed messages with the AMQP broker, and does not
  34
+ * implement Storm's reliability API, so if processing a message fails it will
  35
+ * simply be discarded.
  36
+ * </li>
  37
+ * </ol>
  38
+ *
  39
+ * Limitation 1 also means this spout cannot currently be distributed among
  40
+ * multiple workers (each worker gets its own exclusive queue, so multiple
  41
+ * workers would each receive their own copy of every message).
  42
+ *
  43
+ * Improvements are planned to overcome both these limitations and support
  44
+ * guaranteed message processing, distributed across any number of workers.
  45
+ * These improvements may require API changes (e.g. to specify the name of an
  46
+ * existing queue to consume, rather than an exchange to bind to).
  47
+ *
  48
+ * @author Sam Stokes (sam@rapportive.com)
  49
+ */
16 50
 public class AMQPSpout implements IRichSpout {
17 51
     private static final long serialVersionUID = 11258942292629263L;
18 52
 
@@ -36,6 +70,24 @@
36 70
     private SpoutOutputCollector collector;
37 71
 
38 72
 
  73
+    /**
  74
+     * Create a new AMQP spout.  When
  75
+     * {@link #open(Map, TopologyContext, SpoutOutputCollector)} is called, it
  76
+     * will create a new server-named, exclusive, auto-delete queue, bind it to
  77
+     * the specified exchange on the specified server with the specified
  78
+     * routing key, and start consuming messages.  It will use the provided
  79
+     * <tt>scheme</tt> to deserialise each AMQP message into a Storm tuple.
  80
+     *
  81
+     * @param host  hostname of the AMQP broker node
  82
+     * @param port  port number of the AMQP broker node
  83
+     * @param username  username to log in to the broker
  84
+     * @param password  password to authenticate to the broker
  85
+     * @param vhost  vhost on the broker
  86
+     * @param exchange  exchange to bind to
  87
+     * @param routingKey  routing key for the binding
  88
+     * @param scheme  {@link backtype.storm.spout.Scheme} used to deserialise
  89
+     *          each AMQP message into a Storm tuple
  90
+     */
39 91
     public AMQPSpout(String host, int port, String username, String password, String vhost, String exchange, String routingKey, Scheme scheme) {
40 92
         this.amqpHost = host;
41 93
         this.amqpPort = port;

0 notes on commit b167e40

Please sign in to comment.
Something went wrong with that request. Please try again.