<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Nifi ConsumeKafka processor in Archives of Support Questions (Read Only)</title>
    <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Nifi-ConsumeKafka-processor/m-p/172157#M45899</link>
    <description>&lt;P&gt;Hi, &lt;/P&gt;&lt;P&gt;Could someone shed some light on how the above processor initiates the commit?  Is there a way to enforce commit only after a set of subsequent processors completes successfully, for e.g. PutHDFS?&lt;/P&gt;</description>
    <pubDate>Fri, 11 Nov 2016 01:25:52 GMT</pubDate>
    <dc:creator>sivaraman_js</dc:creator>
    <dc:date>2016-11-11T01:25:52Z</dc:date>
    <item>
      <title>Nifi ConsumeKafka processor</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Nifi-ConsumeKafka-processor/m-p/172157#M45899</link>
      <description>&lt;P&gt;Hi, &lt;/P&gt;&lt;P&gt;Could someone shed some light on how the above processor initiates the commit?  Is there a way to enforce commit only after a set of subsequent processors completes successfully, for e.g. PutHDFS?&lt;/P&gt;</description>
      <pubDate>Fri, 11 Nov 2016 01:25:52 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Nifi-ConsumeKafka-processor/m-p/172157#M45899</guid>
      <dc:creator>sivaraman_js</dc:creator>
      <dc:date>2016-11-11T01:25:52Z</dc:date>
    </item>
    <item>
      <title>Re: Nifi ConsumeKafka processor</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Nifi-ConsumeKafka-processor/m-p/172158#M45900</link>
      <description>&lt;P&gt;ConsumeKafka commits the offsets to Kafka right after the data has been written to flow file and the session for that flow file has been committed. This way there is no chance for the data to be lost before committing the offsets to Kafka because the data has already been persisted to NiFi's repositories.&lt;/P&gt;&lt;P&gt;Currently there is not a concept of having a series of processors treated as one operation. Right now you can think of it as two separate transfers of data, the first being from Kafka to NiFi, the second from NiFi to HDFS.&lt;/P&gt;</description>
      <pubDate>Fri, 11 Nov 2016 01:34:59 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Nifi-ConsumeKafka-processor/m-p/172158#M45900</guid>
      <dc:creator>bbende</dc:creator>
      <dc:date>2016-11-11T01:34:59Z</dc:date>
    </item>
  </channel>
</rss>

