<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Re: Best practice architecture and naming hdfs path names/hive database for dev and test on 1 cluster in Support Questions</title>
    <link>https://community.cloudera.com/t5/Support-Questions/Best-practice-architecture-and-naming-hdfs-path-names-hive/m-p/62832#M49796</link>
    <description>&lt;P&gt;I'm looking for best practices for architecture and naming&amp;nbsp;hdfs, considering the users are analytical users who implement data preparation and data modeling processes?&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;I would appreciate tips to design a service on HDFS with an overwrite strategy sufficient to get an easy and friendly data model for training analytical and statistical models in a modeling-as-a-service setup.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;For instance, files with 3000+ columns and storing more than 48 months of history. Any tips to manage huge volumes of data?&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;</description>
    <pubDate>Sun, 17 Dec 2017 02:17:46 GMT</pubDate>
    <dc:creator>jarteaga</dc:creator>
    <dc:date>2017-12-17T02:17:46Z</dc:date>
  </channel>
</rss>

