I have developed a custom Solr search component for which I need to write unit tests. As I have seen in the code of other Solr components, writing unit tests in Solr is done by extending the SolrTestCaseJ4 class. Unfortunately, SolrTestCaseJ4 doesn't deal with testing in a distributed setting, and my custom component works only in such a setting. As a matter of fact, my component deliberately returns empty responses when not in a distributed setting.
I'm trying to think of a way to use the BaseDistributedSearchTestCase class to test my component, but the way that class works won't solve my issue. With BaseDistributedSearchTestCase you define a single test method in which you index all the documents and perform some queries. Running the test executes each request both in a distributed setting and against a single control core, and then compares the two responses to verify that they are equal. I cannot explicitly assert anything in that flow (see the sketch below). How do I write unit tests for a Solr distributed component?
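For reference, this is roughly what a BaseDistributedSearchTestCase test looks like (a sketch from memory: the field names and the "mycomponent" parameter are made up, and the exact hook method has changed between Solr versions):

    public class MyComponentDistribTest extends BaseDistributedSearchTestCase {
        @Override
        public void doTest() throws Exception {
            // the same documents are indexed into the control core and into the shards
            index("id", "1", "title_s", "first document");
            index("id", "2", "title_s", "second document");
            commit();

            // query() runs the request against the single control core and against the
            // distributed shards, then diffs the two responses for equality; there is
            // no hook where I can add my own assertions about my component's output
            query("q", "*:*", "mycomponent", "true");
        }
    }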
Created 12-26-2016 09:33 PM
Since Solr 4.7 there is a class, MiniSolrCloudCluster, that actually "deploys" a complete Solr cluster locally (in RAM only or on a temp dir, if you want), with ZooKeeper, shards and everything, for your tests. You can find the JIRA here: https://issues.apache.org/jira/browse/SOLR-5865
Here is an example:
    // Imports and the enclosing test class have been added so the snippet compiles;
    // the class name is just illustrative.
    import java.io.File;

    import org.apache.solr.client.solrj.impl.CloudSolrServer;
    import org.apache.solr.client.solrj.impl.XMLResponseParser;
    import org.apache.solr.client.solrj.request.QueryRequest;
    import org.apache.solr.client.solrj.request.RequestWriter;
    import org.apache.solr.cloud.MiniSolrCloudCluster;
    import org.apache.solr.cloud.ZkController;
    import org.apache.solr.common.cloud.SolrZkClient;
    import org.apache.solr.common.params.CollectionParams.CollectionAction;
    import org.apache.solr.common.params.CoreAdminParams;
    import org.apache.solr.common.params.ModifiableSolrParams;
    import org.apache.solr.common.util.NamedList;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class MyDistributedComponentTest {

        private static MiniSolrCloudCluster miniCluster;
        private static CloudSolrServer cloudSolrServer;

        @BeforeClass
        public static void setup() throws Exception {
            miniCluster = new MiniSolrCloudCluster(2, null, new File("src/main/solr/solr.xml"), null, null);
            uploadConfigToZk("src/main/solr/content/conf/", "content");

            // override settings in the solrconfig include
            System.setProperty("solr.tests.maxBufferedDocs", "100000");
            System.setProperty("solr.tests.maxIndexingThreads", "-1");
            System.setProperty("solr.tests.ramBufferSizeMB", "100");

            // use non-test classes so RandomizedRunner isn't necessary
            System.setProperty("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
            System.setProperty("solr.directoryFactory", "solr.RAMDirectoryFactory");

            cloudSolrServer = new CloudSolrServer(miniCluster.getZkServer().getZkAddress(), false);
            cloudSolrServer.setRequestWriter(new RequestWriter());
            cloudSolrServer.setParser(new XMLResponseParser());
            cloudSolrServer.setDefaultCollection("content");
            cloudSolrServer.setParallelUpdates(false);
            cloudSolrServer.connect();

            createCollection(cloudSolrServer, "content", 2, 1, "content");
        }

        protected static void uploadConfigToZk(String configDir, String configName) throws Exception {
            SolrZkClient zkClient = null;
            try {
                zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(), 10000, 45000, null);
                uploadConfigFileToZk(zkClient, configName, "solrconfig.xml", new File(configDir, "solrconfig.xml"));
                uploadConfigFileToZk(zkClient, configName, "schema.xml", new File(configDir, "schema.xml"));
                uploadConfigFileToZk(zkClient, configName, "stopwords_en.txt", new File(configDir, "stopwords_en.txt"));
                uploadConfigFileToZk(zkClient, configName, "stopwords_it.txt", new File(configDir, "stopwords_it.txt"));
                System.out.println(zkClient.getChildren(ZkController.CONFIGS_ZKNODE + "/" + configName, null, true));
            } finally {
                if (zkClient != null) {
                    zkClient.close();
                }
            }
        }

        protected static void uploadConfigFileToZk(SolrZkClient zkClient, String configName, String nameInZk, File file) throws Exception {
            zkClient.makePath(ZkController.CONFIGS_ZKNODE + "/" + configName + "/" + nameInZk, file, false, true);
        }

        @AfterClass
        public static void shutDown() throws Exception {
            miniCluster.shutdown();
        }

        protected static NamedList createCollection(CloudSolrServer server, String name, int numShards, int replicationFactor, String configName) throws Exception {
            ModifiableSolrParams modParams = new ModifiableSolrParams();
            modParams.set(CoreAdminParams.ACTION, CollectionAction.CREATE.name());
            modParams.set("name", name);
            modParams.set("numShards", numShards);
            modParams.set("replicationFactor", replicationFactor);
            modParams.set("collection.configName", configName);
            QueryRequest request = new QueryRequest(modParams);
            request.setPath("/admin/collections");
            return server.request(request);
        }

        @Test
        public void test() throws Exception {
            // Do your stuff here, using cloudSolrServer as a normal SolrServer
        }
    }
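To address the original question about assertions: inside that test method you can index through cloudSolrServer and assert directly on the distributed response. A minimal sketch (the "mycomponent" request parameter and the field names are hypothetical; SolrInputDocument, SolrQuery and QueryResponse are the usual SolrJ classes, and the asserts are the static org.junit.Assert methods):

    @Test
    public void customComponentWorksInDistributedMode() throws Exception {
        // index a couple of documents through the CloudSolrServer
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", "1");
        doc.addField("title", "first document");
        cloudSolrServer.add(doc);
        cloudSolrServer.commit();

        // query with the custom component enabled (parameter name is hypothetical)
        SolrQuery query = new SolrQuery("*:*");
        query.set("mycomponent", "true");
        QueryResponse response = cloudSolrServer.query(query);

        // explicit assertions against the distributed response
        assertEquals(1, response.getResults().getNumFound());
        assertNotNull(response.getResponse().get("mycomponent"));
    }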