package io.ebean;

import io.ebean.docstore.DocQueryContext;
import io.ebean.docstore.RawDoc;

import javax.annotation.Nullable;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Predicate;

/**
 * Document storage operations.
 */
public interface DocumentStore {

  /**
   * Update the associated document store using the result of the query.
   * <p>
   * This will execute the query against the database, creating a document for each
   * bean graph and sending it to the document store.
   * </p>
   * <p>
   * Note that the select and fetch paths of the query are set for you to match the
   * document structure needed based on <code>@DocStore</code> and <code>@DocStoreEmbedded</code>,
   * so this query only needs to supply the predicates.
   * </p>
   * <p>
   * This query will be executed using findEach so it is safe to use a query
   * that will fetch a lot of beans. The default bulkBatchSize is used.
   * </p>
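   * <p>
   * For example (a minimal sketch; the {@code Customer} bean and its
   * {@code status} property are assumed to be mapped with {@code @DocStore}):
   * </p>
   * <pre>{@code
   *
   *   // index the customers matching the predicates
   *   Query<Customer> query = database.find(Customer.class)
   *     .where().eq("status", Customer.Status.NEW)
   *     .query();
   *
   *   documentStore.indexByQuery(query);
   *
   * }</pre>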
   *
   * @param query The query that selects the objects to send to the document store.
   */
  <T> void indexByQuery(Query<T> query);

  /**
   * Update the associated document store index using the result of the query, additionally
   * specifying a bulkBatchSize to use when sending the messages to ElasticSearch.
   *
   * @param query         The query that selects the objects to send to the document store.
   * @param bulkBatchSize The batch size to use when bulk sending to the document store.
   */
  <T> void indexByQuery(Query<T> query, int bulkBatchSize);

  /**
   * Update the document store for all beans of this type.
   * <p>
   * This is the same as indexByQuery where the query has no predicates and so fetches all rows.
   * </p>
   */
  void indexAll(Class<?> beanType);

  /**
   * Return the bean by fetching its content from the document store.
   * If the document is not found, null is returned.
   * <p>
   * Typically this is called indirectly by findOne() on the query.
   * </p>
   * <pre>{@code
   *
   *   Customer customer =
   *     database.find(Customer.class)
   *       .setUseDocStore(true)
   *       .setId(42)
   *       .findOne();
   *
   * }</pre>
   */
  @Nullable
  <T> T find(DocQueryContext<T> request);

  /**
   * Execute the find list query. This request is prepared to execute secondary queries.
   * <p>
   * Typically this is called indirectly by findList() on the query that has setUseDocStore(true).
   * </p>
   * <pre>{@code
   *
   *   List<Customer> newCustomers =
   *     database.find(Customer.class)
   *       .setUseDocStore(true)
   *       .where().eq("status", Customer.Status.NEW)
   *       .findList();
   *
   * }</pre>
   */
  <T> List<T> findList(DocQueryContext<T> request);

  /**
   * Execute the query against the document store returning the paged list.
   * <p>
   * The query should have <code>firstRow</code> or <code>maxRows</code> set prior to calling this method.
   * </p>
   * <p>
   * Typically this is called indirectly by findPagedList() on the query that has setUseDocStore(true).
   * </p>
   * <pre>{@code
   *
   *   PagedList<Customer> newCustomers =
   *     database.find(Customer.class)
   *       .setUseDocStore(true)
   *       .where().eq("status", Customer.Status.NEW)
   *       .setMaxRows(50)
   *       .findPagedList();
   *
   * }</pre>
   */
  <T> PagedList<T> findPagedList(DocQueryContext<T> request);

  /**
   * Execute the query against the document store with the expectation of a large set of results
   * that are processed in a scrolling resultSet fashion.
   * <p>
   * For example, with the ElasticSearch doc store this uses SCROLL.
   * </p>
   * <p>
   * Typically this is called indirectly by findEach() on the query that has setUseDocStore(true).
   * </p>
   * <pre>{@code
   *
   *   database.find(Order.class)
   *     .setUseDocStore(true)
   *     .where()... // perhaps add predicates
   *     .findEach((Order order) -> {
   *       // process the bean ...
   *     });
   *
   * }</pre>
   */
  <T> void findEach(DocQueryContext<T> query, Consumer<T> consumer);

  /**
   * Execute the query against the document store with the expectation of a large set of results
   * that are processed in a scrolling resultSet fashion.
   * <p>
   * Unlike findEach() this provides the opportunity to stop iterating through the large query.
   * </p>
   * <p>
   * For example, with the ElasticSearch doc store this uses SCROLL.
   * </p>
   * <p>
   * Typically this is called indirectly by findEachWhile() on the query that has setUseDocStore(true).
   * </p>
   * <pre>{@code
   *
   *   database.find(Order.class)
   *     .setUseDocStore(true)
   *     .where()... // perhaps add predicates
   *     .findEachWhile((Order order) -> {
   *       // process the bean ...
   *
   *       // return true to continue, false to stop
   *       boolean shouldContinue = ...;
   *       return shouldContinue;
   *     });
   *
   * }</pre>
   */
  <T> void findEachWhile(DocQueryContext<T> query, Predicate<T> consumer);

  /**
   * Find each processing raw documents.
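   * <p>
   * A sketch of raw use; the index name/type and the query JSON below are
   * illustrative only:
   * </p>
   * <pre>{@code
   *
   *   String rawQuery = "{\"query\":{\"match_all\":{}}}";
   *
   *   documentStore.findEach("product_v1/product", rawQuery, doc -> {
   *     // process each raw document
   *   });
   *
   * }</pre>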
   *
   * @param indexNameType The full index name and type
   * @param rawQuery      The query to execute
   * @param consumer      Consumer to process each document
   */
  void findEach(String indexNameType, String rawQuery, Consumer<RawDoc> consumer);

  /**
   * Find each processing raw documents, stopping when the predicate returns false.
   *
   * @param indexNameType The full index name and type
   * @param rawQuery      The query to execute
   * @param consumer      Consumer to process each document until false is returned
   */
  void findEachWhile(String indexNameType, String rawQuery, Predicate<RawDoc> consumer);

  /**
   * Process the queue entries, sending updates to the document store or queuing them for later processing.
   */
  long process(List<DocStoreQueueEntry> queueEntries) throws IOException;

  /**
   * Drop the index from the document store (similar to DDL drop table).
   * <pre>{@code
   *
   *   DocumentStore documentStore = database.docStore();
   *
   *   documentStore.dropIndex("product_copy");
   *
   * }</pre>
   */
  void dropIndex(String indexName);

  /**
   * Create an index given a mapping file as a resource in the classPath (similar to DDL create table).
   * <pre>{@code
   *
   *   DocumentStore documentStore = database.docStore();
   *
   *   // uses the product_copy.mapping.json resource
   *   // to define mappings for the index
   *   documentStore.createIndex("product_copy", null);
   *
   * }</pre>
   *
   * @param indexName the name of the new index
   * @param alias     the alias of the index
   */
  void createIndex(String indexName, String alias);

  /**
   * Modify the settings on an index.
   * <p>
   * For example, this can be used to set the ElasticSearch refresh_interval
   * on an index before a bulk update.
   * </p>
   * <pre>{@code
   *
   *   // refresh_interval -1 ... disable refresh while bulk loading
   *
   *   Map<String,Object> settings = new LinkedHashMap<>();
   *   settings.put("refresh_interval", "-1");
   *
   *   documentStore.indexSettings("product", settings);
   *
   * }</pre>
   * <pre>{@code
   *
   *   // refresh_interval 1s ... restore after bulk loading
   *
   *   Map<String,Object> settings = new LinkedHashMap<>();
   *   settings.put("refresh_interval", "1s");
   *
   *   documentStore.indexSettings("product", settings);
   *
   * }</pre>
   *
   * @param indexName the name of the index to update settings on
   * @param settings  the settings to set on the index
   */
  void indexSettings(String indexName, Map<String, Object> settings);

  /**
   * Copy the index to a new index.
   * <p>
   * This copy process does not use the database but instead copies from the source index to a destination index.
   * </p>
   * <pre>{@code
   *
   *   long copyCount = documentStore.copyIndex(Product.class, "product_copy");
   *
   * }</pre>
   *
   * @param beanType The bean type of the source index
   * @param newIndex The name of the index to copy to
   * @return the number of documents copied to the new index
   */
  long copyIndex(Class<?> beanType, String newIndex);

  /**
   * Copy entries from an index to a new index, limiting to documents that have been
   * modified since the sinceEpochMillis time.
   * <p>
   * To support this the document needs to have a <code>@WhenModified</code> property.
   * </p>
   * <pre>{@code
   *
   *   long copyCount = documentStore.copyIndex(Product.class, "product_copy", sinceMillis);
   *
   * }</pre>
   *
   * @param beanType         The bean type of the source index
   * @param newIndex         The name of the index to copy to
   * @param sinceEpochMillis Only documents modified after this epoch millis time are copied
   * @return the number of documents copied to the new index
   */
  long copyIndex(Class<?> beanType, String newIndex, long sinceEpochMillis);

  /**
   * Copy from a source index to a new index taking only the documents
   * matching the given query.
   * <pre>{@code
   *
   *   // predicates to select the source documents to copy
   *   Query<Product> query = database.find(Product.class)
   *     .where()
   *       .ge("whenModified", new Timestamp(since))
   *       .ge("name", "A")
   *       .lt("name", "D")
   *       .query();
   *
   *   // copy from the source index to "product_copy" index
   *   long copyCount = documentStore.copyIndex(query, "product_copy", 1000);
   *
   * }</pre>
   *
   * @param query         The query to select the source documents to copy
   * @param newIndex      The target index to copy the documents to
   * @param bulkBatchSize The ElasticSearch bulk batch size, if 0 uses the default
   * @return The number of documents copied to the new index
   */
  long copyIndex(Query<?> query, String newIndex, int bulkBatchSize);
}